Merge potential next release v0.4 (#187)

Breaking changes:

* First draft of the new n-dimensional arrays, plus a Naive Bayes (NB) use case
* Improves the default implementations of multiple Array methods
* Refactors tree methods
* Adds matrix decomposition routines
* Adds matrix decomposition methods to ndarray and nalgebra bindings
* Refactoring: linear regression now uses Array2
* Ridge & Linear regression
* LBFGS optimizer & logistic regression
* Ports linear methods, metrics, and model selection methods to the new n-dimensional arrays
* Switches KNN and clustering algorithms to new n-d array layer
* Refactors distance metrics
* Optimizes KNN and clustering methods
* Refactors metrics module
* Switches decomposition methods to n-dimensional arrays
* Linalg refactoring - cleanup rng merge (#172)
* Remove the legacy DenseMatrix and BaseMatrix implementations. Port the new Number, FloatNumber, and Array implementations into the module structure.
* Exclude AUC metrics; they need reimplementation
* Improve the developers' walkthrough

The new traits system is in place in `src/numbers` and `src/linalg`; a minimal usage sketch follows below.
Co-authored-by: Lorenzo <tunedconsulting@gmail.com>
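A minimal sketch of the new API surface, assuming nothing beyond what the file diff below shows (the module paths, `from_2d_array`, tuple indexing, and `Vec<f64>` as a target type); the data rows are a slice of the Longley values used in the tests:

```rust
use smartcore::linalg::basic::arrays::Array;
use smartcore::linalg::basic::matrix::DenseMatrix;
use smartcore::linear::ridge_regression::{RidgeRegression, RidgeRegressionParameters};

fn main() {
    // DenseMatrix now lives under linalg::basic, and estimators are generic over
    // (TX, TY, X: Array2<TX>, Y: Array1<TY>), so a plain Vec<f64> works as `y`.
    let x = DenseMatrix::from_2d_array(&[
        &[234.289, 235.6, 159.0],
        &[259.426, 232.5, 145.6],
        &[258.054, 368.2, 161.6],
        &[284.599, 335.1, 165.0],
    ]);
    let y: Vec<f64> = vec![83.0, 88.5, 88.2, 89.5];

    // Element access now takes a tuple index and returns a reference.
    let _first = *x.get((0, 0));

    let ridge = RidgeRegression::fit(&x, &y, RidgeRegressionParameters::default()).unwrap();
    let y_hat = ridge.predict(&x).unwrap();
    println!("{:?}", y_hat);
}
```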

* Provide `SupervisedEstimator` with a constructor to avoid explicit dynamic `Box` allocation in `cross_validate` and `cross_validate_predict`, as required by the use of `dyn` in Rust 2021 (see the sketch after this list)
* Implement getters that use `as_ref()` in `src/neighbors`
* Implement getters that use `as_ref()` in `src/naive_bayes`
* Implement getters that use `as_ref()` in `src/linear`
* Add `Clone` to `src/naive_bayes`
* Change the signatures of `cross_validate` and other `model_selection` functions to comply with the use of `dyn` in Rust 2021
* Implement `ndarray-bindings`. Remove FloatNumber from implementations
* Drop `nalgebra-bindings` support (as decided in the conf call, in favor of ndarray)
* Remove benches; they will have their own repo at smartcore-benches
* Implement SVC
* Implement SVC serialization. Move search parameters into a dedicated module
* Implement SVR. Definitely too slow
* Fix compilation issues for wasm (#202)
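On the two `model_selection` bullets above: a sketch of the constructor pattern, with simplified signatures (the trait and `cross_validate` here are illustrative stand-ins, not the exact smartcore definitions):

```rust
// Giving the estimator trait an associated constructor lets generic code name
// the concrete estimator as a type parameter and build values itself, instead
// of callers handing in boxed `dyn` trait objects (noisier under Rust 2021).
trait SupervisedEstimator<X, Y, P>: Sized {
    fn new() -> Self; // added in this release
    fn fit(x: &X, y: &Y, parameters: P) -> Result<Self, String>;
}

fn cross_validate<X, Y, P: Clone, E: SupervisedEstimator<X, Y, P>>(
    x: &X,
    y: &Y,
    parameters: P,
    folds: usize,
) -> Result<Vec<E>, String> {
    let mut models = Vec::with_capacity(folds);
    for _ in 0..folds {
        // An un-fitted placeholder is now constructible without `Box<dyn ...>`;
        // fitting produces the model that is actually kept.
        let _unfitted = E::new();
        models.push(E::fit(x, y, parameters.clone())?);
    }
    Ok(models)
}
```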

Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
* Fix tests (#203)

* Port linalg/traits/stats.rs
* Improve method naming
* Improve Display for DenseMatrix

Co-authored-by: Montana Low <montanalow@users.noreply.github.com>
Co-authored-by: VolodymyrOrlov <volodymyr.orlov@gmail.com>
Author:    Lorenzo
Date:      2022-10-31 10:44:57 +00:00
Committer: GitHub
Commit:    52eb6ce023 (parent bb71656137)
110 changed files with 10327 additions and 9107 deletions

Changes to the file shown below: +152 -90
@@ -19,7 +19,7 @@
 //! Example:
 //!
 //! ```
-//! use smartcore::linalg::naive::dense_matrix::*;
+//! use smartcore::linalg::basic::matrix::DenseMatrix;
 //! use smartcore::linear::ridge_regression::*;
 //!
 //! // Longley dataset (https://www.statsmodels.org/stable/datasets/generated/longley.html)
@@ -57,15 +57,18 @@
 //! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
 //! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
 
 use std::fmt::Debug;
+use std::marker::PhantomData;
 
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
 use crate::api::{Predictor, SupervisedEstimator};
 use crate::error::Failed;
-use crate::linalg::BaseVector;
-use crate::linalg::Matrix;
-use crate::math::num::RealNumber;
+use crate::linalg::basic::arrays::{Array1, Array2};
+use crate::linalg::traits::cholesky::CholeskyDecomposable;
+use crate::linalg::traits::svd::SVDDecomposable;
+use crate::numbers::basenum::Number;
+use crate::numbers::realnum::RealNumber;
 
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[derive(Debug, Clone, Eq, PartialEq)]
@@ -86,7 +89,7 @@ impl Default for RidgeRegressionSolverName {
 /// Ridge Regression parameters
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[derive(Debug, Clone)]
-pub struct RidgeRegressionParameters<T: RealNumber> {
+pub struct RidgeRegressionParameters<T: Number + RealNumber> {
     /// Solver to use for estimation of regression coefficients.
     pub solver: RidgeRegressionSolverName,
     /// Controls the strength of the penalty to the loss function.
@@ -99,7 +102,7 @@ pub struct RidgeRegressionParameters<T: RealNumber> {
 /// Ridge Regression grid search parameters
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[derive(Debug, Clone)]
-pub struct RidgeRegressionSearchParameters<T: RealNumber> {
+pub struct RidgeRegressionSearchParameters<T: Number + RealNumber> {
     #[cfg_attr(feature = "serde", serde(default))]
     /// Solver to use for estimation of regression coefficients.
     pub solver: Vec<RidgeRegressionSolverName>,
@@ -113,14 +116,14 @@ pub struct RidgeRegressionSearchParameters<T: RealNumber> {
 }
 
 /// Ridge Regression grid search iterator
-pub struct RidgeRegressionSearchParametersIterator<T: RealNumber> {
+pub struct RidgeRegressionSearchParametersIterator<T: Number + RealNumber> {
     ridge_regression_search_parameters: RidgeRegressionSearchParameters<T>,
     current_solver: usize,
     current_alpha: usize,
     current_normalize: usize,
 }
 
-impl<T: RealNumber> IntoIterator for RidgeRegressionSearchParameters<T> {
+impl<T: Number + RealNumber> IntoIterator for RidgeRegressionSearchParameters<T> {
     type Item = RidgeRegressionParameters<T>;
     type IntoIter = RidgeRegressionSearchParametersIterator<T>;
@@ -134,7 +137,7 @@ impl<T: RealNumber> IntoIterator for RidgeRegressionSearchParameters<T> {
     }
 }
 
-impl<T: RealNumber> Iterator for RidgeRegressionSearchParametersIterator<T> {
+impl<T: Number + RealNumber> Iterator for RidgeRegressionSearchParametersIterator<T> {
     type Item = RidgeRegressionParameters<T>;
 
     fn next(&mut self) -> Option<Self::Item> {
@@ -171,7 +174,7 @@ impl<T: RealNumber> Iterator for RidgeRegressionSearchParametersIterator<T> {
     }
 }
 
-impl<T: RealNumber> Default for RidgeRegressionSearchParameters<T> {
+impl<T: Number + RealNumber> Default for RidgeRegressionSearchParameters<T> {
     fn default() -> Self {
         let default_params = RidgeRegressionParameters::default();
@@ -186,13 +189,20 @@ impl<T: RealNumber> Default for RidgeRegressionSearchParameters<T> {
 /// Ridge regression
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[derive(Debug)]
-pub struct RidgeRegression<T: RealNumber, M: Matrix<T>> {
-    coefficients: M,
-    intercept: T,
-    _solver: RidgeRegressionSolverName,
+pub struct RidgeRegression<
+    TX: Number + RealNumber,
+    TY: Number,
+    X: Array2<TX> + CholeskyDecomposable<TX> + SVDDecomposable<TX>,
+    Y: Array1<TY>,
+> {
+    coefficients: Option<X>,
+    intercept: Option<TX>,
+    solver: Option<RidgeRegressionSolverName>,
+    _phantom_ty: PhantomData<TY>,
+    _phantom_y: PhantomData<Y>,
 }
 
-impl<T: RealNumber> RidgeRegressionParameters<T> {
+impl<T: Number + RealNumber> RidgeRegressionParameters<T> {
     /// Regularization parameter.
     pub fn with_alpha(mut self, alpha: T) -> Self {
         self.alpha = alpha;
@@ -210,51 +220,84 @@ impl<T: RealNumber> RidgeRegressionParameters<T> {
     }
 }
 
-impl<T: RealNumber> Default for RidgeRegressionParameters<T> {
+impl<T: Number + RealNumber> Default for RidgeRegressionParameters<T> {
     fn default() -> Self {
         RidgeRegressionParameters {
             solver: RidgeRegressionSolverName::default(),
-            alpha: T::one(),
+            alpha: T::from_f64(1.0).unwrap(),
             normalize: true,
         }
     }
 }
 
-impl<T: RealNumber, M: Matrix<T>> PartialEq for RidgeRegression<T, M> {
+impl<
+        TX: Number + RealNumber,
+        TY: Number,
+        X: Array2<TX> + CholeskyDecomposable<TX> + SVDDecomposable<TX>,
+        Y: Array1<TY>,
+    > PartialEq for RidgeRegression<TX, TY, X, Y>
+{
     fn eq(&self, other: &Self) -> bool {
-        self.coefficients == other.coefficients
-            && (self.intercept - other.intercept).abs() <= T::epsilon()
+        self.intercept() == other.intercept()
+            && self.coefficients().shape() == other.coefficients().shape()
+            && self
+                .coefficients()
+                .iterator(0)
+                .zip(other.coefficients().iterator(0))
+                .all(|(&a, &b)| (a - b).abs() <= TX::epsilon())
     }
 }
 
-impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, RidgeRegressionParameters<T>>
-    for RidgeRegression<T, M>
+impl<
+        TX: Number + RealNumber,
+        TY: Number,
+        X: Array2<TX> + CholeskyDecomposable<TX> + SVDDecomposable<TX>,
+        Y: Array1<TY>,
+    > SupervisedEstimator<X, Y, RidgeRegressionParameters<TX>> for RidgeRegression<TX, TY, X, Y>
 {
-    fn fit(
-        x: &M,
-        y: &M::RowVector,
-        parameters: RidgeRegressionParameters<T>,
-    ) -> Result<Self, Failed> {
+    fn new() -> Self {
+        Self {
+            coefficients: Option::None,
+            intercept: Option::None,
+            solver: Option::None,
+            _phantom_ty: PhantomData,
+            _phantom_y: PhantomData,
+        }
+    }
+
+    fn fit(x: &X, y: &Y, parameters: RidgeRegressionParameters<TX>) -> Result<Self, Failed> {
         RidgeRegression::fit(x, y, parameters)
     }
 }
 
-impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for RidgeRegression<T, M> {
-    fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
+impl<
+        TX: Number + RealNumber,
+        TY: Number,
+        X: Array2<TX> + CholeskyDecomposable<TX> + SVDDecomposable<TX>,
+        Y: Array1<TY>,
+    > Predictor<X, Y> for RidgeRegression<TX, TY, X, Y>
+{
+    fn predict(&self, x: &X) -> Result<Y, Failed> {
         self.predict(x)
     }
 }
 
-impl<T: RealNumber, M: Matrix<T>> RidgeRegression<T, M> {
+impl<
+        TX: Number + RealNumber,
+        TY: Number,
+        X: Array2<TX> + CholeskyDecomposable<TX> + SVDDecomposable<TX>,
+        Y: Array1<TY>,
+    > RidgeRegression<TX, TY, X, Y>
+{
     /// Fits ridge regression to your data.
     /// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
     /// * `y` - target values
     /// * `parameters` - other parameters, use `Default::default()` to set parameters to default values.
     pub fn fit(
-        x: &M,
-        y: &M::RowVector,
-        parameters: RidgeRegressionParameters<T>,
-    ) -> Result<RidgeRegression<T, M>, Failed> {
+        x: &X,
+        y: &Y,
+        parameters: RidgeRegressionParameters<TX>,
+    ) -> Result<RidgeRegression<TX, TY, X, Y>, Failed> {
         //w = inv(X^t X + alpha*Id) * X.T y
 
         let (n, p) = x.shape();
@@ -265,11 +308,16 @@ impl<T: RealNumber, M: Matrix<T>> RidgeRegression<T, M> {
             ));
         }
 
-        if y.len() != n {
+        if y.shape() != n {
             return Err(Failed::fit("Number of rows in X should = len(y)"));
         }
 
-        let y_column = M::from_row_vector(y.clone()).transpose();
+        let y_column = X::from_iterator(
+            y.iterator(0).map(|&v| TX::from(v).unwrap()),
+            y.shape(),
+            1,
+            0,
+        );
 
         let (w, b) = if parameters.normalize {
             let (scaled_x, col_mean, col_std) = Self::rescale_x(x)?;
@@ -278,7 +326,7 @@ impl<T: RealNumber, M: Matrix<T>> RidgeRegression<T, M> {
             let mut x_t_x = x_t.matmul(&scaled_x);
 
             for i in 0..p {
-                x_t_x.add_element_mut(i, i, parameters.alpha);
+                x_t_x.add_element_mut((i, i), parameters.alpha);
             }
 
             let mut w = match parameters.solver {
@@ -287,16 +335,16 @@ impl<T: RealNumber, M: Matrix<T>> RidgeRegression<T, M> {
             };
 
             for (i, col_std_i) in col_std.iter().enumerate().take(p) {
-                w.set(i, 0, w.get(i, 0) / *col_std_i);
+                w.set((i, 0), *w.get((i, 0)) / *col_std_i);
             }
 
-            let mut b = T::zero();
+            let mut b = TX::zero();
 
             for (i, col_mean_i) in col_mean.iter().enumerate().take(p) {
-                b += w.get(i, 0) * *col_mean_i;
+                b += *w.get((i, 0)) * *col_mean_i;
             }
 
-            let b = y.mean() - b;
+            let b = TX::from_f64(y.mean_by()).unwrap() - b;
 
             (w, b)
         } else {
@@ -305,7 +353,7 @@ impl<T: RealNumber, M: Matrix<T>> RidgeRegression<T, M> {
             let mut x_t_x = x_t.matmul(x);
 
             for i in 0..p {
-                x_t_x.add_element_mut(i, i, parameters.alpha);
+                x_t_x.add_element_mut((i, i), parameters.alpha);
             }
 
             let w = match parameters.solver {
@@ -313,22 +361,32 @@ impl<T: RealNumber, M: Matrix<T>> RidgeRegression<T, M> {
                 RidgeRegressionSolverName::SVD => x_t_x.svd_solve_mut(x_t_y)?,
             };
 
-            (w, T::zero())
+            (w, TX::zero())
         };
 
         Ok(RidgeRegression {
-            intercept: b,
-            coefficients: w,
-            _solver: parameters.solver,
+            intercept: Some(b),
+            coefficients: Some(w),
+            solver: Some(parameters.solver),
+            _phantom_ty: PhantomData,
+            _phantom_y: PhantomData,
         })
     }
 
-    fn rescale_x(x: &M) -> Result<(M, Vec<T>, Vec<T>), Failed> {
-        let col_mean = x.mean(0);
-        let col_std = x.std(0);
+    fn rescale_x(x: &X) -> Result<(X, Vec<TX>, Vec<TX>), Failed> {
+        let col_mean: Vec<TX> = x
+            .mean_by(0)
+            .iter()
+            .map(|&v| TX::from_f64(v).unwrap())
+            .collect();
+        let col_std: Vec<TX> = x
+            .std_dev(0)
+            .iter()
+            .map(|&v| TX::from_f64(v).unwrap())
+            .collect();
 
         for (i, col_std_i) in col_std.iter().enumerate() {
-            if (*col_std_i - T::zero()).abs() < T::epsilon() {
+            if (*col_std_i - TX::zero()).abs() < TX::epsilon() {
                 return Err(Failed::fit(&format!(
                     "Cannot rescale constant column {}",
                     i
@@ -343,28 +401,31 @@ impl<T: RealNumber, M: Matrix<T>> RidgeRegression<T, M> {
     /// Predict target values from `x`
     /// * `x` - _KxM_ data where _K_ is number of observations and _M_ is number of features.
-    pub fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
+    pub fn predict(&self, x: &X) -> Result<Y, Failed> {
         let (nrows, _) = x.shape();
-        let mut y_hat = x.matmul(&self.coefficients);
-        y_hat.add_mut(&M::fill(nrows, 1, self.intercept));
-        Ok(y_hat.transpose().to_row_vector())
+        let mut y_hat = x.matmul(self.coefficients());
+        y_hat.add_mut(&X::fill(nrows, 1, self.intercept.unwrap()));
+        Ok(Y::from_iterator(
+            y_hat.iterator(0).map(|&v| TY::from(v).unwrap()),
+            nrows,
+        ))
     }
 
     /// Get estimates regression coefficients
-    pub fn coefficients(&self) -> &M {
-        &self.coefficients
+    pub fn coefficients(&self) -> &X {
+        self.coefficients.as_ref().unwrap()
     }
 
     /// Get estimate of intercept
-    pub fn intercept(&self) -> T {
-        self.intercept
+    pub fn intercept(&self) -> &TX {
+        self.intercept.as_ref().unwrap()
     }
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::linalg::naive::dense_matrix::*;
+    use crate::linalg::basic::matrix::DenseMatrix;
     use crate::metrics::mean_absolute_error;
 
     #[test]
@@ -438,39 +499,40 @@ mod tests {
         assert!(mean_absolute_error(&y_hat_svd, &y) < 2.0);
     }
 
-    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
-    #[test]
-    #[cfg(feature = "serde")]
-    fn serde() {
-        let x = DenseMatrix::from_2d_array(&[
-            &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
-            &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
-            &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
-            &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
-            &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
-            &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
-            &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
-            &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
-            &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
-            &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
-            &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
-            &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
-            &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
-            &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
-            &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
-            &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
-        ]);
+    // TODO: implement serialization for new DenseMatrix
+    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
+    // #[test]
+    // #[cfg(feature = "serde")]
+    // fn serde() {
+    //     let x = DenseMatrix::from_2d_array(&[
+    //         &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
+    //         &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
+    //         &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
+    //         &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
+    //         &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
+    //         &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
+    //         &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
+    //         &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
+    //         &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
+    //         &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
+    //         &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
+    //         &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
+    //         &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
+    //         &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
+    //         &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
+    //         &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
+    //     ]);
 
-        let y = vec![
-            83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
-            114.2, 115.7, 116.9,
-        ];
+    //     let y = vec![
+    //         83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+    //         114.2, 115.7, 116.9,
+    //     ];
 
-        let lr = RidgeRegression::fit(&x, &y, Default::default()).unwrap();
+    //     let lr = RidgeRegression::fit(&x, &y, Default::default()).unwrap();
 
-        let deserialized_lr: RidgeRegression<f64, DenseMatrix<f64>> =
-            serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
+    //     let deserialized_lr: RidgeRegression<f64, f64, DenseMatrix<f64>, Vec<f64>> =
+    //         serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
 
-        assert_eq!(lr, deserialized_lr);
-    }
+    //     assert_eq!(lr, deserialized_lr);
+    // }
 }
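For reference, the math behind the `fit` hunks above; this restates the `//w = inv(X^t X + alpha*Id) * X.T y` comment and the `normalize` branch of the code, it adds no behaviour:

```latex
% Ridge solves the regularized normal equations
% (via Cholesky or SVD, depending on parameters.solver):
\hat{w} = (X^{\top} X + \alpha I)^{-1} X^{\top} y

% With normalize = true the solve runs on standardized columns
% x'_{ij} = (x_{ij} - \bar{x}_j) / s_j, and fit() maps the result back:
w_j = w'_j / s_j, \qquad b = \bar{y} - \sum_{j=1}^{p} w_j \, \bar{x}_j

% Without normalization the intercept is fixed at b = 0.
```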