Merge potential next release v0.4 (#187): Breaking Changes

* First draft of the new n-dimensional arrays + Naive Bayes (NB) use case
* Improves default implementation of multiple Array methods
* Refactors tree methods
* Adds matrix decomposition routines
* Adds matrix decomposition methods to ndarray and nalgebra bindings
* Refactoring + linear regression now uses `Array2`
* Ridge & Linear regression
* LBFGS optimizer & logistic regression
* Changes linear methods, metrics, and model selection to the new n-dimensional arrays
* Switches KNN and clustering algorithms to new n-d array layer
* Refactors distance metrics
* Optimizes KNN and clustering methods
* Refactors metrics module
* Switches decomposition methods to n-dimensional arrays
* Linalg refactoring - cleanup rng merge (#172)
* Remove legacy DenseMatrix and BaseMatrix implementations. Port the new Number, FloatNumber and Array implementations into the module structure.
* Exclude AUC metrics. Needs reimplementation
* Improve the developer walkthrough

New traits system in place at `src/numbers` and `src/linalg`
Co-authored-by: Lorenzo <tunedconsulting@gmail.com>
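For orientation, here is a minimal sketch (not code from this PR) of how generic code looks against the new traits. `shape()` and the tuple-indexed `get()` are used exactly as in the diff below, while `trace` itself is a hypothetical helper:

```rust
use smartcore::linalg::basic::arrays::Array2;
use smartcore::numbers::floatnum::FloatNumber;

// Hypothetical helper: sum the main diagonal of any Array2 implementation.
// The bound is the new trait layer rather than the removed BaseMatrix.
fn trace<T: FloatNumber, M: Array2<T>>(m: &M) -> T {
    let (nrows, ncols) = m.shape();
    let mut t = T::zero();
    for i in 0..nrows.min(ncols) {
        t = t + *m.get((i, i)); // tuple-indexed `get`, per the new Array2 API
    }
    t
}
```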

* Provide `SupervisedEstimator` with a constructor, avoiding the explicit dynamic `Box` allocation in `cross_validate` and `cross_validate_predict` that the use of `dyn` required under Rust 2021 (see the trait sketch after this list)
* Implement getters to use as_ref() in src/neighbors
* Implement getters to use as_ref() in src/naive_bayes
* Implement getters to use as_ref() in src/linear
* Add Clone to src/naive_bayes
* Change signatures of `cross_validate` and other model_selection functions to abide by the use of `dyn` in Rust 2021
* Implement ndarray-bindings. Remove FloatNumber from implementations
* Drop nalgebra-bindings support (as decided in conf-call to go for ndarray)
* Remove benches. Benches will have their own repo at smartcore-benches
* Implement SVC
* Implement SVC serialization. Move search parameters in dedicated module
* Implement SVR. Definitely too slow
* Fix compilation issues for wasm (#202)

Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
* Fix tests (#203)

* Port linalg/traits/stats.rs
* Improve method naming
* Improve Display for DenseMatrix
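The `SupervisedEstimator` constructor change mentioned above boils down to the following shape. This is a simplified sketch reconstructed from the `impl` in the diff below, not the verbatim trait definition:

```rust
use crate::error::Failed;

// With `new()`, model selection code can hold a concrete, unfitted estimator
// (all fitted state `None`) instead of a `Box<dyn SupervisedEstimator<..>>`,
// which Rust 2021's `dyn` handling made awkward in `cross_validate`.
pub trait SupervisedEstimator<X, Y, P>: Sized {
    /// Construct an empty, unfitted estimator.
    fn new() -> Self;
    /// Fit the estimator to `x`/`y` with the given parameters.
    fn fit(x: &X, y: &Y, parameters: P) -> Result<Self, Failed>;
}
```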

Co-authored-by: Montana Low <montanalow@users.noreply.github.com>
Co-authored-by: VolodymyrOrlov <volodymyr.orlov@gmail.com>
Commit 52eb6ce023 (parent bb71656137), authored by Lorenzo, committed via GitHub on 2022-10-31 10:44:57 +00:00. 110 changed files with 10327 additions and 9107 deletions.
src/linear/elastic_net.rs: +169 -116
@@ -17,7 +17,7 @@
 //! Example:
 //!
 //! ```
-//! use smartcore::linalg::naive::dense_matrix::*;
+//! use smartcore::linalg::basic::matrix::DenseMatrix;
 //! use smartcore::linear::elastic_net::*;
 //!
 //! // Longley dataset (https://www.statsmodels.org/stable/datasets/generated/longley.html)
@@ -55,36 +55,38 @@
 //! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
 //! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
 use std::fmt::Debug;
+use std::marker::PhantomData;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 use crate::api::{Predictor, SupervisedEstimator};
 use crate::error::Failed;
-use crate::linalg::BaseVector;
-use crate::linalg::Matrix;
-use crate::math::num::RealNumber;
+use crate::linalg::basic::arrays::{Array, Array1, Array2, MutArray};
+use crate::numbers::basenum::Number;
+use crate::numbers::floatnum::FloatNumber;
+use crate::numbers::realnum::RealNumber;
 use crate::linear::lasso_optimizer::InteriorPointOptimizer;
 /// Elastic net parameters
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[derive(Debug, Clone)]
-pub struct ElasticNetParameters<T: RealNumber> {
+pub struct ElasticNetParameters {
     #[cfg_attr(feature = "serde", serde(default))]
     /// Regularization parameter.
-    pub alpha: T,
+    pub alpha: f64,
     #[cfg_attr(feature = "serde", serde(default))]
     /// The elastic net mixing parameter, with 0 <= l1_ratio <= 1.
     /// For l1_ratio = 0 the penalty is an L2 penalty.
    /// For l1_ratio = 1 it is an L1 penalty. For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
-    pub l1_ratio: T,
+    pub l1_ratio: f64,
     #[cfg_attr(feature = "serde", serde(default))]
     /// If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the standard deviation.
     pub normalize: bool,
     #[cfg_attr(feature = "serde", serde(default))]
     /// The tolerance for the optimization
-    pub tol: T,
+    pub tol: f64,
     #[cfg_attr(feature = "serde", serde(default))]
     /// The maximum number of iterations
     pub max_iter: usize,
@@ -93,21 +95,23 @@ pub struct ElasticNetParameters<T: RealNumber> {
 /// Elastic net
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[derive(Debug)]
-pub struct ElasticNet<T: RealNumber, M: Matrix<T>> {
-    coefficients: M,
-    intercept: T,
+pub struct ElasticNet<TX: FloatNumber + RealNumber, TY: Number, X: Array2<TX>, Y: Array1<TY>> {
+    coefficients: Option<X>,
+    intercept: Option<TX>,
+    _phantom_ty: PhantomData<TY>,
+    _phantom_y: PhantomData<Y>,
 }
-impl<T: RealNumber> ElasticNetParameters<T> {
+impl ElasticNetParameters {
     /// Regularization parameter.
-    pub fn with_alpha(mut self, alpha: T) -> Self {
+    pub fn with_alpha(mut self, alpha: f64) -> Self {
         self.alpha = alpha;
         self
     }
     /// The elastic net mixing parameter, with 0 <= l1_ratio <= 1.
     /// For l1_ratio = 0 the penalty is an L2 penalty.
     /// For l1_ratio = 1 it is an L1 penalty. For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
-    pub fn with_l1_ratio(mut self, l1_ratio: T) -> Self {
+    pub fn with_l1_ratio(mut self, l1_ratio: f64) -> Self {
         self.l1_ratio = l1_ratio;
         self
     }
@@ -117,7 +121,7 @@ impl<T: RealNumber> ElasticNetParameters<T> {
         self
     }
     /// The tolerance for the optimization
-    pub fn with_tol(mut self, tol: T) -> Self {
+    pub fn with_tol(mut self, tol: f64) -> Self {
         self.tol = tol;
         self
     }
@@ -128,13 +132,13 @@ impl<T: RealNumber> ElasticNetParameters<T> {
     }
 }
-impl<T: RealNumber> Default for ElasticNetParameters<T> {
+impl Default for ElasticNetParameters {
     fn default() -> Self {
         ElasticNetParameters {
-            alpha: T::one(),
-            l1_ratio: T::half(),
+            alpha: 1.0,
+            l1_ratio: 0.5,
             normalize: true,
-            tol: T::from_f64(1e-4).unwrap(),
+            tol: 1e-4,
             max_iter: 1000,
         }
     }
@@ -143,29 +147,29 @@ impl<T: RealNumber> Default for ElasticNetParameters<T> {
 /// ElasticNet grid search parameters
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 #[derive(Debug, Clone)]
-pub struct ElasticNetSearchParameters<T: RealNumber> {
+pub struct ElasticNetSearchParameters {
     #[cfg_attr(feature = "serde", serde(default))]
     /// Regularization parameter.
-    pub alpha: Vec<T>,
+    pub alpha: Vec<f64>,
     #[cfg_attr(feature = "serde", serde(default))]
     /// The elastic net mixing parameter, with 0 <= l1_ratio <= 1.
     /// For l1_ratio = 0 the penalty is an L2 penalty.
     /// For l1_ratio = 1 it is an L1 penalty. For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
-    pub l1_ratio: Vec<T>,
+    pub l1_ratio: Vec<f64>,
     #[cfg_attr(feature = "serde", serde(default))]
     /// If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the standard deviation.
     pub normalize: Vec<bool>,
     #[cfg_attr(feature = "serde", serde(default))]
     /// The tolerance for the optimization
-    pub tol: Vec<T>,
+    pub tol: Vec<f64>,
     #[cfg_attr(feature = "serde", serde(default))]
     /// The maximum number of iterations
     pub max_iter: Vec<usize>,
 }
 /// ElasticNet grid search iterator
-pub struct ElasticNetSearchParametersIterator<T: RealNumber> {
-    lasso_regression_search_parameters: ElasticNetSearchParameters<T>,
+pub struct ElasticNetSearchParametersIterator {
+    lasso_regression_search_parameters: ElasticNetSearchParameters,
     current_alpha: usize,
     current_l1_ratio: usize,
     current_normalize: usize,
@@ -173,9 +177,9 @@ pub struct ElasticNetSearchParametersIterator<T: RealNumber> {
     current_max_iter: usize,
 }
-impl<T: RealNumber> IntoIterator for ElasticNetSearchParameters<T> {
-    type Item = ElasticNetParameters<T>;
-    type IntoIter = ElasticNetSearchParametersIterator<T>;
+impl IntoIterator for ElasticNetSearchParameters {
+    type Item = ElasticNetParameters;
+    type IntoIter = ElasticNetSearchParametersIterator;
     fn into_iter(self) -> Self::IntoIter {
         ElasticNetSearchParametersIterator {
@@ -189,8 +193,8 @@ impl<T: RealNumber> IntoIterator for ElasticNetSearchParameters<T> {
     }
 }
-impl<T: RealNumber> Iterator for ElasticNetSearchParametersIterator<T> {
-    type Item = ElasticNetParameters<T>;
+impl Iterator for ElasticNetSearchParametersIterator {
+    type Item = ElasticNetParameters;
     fn next(&mut self) -> Option<Self::Item> {
         if self.current_alpha == self.lasso_regression_search_parameters.alpha.len()
@@ -246,7 +250,7 @@ impl<T: RealNumber> Iterator for ElasticNetSearchParametersIterator<T> {
     }
 }
-impl<T: RealNumber> Default for ElasticNetSearchParameters<T> {
+impl Default for ElasticNetSearchParameters {
     fn default() -> Self {
         let default_params = ElasticNetParameters::default();
@@ -260,49 +264,73 @@ impl<T: RealNumber> Default for ElasticNetSearchParameters<T> {
     }
 }
-impl<T: RealNumber, M: Matrix<T>> PartialEq for ElasticNet<T, M> {
+impl<TX: FloatNumber + RealNumber, TY: Number, X: Array2<TX>, Y: Array1<TY>> PartialEq
+    for ElasticNet<TX, TY, X, Y>
+{
     fn eq(&self, other: &Self) -> bool {
-        self.coefficients == other.coefficients
-            && (self.intercept - other.intercept).abs() <= T::epsilon()
+        if self.intercept() != other.intercept() {
+            return false;
+        }
+        if self.coefficients().shape() != other.coefficients().shape() {
+            return false;
+        }
+        self.coefficients()
+            .iterator(0)
+            .zip(other.coefficients().iterator(0))
+            .all(|(&a, &b)| (a - b).abs() <= TX::epsilon())
     }
 }
-impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, ElasticNetParameters<T>>
-    for ElasticNet<T, M>
+impl<TX: FloatNumber + RealNumber, TY: Number, X: Array2<TX>, Y: Array1<TY>>
+    SupervisedEstimator<X, Y, ElasticNetParameters> for ElasticNet<TX, TY, X, Y>
 {
-    fn fit(x: &M, y: &M::RowVector, parameters: ElasticNetParameters<T>) -> Result<Self, Failed> {
+    fn new() -> Self {
+        Self {
+            coefficients: Option::None,
+            intercept: Option::None,
+            _phantom_ty: PhantomData,
+            _phantom_y: PhantomData,
+        }
+    }
+    fn fit(x: &X, y: &Y, parameters: ElasticNetParameters) -> Result<Self, Failed> {
         ElasticNet::fit(x, y, parameters)
     }
 }
-impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for ElasticNet<T, M> {
-    fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
+impl<TX: FloatNumber + RealNumber, TY: Number, X: Array2<TX>, Y: Array1<TY>> Predictor<X, Y>
+    for ElasticNet<TX, TY, X, Y>
+{
+    fn predict(&self, x: &X) -> Result<Y, Failed> {
         self.predict(x)
     }
 }
-impl<T: RealNumber, M: Matrix<T>> ElasticNet<T, M> {
+impl<TX: FloatNumber + RealNumber, TY: Number, X: Array2<TX>, Y: Array1<TY>>
+    ElasticNet<TX, TY, X, Y>
+{
     /// Fits elastic net regression to your data.
     /// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
     /// * `y` - target values
     /// * `parameters` - other parameters, use `Default::default()` to set parameters to default values.
     pub fn fit(
-        x: &M,
-        y: &M::RowVector,
-        parameters: ElasticNetParameters<T>,
-    ) -> Result<ElasticNet<T, M>, Failed> {
+        x: &X,
+        y: &Y,
+        parameters: ElasticNetParameters,
+    ) -> Result<ElasticNet<TX, TY, X, Y>, Failed> {
         let (n, p) = x.shape();
-        if y.len() != n {
+        if y.shape() != n {
            return Err(Failed::fit("Number of rows in X should = len(y)"));
         }
-        let n_float = T::from_usize(n).unwrap();
+        let n_float = n as f64;
-        let l1_reg = parameters.alpha * parameters.l1_ratio * n_float;
-        let l2_reg = parameters.alpha * (T::one() - parameters.l1_ratio) * n_float;
+        let l1_reg = TX::from_f64(parameters.alpha * parameters.l1_ratio * n_float).unwrap();
+        let l2_reg =
+            TX::from_f64(parameters.alpha * (1.0 - parameters.l1_ratio) * n_float).unwrap();
-        let y_mean = y.mean();
+        let y_mean = TX::from_f64(y.mean_by()).unwrap();
         let (w, b) = if parameters.normalize {
             let (scaled_x, col_mean, col_std) = Self::rescale_x(x)?;
@@ -311,68 +339,92 @@ impl<T: RealNumber, M: Matrix<T>> ElasticNet<T, M> {
             let mut optimizer = InteriorPointOptimizer::new(&x, p);
-            let mut w =
-                optimizer.optimize(&x, &y, l1_reg * gamma, parameters.max_iter, parameters.tol)?;
+            let mut w = optimizer.optimize(
+                &x,
+                &y,
+                l1_reg * gamma,
+                parameters.max_iter,
+                TX::from_f64(parameters.tol).unwrap(),
+            )?;
             for i in 0..p {
-                w.set(i, 0, gamma * w.get(i, 0) / col_std[i]);
+                w.set(i, gamma * *w.get(i) / col_std[i]);
             }
-            let mut b = T::zero();
+            let mut b = TX::zero();
             for i in 0..p {
-                b += w.get(i, 0) * col_mean[i];
+                b += *w.get(i) * col_mean[i];
             }
             b = y_mean - b;
-            (w, b)
+            (X::from_column(&w), b)
         } else {
             let (x, y, gamma) = Self::augment_x_and_y(x, y, l2_reg);
             let mut optimizer = InteriorPointOptimizer::new(&x, p);
-            let mut w =
-                optimizer.optimize(&x, &y, l1_reg * gamma, parameters.max_iter, parameters.tol)?;
+            let mut w = optimizer.optimize(
+                &x,
+                &y,
+                l1_reg * gamma,
+                parameters.max_iter,
+                TX::from_f64(parameters.tol).unwrap(),
+            )?;
             for i in 0..p {
-                w.set(i, 0, gamma * w.get(i, 0));
+                w.set(i, gamma * *w.get(i));
             }
-            (w, y_mean)
+            (X::from_column(&w), y_mean)
         };
         Ok(ElasticNet {
-            intercept: b,
-            coefficients: w,
+            intercept: Some(b),
+            coefficients: Some(w),
+            _phantom_ty: PhantomData,
+            _phantom_y: PhantomData,
         })
     }
     /// Predict target values from `x`
     /// * `x` - _KxM_ data where _K_ is number of observations and _M_ is number of features.
-    pub fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
+    pub fn predict(&self, x: &X) -> Result<Y, Failed> {
         let (nrows, _) = x.shape();
-        let mut y_hat = x.matmul(&self.coefficients);
-        y_hat.add_mut(&M::fill(nrows, 1, self.intercept));
-        Ok(y_hat.transpose().to_row_vector())
+        let mut y_hat = x.matmul(self.coefficients.as_ref().unwrap());
+        let bias = X::fill(nrows, 1, self.intercept.unwrap());
+        y_hat.add_mut(&bias);
+        Ok(Y::from_iterator(
+            y_hat.iterator(0).map(|&v| TY::from(v).unwrap()),
+            nrows,
+        ))
     }
     /// Get estimates regression coefficients
-    pub fn coefficients(&self) -> &M {
-        &self.coefficients
+    pub fn coefficients(&self) -> &X {
+        self.coefficients.as_ref().unwrap()
     }
     /// Get estimate of intercept
-    pub fn intercept(&self) -> T {
-        self.intercept
+    pub fn intercept(&self) -> &TX {
+        self.intercept.as_ref().unwrap()
     }
-    fn rescale_x(x: &M) -> Result<(M, Vec<T>, Vec<T>), Failed> {
-        let col_mean = x.mean(0);
-        let col_std = x.std(0);
+    fn rescale_x(x: &X) -> Result<(X, Vec<TX>, Vec<TX>), Failed> {
+        let col_mean: Vec<TX> = x
+            .mean_by(0)
+            .iter()
+            .map(|&v| TX::from_f64(v).unwrap())
+            .collect();
+        let col_std: Vec<TX> = x
+            .std_dev(0)
+            .iter()
+            .map(|&v| TX::from_f64(v).unwrap())
+            .collect();
-        for i in 0..col_std.len() {
-            if (col_std[i] - T::zero()).abs() < T::epsilon() {
+        for (i, col_std_i) in col_std.iter().enumerate() {
+            if (*col_std_i - TX::zero()).abs() < TX::epsilon() {
                 return Err(Failed::fit(&format!(
                     "Cannot rescale constant column {}",
                     i
@@ -385,25 +437,25 @@ impl<T: RealNumber, M: Matrix<T>> ElasticNet<T, M> {
         Ok((scaled_x, col_mean, col_std))
     }
-    fn augment_x_and_y(x: &M, y: &M::RowVector, l2_reg: T) -> (M, M::RowVector, T) {
+    fn augment_x_and_y(x: &X, y: &Y, l2_reg: TX) -> (X, Vec<TX>, TX) {
         let (n, p) = x.shape();
-        let gamma = T::one() / (T::one() + l2_reg).sqrt();
+        let gamma = TX::one() / (TX::one() + l2_reg).sqrt();
         let padding = gamma * l2_reg.sqrt();
-        let mut y2 = M::RowVector::zeros(n + p);
-        for i in 0..y.len() {
-            y2.set(i, y.get(i));
+        let mut y2 = Vec::<TX>::zeros(n + p);
+        for i in 0..y.shape() {
+            y2.set(i, TX::from(*y.get(i)).unwrap());
         }
-        let mut x2 = M::zeros(n + p, p);
+        let mut x2 = X::zeros(n + p, p);
         for j in 0..p {
             for i in 0..n {
-                x2.set(i, j, gamma * x.get(i, j));
+                x2.set((i, j), gamma * *x.get((i, j)));
             }
-            x2.set(j + n, j, padding);
+            x2.set((j + n, j), padding);
         }
         (x2, y2, gamma)
@@ -413,7 +465,7 @@ impl<T: RealNumber, M: Matrix<T>> ElasticNet<T, M> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::linalg::naive::dense_matrix::*;
+    use crate::linalg::basic::matrix::DenseMatrix;
     use crate::metrics::mean_absolute_error;
     #[test]
@@ -546,43 +598,44 @@ mod tests {
         assert!(mae_l1 < 2.0);
         assert!(mae_l2 < 2.0);
-        assert!(l1_model.coefficients().get(0, 0) > l1_model.coefficients().get(1, 0));
-        assert!(l1_model.coefficients().get(0, 0) > l1_model.coefficients().get(2, 0));
+        assert!(l1_model.coefficients().get((0, 0)) > l1_model.coefficients().get((1, 0)));
+        assert!(l1_model.coefficients().get((0, 0)) > l1_model.coefficients().get((2, 0)));
     }
-    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
-    #[test]
-    #[cfg(feature = "serde")]
-    fn serde() {
-        let x = DenseMatrix::from_2d_array(&[
-            &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
-            &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
-            &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
-            &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
-            &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
-            &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
-            &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
-            &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
-            &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
-            &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
-            &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
-            &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
-            &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
-            &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
-            &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
-            &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
-        ]);
+    // TODO: serialization for the new DenseMatrix needs to be implemented
+    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
+    // #[test]
+    // #[cfg(feature = "serde")]
+    // fn serde() {
+    //     let x = DenseMatrix::from_2d_array(&[
+    //         &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
+    //         &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
+    //         &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
+    //         &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
+    //         &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
+    //         &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
+    //         &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
+    //         &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
+    //         &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
+    //         &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
+    //         &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
+    //         &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
+    //         &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
+    //         &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
+    //         &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
+    //         &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
+    //     ]);
-        let y = vec![
-            83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
-            114.2, 115.7, 116.9,
-        ];
+    //     let y = vec![
+    //         83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+    //         114.2, 115.7, 116.9,
+    //     ];
-        let lr = ElasticNet::fit(&x, &y, Default::default()).unwrap();
+    //     let lr = ElasticNet::fit(&x, &y, Default::default()).unwrap();
-        let deserialized_lr: ElasticNet<f64, DenseMatrix<f64>> =
-            serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
+    //     let deserialized_lr: ElasticNet<f64, f64, DenseMatrix<f64>, Vec<f64>> =
+    //         serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
-        assert_eq!(lr, deserialized_lr);
-    }
+    //     assert_eq!(lr, deserialized_lr);
+    // }
 }
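Putting the refactor together, a minimal usage sketch of the updated API; import paths and call signatures are taken from the doc-example and tests in this diff, while the toy data is made up for illustration:

```rust
use smartcore::linalg::basic::matrix::DenseMatrix;
use smartcore::linear::elastic_net::{ElasticNet, ElasticNetParameters};

fn main() {
    // Toy data: four observations, two features (illustrative only).
    let x = DenseMatrix::from_2d_array(&[
        &[1.0, 2.0],
        &[2.0, 3.0],
        &[3.0, 5.0],
        &[4.0, 7.0],
    ]);
    let y: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0];

    // Parameters are no longer generic over T: alpha, l1_ratio and tol are plain f64.
    let params = ElasticNetParameters::default()
        .with_alpha(0.5)
        .with_l1_ratio(0.5);

    let model = ElasticNet::fit(&x, &y, params).unwrap();
    let y_hat = model.predict(&x).unwrap();
    // `intercept()` now returns a reference to the fitted intercept.
    println!("intercept = {}, predictions = {:?}", model.intercept(), y_hat);
}
```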