Merge potential next release v0.4 (#187)

Breaking Changes

* First draft of the new n-dimensional arrays + NB use case
* Improves default implementation of multiple Array methods
* Refactors tree methods
* Adds matrix decomposition routines
* Adds matrix decomposition methods to ndarray and nalgebra bindings
* Refactoring + linear regression now uses array2
* Ridge & Linear regression
* LBFGS optimizer & logistic regression
* Changes linear methods, metrics and model selection methods to new n-dimensional arrays
* Switches KNN and clustering algorithms to new n-d array layer
* Refactors distance metrics
* Optimizes knn and clustering methods
* Refactors metrics module
* Switches decomposition methods to n-dimensional arrays
* Linalg refactoring - cleanup rng merge (#172)
* Remove legacy DenseMatrix and BaseMatrix implementations. Port the new Number, FloatNumber and Array implementations into the module structure.
* Exclude AUC metrics; they need reimplementation
* Improve the developer walkthrough

New traits system in place at `src/numbers` and `src/linalg`
Co-authored-by: Lorenzo <tunedconsulting@gmail.com>
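For orientation, a minimal sketch of how the new layer is used, based only on the call sites changed in this PR (moved `DenseMatrix` path, `shape()`, tuple-indexed `get`/`set` behind the `Array2` bound); method names beyond those visible in the diff are not assumed:

```rust
use smartcore::linalg::basic::arrays::Array2;
use smartcore::linalg::basic::matrix::DenseMatrix;

// Mirrors the generic bound used throughout this PR: M: Array2<T>.
// get((row, col)) returns a reference; set((row, col), value) writes in place.
fn first_element<M: Array2<f64>>(x: &M) -> f64 {
    let (_nrows, _ncols) = x.shape();
    *x.get((0, 0))
}

fn main() {
    // DenseMatrix now lives under linalg::basic::matrix instead of linalg::naive::dense_matrix.
    let m = DenseMatrix::from_2d_array(&[&[1.5, 1.0], &[1.5, 3.0]]);
    assert_eq!(first_element(&m), 1.5);
}
```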

* Provide SupervisedEstimator with a constructor to avoid explicit dynamic `Box` allocation in `cross_validate` and `cross_validate_predict`, as required by the use of `dyn` in Rust 2021
* Implement getters to use as_ref() in src/neighbors
* Implement getters to use as_ref() in src/naive_bayes
* Implement getters to use as_ref() in src/linear
* Add Clone to src/naive_bayes
* Change the signatures of `cross_validate` and other model_selection functions to abide by the use of `dyn` in Rust 2021
* Implement ndarray-bindings. Remove FloatNumber from implementations
* Drop nalgebra-bindings support (as decided in the conf call to go with ndarray)
* Remove benches. Benches will have their own repo at smartcore-benches
* Implement SVC
* Implement SVC serialization. Move search parameters into a dedicated module
* Implement SVR. Definitely too slow
* Fix compilation issues for wasm (#202)

Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
* Fix tests (#203)

* Port linalg/traits/stats.rs
* Improve method naming
* Improve Display for DenseMatrix

Co-authored-by: Montana Low <montanalow@users.noreply.github.com>
Co-authored-by: VolodymyrOrlov <volodymyr.orlov@gmail.com>
Authored by Lorenzo on 2022-10-31 10:44:57 +00:00, committed by GitHub
parent bb71656137
commit 52eb6ce023
110 changed files with 10327 additions and 9107 deletions
src/preprocessing/categorical.rs (+10 -10)
@@ -5,7 +5,7 @@
//!
//! ### Usage Example
//! ```
//! use smartcore::linalg::naive::dense_matrix::DenseMatrix;
//! use smartcore::linalg::basic::matrix::DenseMatrix;
//! use smartcore::preprocessing::categorical::{OneHotEncoder, OneHotEncoderParams};
//! let data = DenseMatrix::from_2d_array(&[
//! &[1.5, 1.0, 1.5, 3.0],
@@ -27,10 +27,10 @@
use std::iter;
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::linalg::basic::arrays::Array2;
use crate::preprocessing::data_traits::{CategoricalFloat, Categorizable};
use crate::preprocessing::series_encoder::CategoryMapper;
use crate::preprocessing::traits::{CategoricalFloat, Categorizable};
/// OneHotEncoder Parameters
#[derive(Debug, Clone)]
@@ -106,7 +106,7 @@ impl OneHotEncoder {
pub fn fit<T, M>(data: &M, params: OneHotEncoderParams) -> Result<OneHotEncoder, Failed>
where
T: Categorizable,
M: Matrix<T>,
M: Array2<T>,
{
match (params.col_idx_categorical, params.infer_categorical) {
(None, false) => Err(Failed::fit(
@@ -157,7 +157,7 @@ impl OneHotEncoder {
pub fn transform<T, M>(&self, x: &M) -> Result<M, Failed>
where
T: Categorizable,
M: Matrix<T>,
M: Array2<T>,
{
let (nrows, p) = x.shape();
let additional_params: Vec<usize> = self
@@ -174,7 +174,7 @@ impl OneHotEncoder {
for (pidx, &old_cidx) in self.col_idx_categorical.iter().enumerate() {
let cidx = new_col_idx[old_cidx];
let col_iter = (0..nrows).map(|r| x.get(r, old_cidx).to_category());
let col_iter = (0..nrows).map(|r| x.get((r, old_cidx)).to_category());
let sencoder = &self.category_mappers[pidx];
let oh_series = col_iter.map(|c| sencoder.get_one_hot::<T, Vec<T>>(&c));
@@ -188,7 +188,7 @@ impl OneHotEncoder {
Some(v) => {
// copy one hot vectors to their place in the data matrix;
for (col_ofst, &val) in v.iter().enumerate() {
res.set(row, cidx + col_ofst, val);
res.set((row, cidx + col_ofst), val);
}
}
}
@@ -209,8 +209,8 @@ impl OneHotEncoder {
}
for r in 0..nrows {
let val = x.get(r, old_p);
res.set(r, new_p, val);
let val = x.get((r, old_p));
res.set((r, new_p), *val);
}
}
@@ -221,7 +221,7 @@ impl OneHotEncoder {
#[cfg(test)]
mod tests {
use super::*;
use crate::linalg::naive::dense_matrix::DenseMatrix;
use crate::linalg::basic::matrix::DenseMatrix;
use crate::preprocessing::series_encoder::CategoryMapper;
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
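Not part of the diff: a short usage sketch of `OneHotEncoder` under the new import path, assuming the pre-existing `OneHotEncoderParams::from_cat_idx` helper (the doc example above is truncated at the hunk boundary) is unchanged apart from the moved `DenseMatrix` path:

```rust
use smartcore::linalg::basic::matrix::DenseMatrix;
use smartcore::preprocessing::categorical::{OneHotEncoder, OneHotEncoderParams};

fn main() {
    // Columns 1 and 3 hold float-encoded categorical values.
    let data = DenseMatrix::from_2d_array(&[
        &[1.5, 1.0, 1.5, 3.0],
        &[1.5, 2.0, 1.5, 4.0],
        &[1.5, 1.0, 1.5, 5.0],
    ]);
    // Assumption: from_cat_idx is the existing params constructor for categorical columns.
    let params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
    let encoder = OneHotEncoder::fit(&data, params).unwrap();
    // Categorical columns are replaced by their one-hot expansions.
    let one_hot = encoder.transform(&data).unwrap();
    println!("{}", &one_hot); // DenseMatrix Display was improved in this release
}
```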
src/preprocessing/mod.rs (+1 -1)
@@ -1,7 +1,7 @@
/// Transform a data matrix by replacing all categorical variables with their one-hot vector equivalents
pub mod categorical;
mod data_traits;
/// Preprocess numerical matrices.
pub mod numerical;
/// Encode a series (column, array) of categorical variables as one-hot vectors
pub mod series_encoder;
mod traits;
src/preprocessing/numerical.rs (+70 -58)
@@ -4,7 +4,7 @@
//! ### Usage Example
//! ```
//! use smartcore::api::{Transformer, UnsupervisedEstimator};
//! use smartcore::linalg::naive::dense_matrix::DenseMatrix;
//! use smartcore::linalg::basic::matrix::DenseMatrix;
//! use smartcore::preprocessing::numerical;
//! let data = DenseMatrix::from_2d_vec(&vec![
//! vec![0.0, 0.0],
@@ -27,10 +27,13 @@
//! ])
//! );
//! ```
use std::marker::PhantomData;
use crate::api::{Transformer, UnsupervisedEstimator};
use crate::error::{Failed, FailedError};
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
use crate::linalg::basic::arrays::Array2;
use crate::numbers::basenum::Number;
use crate::numbers::realnum::RealNumber;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
@@ -59,29 +62,46 @@ impl Default for StandardScalerParameters {
/// scaling sensitive models like neural network or nearest
/// neighbors based models.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct StandardScaler<T: RealNumber> {
means: Vec<T>,
stds: Vec<T>,
#[derive(Clone, Debug, Default, PartialEq)]
pub struct StandardScaler<T: Number + RealNumber> {
means: Vec<f64>,
stds: Vec<f64>,
parameters: StandardScalerParameters,
_phantom: PhantomData<T>,
}
impl<T: RealNumber> StandardScaler<T> {
#[allow(dead_code)]
impl<T: Number + RealNumber> StandardScaler<T> {
fn new(parameters: StandardScalerParameters) -> Self
where
T: Number + RealNumber,
{
Self {
means: vec![],
stds: vec![],
parameters: StandardScalerParameters {
with_mean: parameters.with_mean,
with_std: parameters.with_std,
},
_phantom: PhantomData,
}
}
/// When the mean should be adjusted, the column mean
/// should be kept. Otherwise, replace it by zero.
fn adjust_column_mean(&self, mean: T) -> T {
fn adjust_column_mean(&self, mean: f64) -> f64 {
if self.parameters.with_mean {
mean
} else {
T::zero()
0f64
}
}
/// When the standard-deviation should be adjusted, the column
/// standard-deviation should be kept. Otherwise, replace it by one.
fn adjust_column_std(&self, std: T) -> T {
fn adjust_column_std(&self, std: f64) -> f64 {
if self.parameters.with_std {
ensure_std_valid(std)
} else {
T::one()
1f64
}
}
}
@@ -90,19 +110,24 @@ impl<T: RealNumber> StandardScaler<T> {
/// negative or zero, it should replaced by the smallest
/// positive value the type can have. That way we can savely
/// divide the columns with the resulting scalar.
fn ensure_std_valid<T: RealNumber>(value: T) -> T {
fn ensure_std_valid<T: Number + RealNumber>(value: T) -> T {
value.max(T::min_positive_value())
}
/// During `fit` the `StandardScaler` computes the column means and standard deviation.
impl<T: RealNumber, M: Matrix<T>> UnsupervisedEstimator<M, StandardScalerParameters>
impl<T: Number + RealNumber, M: Array2<T>> UnsupervisedEstimator<M, StandardScalerParameters>
for StandardScaler<T>
{
fn fit(x: &M, parameters: StandardScalerParameters) -> Result<Self, Failed> {
fn fit(x: &M, parameters: StandardScalerParameters) -> Result<Self, Failed>
where
T: Number + RealNumber,
M: Array2<T>,
{
Ok(Self {
means: x.column_mean(),
stds: x.std(0),
stds: x.std_dev(0),
parameters,
_phantom: Default::default(),
})
}
}
@@ -110,7 +135,7 @@ impl<T: RealNumber, M: Matrix<T>> UnsupervisedEstimator<M, StandardScalerParamet
/// During `transform` the `StandardScaler` applies the summary statistics
/// computed during `fit` to set the mean of each column to zero and the
/// standard deviation to one.
impl<T: RealNumber, M: Matrix<T>> Transformer<M> for StandardScaler<T> {
impl<T: Number + RealNumber, M: Array2<T>> Transformer<M> for StandardScaler<T> {
fn transform(&self, x: &M) -> Result<M, Failed> {
let (_, n_cols) = x.shape();
if n_cols != self.means.len() {
@@ -131,8 +156,8 @@ impl<T: RealNumber, M: Matrix<T>> Transformer<M> for StandardScaler<T> {
.enumerate()
.map(|(column_index, (column_mean, column_std))| {
x.take_column(column_index)
.sub_scalar(self.adjust_column_mean(*column_mean))
.div_scalar(self.adjust_column_std(*column_std))
.sub_scalar(T::from(self.adjust_column_mean(*column_mean)).unwrap())
.div_scalar(T::from(self.adjust_column_std(*column_std)).unwrap())
})
.collect(),
)
@@ -144,8 +169,8 @@ impl<T: RealNumber, M: Matrix<T>> Transformer<M> for StandardScaler<T> {
/// a matrix by stacking the columns horizontally.
fn build_matrix_from_columns<T, M>(columns: Vec<M>) -> Option<M>
where
T: RealNumber,
M: Matrix<T>,
T: Number + RealNumber,
M: Array2<T>,
{
if let Some(output_matrix) = columns.first().cloned() {
return Some(
@@ -166,7 +191,7 @@ mod tests {
mod helper_functionality {
use super::super::{build_matrix_from_columns, ensure_std_valid};
use crate::linalg::naive::dense_matrix::DenseMatrix;
use crate::linalg::basic::matrix::DenseMatrix;
#[test]
fn combine_three_columns() {
@@ -197,20 +222,16 @@ mod tests {
mod standard_scaler {
use super::super::{StandardScaler, StandardScalerParameters};
use crate::api::{Transformer, UnsupervisedEstimator};
use crate::linalg::naive::dense_matrix::DenseMatrix;
use crate::linalg::BaseMatrix;
use crate::linalg::basic::arrays::Array2;
use crate::linalg::basic::matrix::DenseMatrix;
#[test]
fn dont_adjust_mean_if_used() {
assert_eq!(
(StandardScaler {
means: vec![],
stds: vec![],
parameters: StandardScalerParameters {
with_mean: true,
with_std: true
}
})
(StandardScaler::<f64>::new(StandardScalerParameters {
with_mean: true,
with_std: true
}))
.adjust_column_mean(1.0),
1.0
)
@@ -218,14 +239,10 @@ mod tests {
#[test]
fn replace_mean_with_zero_if_not_used() {
assert_eq!(
(StandardScaler {
means: vec![],
stds: vec![],
parameters: StandardScalerParameters {
with_mean: false,
with_std: true
}
})
(StandardScaler::<f64>::new(StandardScalerParameters {
with_mean: false,
with_std: true
}))
.adjust_column_mean(1.0),
0.0
)
@@ -233,14 +250,10 @@ mod tests {
#[test]
fn dont_adjust_std_if_used() {
assert_eq!(
(StandardScaler {
means: vec![],
stds: vec![],
parameters: StandardScalerParameters {
with_mean: true,
with_std: true
}
})
(StandardScaler::<f64>::new(StandardScalerParameters {
with_mean: true,
with_std: true
}))
.adjust_column_std(10.0),
10.0
)
@@ -248,14 +261,10 @@ mod tests {
#[test]
fn replace_std_with_one_if_not_used() {
assert_eq!(
(StandardScaler {
means: vec![],
stds: vec![],
parameters: StandardScalerParameters {
with_mean: true,
with_std: false
}
})
(StandardScaler::<f64>::new(StandardScalerParameters {
with_mean: true,
with_std: false
}))
.adjust_column_std(10.0),
1.0
)
@@ -331,7 +340,8 @@ mod tests {
parameters: StandardScalerParameters {
with_mean: true,
with_std: true
}
},
_phantom: Default::default(),
})
)
}
@@ -355,7 +365,7 @@ mod tests {
);
assert!(
&DenseMatrix::from_2d_vec(&vec![fitted_scaler.stds]).approximate_eq(
&DenseMatrix::<f64>::from_2d_vec(&vec![fitted_scaler.stds]).approximate_eq(
&DenseMatrix::from_2d_array(&[&[
0.29426447500954,
0.16758497615485,
@@ -378,6 +388,7 @@ mod tests {
with_mean: true,
with_std: false,
},
_phantom: Default::default(),
};
assert_eq!(
@@ -397,6 +408,7 @@ mod tests {
with_mean: false,
with_std: true,
},
_phantom: Default::default(),
};
assert_eq!(
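Again not part of the diff, a minimal sketch of the scaler under the new traits, following the doc example at the top of this file (means and stds are now stored as `f64`):

```rust
use smartcore::api::{Transformer, UnsupervisedEstimator};
use smartcore::linalg::basic::matrix::DenseMatrix;
use smartcore::preprocessing::numerical::{StandardScaler, StandardScalerParameters};

fn main() {
    let data = DenseMatrix::from_2d_vec(&vec![
        vec![0.0, 0.0],
        vec![0.0, 1.0],
        vec![1.0, 0.0],
        vec![1.0, 1.0],
    ]);
    // fit stores per-column means and standard deviations;
    // transform centers and scales each column with them.
    let scaler = StandardScaler::<f64>::fit(&data, StandardScalerParameters::default()).unwrap();
    let scaled: DenseMatrix<f64> = scaler.transform(&data).unwrap();
    println!("{}", &scaled);
}
```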
src/preprocessing/series_encoder.rs (+7 -7)
@@ -3,8 +3,8 @@
//! Encode a series of categorical features as a one-hot numeric array.
use crate::error::Failed;
use crate::linalg::BaseVector;
use crate::math::num::RealNumber;
use crate::linalg::basic::arrays::Array1;
use crate::numbers::realnum::RealNumber;
use std::collections::HashMap;
use std::hash::Hash;
@@ -132,7 +132,7 @@ where
pub fn get_one_hot<U, V>(&self, category: &C) -> Option<V>
where
U: RealNumber,
V: BaseVector<U>,
V: Array1<U>,
{
self.get_num(category)
.map(|&idx| make_one_hot::<U, V>(idx, self.num_categories))
@@ -142,15 +142,15 @@ where
pub fn invert_one_hot<U, V>(&self, one_hot: V) -> Result<C, Failed>
where
U: RealNumber,
V: BaseVector<U>,
V: Array1<U>,
{
let pos = U::one();
let oh_it = (0..one_hot.len()).map(|idx| one_hot.get(idx));
let oh_it = (0..one_hot.shape()).map(|idx| one_hot.get(idx));
let s: Vec<usize> = oh_it
.enumerate()
.filter_map(|(idx, v)| if v == pos { Some(idx) } else { None })
.filter_map(|(idx, v)| if *v == pos { Some(idx) } else { None })
.collect();
if s.len() == 1 {
@@ -187,7 +187,7 @@ where
pub fn make_one_hot<T, V>(category_idx: usize, num_categories: usize) -> V
where
T: RealNumber,
V: BaseVector<T>,
V: Array1<T>,
{
let pos = T::one();
let mut z = V::zeros(num_categories);
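A small sketch of the `Array1`-based helper after the switch from `BaseVector`, assuming `make_one_hot` stays public in `preprocessing::series_encoder` and that `Vec<f64>` implements the new `Array1` trait (as the `get_one_hot::<T, Vec<T>>` call above suggests):

```rust
use smartcore::preprocessing::series_encoder::make_one_hot;

fn main() {
    // Build a one-hot vector of length 4 with a 1.0 at index 2.
    let one_hot: Vec<f64> = make_one_hot::<f64, Vec<f64>>(2, 4);
    assert_eq!(one_hot, vec![0.0, 0.0, 1.0, 0.0]);
}
```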
src/preprocessing/traits.rs (renamed from data_traits.rs)
@@ -1,7 +1,7 @@
//! Traits to indicate that float variables can be viewed as categorical
//! This module assumes
use crate::math::num::RealNumber;
use crate::numbers::realnum::RealNumber;
pub type CategoricalFloat = u16;