Merge pull request #44 from smartcorelib/api

feat: consolidates API
This commit is contained in:
VolodymyrOrlov
2020-12-27 15:54:26 -08:00
committed by GitHub
25 changed files with 400 additions and 98 deletions
+43
View File
@@ -0,0 +1,43 @@
//! # Common Interfaces and API
//!
//! This module provides interfaces and uniform API with simple conventions
//! that are used in other modules for supervised and unsupervised learning.
use crate::error::Failed;
/// An estimator for unsupervised learning that provides a `fit` method to learn
/// a model's parameters from an unlabeled training dataset.
pub trait UnsupervisedEstimator<X, P> {
/// Fit a model to a training dataset, estimate model's parameters.
/// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
/// * `parameters` - hyperparameters of an algorithm
///
/// Returns the fitted model, or [`Failed`] if fitting could not complete.
/// `Self: Sized` is required because `fit` returns the estimator by value.
fn fit(x: &X, parameters: P) -> Result<Self, Failed>
where
Self: Sized,
P: Clone;
}
/// An estimator for supervised learning that provides a `fit` method to learn
/// a model from training data and its corresponding target values.
pub trait SupervisedEstimator<X, Y, P> {
/// Fit a model to a training dataset, estimate model's parameters.
/// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
/// * `y` - target training values of size _N_.
/// * `parameters` - hyperparameters of an algorithm
///
/// Returns the fitted model, or [`Failed`] if fitting could not complete.
/// `Self: Sized` is required because `fit` returns the estimator by value.
fn fit(x: &X, y: &Y, parameters: P) -> Result<Self, Failed>
where
Self: Sized,
P: Clone;
}
/// Implements method `predict` that estimates target values from new data.
pub trait Predictor<X, Y> {
/// Estimate target values from new data.
/// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
///
/// Returns the predicted values of type `Y`, or [`Failed`] on error.
fn predict(&self, x: &X) -> Result<Y, Failed>;
}
/// Implements method `transform` that filters or modifies input data.
pub trait Transformer<X> {
/// Transform data by modifying or filtering it.
/// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
///
/// Returns the transformed data (same container type as the input),
/// or [`Failed`] on error.
fn transform(&self, x: &X) -> Result<X, Failed>;
}
-10
View File
@@ -1,10 +0,0 @@
//! # Common Interfaces and methods
//!
//! This module consolidates interfaces and uniform basic API that is used elsewhere in the code.
use crate::error::Failed;
/// Implements method predict that offers a way to estimate target value from new data
pub trait Predictor<X, Y> {
fn predict(&self, x: &X) -> Result<Y, Failed>;
}
+45 -21
View File
@@ -15,8 +15,7 @@
//! let blobs = generator::make_blobs(100, 2, 3);
//! let x = DenseMatrix::from_vec(blobs.num_samples, blobs.num_features, &blobs.data);
//! // Fit the algorithm and predict cluster labels
//! let labels = DBSCAN::fit(&x, Distances::euclidian(),
//! DBSCANParameters::default().with_eps(3.0)).
//! let labels = DBSCAN::fit(&x, DBSCANParameters::default().with_eps(3.0)).
//! and_then(|dbscan| dbscan.predict(&x));
//!
//! println!("{:?}", labels);
@@ -33,9 +32,11 @@ use std::iter::Sum;
use serde::{Deserialize, Serialize};
use crate::algorithm::neighbour::{KNNAlgorithm, KNNAlgorithmName};
use crate::api::{Predictor, UnsupervisedEstimator};
use crate::error::Failed;
use crate::linalg::{row_iter, Matrix};
use crate::math::distance::Distance;
use crate::math::distance::euclidian::Euclidian;
use crate::math::distance::{Distance, Distances};
use crate::math::num::RealNumber;
use crate::tree::decision_tree_classifier::which_max;
@@ -50,7 +51,11 @@ pub struct DBSCAN<T: RealNumber, D: Distance<Vec<T>, T>> {
#[derive(Debug, Clone)]
/// DBSCAN clustering algorithm parameters
pub struct DBSCANParameters<T: RealNumber> {
pub struct DBSCANParameters<T: RealNumber, D: Distance<Vec<T>, T>> {
/// a function that defines a distance between each pair of point in training data.
/// This function should extend [`Distance`](../../math/distance/trait.Distance.html) trait.
/// See [`Distances`](../../math/distance/struct.Distances.html) for a list of available functions.
pub distance: D,
/// The number of samples (or total weight) in a neighborhood for a point to be considered as a core point.
pub min_samples: usize,
/// The maximum distance between two samples for one to be considered as in the neighborhood of the other.
@@ -59,7 +64,18 @@ pub struct DBSCANParameters<T: RealNumber> {
pub algorithm: KNNAlgorithmName,
}
impl<T: RealNumber> DBSCANParameters<T> {
impl<T: RealNumber, D: Distance<Vec<T>, T>> DBSCANParameters<T, D> {
/// a function that defines a distance between each pair of point in training data.
/// This function should extend [`Distance`](../../math/distance/trait.Distance.html) trait.
/// See [`Distances`](../../math/distance/struct.Distances.html) for a list of available functions.
pub fn with_distance<DD: Distance<Vec<T>, T>>(self, distance: DD) -> DBSCANParameters<T, DD> {
DBSCANParameters {
distance,
min_samples: self.min_samples,
eps: self.eps,
algorithm: self.algorithm,
}
}
/// The number of samples (or total weight) in a neighborhood for a point to be considered as a core point.
pub fn with_min_samples(mut self, min_samples: usize) -> Self {
self.min_samples = min_samples;
@@ -86,9 +102,10 @@ impl<T: RealNumber, D: Distance<Vec<T>, T>> PartialEq for DBSCAN<T, D> {
}
}
impl<T: RealNumber> Default for DBSCANParameters<T> {
impl<T: RealNumber> Default for DBSCANParameters<T, Euclidian> {
fn default() -> Self {
DBSCANParameters {
distance: Distances::euclidian(),
min_samples: 5,
eps: T::half(),
algorithm: KNNAlgorithmName::CoverTree,
@@ -96,6 +113,22 @@ impl<T: RealNumber> Default for DBSCANParameters<T> {
}
}
/// Exposes DBSCAN through the uniform `UnsupervisedEstimator` API.
impl<T: RealNumber + Sum, M: Matrix<T>, D: Distance<Vec<T>, T>>
UnsupervisedEstimator<M, DBSCANParameters<T, D>> for DBSCAN<T, D>
{
// Thin delegation to the inherent `DBSCAN::fit`.
fn fit(x: &M, parameters: DBSCANParameters<T, D>) -> Result<Self, Failed> {
DBSCAN::fit(x, parameters)
}
}
/// Exposes DBSCAN through the uniform `Predictor` API.
impl<T: RealNumber, M: Matrix<T>, D: Distance<Vec<T>, T>> Predictor<M, M::RowVector>
for DBSCAN<T, D>
{
// Thin delegation to the inherent `DBSCAN::predict`.
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
}
}
impl<T: RealNumber + Sum, D: Distance<Vec<T>, T>> DBSCAN<T, D> {
/// Fit algorithm to _NxM_ matrix where _N_ is number of samples and _M_ is number of features.
/// * `data` - training instances to cluster
@@ -103,8 +136,7 @@ impl<T: RealNumber + Sum, D: Distance<Vec<T>, T>> DBSCAN<T, D> {
/// * `parameters` - cluster parameters
pub fn fit<M: Matrix<T>>(
x: &M,
distance: D,
parameters: DBSCANParameters<T>,
parameters: DBSCANParameters<T, D>,
) -> Result<DBSCAN<T, D>, Failed> {
if parameters.min_samples < 1 {
return Err(Failed::fit(&"Invalid minPts".to_string()));
@@ -121,7 +153,9 @@ impl<T: RealNumber + Sum, D: Distance<Vec<T>, T>> DBSCAN<T, D> {
let n = x.shape().0;
let mut y = vec![unassigned; n];
let algo = parameters.algorithm.fit(row_iter(x).collect(), distance)?;
let algo = parameters
.algorithm
.fit(row_iter(x).collect(), parameters.distance)?;
for (i, e) in row_iter(x).enumerate() {
if y[i] == unassigned {
@@ -195,7 +229,6 @@ mod tests {
use super::*;
use crate::linalg::naive::dense_matrix::DenseMatrix;
use crate::math::distance::euclidian::Euclidian;
use crate::math::distance::Distances;
#[test]
fn fit_predict_dbscan() {
@@ -215,16 +248,7 @@ mod tests {
let expected_labels = vec![0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0];
let dbscan = DBSCAN::fit(
&x,
Distances::euclidian(),
DBSCANParameters {
min_samples: 5,
eps: 1.0,
algorithm: KNNAlgorithmName::CoverTree,
},
)
.unwrap();
let dbscan = DBSCAN::fit(&x, DBSCANParameters::default().with_eps(1.0)).unwrap();
let predicted_labels = dbscan.predict(&x).unwrap();
@@ -256,7 +280,7 @@ mod tests {
&[5.2, 2.7, 3.9, 1.4],
]);
let dbscan = DBSCAN::fit(&x, Distances::euclidian(), Default::default()).unwrap();
let dbscan = DBSCAN::fit(&x, Default::default()).unwrap();
let deserialized_dbscan: DBSCAN<f64, Euclidian> =
serde_json::from_str(&serde_json::to_string(&dbscan).unwrap()).unwrap();
+42 -21
View File
@@ -43,7 +43,7 @@
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//!
//! let kmeans = KMeans::fit(&x, 2, Default::default()).unwrap(); // Fit to data, 2 clusters
//! let kmeans = KMeans::fit(&x, KMeansParameters::default().with_k(2)).unwrap(); // Fit to data, 2 clusters
//! let y_hat = kmeans.predict(&x).unwrap(); // use the same points for prediction
//! ```
//!
@@ -59,6 +59,7 @@ use std::iter::Sum;
use serde::{Deserialize, Serialize};
use crate::algorithm::neighbour::bbd_tree::BBDTree;
use crate::api::{Predictor, UnsupervisedEstimator};
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::math::distance::euclidian::*;
@@ -101,11 +102,18 @@ impl<T: RealNumber> PartialEq for KMeans<T> {
#[derive(Debug, Clone)]
/// K-Means clustering algorithm parameters
pub struct KMeansParameters {
/// Number of clusters.
pub k: usize,
/// Maximum number of iterations of the k-means algorithm for a single run.
pub max_iter: usize,
}
impl KMeansParameters {
/// Number of clusters.
pub fn with_k(mut self, k: usize) -> Self {
self.k = k;
self
}
/// Maximum number of iterations of the k-means algorithm for a single run.
pub fn with_max_iter(mut self, max_iter: usize) -> Self {
self.max_iter = max_iter;
@@ -115,24 +123,37 @@ impl KMeansParameters {
impl Default for KMeansParameters {
fn default() -> Self {
KMeansParameters { max_iter: 100 }
KMeansParameters {
k: 2,
max_iter: 100,
}
}
}
/// Exposes K-Means through the uniform `UnsupervisedEstimator` API.
impl<T: RealNumber + Sum, M: Matrix<T>> UnsupervisedEstimator<M, KMeansParameters> for KMeans<T> {
// Thin delegation to the inherent `KMeans::fit`.
fn fit(x: &M, parameters: KMeansParameters) -> Result<Self, Failed> {
KMeans::fit(x, parameters)
}
}
/// Exposes K-Means through the uniform `Predictor` API.
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for KMeans<T> {
// Thin delegation to the inherent `KMeans::predict`.
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
}
}
impl<T: RealNumber + Sum> KMeans<T> {
/// Fit algorithm to _NxM_ matrix where _N_ is number of samples and _M_ is number of features.
/// * `data` - training instances to cluster
/// * `k` - number of clusters
/// * `parameters` - cluster parameters
pub fn fit<M: Matrix<T>>(
data: &M,
k: usize,
parameters: KMeansParameters,
) -> Result<KMeans<T>, Failed> {
pub fn fit<M: Matrix<T>>(data: &M, parameters: KMeansParameters) -> Result<KMeans<T>, Failed> {
let bbd = BBDTree::new(data);
if k < 2 {
return Err(Failed::fit(&format!("invalid number of clusters: {}", k)));
if parameters.k < 2 {
return Err(Failed::fit(&format!(
"invalid number of clusters: {}",
parameters.k
)));
}
if parameters.max_iter == 0 {
@@ -145,9 +166,9 @@ impl<T: RealNumber + Sum> KMeans<T> {
let (n, d) = data.shape();
let mut distortion = T::max_value();
let mut y = KMeans::kmeans_plus_plus(data, k);
let mut size = vec![0; k];
let mut centroids = vec![vec![T::zero(); d]; k];
let mut y = KMeans::kmeans_plus_plus(data, parameters.k);
let mut size = vec![0; parameters.k];
let mut centroids = vec![vec![T::zero(); d]; parameters.k];
for i in 0..n {
size[y[i]] += 1;
@@ -159,16 +180,16 @@ impl<T: RealNumber + Sum> KMeans<T> {
}
}
for i in 0..k {
for i in 0..parameters.k {
for j in 0..d {
centroids[i][j] /= T::from(size[i]).unwrap();
}
}
let mut sums = vec![vec![T::zero(); d]; k];
let mut sums = vec![vec![T::zero(); d]; parameters.k];
for _ in 1..=parameters.max_iter {
let dist = bbd.clustering(&centroids, &mut sums, &mut size, &mut y);
for i in 0..k {
for i in 0..parameters.k {
if size[i] > 0 {
for j in 0..d {
centroids[i][j] = T::from(sums[i][j]).unwrap() / T::from(size[i]).unwrap();
@@ -184,7 +205,7 @@ impl<T: RealNumber + Sum> KMeans<T> {
}
Ok(KMeans {
k,
k: parameters.k,
y,
size,
distortion,
@@ -280,10 +301,10 @@ mod tests {
fn invalid_k() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
assert!(KMeans::fit(&x, 0, Default::default()).is_err());
assert!(KMeans::fit(&x, KMeansParameters::default().with_k(0)).is_err());
assert_eq!(
"Fit failed: invalid number of clusters: 1",
KMeans::fit(&x, 1, Default::default())
KMeans::fit(&x, KMeansParameters::default().with_k(1))
.unwrap_err()
.to_string()
);
@@ -314,7 +335,7 @@ mod tests {
&[5.2, 2.7, 3.9, 1.4],
]);
let kmeans = KMeans::fit(&x, 2, Default::default()).unwrap();
let kmeans = KMeans::fit(&x, Default::default()).unwrap();
let y = kmeans.predict(&x).unwrap();
@@ -348,7 +369,7 @@ mod tests {
&[5.2, 2.7, 3.9, 1.4],
]);
let kmeans = KMeans::fit(&x, 2, Default::default()).unwrap();
let kmeans = KMeans::fit(&x, Default::default()).unwrap();
let deserialized_kmeans: KMeans<f64> =
serde_json::from_str(&serde_json::to_string(&kmeans).unwrap()).unwrap();
+34 -18
View File
@@ -37,7 +37,7 @@
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//!
//! let pca = PCA::fit(&iris, 2, Default::default()).unwrap(); // Reduce number of features to 2
//! let pca = PCA::fit(&iris, PCAParameters::default().with_n_components(2)).unwrap(); // Reduce number of features to 2
//!
//! let iris_reduced = pca.transform(&iris).unwrap();
//!
@@ -49,6 +49,7 @@ use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use crate::api::{Transformer, UnsupervisedEstimator};
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
@@ -83,12 +84,19 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for PCA<T, M> {
#[derive(Debug, Clone)]
/// PCA parameters
pub struct PCAParameters {
/// Number of components to keep.
pub n_components: usize,
/// By default, covariance matrix is used to compute principal components.
/// Enable this flag if you want to use correlation matrix instead.
pub use_correlation_matrix: bool,
}
impl PCAParameters {
/// Number of components to keep.
pub fn with_n_components(mut self, n_components: usize) -> Self {
self.n_components = n_components;
self
}
/// By default, covariance matrix is used to compute principal components.
/// Enable this flag if you want to use correlation matrix instead.
pub fn with_use_correlation_matrix(mut self, use_correlation_matrix: bool) -> Self {
@@ -100,24 +108,33 @@ impl PCAParameters {
impl Default for PCAParameters {
fn default() -> Self {
PCAParameters {
n_components: 2,
use_correlation_matrix: false,
}
}
}
/// Exposes PCA through the uniform `UnsupervisedEstimator` API.
impl<T: RealNumber, M: Matrix<T>> UnsupervisedEstimator<M, PCAParameters> for PCA<T, M> {
// Thin delegation to the inherent `PCA::fit`.
fn fit(x: &M, parameters: PCAParameters) -> Result<Self, Failed> {
PCA::fit(x, parameters)
}
}
/// Exposes PCA through the uniform `Transformer` API.
impl<T: RealNumber, M: Matrix<T>> Transformer<M> for PCA<T, M> {
// Thin delegation to the inherent `PCA::transform`.
fn transform(&self, x: &M) -> Result<M, Failed> {
self.transform(x)
}
}
impl<T: RealNumber, M: Matrix<T>> PCA<T, M> {
/// Fits PCA to your data.
/// * `data` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
/// * `n_components` - number of components to keep.
/// * `parameters` - other parameters, use `Default::default()` to set parameters to default values.
pub fn fit(
data: &M,
n_components: usize,
parameters: PCAParameters,
) -> Result<PCA<T, M>, Failed> {
pub fn fit(data: &M, parameters: PCAParameters) -> Result<PCA<T, M>, Failed> {
let (m, n) = data.shape();
if n_components > n {
if parameters.n_components > n {
return Err(Failed::fit(&format!(
"Number of components, n_components should be <= number of attributes ({})",
n
@@ -196,16 +213,16 @@ impl<T: RealNumber, M: Matrix<T>> PCA<T, M> {
}
}
let mut projection = M::zeros(n_components, n);
let mut projection = M::zeros(parameters.n_components, n);
for i in 0..n {
for j in 0..n_components {
for j in 0..parameters.n_components {
projection.set(j, i, eigenvectors.get(i, j));
}
}
let mut pmu = vec![T::zero(); n_components];
let mut pmu = vec![T::zero(); parameters.n_components];
for (k, mu_k) in mu.iter().enumerate().take(n) {
for (i, pmu_i) in pmu.iter_mut().enumerate().take(n_components) {
for (i, pmu_i) in pmu.iter_mut().enumerate().take(parameters.n_components) {
*pmu_i += projection.get(i, k) * (*mu_k);
}
}
@@ -318,7 +335,7 @@ mod tests {
&[0.0752, 0.2007],
]);
let pca = PCA::fit(&us_arrests, 2, Default::default()).unwrap();
let pca = PCA::fit(&us_arrests, Default::default()).unwrap();
assert!(expected.approximate_eq(&pca.components().abs(), 0.4));
}
@@ -414,7 +431,7 @@ mod tests {
302.04806302399646,
];
let pca = PCA::fit(&us_arrests, 4, Default::default()).unwrap();
let pca = PCA::fit(&us_arrests, PCAParameters::default().with_n_components(4)).unwrap();
assert!(pca
.eigenvectors
@@ -525,10 +542,9 @@ mod tests {
let pca = PCA::fit(
&us_arrests,
4,
PCAParameters {
use_correlation_matrix: true,
},
PCAParameters::default()
.with_n_components(4)
.with_use_correlation_matrix(true),
)
.unwrap();
@@ -573,7 +589,7 @@ mod tests {
&[5.2, 2.7, 3.9, 1.4],
]);
let pca = PCA::fit(&iris, 4, Default::default()).unwrap();
let pca = PCA::fit(&iris, Default::default()).unwrap();
let deserialized_pca: PCA<f64, DenseMatrix<f64>> =
serde_json::from_str(&serde_json::to_string(&pca).unwrap()).unwrap();
+32 -8
View File
@@ -34,7 +34,7 @@
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//!
//! let svd = SVD::fit(&iris, 2, Default::default()).unwrap(); // Reduce number of features to 2
//! let svd = SVD::fit(&iris, SVDParameters::default().with_n_components(2)).unwrap(); // Reduce number of features to 2
//!
//! let iris_reduced = svd.transform(&iris).unwrap();
//!
@@ -47,6 +47,7 @@ use std::marker::PhantomData;
use serde::{Deserialize, Serialize};
use crate::api::{Transformer, UnsupervisedEstimator};
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
@@ -67,11 +68,34 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for SVD<T, M> {
#[derive(Debug, Clone)]
/// SVD parameters
pub struct SVDParameters {}
pub struct SVDParameters {
/// Number of components to keep.
pub n_components: usize,
}
impl Default for SVDParameters {
fn default() -> Self {
SVDParameters {}
SVDParameters { n_components: 2 }
}
}
impl SVDParameters {
/// Number of components to keep.
pub fn with_n_components(mut self, n_components: usize) -> Self {
self.n_components = n_components;
self
}
}
impl<T: RealNumber, M: Matrix<T>> UnsupervisedEstimator<M, SVDParameters> for SVD<T, M> {
fn fit(x: &M, parameters: SVDParameters) -> Result<Self, Failed> {
SVD::fit(x, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Transformer<M> for SVD<T, M> {
fn transform(&self, x: &M) -> Result<M, Failed> {
self.transform(x)
}
}
@@ -80,10 +104,10 @@ impl<T: RealNumber, M: Matrix<T>> SVD<T, M> {
/// * `data` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
/// * `n_components` - number of components to keep.
/// * `parameters` - other parameters, use `Default::default()` to set parameters to default values.
pub fn fit(x: &M, n_components: usize, _: SVDParameters) -> Result<SVD<T, M>, Failed> {
pub fn fit(x: &M, parameters: SVDParameters) -> Result<SVD<T, M>, Failed> {
let (_, p) = x.shape();
if n_components >= p {
if parameters.n_components >= p {
return Err(Failed::fit(&format!(
"Number of components, n_components should be < number of attributes ({})",
p
@@ -92,7 +116,7 @@ impl<T: RealNumber, M: Matrix<T>> SVD<T, M> {
let svd = x.svd()?;
let components = svd.V.slice(0..p, 0..n_components);
let components = svd.V.slice(0..p, 0..parameters.n_components);
Ok(SVD {
components,
@@ -189,7 +213,7 @@ mod tests {
&[197.28420365, -11.66808306],
&[293.43187394, 1.91163633],
]);
let svd = SVD::fit(&x, 2, Default::default()).unwrap();
let svd = SVD::fit(&x, Default::default()).unwrap();
let x_transformed = svd.transform(&x).unwrap();
@@ -225,7 +249,7 @@ mod tests {
&[5.2, 2.7, 3.9, 1.4],
]);
let svd = SVD::fit(&iris, 2, Default::default()).unwrap();
let svd = SVD::fit(&iris, Default::default()).unwrap();
let deserialized_svd: SVD<f64, DenseMatrix<f64>> =
serde_json::from_str(&serde_json::to_string(&svd).unwrap()).unwrap();
+14 -1
View File
@@ -51,7 +51,7 @@ use std::fmt::Debug;
use rand::Rng;
use serde::{Deserialize, Serialize};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
@@ -151,6 +151,19 @@ impl Default for RandomForestClassifierParameters {
}
}
impl<T: RealNumber, M: Matrix<T>>
SupervisedEstimator<M, M::RowVector, RandomForestClassifierParameters>
for RandomForestClassifier<T>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: RandomForestClassifierParameters,
) -> Result<Self, Failed> {
RandomForestClassifier::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for RandomForestClassifier<T> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+14 -1
View File
@@ -49,7 +49,7 @@ use std::fmt::Debug;
use rand::Rng;
use serde::{Deserialize, Serialize};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
@@ -135,6 +135,19 @@ impl<T: RealNumber> PartialEq for RandomForestRegressor<T> {
}
}
impl<T: RealNumber, M: Matrix<T>>
SupervisedEstimator<M, M::RowVector, RandomForestRegressorParameters>
for RandomForestRegressor<T>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: RandomForestRegressorParameters,
) -> Result<Self, Failed> {
RandomForestRegressor::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for RandomForestRegressor<T> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+1 -1
View File
@@ -71,7 +71,7 @@
/// Various algorithms and helper methods that are used elsewhere in SmartCore
pub mod algorithm;
pub(crate) mod base;
pub mod api;
/// Algorithms for clustering of unlabeled data
pub mod cluster;
/// Various datasets
+9 -1
View File
@@ -58,7 +58,7 @@ use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::BaseVector;
use crate::linalg::Matrix;
@@ -139,6 +139,14 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for ElasticNet<T, M> {
}
}
impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, ElasticNetParameters<T>>
for ElasticNet<T, M>
{
fn fit(x: &M, y: &M::RowVector, parameters: ElasticNetParameters<T>) -> Result<Self, Failed> {
ElasticNet::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for ElasticNet<T, M> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+9 -1
View File
@@ -26,7 +26,7 @@ use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::BaseVector;
use crate::linalg::Matrix;
@@ -95,6 +95,14 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for Lasso<T, M> {
}
}
impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, LassoParameters<T>>
for Lasso<T, M>
{
fn fit(x: &M, y: &M::RowVector, parameters: LassoParameters<T>) -> Result<Self, Failed> {
Lasso::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for Lasso<T, M> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+13 -1
View File
@@ -64,7 +64,7 @@ use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
@@ -116,6 +116,18 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for LinearRegression<T, M> {
}
}
impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, LinearRegressionParameters>
for LinearRegression<T, M>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: LinearRegressionParameters,
) -> Result<Self, Failed> {
LinearRegression::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for LinearRegression<T, M> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+13 -1
View File
@@ -58,7 +58,7 @@ use std::marker::PhantomData;
use serde::{Deserialize, Serialize};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
@@ -218,6 +218,18 @@ impl<'a, T: RealNumber, M: Matrix<T>> ObjectiveFunction<T, M>
}
}
impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, LogisticRegressionParameters>
for LogisticRegression<T, M>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: LogisticRegressionParameters,
) -> Result<Self, Failed> {
LogisticRegression::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for LogisticRegression<T, M> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+13 -1
View File
@@ -60,7 +60,7 @@ use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::BaseVector;
use crate::linalg::Matrix;
@@ -130,6 +130,18 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for RidgeRegression<T, M> {
}
}
impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, RidgeRegressionParameters<T>>
for RidgeRegression<T, M>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: RidgeRegressionParameters<T>,
) -> Result<Self, Failed> {
RidgeRegression::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for RidgeRegression<T, M> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+1 -1
View File
@@ -9,7 +9,7 @@
//!
//! In SmartCore you can split your data into training and test datasets using `train_test_split` function.
use crate::base::Predictor;
use crate::api::Predictor;
use crate::error::Failed;
use crate::linalg::BaseVector;
use crate::linalg::Matrix;
+9 -1
View File
@@ -33,7 +33,7 @@
//! ## References:
//!
//! * ["Introduction to Information Retrieval", Manning C. D., Raghavan P., Schutze H., 2009, Chapter 13 ](https://nlp.stanford.edu/IR-book/information-retrieval-book.html)
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::row_iter;
use crate::linalg::BaseVector;
@@ -208,6 +208,14 @@ pub struct BernoulliNB<T: RealNumber, M: Matrix<T>> {
binarize: Option<T>,
}
impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, BernoulliNBParameters<T>>
for BernoulliNB<T, M>
{
fn fit(x: &M, y: &M::RowVector, parameters: BernoulliNBParameters<T>) -> Result<Self, Failed> {
BernoulliNB::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for BernoulliNB<T, M> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+13 -1
View File
@@ -30,7 +30,7 @@
//! let nb = CategoricalNB::fit(&x, &y, Default::default()).unwrap();
//! let y_hat = nb.predict(&x).unwrap();
//! ```
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::BaseVector;
use crate::linalg::Matrix;
@@ -242,6 +242,18 @@ pub struct CategoricalNB<T: RealNumber, M: Matrix<T>> {
inner: BaseNaiveBayes<T, M, CategoricalNBDistribution<T>>,
}
impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, CategoricalNBParameters<T>>
for CategoricalNB<T, M>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: CategoricalNBParameters<T>,
) -> Result<Self, Failed> {
CategoricalNB::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for CategoricalNB<T, M> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+9 -1
View File
@@ -22,7 +22,7 @@
//! let nb = GaussianNB::fit(&x, &y, Default::default()).unwrap();
//! let y_hat = nb.predict(&x).unwrap();
//! ```
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::row_iter;
use crate::linalg::BaseVector;
@@ -183,6 +183,14 @@ pub struct GaussianNB<T: RealNumber, M: Matrix<T>> {
inner: BaseNaiveBayes<T, M, GaussianNBDistribution<T>>,
}
impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, GaussianNBParameters<T>>
for GaussianNB<T, M>
{
fn fit(x: &M, y: &M::RowVector, parameters: GaussianNBParameters<T>) -> Result<Self, Failed> {
GaussianNB::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for GaussianNB<T, M> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+13 -1
View File
@@ -33,7 +33,7 @@
//! ## References:
//!
//! * ["Introduction to Information Retrieval", Manning C. D., Raghavan P., Schutze H., 2009, Chapter 13 ](https://nlp.stanford.edu/IR-book/information-retrieval-book.html)
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::row_iter;
use crate::linalg::BaseVector;
@@ -194,6 +194,18 @@ pub struct MultinomialNB<T: RealNumber, M: Matrix<T>> {
inner: BaseNaiveBayes<T, M, MultinomialNBDistribution<T>>,
}
impl<T: RealNumber, M: Matrix<T>> SupervisedEstimator<M, M::RowVector, MultinomialNBParameters<T>>
for MultinomialNB<T, M>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: MultinomialNBParameters<T>,
) -> Result<Self, Failed> {
MultinomialNB::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for MultinomialNB<T, M> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+13 -1
View File
@@ -36,7 +36,7 @@ use std::marker::PhantomData;
use serde::{Deserialize, Serialize};
use crate::algorithm::neighbour::{KNNAlgorithm, KNNAlgorithmName};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::{row_iter, Matrix};
use crate::math::distance::euclidian::Euclidian;
@@ -139,6 +139,18 @@ impl<T: RealNumber, D: Distance<Vec<T>, T>> PartialEq for KNNClassifier<T, D> {
}
}
impl<T: RealNumber, M: Matrix<T>, D: Distance<Vec<T>, T>>
SupervisedEstimator<M, M::RowVector, KNNClassifierParameters<T, D>> for KNNClassifier<T, D>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: KNNClassifierParameters<T, D>,
) -> Result<Self, Failed> {
KNNClassifier::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>, D: Distance<Vec<T>, T>> Predictor<M, M::RowVector>
for KNNClassifier<T, D>
{
+13 -1
View File
@@ -39,7 +39,7 @@ use std::marker::PhantomData;
use serde::{Deserialize, Serialize};
use crate::algorithm::neighbour::{KNNAlgorithm, KNNAlgorithmName};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::{row_iter, BaseVector, Matrix};
use crate::math::distance::euclidian::Euclidian;
@@ -133,6 +133,18 @@ impl<T: RealNumber, D: Distance<Vec<T>, T>> PartialEq for KNNRegressor<T, D> {
}
}
impl<T: RealNumber, M: Matrix<T>, D: Distance<Vec<T>, T>>
SupervisedEstimator<M, M::RowVector, KNNRegressorParameters<T, D>> for KNNRegressor<T, D>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: KNNRegressorParameters<T, D>,
) -> Result<Self, Failed> {
KNNRegressor::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>, D: Distance<Vec<T>, T>> Predictor<M, M::RowVector>
for KNNRegressor<T, D>
{
+9 -1
View File
@@ -78,7 +78,7 @@ use rand::seq::SliceRandom;
use serde::{Deserialize, Serialize};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::BaseVector;
use crate::linalg::Matrix;
@@ -185,6 +185,14 @@ impl<T: RealNumber, M: Matrix<T>> Default for SVCParameters<T, M, LinearKernel>
}
}
impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>>
SupervisedEstimator<M, M::RowVector, SVCParameters<T, M, K>> for SVC<T, M, K>
{
fn fit(x: &M, y: &M::RowVector, parameters: SVCParameters<T, M, K>) -> Result<Self, Failed> {
SVC::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Predictor<M, M::RowVector>
for SVC<T, M, K>
{
+9 -1
View File
@@ -70,7 +70,7 @@ use std::marker::PhantomData;
use serde::{Deserialize, Serialize};
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::BaseVector;
use crate::linalg::Matrix;
@@ -174,6 +174,14 @@ impl<T: RealNumber, M: Matrix<T>> Default for SVRParameters<T, M, LinearKernel>
}
}
impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>>
SupervisedEstimator<M, M::RowVector, SVRParameters<T, M, K>> for SVR<T, M, K>
{
fn fit(x: &M, y: &M::RowVector, parameters: SVRParameters<T, M, K>) -> Result<Self, Failed> {
SVR::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Predictor<M, M::RowVector>
for SVR<T, M, K>
{
+14 -1
View File
@@ -71,7 +71,7 @@ use rand::seq::SliceRandom;
use serde::{Deserialize, Serialize};
use crate::algorithm::sort::quick_sort::QuickArgSort;
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
@@ -293,6 +293,19 @@ pub(in crate) fn which_max(x: &[usize]) -> usize {
which
}
impl<T: RealNumber, M: Matrix<T>>
SupervisedEstimator<M, M::RowVector, DecisionTreeClassifierParameters>
for DecisionTreeClassifier<T>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: DecisionTreeClassifierParameters,
) -> Result<Self, Failed> {
DecisionTreeClassifier::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for DecisionTreeClassifier<T> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)
+14 -1
View File
@@ -66,7 +66,7 @@ use rand::seq::SliceRandom;
use serde::{Deserialize, Serialize};
use crate::algorithm::sort::quick_sort::QuickArgSort;
use crate::base::Predictor;
use crate::api::{Predictor, SupervisedEstimator};
use crate::error::Failed;
use crate::linalg::Matrix;
use crate::math::num::RealNumber;
@@ -208,6 +208,19 @@ impl<'a, T: RealNumber, M: Matrix<T>> NodeVisitor<'a, T, M> {
}
}
impl<T: RealNumber, M: Matrix<T>>
SupervisedEstimator<M, M::RowVector, DecisionTreeRegressorParameters>
for DecisionTreeRegressor<T>
{
fn fit(
x: &M,
y: &M::RowVector,
parameters: DecisionTreeRegressorParameters,
) -> Result<Self, Failed> {
DecisionTreeRegressor::fit(x, y, parameters)
}
}
impl<T: RealNumber, M: Matrix<T>> Predictor<M, M::RowVector> for DecisionTreeRegressor<T> {
fn predict(&self, x: &M) -> Result<M::RowVector, Failed> {
self.predict(x)