fix: renames FloatExt to RealNumber

This commit is contained in:
Volodymyr Orlov
2020-08-29 20:17:01 -07:00
parent 8705867386
commit fa0918cee3
43 changed files with 238 additions and 208 deletions
+5 -5
View File
@@ -2,17 +2,17 @@ use std::fmt::Debug;
use crate::linalg::Matrix;
use crate::math::distance::euclidian::*;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Debug)]
pub struct BBDTree<T: FloatExt> {
pub struct BBDTree<T: RealNumber> {
nodes: Vec<BBDTreeNode<T>>,
index: Vec<usize>,
root: usize,
}
#[derive(Debug)]
struct BBDTreeNode<T: FloatExt> {
struct BBDTreeNode<T: RealNumber> {
count: usize,
index: usize,
center: Vec<T>,
@@ -23,7 +23,7 @@ struct BBDTreeNode<T: FloatExt> {
upper: Option<usize>,
}
impl<T: FloatExt> BBDTreeNode<T> {
impl<T: RealNumber> BBDTreeNode<T> {
fn new(d: usize) -> BBDTreeNode<T> {
BBDTreeNode {
count: 0,
@@ -38,7 +38,7 @@ impl<T: FloatExt> BBDTreeNode<T> {
}
}
impl<T: FloatExt> BBDTree<T> {
impl<T: RealNumber> BBDTree<T> {
pub fn new<M: Matrix<T>>(data: &M) -> BBDTree<T> {
let nodes = Vec::new();
+3 -3
View File
@@ -30,11 +30,11 @@ use serde::{Deserialize, Serialize};
use crate::algorithm::sort::heap_select::HeapSelect;
use crate::math::distance::Distance;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
/// Implements Cover Tree algorithm
#[derive(Serialize, Deserialize, Debug)]
pub struct CoverTree<T, F: FloatExt, D: Distance<T, F>> {
pub struct CoverTree<T, F: RealNumber, D: Distance<T, F>> {
base: F,
max_level: i8,
min_level: i8,
@@ -42,7 +42,7 @@ pub struct CoverTree<T, F: FloatExt, D: Distance<T, F>> {
nodes: Vec<Node<T>>,
}
impl<T: Debug, F: FloatExt, D: Distance<T, F>> CoverTree<T, F, D> {
impl<T: Debug, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D> {
/// Construct a cover tree.
/// * `data` - vector of data points to search for.
/// * `distance` - distance metric to use for searching. This function should extend [`Distance`](../algorithm/neighbour/index.html) interface.
+7 -7
View File
@@ -27,17 +27,17 @@ use std::marker::PhantomData;
use crate::algorithm::sort::heap_select::HeapSelect;
use crate::math::distance::Distance;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
/// Implements Linear Search algorithm, see [KNN algorithms](../index.html)
#[derive(Serialize, Deserialize, Debug)]
pub struct LinearKNNSearch<T, F: FloatExt, D: Distance<T, F>> {
pub struct LinearKNNSearch<T, F: RealNumber, D: Distance<T, F>> {
distance: D,
data: Vec<T>,
f: PhantomData<F>,
}
impl<T, F: FloatExt, D: Distance<T, F>> LinearKNNSearch<T, F, D> {
impl<T, F: RealNumber, D: Distance<T, F>> LinearKNNSearch<T, F, D> {
/// Initializes algorithm.
/// * `data` - vector of data points to search for.
/// * `distance` - distance metric to use for searching. This function should extend [`Distance`](../algorithm/neighbour/index.html) interface.
@@ -86,24 +86,24 @@ impl<T, F: FloatExt, D: Distance<T, F>> LinearKNNSearch<T, F, D> {
}
#[derive(Debug)]
struct KNNPoint<F: FloatExt> {
struct KNNPoint<F: RealNumber> {
distance: F,
index: Option<usize>,
}
impl<F: FloatExt> PartialOrd for KNNPoint<F> {
impl<F: RealNumber> PartialOrd for KNNPoint<F> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.distance.partial_cmp(&other.distance)
}
}
impl<F: FloatExt> PartialEq for KNNPoint<F> {
impl<F: RealNumber> PartialEq for KNNPoint<F> {
fn eq(&self, other: &Self) -> bool {
self.distance == other.distance
}
}
impl<F: FloatExt> Eq for KNNPoint<F> {}
impl<F: RealNumber> Eq for KNNPoint<F> {}
#[cfg(test)]
mod tests {
+4 -4
View File
@@ -9,10 +9,10 @@ use serde::{Deserialize, Serialize};
use crate::algorithm::neighbour::bbd_tree::BBDTree;
use crate::linalg::Matrix;
use crate::math::distance::euclidian::*;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct KMeans<T: FloatExt> {
pub struct KMeans<T: RealNumber> {
k: usize,
y: Vec<usize>,
size: Vec<usize>,
@@ -20,7 +20,7 @@ pub struct KMeans<T: FloatExt> {
centroids: Vec<Vec<T>>,
}
impl<T: FloatExt> PartialEq for KMeans<T> {
impl<T: RealNumber> PartialEq for KMeans<T> {
fn eq(&self, other: &Self) -> bool {
if self.k != other.k
|| self.size != other.size
@@ -55,7 +55,7 @@ impl Default for KMeansParameters {
}
}
impl<T: FloatExt + Sum> KMeans<T> {
impl<T: RealNumber + Sum> KMeans<T> {
pub fn new<M: Matrix<T>>(data: &M, k: usize, parameters: KMeansParameters) -> KMeans<T> {
let bbd = BBDTree::new(data);
+4 -4
View File
@@ -3,10 +3,10 @@ use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct PCA<T: FloatExt, M: Matrix<T>> {
pub struct PCA<T: RealNumber, M: Matrix<T>> {
eigenvectors: M,
eigenvalues: Vec<T>,
projection: M,
@@ -14,7 +14,7 @@ pub struct PCA<T: FloatExt, M: Matrix<T>> {
pmu: Vec<T>,
}
impl<T: FloatExt, M: Matrix<T>> PartialEq for PCA<T, M> {
impl<T: RealNumber, M: Matrix<T>> PartialEq for PCA<T, M> {
fn eq(&self, other: &Self) -> bool {
if self.eigenvectors != other.eigenvectors
|| self.eigenvalues.len() != other.eigenvalues.len()
@@ -44,7 +44,7 @@ impl Default for PCAParameters {
}
}
impl<T: FloatExt, M: Matrix<T>> PCA<T, M> {
impl<T: RealNumber, M: Matrix<T>> PCA<T, M> {
pub fn new(data: &M, n_components: usize, parameters: PCAParameters) -> PCA<T, M> {
let (m, n) = data.shape();
+4 -4
View File
@@ -7,7 +7,7 @@ use rand::Rng;
use serde::{Deserialize, Serialize};
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use crate::tree::decision_tree_classifier::{
which_max, DecisionTreeClassifier, DecisionTreeClassifierParameters, SplitCriterion,
};
@@ -23,13 +23,13 @@ pub struct RandomForestClassifierParameters {
}
#[derive(Serialize, Deserialize, Debug)]
pub struct RandomForestClassifier<T: FloatExt> {
pub struct RandomForestClassifier<T: RealNumber> {
parameters: RandomForestClassifierParameters,
trees: Vec<DecisionTreeClassifier<T>>,
classes: Vec<T>,
}
impl<T: FloatExt> PartialEq for RandomForestClassifier<T> {
impl<T: RealNumber> PartialEq for RandomForestClassifier<T> {
fn eq(&self, other: &Self) -> bool {
if self.classes.len() != other.classes.len() || self.trees.len() != other.trees.len() {
return false;
@@ -62,7 +62,7 @@ impl Default for RandomForestClassifierParameters {
}
}
impl<T: FloatExt> RandomForestClassifier<T> {
impl<T: RealNumber> RandomForestClassifier<T> {
pub fn fit<M: Matrix<T>>(
x: &M,
y: &M::RowVector,
+4 -4
View File
@@ -7,7 +7,7 @@ use rand::Rng;
use serde::{Deserialize, Serialize};
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use crate::tree::decision_tree_regressor::{
DecisionTreeRegressor, DecisionTreeRegressorParameters,
};
@@ -22,7 +22,7 @@ pub struct RandomForestRegressorParameters {
}
#[derive(Serialize, Deserialize, Debug)]
pub struct RandomForestRegressor<T: FloatExt> {
pub struct RandomForestRegressor<T: RealNumber> {
parameters: RandomForestRegressorParameters,
trees: Vec<DecisionTreeRegressor<T>>,
}
@@ -39,7 +39,7 @@ impl Default for RandomForestRegressorParameters {
}
}
impl<T: FloatExt> PartialEq for RandomForestRegressor<T> {
impl<T: RealNumber> PartialEq for RandomForestRegressor<T> {
fn eq(&self, other: &Self) -> bool {
if self.trees.len() != other.trees.len() {
return false;
@@ -54,7 +54,7 @@ impl<T: FloatExt> PartialEq for RandomForestRegressor<T> {
}
}
impl<T: FloatExt> RandomForestRegressor<T> {
impl<T: RealNumber> RandomForestRegressor<T> {
pub fn fit<M: Matrix<T>>(
x: &M,
y: &M::RowVector,
+1 -1
View File
@@ -76,7 +76,7 @@ pub mod ensemble;
pub mod linalg;
/// Supervised classification and regression models that assume linear relationship between dependent and explanatory variables.
pub mod linear;
/// Multitude of helper methods and classes, including definitions of distance metrics
/// Helper methods and classes, including definitions of distance metrics
pub mod math;
/// Functions for assessing prediction error.
pub mod metrics;
+12 -12
View File
@@ -1,24 +1,24 @@
#![allow(non_snake_case)]
use crate::linalg::BaseMatrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use num::complex::Complex;
use std::fmt::Debug;
#[derive(Debug, Clone)]
pub struct EVD<T: FloatExt, M: BaseMatrix<T>> {
pub struct EVD<T: RealNumber, M: BaseMatrix<T>> {
pub d: Vec<T>,
pub e: Vec<T>,
pub V: M,
}
impl<T: FloatExt, M: BaseMatrix<T>> EVD<T, M> {
impl<T: RealNumber, M: BaseMatrix<T>> EVD<T, M> {
pub fn new(V: M, d: Vec<T>, e: Vec<T>) -> EVD<T, M> {
EVD { d: d, e: e, V: V }
}
}
pub trait EVDDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
pub trait EVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
fn evd(&self, symmetric: bool) -> EVD<T, Self> {
self.clone().evd_mut(symmetric)
}
@@ -58,7 +58,7 @@ pub trait EVDDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
}
}
fn tred2<T: FloatExt, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
fn tred2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
let (n, _) = V.shape();
for i in 0..n {
d[i] = V.get(n - 1, i);
@@ -161,7 +161,7 @@ fn tred2<T: FloatExt, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T
e[0] = T::zero();
}
fn tql2<T: FloatExt, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
fn tql2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
let (n, _) = V.shape();
for i in 1..n {
e[i - 1] = e[i];
@@ -277,7 +277,7 @@ fn tql2<T: FloatExt, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>
}
}
fn balance<T: FloatExt, M: BaseMatrix<T>>(A: &mut M) -> Vec<T> {
fn balance<T: RealNumber, M: BaseMatrix<T>>(A: &mut M) -> Vec<T> {
let radix = T::two();
let sqrdx = radix * radix;
@@ -330,7 +330,7 @@ fn balance<T: FloatExt, M: BaseMatrix<T>>(A: &mut M) -> Vec<T> {
return scale;
}
fn elmhes<T: FloatExt, M: BaseMatrix<T>>(A: &mut M) -> Vec<usize> {
fn elmhes<T: RealNumber, M: BaseMatrix<T>>(A: &mut M) -> Vec<usize> {
let (n, _) = A.shape();
let mut perm = vec![0; n];
@@ -376,7 +376,7 @@ fn elmhes<T: FloatExt, M: BaseMatrix<T>>(A: &mut M) -> Vec<usize> {
return perm;
}
fn eltran<T: FloatExt, M: BaseMatrix<T>>(A: &M, V: &mut M, perm: &Vec<usize>) {
fn eltran<T: RealNumber, M: BaseMatrix<T>>(A: &M, V: &mut M, perm: &Vec<usize>) {
let (n, _) = A.shape();
for mp in (1..n - 1).rev() {
for k in mp + 1..n {
@@ -393,7 +393,7 @@ fn eltran<T: FloatExt, M: BaseMatrix<T>>(A: &M, V: &mut M, perm: &Vec<usize>) {
}
}
fn hqr2<T: FloatExt, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
let (n, _) = A.shape();
let mut z = T::zero();
let mut s = T::zero();
@@ -748,7 +748,7 @@ fn hqr2<T: FloatExt, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e:
}
}
fn balbak<T: FloatExt, M: BaseMatrix<T>>(V: &mut M, scale: &Vec<T>) {
fn balbak<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, scale: &Vec<T>) {
let (n, _) = V.shape();
for i in 0..n {
for j in 0..n {
@@ -757,7 +757,7 @@ fn balbak<T: FloatExt, M: BaseMatrix<T>>(V: &mut M, scale: &Vec<T>) {
}
}
fn sort<T: FloatExt, M: BaseMatrix<T>>(d: &mut Vec<T>, e: &mut Vec<T>, V: &mut M) {
fn sort<T: RealNumber, M: BaseMatrix<T>>(d: &mut Vec<T>, e: &mut Vec<T>, V: &mut M) {
let n = d.len();
let mut temp = vec![T::zero(); n];
for j in 1..n {
+4 -4
View File
@@ -4,10 +4,10 @@ use std::fmt::Debug;
use std::marker::PhantomData;
use crate::linalg::BaseMatrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Debug, Clone)]
pub struct LU<T: FloatExt, M: BaseMatrix<T>> {
pub struct LU<T: RealNumber, M: BaseMatrix<T>> {
LU: M,
pivot: Vec<usize>,
pivot_sign: i8,
@@ -15,7 +15,7 @@ pub struct LU<T: FloatExt, M: BaseMatrix<T>> {
phantom: PhantomData<T>,
}
impl<T: FloatExt, M: BaseMatrix<T>> LU<T, M> {
impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
pub fn new(LU: M, pivot: Vec<usize>, pivot_sign: i8) -> LU<T, M> {
let (_, n) = LU.shape();
@@ -153,7 +153,7 @@ impl<T: FloatExt, M: BaseMatrix<T>> LU<T, M> {
}
}
pub trait LUDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
pub trait LUDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
fn lu(&self) -> LU<T, Self> {
self.clone().lu_mut()
}
+7 -7
View File
@@ -12,13 +12,13 @@ use std::fmt::{Debug, Display};
use std::marker::PhantomData;
use std::ops::Range;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use evd::EVDDecomposableMatrix;
use lu::LUDecomposableMatrix;
use qr::QRDecomposableMatrix;
use svd::SVDDecomposableMatrix;
pub trait BaseVector<T: FloatExt>: Clone + Debug {
pub trait BaseVector<T: RealNumber>: Clone + Debug {
fn get(&self, i: usize) -> T;
fn set(&mut self, i: usize, x: T);
@@ -28,7 +28,7 @@ pub trait BaseVector<T: FloatExt>: Clone + Debug {
fn to_vec(&self) -> Vec<T>;
}
pub trait BaseMatrix<T: FloatExt>: Clone + Debug {
pub trait BaseMatrix<T: RealNumber>: Clone + Debug {
type RowVector: BaseVector<T> + Clone + Debug;
fn from_row_vector(vec: Self::RowVector) -> Self;
@@ -190,7 +190,7 @@ pub trait BaseMatrix<T: FloatExt>: Clone + Debug {
fn cov(&self) -> Self;
}
pub trait Matrix<T: FloatExt>:
pub trait Matrix<T: RealNumber>:
BaseMatrix<T>
+ SVDDecomposableMatrix<T>
+ EVDDecomposableMatrix<T>
@@ -201,7 +201,7 @@ pub trait Matrix<T: FloatExt>:
{
}
pub fn row_iter<F: FloatExt, M: BaseMatrix<F>>(m: &M) -> RowIter<F, M> {
pub fn row_iter<F: RealNumber, M: BaseMatrix<F>>(m: &M) -> RowIter<F, M> {
RowIter {
m: m,
pos: 0,
@@ -210,14 +210,14 @@ pub fn row_iter<F: FloatExt, M: BaseMatrix<F>>(m: &M) -> RowIter<F, M> {
}
}
pub struct RowIter<'a, T: FloatExt, M: BaseMatrix<T>> {
pub struct RowIter<'a, T: RealNumber, M: BaseMatrix<T>> {
m: &'a M,
pos: usize,
max_pos: usize,
phantom: PhantomData<&'a T>,
}
impl<'a, T: FloatExt, M: BaseMatrix<T>> Iterator for RowIter<'a, T, M> {
impl<'a, T: RealNumber, M: BaseMatrix<T>> Iterator for RowIter<'a, T, M> {
type Item = Vec<T>;
fn next(&mut self) -> Option<Vec<T>> {
+17 -17
View File
@@ -14,9 +14,9 @@ use crate::linalg::qr::QRDecomposableMatrix;
use crate::linalg::svd::SVDDecomposableMatrix;
use crate::linalg::Matrix;
pub use crate::linalg::{BaseMatrix, BaseVector};
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
impl<T: FloatExt> BaseVector<T> for Vec<T> {
impl<T: RealNumber> BaseVector<T> for Vec<T> {
fn get(&self, i: usize) -> T {
self[i]
}
@@ -35,13 +35,13 @@ impl<T: FloatExt> BaseVector<T> for Vec<T> {
}
#[derive(Debug, Clone)]
pub struct DenseMatrix<T: FloatExt> {
pub struct DenseMatrix<T: RealNumber> {
ncols: usize,
nrows: usize,
values: Vec<T>,
}
impl<T: FloatExt> fmt::Display for DenseMatrix<T> {
impl<T: RealNumber> fmt::Display for DenseMatrix<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut rows: Vec<Vec<f64>> = Vec::new();
for r in 0..self.nrows {
@@ -56,7 +56,7 @@ impl<T: FloatExt> fmt::Display for DenseMatrix<T> {
}
}
impl<T: FloatExt> DenseMatrix<T> {
impl<T: RealNumber> DenseMatrix<T> {
fn new(nrows: usize, ncols: usize, values: Vec<T>) -> Self {
DenseMatrix {
ncols: ncols,
@@ -115,7 +115,7 @@ impl<T: FloatExt> DenseMatrix<T> {
}
}
impl<'de, T: FloatExt + fmt::Debug + Deserialize<'de>> Deserialize<'de> for DenseMatrix<T> {
impl<'de, T: RealNumber + fmt::Debug + Deserialize<'de>> Deserialize<'de> for DenseMatrix<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
@@ -128,11 +128,11 @@ impl<'de, T: FloatExt + fmt::Debug + Deserialize<'de>> Deserialize<'de> for Dens
Values,
}
struct DenseMatrixVisitor<T: FloatExt + fmt::Debug> {
struct DenseMatrixVisitor<T: RealNumber + fmt::Debug> {
t: PhantomData<T>,
}
impl<'a, T: FloatExt + fmt::Debug + Deserialize<'a>> Visitor<'a> for DenseMatrixVisitor<T> {
impl<'a, T: RealNumber + fmt::Debug + Deserialize<'a>> Visitor<'a> for DenseMatrixVisitor<T> {
type Value = DenseMatrix<T>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
@@ -200,7 +200,7 @@ impl<'de, T: FloatExt + fmt::Debug + Deserialize<'de>> Deserialize<'de> for Dens
}
}
impl<T: FloatExt + fmt::Debug + Serialize> Serialize for DenseMatrix<T> {
impl<T: RealNumber + fmt::Debug + Serialize> Serialize for DenseMatrix<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
@@ -214,17 +214,17 @@ impl<T: FloatExt + fmt::Debug + Serialize> Serialize for DenseMatrix<T> {
}
}
impl<T: FloatExt> SVDDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: RealNumber> SVDDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> EVDDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: RealNumber> EVDDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> QRDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: RealNumber> QRDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> LUDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: RealNumber> LUDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> Matrix<T> for DenseMatrix<T> {}
impl<T: RealNumber> Matrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> PartialEq for DenseMatrix<T> {
impl<T: RealNumber> PartialEq for DenseMatrix<T> {
fn eq(&self, other: &Self) -> bool {
if self.ncols != other.ncols || self.nrows != other.nrows {
return false;
@@ -247,13 +247,13 @@ impl<T: FloatExt> PartialEq for DenseMatrix<T> {
}
}
impl<T: FloatExt> Into<Vec<T>> for DenseMatrix<T> {
impl<T: RealNumber> Into<Vec<T>> for DenseMatrix<T> {
fn into(self) -> Vec<T> {
self.values
}
}
impl<T: FloatExt> BaseMatrix<T> for DenseMatrix<T> {
impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
type RowVector = Vec<T>;
fn from_row_vector(vec: Self::RowVector) -> Self {
+8 -8
View File
@@ -9,9 +9,9 @@ use crate::linalg::qr::QRDecomposableMatrix;
use crate::linalg::svd::SVDDecomposableMatrix;
use crate::linalg::Matrix as SmartCoreMatrix;
use crate::linalg::{BaseMatrix, BaseVector};
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
impl<T: FloatExt + 'static> BaseVector<T> for MatrixMN<T, U1, Dynamic> {
impl<T: RealNumber + 'static> BaseVector<T> for MatrixMN<T, U1, Dynamic> {
fn get(&self, i: usize) -> T {
*self.get((0, i)).unwrap()
}
@@ -28,7 +28,7 @@ impl<T: FloatExt + 'static> BaseVector<T> for MatrixMN<T, U1, Dynamic> {
}
}
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
BaseMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>>
{
type RowVector = MatrixMN<T, U1, Dynamic>;
@@ -340,27 +340,27 @@ impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum
}
}
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
SVDDecomposableMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>>
{
}
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
EVDDecomposableMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>>
{
}
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
QRDecomposableMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>>
{
}
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
LUDecomposableMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>>
{
}
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static>
SmartCoreMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>>
{
}
+8 -8
View File
@@ -14,9 +14,9 @@ use crate::linalg::qr::QRDecomposableMatrix;
use crate::linalg::svd::SVDDecomposableMatrix;
use crate::linalg::Matrix;
use crate::linalg::{BaseMatrix, BaseVector};
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
impl<T: FloatExt> BaseVector<T> for ArrayBase<OwnedRepr<T>, Ix1> {
impl<T: RealNumber> BaseVector<T> for ArrayBase<OwnedRepr<T>, Ix1> {
fn get(&self, i: usize) -> T {
self[i]
}
@@ -33,7 +33,7 @@ impl<T: FloatExt> BaseVector<T> for ArrayBase<OwnedRepr<T>, Ix1> {
}
}
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
BaseMatrix<T> for ArrayBase<OwnedRepr<T>, Ix2>
{
type RowVector = ArrayBase<OwnedRepr<T>, Ix1>;
@@ -308,27 +308,27 @@ impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign
}
}
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
SVDDecomposableMatrix<T> for ArrayBase<OwnedRepr<T>, Ix2>
{
}
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
EVDDecomposableMatrix<T> for ArrayBase<OwnedRepr<T>, Ix2>
{
}
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
QRDecomposableMatrix<T> for ArrayBase<OwnedRepr<T>, Ix2>
{
}
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum>
LUDecomposableMatrix<T> for ArrayBase<OwnedRepr<T>, Ix2>
{
}
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum> Matrix<T>
impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum> Matrix<T>
for ArrayBase<OwnedRepr<T>, Ix2>
{
}
+4 -4
View File
@@ -3,16 +3,16 @@
use std::fmt::Debug;
use crate::linalg::BaseMatrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Debug, Clone)]
pub struct QR<T: FloatExt, M: BaseMatrix<T>> {
pub struct QR<T: RealNumber, M: BaseMatrix<T>> {
QR: M,
tau: Vec<T>,
singular: bool,
}
impl<T: FloatExt, M: BaseMatrix<T>> QR<T, M> {
impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
pub fn new(QR: M, tau: Vec<T>) -> QR<T, M> {
let mut singular = false;
for j in 0..tau.len() {
@@ -112,7 +112,7 @@ impl<T: FloatExt, M: BaseMatrix<T>> QR<T, M> {
}
}
pub trait QRDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
pub trait QRDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
fn qr(&self) -> QR<T, Self> {
self.clone().qr_mut()
}
+4 -4
View File
@@ -1,11 +1,11 @@
#![allow(non_snake_case)]
use crate::linalg::BaseMatrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use std::fmt::Debug;
#[derive(Debug, Clone)]
pub struct SVD<T: FloatExt, M: SVDDecomposableMatrix<T>> {
pub struct SVD<T: RealNumber, M: SVDDecomposableMatrix<T>> {
pub U: M,
pub V: M,
pub s: Vec<T>,
@@ -15,7 +15,7 @@ pub struct SVD<T: FloatExt, M: SVDDecomposableMatrix<T>> {
tol: T,
}
pub trait SVDDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
fn svd_solve_mut(self, b: Self) -> Self {
self.svd_mut().solve(b)
}
@@ -367,7 +367,7 @@ pub trait SVDDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
}
}
impl<T: FloatExt, M: SVDDecomposableMatrix<T>> SVD<T, M> {
impl<T: RealNumber, M: SVDDecomposableMatrix<T>> SVD<T, M> {
pub fn new(U: M, V: M, s: Vec<T>) -> SVD<T, M> {
let m = U.shape().0;
let n = V.shape().0;
+4 -4
View File
@@ -3,7 +3,7 @@ use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub enum LinearRegressionSolverName {
@@ -17,7 +17,7 @@ pub struct LinearRegressionParameters {
}
#[derive(Serialize, Deserialize, Debug)]
pub struct LinearRegression<T: FloatExt, M: Matrix<T>> {
pub struct LinearRegression<T: RealNumber, M: Matrix<T>> {
coefficients: M,
intercept: T,
solver: LinearRegressionSolverName,
@@ -31,14 +31,14 @@ impl Default for LinearRegressionParameters {
}
}
impl<T: FloatExt, M: Matrix<T>> PartialEq for LinearRegression<T, M> {
impl<T: RealNumber, M: Matrix<T>> PartialEq for LinearRegression<T, M> {
fn eq(&self, other: &Self) -> bool {
self.coefficients == other.coefficients
&& (self.intercept - other.intercept).abs() <= T::epsilon()
}
}
impl<T: FloatExt, M: Matrix<T>> LinearRegression<T, M> {
impl<T: RealNumber, M: Matrix<T>> LinearRegression<T, M> {
pub fn fit(
x: &M,
y: &M::RowVector,
+9 -9
View File
@@ -4,21 +4,21 @@ use std::marker::PhantomData;
use serde::{Deserialize, Serialize};
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use crate::optimization::first_order::lbfgs::LBFGS;
use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult};
use crate::optimization::line_search::Backtracking;
use crate::optimization::FunctionOrder;
#[derive(Serialize, Deserialize, Debug)]
pub struct LogisticRegression<T: FloatExt, M: Matrix<T>> {
pub struct LogisticRegression<T: RealNumber, M: Matrix<T>> {
weights: M,
classes: Vec<T>,
num_attributes: usize,
num_classes: usize,
}
trait ObjectiveFunction<T: FloatExt, M: Matrix<T>> {
trait ObjectiveFunction<T: RealNumber, M: Matrix<T>> {
fn f(&self, w_bias: &M) -> T;
fn df(&self, g: &mut M, w_bias: &M);
@@ -33,13 +33,13 @@ trait ObjectiveFunction<T: FloatExt, M: Matrix<T>> {
}
}
struct BinaryObjectiveFunction<'a, T: FloatExt, M: Matrix<T>> {
struct BinaryObjectiveFunction<'a, T: RealNumber, M: Matrix<T>> {
x: &'a M,
y: Vec<usize>,
phantom: PhantomData<&'a T>,
}
impl<T: FloatExt, M: Matrix<T>> PartialEq for LogisticRegression<T, M> {
impl<T: RealNumber, M: Matrix<T>> PartialEq for LogisticRegression<T, M> {
fn eq(&self, other: &Self) -> bool {
if self.num_classes != other.num_classes
|| self.num_attributes != other.num_attributes
@@ -58,7 +58,7 @@ impl<T: FloatExt, M: Matrix<T>> PartialEq for LogisticRegression<T, M> {
}
}
impl<'a, T: FloatExt, M: Matrix<T>> ObjectiveFunction<T, M> for BinaryObjectiveFunction<'a, T, M> {
impl<'a, T: RealNumber, M: Matrix<T>> ObjectiveFunction<T, M> for BinaryObjectiveFunction<'a, T, M> {
fn f(&self, w_bias: &M) -> T {
let mut f = T::zero();
let (n, _) = self.x.shape();
@@ -88,14 +88,14 @@ impl<'a, T: FloatExt, M: Matrix<T>> ObjectiveFunction<T, M> for BinaryObjectiveF
}
}
struct MultiClassObjectiveFunction<'a, T: FloatExt, M: Matrix<T>> {
struct MultiClassObjectiveFunction<'a, T: RealNumber, M: Matrix<T>> {
x: &'a M,
y: Vec<usize>,
k: usize,
phantom: PhantomData<&'a T>,
}
impl<'a, T: FloatExt, M: Matrix<T>> ObjectiveFunction<T, M>
impl<'a, T: RealNumber, M: Matrix<T>> ObjectiveFunction<T, M>
for MultiClassObjectiveFunction<'a, T, M>
{
fn f(&self, w_bias: &M) -> T {
@@ -147,7 +147,7 @@ impl<'a, T: FloatExt, M: Matrix<T>> ObjectiveFunction<T, M>
}
}
impl<T: FloatExt, M: Matrix<T>> LogisticRegression<T, M> {
impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
pub fn fit(x: &M, y: &M::RowVector) -> LogisticRegression<T, M> {
let y_m = M::from_row_vector(y.clone());
let (x_nrows, num_attributes) = x.shape();
+3 -3
View File
@@ -1,6 +1,6 @@
use serde::{Deserialize, Serialize};
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use super::Distance;
@@ -8,7 +8,7 @@ use super::Distance;
pub struct Euclidian {}
impl Euclidian {
pub fn squared_distance<T: FloatExt>(x: &Vec<T>, y: &Vec<T>) -> T {
pub fn squared_distance<T: RealNumber>(x: &Vec<T>, y: &Vec<T>) -> T {
if x.len() != y.len() {
panic!("Input vector sizes are different.");
}
@@ -22,7 +22,7 @@ impl Euclidian {
}
}
impl<T: FloatExt> Distance<Vec<T>, T> for Euclidian {
impl<T: RealNumber> Distance<Vec<T>, T> for Euclidian {
fn distance(&self, x: &Vec<T>, y: &Vec<T>) -> T {
Euclidian::squared_distance(x, y).sqrt()
}
+2 -2
View File
@@ -1,13 +1,13 @@
use serde::{Deserialize, Serialize};
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use super::Distance;
#[derive(Serialize, Deserialize, Debug)]
pub struct Hamming {}
impl<T: PartialEq, F: FloatExt> Distance<Vec<T>, F> for Hamming {
impl<T: PartialEq, F: RealNumber> Distance<Vec<T>, F> for Hamming {
fn distance(&self, x: &Vec<T>, y: &Vec<T>) -> F {
if x.len() != y.len() {
panic!("Input vector sizes are different");
+4 -4
View File
@@ -4,19 +4,19 @@ use std::marker::PhantomData;
use serde::{Deserialize, Serialize};
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use super::Distance;
use crate::linalg::Matrix;
#[derive(Serialize, Deserialize, Debug)]
pub struct Mahalanobis<T: FloatExt, M: Matrix<T>> {
pub struct Mahalanobis<T: RealNumber, M: Matrix<T>> {
pub sigma: M,
pub sigmaInv: M,
t: PhantomData<T>,
}
impl<T: FloatExt, M: Matrix<T>> Mahalanobis<T, M> {
impl<T: RealNumber, M: Matrix<T>> Mahalanobis<T, M> {
pub fn new(data: &M) -> Mahalanobis<T, M> {
let sigma = data.cov();
let sigmaInv = sigma.lu().inverse();
@@ -38,7 +38,7 @@ impl<T: FloatExt, M: Matrix<T>> Mahalanobis<T, M> {
}
}
impl<T: FloatExt, M: Matrix<T>> Distance<Vec<T>, T> for Mahalanobis<T, M> {
impl<T: RealNumber, M: Matrix<T>> Distance<Vec<T>, T> for Mahalanobis<T, M> {
fn distance(&self, x: &Vec<T>, y: &Vec<T>) -> T {
let (nrows, ncols) = self.sigma.shape();
if x.len() != nrows {
+2 -2
View File
@@ -1,13 +1,13 @@
use serde::{Deserialize, Serialize};
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use super::Distance;
#[derive(Serialize, Deserialize, Debug)]
pub struct Manhattan {}
impl<T: FloatExt> Distance<Vec<T>, T> for Manhattan {
impl<T: RealNumber> Distance<Vec<T>, T> for Manhattan {
fn distance(&self, x: &Vec<T>, y: &Vec<T>) -> T {
if x.len() != y.len() {
panic!("Input vector sizes are different");
+13 -11
View File
@@ -1,30 +1,32 @@
use serde::{Deserialize, Serialize};
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use super::Distance;
#[derive(Serialize, Deserialize, Debug)]
pub struct Minkowski<T: FloatExt> {
pub p: T,
pub struct Minkowski {
pub p: u16,
}
impl<T: FloatExt> Distance<Vec<T>, T> for Minkowski<T> {
impl<T: RealNumber> Distance<Vec<T>, T> for Minkowski {
fn distance(&self, x: &Vec<T>, y: &Vec<T>) -> T {
if x.len() != y.len() {
panic!("Input vector sizes are different");
}
if self.p < T::one() {
if self.p < 1 {
panic!("p must be at least 1");
}
let mut dist = T::zero();
let p_t = T::from_u16(self.p).unwrap();
for i in 0..x.len() {
let d = (x[i] - y[i]).abs();
dist = dist + d.powf(self.p);
dist = dist + d.powf(p_t);
}
dist.powf(T::one() / self.p)
dist.powf(T::one() / p_t)
}
}
@@ -37,9 +39,9 @@ mod tests {
let a = vec![1., 2., 3.];
let b = vec![4., 5., 6.];
let l1: f64 = Minkowski { p: 1.0 }.distance(&a, &b);
let l2: f64 = Minkowski { p: 2.0 }.distance(&a, &b);
let l3: f64 = Minkowski { p: 3.0 }.distance(&a, &b);
let l1: f64 = Minkowski { p: 1 }.distance(&a, &b);
let l2: f64 = Minkowski { p: 2 }.distance(&a, &b);
let l3: f64 = Minkowski { p: 3 }.distance(&a, &b);
assert!((l1 - 9.0).abs() < 1e-8);
assert!((l2 - 5.19615242).abs() < 1e-8);
@@ -52,6 +54,6 @@ mod tests {
let a = vec![1., 2., 3.];
let b = vec![4., 5., 6.];
let _: f64 = Minkowski { p: 0.0 }.distance(&a, &b);
let _: f64 = Minkowski { p: 0 }.distance(&a, &b);
}
}
+30 -3
View File
@@ -1,30 +1,57 @@
//! # Collection of Distance Functions
//!
//! Many algorithms in machine learning require a measure of distance between data points. Distance metric (or metric) is a function that defines a distance between a pair of point elements of a set.
//! Formally, the distance can be any metric measure that is defined as \\( d(x, y) \geq 0\\) and follows three conditions:
//! 1. \\( d(x, y) = 0 \\) if and only if \\( x = y \\), positive definiteness
//! 1. \\( d(x, y) = d(y, x) \\), symmetry
//! 1. \\( d(x, y) \leq d(x, z) + d(z, y) \\), subadditivity or triangle inequality
//!
//! for all \\(x, y, z \in Z \\)
//!
//! A good distance metric helps to improve the performance of classification, clustering and information retrieval algorithms significantly.
//!
//! <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS_CHTML"></script>
/// Euclidean Distance is the straight-line distance between two points in Euclidean space that represents the shortest distance between these points.
pub mod euclidian;
/// Hamming Distance between two strings is the number of positions at which the corresponding symbols are different.
pub mod hamming;
/// The Mahalanobis distance is the distance between two points in multivariate space.
pub mod mahalanobis;
/// Also known as rectilinear distance, city block distance, taxicab metric.
pub mod manhattan;
/// A generalization of both the Euclidean distance and the Manhattan distance.
pub mod minkowski;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
pub trait Distance<T, F: FloatExt> {
/// Distance metric, a function that calculates distance between two points
pub trait Distance<T, F: RealNumber> {
/// Calculates distance between _a_ and _b_
fn distance(&self, a: &T, b: &T) -> F;
}
/// Multitude of distance metric functions
pub struct Distances {}
impl Distances {
/// Euclidean distance
pub fn euclidian() -> euclidian::Euclidian {
euclidian::Euclidian {}
}
pub fn minkowski<T: FloatExt>(p: T) -> minkowski::Minkowski<T> {
/// Minkowski distance
/// * `p` - function order. Should be >= 1
pub fn minkowski(p: u16) -> minkowski::Minkowski {
minkowski::Minkowski { p: p }
}
/// Manhattan distance
pub fn manhattan() -> manhattan::Manhattan {
manhattan::Manhattan {}
}
/// Hamming distance
pub fn hamming() -> hamming::Hamming {
hamming::Hamming {}
}
+2 -1
View File
@@ -1,2 +1,3 @@
/// Multitude of distance metrics are defined here
pub mod distance;
pub(crate) mod num;
pub mod num;
+3 -3
View File
@@ -3,7 +3,7 @@ use rand::prelude::*;
use std::fmt::{Debug, Display};
use std::iter::{Product, Sum};
pub trait FloatExt: Float + FromPrimitive + Debug + Display + Copy + Sum + Product {
pub trait RealNumber: Float + FromPrimitive + Debug + Display + Copy + Sum + Product {
fn copysign(self, sign: Self) -> Self;
fn ln_1pe(self) -> Self;
@@ -21,7 +21,7 @@ pub trait FloatExt: Float + FromPrimitive + Debug + Display + Copy + Sum + Produ
}
}
impl FloatExt for f64 {
impl RealNumber for f64 {
fn copysign(self, sign: Self) -> Self {
self.copysign(sign)
}
@@ -58,7 +58,7 @@ impl FloatExt for f64 {
}
}
impl FloatExt for f32 {
impl RealNumber for f32 {
fn copysign(self, sign: Self) -> Self {
self.copysign(sign)
}
+2 -2
View File
@@ -1,13 +1,13 @@
use serde::{Deserialize, Serialize};
use crate::linalg::BaseVector;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct Accuracy {}
impl Accuracy {
pub fn get_score<T: FloatExt, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
pub fn get_score<T: RealNumber, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
if y_true.len() != y_pred.len() {
panic!(
"The vector sizes don't match: {} != {}",
+2 -2
View File
@@ -4,13 +4,13 @@ use serde::{Deserialize, Serialize};
use crate::algorithm::sort::quick_sort::QuickArgSort;
use crate::linalg::BaseVector;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct AUC {}
impl AUC {
pub fn get_score<T: FloatExt, V: BaseVector<T>>(&self, y_true: &V, y_pred_prob: &V) -> T {
pub fn get_score<T: RealNumber, V: BaseVector<T>>(&self, y_true: &V, y_pred_prob: &V) -> T {
let mut pos = T::zero();
let mut neg = T::zero();
+2 -2
View File
@@ -1,7 +1,7 @@
use serde::{Deserialize, Serialize};
use crate::linalg::BaseVector;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use crate::metrics::precision::Precision;
use crate::metrics::recall::Recall;
@@ -9,7 +9,7 @@ use crate::metrics::recall::Recall;
pub struct F1 {}
impl F1 {
pub fn get_score<T: FloatExt, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
pub fn get_score<T: RealNumber, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
if y_true.len() != y_pred.len() {
panic!(
"The vector sizes don't match: {} != {}",
+2 -2
View File
@@ -1,13 +1,13 @@
use serde::{Deserialize, Serialize};
use crate::linalg::BaseVector;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct MeanAbsoluteError {}
impl MeanAbsoluteError {
pub fn get_score<T: FloatExt, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
pub fn get_score<T: RealNumber, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
if y_true.len() != y_pred.len() {
panic!(
"The vector sizes don't match: {} != {}",
+2 -2
View File
@@ -1,13 +1,13 @@
use serde::{Deserialize, Serialize};
use crate::linalg::BaseVector;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct MeanSquareError {}
impl MeanSquareError {
pub fn get_score<T: FloatExt, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
pub fn get_score<T: RealNumber, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
if y_true.len() != y_pred.len() {
panic!(
"The vector sizes don't match: {} != {}",
+9 -9
View File
@@ -8,7 +8,7 @@ pub mod r2;
pub mod recall;
use crate::linalg::BaseVector;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
pub struct ClassificationMetrics {}
@@ -50,34 +50,34 @@ impl RegressionMetrics {
}
}
pub fn accuracy<T: FloatExt, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
pub fn accuracy<T: RealNumber, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
ClassificationMetrics::accuracy().get_score(y_true, y_pred)
}
pub fn recall<T: FloatExt, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
pub fn recall<T: RealNumber, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
ClassificationMetrics::recall().get_score(y_true, y_pred)
}
pub fn precision<T: FloatExt, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
pub fn precision<T: RealNumber, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
ClassificationMetrics::precision().get_score(y_true, y_pred)
}
pub fn f1<T: FloatExt, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
pub fn f1<T: RealNumber, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
ClassificationMetrics::f1().get_score(y_true, y_pred)
}
pub fn roc_auc_score<T: FloatExt, V: BaseVector<T>>(y_true: &V, y_pred_probabilities: &V) -> T {
pub fn roc_auc_score<T: RealNumber, V: BaseVector<T>>(y_true: &V, y_pred_probabilities: &V) -> T {
ClassificationMetrics::roc_auc_score().get_score(y_true, y_pred_probabilities)
}
pub fn mean_squared_error<T: FloatExt, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
pub fn mean_squared_error<T: RealNumber, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
RegressionMetrics::mean_squared_error().get_score(y_true, y_pred)
}
pub fn mean_absolute_error<T: FloatExt, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
pub fn mean_absolute_error<T: RealNumber, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
RegressionMetrics::mean_absolute_error().get_score(y_true, y_pred)
}
pub fn r2<T: FloatExt, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
pub fn r2<T: RealNumber, V: BaseVector<T>>(y_true: &V, y_pred: &V) -> T {
RegressionMetrics::r2().get_score(y_true, y_pred)
}
+2 -2
View File
@@ -1,13 +1,13 @@
use serde::{Deserialize, Serialize};
use crate::linalg::BaseVector;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct Precision {}
impl Precision {
pub fn get_score<T: FloatExt, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
pub fn get_score<T: RealNumber, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
if y_true.len() != y_pred.len() {
panic!(
"The vector sizes don't match: {} != {}",
+2 -2
View File
@@ -1,13 +1,13 @@
use serde::{Deserialize, Serialize};
use crate::linalg::BaseVector;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct R2 {}
impl R2 {
pub fn get_score<T: FloatExt, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
pub fn get_score<T: RealNumber, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
if y_true.len() != y_pred.len() {
panic!(
"The vector sizes don't match: {} != {}",
+2 -2
View File
@@ -1,13 +1,13 @@
use serde::{Deserialize, Serialize};
use crate::linalg::BaseVector;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct Recall {}
impl Recall {
pub fn get_score<T: FloatExt, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
pub fn get_score<T: RealNumber, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
if y_true.len() != y_pred.len() {
panic!(
"The vector sizes don't match: {} != {}",
+4 -4
View File
@@ -36,7 +36,7 @@ use serde::{Deserialize, Serialize};
use crate::linalg::{row_iter, Matrix};
use crate::math::distance::Distance;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use crate::neighbors::{KNNAlgorithm, KNNAlgorithmName, KNNWeightFunction};
/// `KNNClassifier` parameters. Use `Default::default()` for default values.
@@ -52,7 +52,7 @@ pub struct KNNClassifierParameters {
/// K Nearest Neighbors Classifier
#[derive(Serialize, Deserialize, Debug)]
pub struct KNNClassifier<T: FloatExt, D: Distance<Vec<T>, T>> {
pub struct KNNClassifier<T: RealNumber, D: Distance<Vec<T>, T>> {
classes: Vec<T>,
y: Vec<usize>,
knn_algorithm: KNNAlgorithm<T, D>,
@@ -70,7 +70,7 @@ impl Default for KNNClassifierParameters {
}
}
impl<T: FloatExt, D: Distance<Vec<T>, T>> PartialEq for KNNClassifier<T, D> {
impl<T: RealNumber, D: Distance<Vec<T>, T>> PartialEq for KNNClassifier<T, D> {
fn eq(&self, other: &Self) -> bool {
if self.classes.len() != other.classes.len()
|| self.k != other.k
@@ -93,7 +93,7 @@ impl<T: FloatExt, D: Distance<Vec<T>, T>> PartialEq for KNNClassifier<T, D> {
}
}
impl<T: FloatExt, D: Distance<Vec<T>, T>> KNNClassifier<T, D> {
impl<T: RealNumber, D: Distance<Vec<T>, T>> KNNClassifier<T, D> {
/// Fits KNN classifier to a NxM matrix where N is number of samples and M is number of features.
/// * `x` - training data
/// * `y` - vector with target values (classes) of length N
+4 -4
View File
@@ -38,7 +38,7 @@ use serde::{Deserialize, Serialize};
use crate::linalg::{row_iter, BaseVector, Matrix};
use crate::math::distance::Distance;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use crate::neighbors::{KNNAlgorithm, KNNAlgorithmName, KNNWeightFunction};
/// `KNNRegressor` parameters. Use `Default::default()` for default values.
@@ -54,7 +54,7 @@ pub struct KNNRegressorParameters {
/// K Nearest Neighbors Regressor
#[derive(Serialize, Deserialize, Debug)]
pub struct KNNRegressor<T: FloatExt, D: Distance<Vec<T>, T>> {
pub struct KNNRegressor<T: RealNumber, D: Distance<Vec<T>, T>> {
y: Vec<T>,
knn_algorithm: KNNAlgorithm<T, D>,
weight: KNNWeightFunction,
@@ -71,7 +71,7 @@ impl Default for KNNRegressorParameters {
}
}
impl<T: FloatExt, D: Distance<Vec<T>, T>> PartialEq for KNNRegressor<T, D> {
impl<T: RealNumber, D: Distance<Vec<T>, T>> PartialEq for KNNRegressor<T, D> {
fn eq(&self, other: &Self) -> bool {
if self.k != other.k || self.y.len() != other.y.len() {
return false;
@@ -86,7 +86,7 @@ impl<T: FloatExt, D: Distance<Vec<T>, T>> PartialEq for KNNRegressor<T, D> {
}
}
impl<T: FloatExt, D: Distance<Vec<T>, T>> KNNRegressor<T, D> {
impl<T: RealNumber, D: Distance<Vec<T>, T>> KNNRegressor<T, D> {
/// Fits KNN regressor to a NxM matrix where N is number of samples and M is number of features.
/// * `x` - training data
/// * `y` - vector with real values
+5 -5
View File
@@ -34,7 +34,7 @@
use crate::algorithm::neighbour::cover_tree::CoverTree;
use crate::algorithm::neighbour::linear_search::LinearKNNSearch;
use crate::math::distance::Distance;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use serde::{Deserialize, Serialize};
/// K Nearest Neighbors Classifier
@@ -62,13 +62,13 @@ pub enum KNNWeightFunction {
}
#[derive(Serialize, Deserialize, Debug)]
enum KNNAlgorithm<T: FloatExt, D: Distance<Vec<T>, T>> {
enum KNNAlgorithm<T: RealNumber, D: Distance<Vec<T>, T>> {
LinearSearch(LinearKNNSearch<Vec<T>, T, D>),
CoverTree(CoverTree<Vec<T>, T, D>),
}
impl KNNWeightFunction {
fn calc_weights<T: FloatExt>(&self, distances: Vec<T>) -> std::vec::Vec<T> {
fn calc_weights<T: RealNumber>(&self, distances: Vec<T>) -> std::vec::Vec<T> {
match *self {
KNNWeightFunction::Distance => {
// if there are any points that has zero distance from one or more training points,
@@ -88,7 +88,7 @@ impl KNNWeightFunction {
}
impl KNNAlgorithmName {
fn fit<T: FloatExt, D: Distance<Vec<T>, T>>(
fn fit<T: RealNumber, D: Distance<Vec<T>, T>>(
&self,
data: Vec<Vec<T>>,
distance: D,
@@ -102,7 +102,7 @@ impl KNNAlgorithmName {
}
}
impl<T: FloatExt, D: Distance<Vec<T>, T>> KNNAlgorithm<T, D> {
impl<T: RealNumber, D: Distance<Vec<T>, T>> KNNAlgorithm<T, D> {
fn find(&self, from: &Vec<T>, k: usize) -> Vec<(usize, T)> {
match *self {
KNNAlgorithm::LinearSearch(ref linear) => linear.find(from, k),
@@ -1,18 +1,18 @@
use std::default::Default;
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult};
use crate::optimization::line_search::LineSearchMethod;
use crate::optimization::{DF, F};
pub struct GradientDescent<T: FloatExt> {
pub struct GradientDescent<T: RealNumber> {
pub max_iter: usize,
pub g_rtol: T,
pub g_atol: T,
}
impl<T: FloatExt> Default for GradientDescent<T> {
impl<T: RealNumber> Default for GradientDescent<T> {
fn default() -> Self {
GradientDescent {
max_iter: 10000,
@@ -22,7 +22,7 @@ impl<T: FloatExt> Default for GradientDescent<T> {
}
}
impl<T: FloatExt> FirstOrderOptimizer<T> for GradientDescent<T> {
impl<T: RealNumber> FirstOrderOptimizer<T> for GradientDescent<T> {
fn optimize<'a, X: Matrix<T>, LS: LineSearchMethod<T>>(
&self,
f: &'a F<T, X>,
+6 -6
View File
@@ -2,12 +2,12 @@ use std::default::Default;
use std::fmt::Debug;
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult};
use crate::optimization::line_search::LineSearchMethod;
use crate::optimization::{DF, F};
pub struct LBFGS<T: FloatExt> {
pub struct LBFGS<T: RealNumber> {
pub max_iter: usize,
pub g_rtol: T,
pub g_atol: T,
@@ -19,7 +19,7 @@ pub struct LBFGS<T: FloatExt> {
pub m: usize,
}
impl<T: FloatExt> Default for LBFGS<T> {
impl<T: RealNumber> Default for LBFGS<T> {
fn default() -> Self {
LBFGS {
max_iter: 1000,
@@ -35,7 +35,7 @@ impl<T: FloatExt> Default for LBFGS<T> {
}
}
impl<T: FloatExt> LBFGS<T> {
impl<T: RealNumber> LBFGS<T> {
fn two_loops<X: Matrix<T>>(&self, state: &mut LBFGSState<T, X>) {
let lower = state.iteration.max(self.m) - self.m;
let upper = state.iteration;
@@ -175,7 +175,7 @@ impl<T: FloatExt> LBFGS<T> {
}
#[derive(Debug)]
struct LBFGSState<T: FloatExt, X: Matrix<T>> {
struct LBFGSState<T: RealNumber, X: Matrix<T>> {
x: X,
x_prev: X,
x_f: T,
@@ -195,7 +195,7 @@ struct LBFGSState<T: FloatExt, X: Matrix<T>> {
alpha: T,
}
impl<T: FloatExt> FirstOrderOptimizer<T> for LBFGS<T> {
impl<T: RealNumber> FirstOrderOptimizer<T> for LBFGS<T> {
fn optimize<'a, X: Matrix<T>, LS: LineSearchMethod<T>>(
&self,
f: &F<T, X>,
+3 -3
View File
@@ -5,11 +5,11 @@ use std::clone::Clone;
use std::fmt::Debug;
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
use crate::optimization::line_search::LineSearchMethod;
use crate::optimization::{DF, F};
pub trait FirstOrderOptimizer<T: FloatExt> {
pub trait FirstOrderOptimizer<T: RealNumber> {
fn optimize<'a, X: Matrix<T>, LS: LineSearchMethod<T>>(
&self,
f: &F<T, X>,
@@ -20,7 +20,7 @@ pub trait FirstOrderOptimizer<T: FloatExt> {
}
#[derive(Debug, Clone)]
pub struct OptimizerResult<T: FloatExt, X: Matrix<T>> {
pub struct OptimizerResult<T: RealNumber, X: Matrix<T>> {
pub x: X,
pub f_x: T,
pub iterations: usize,
+10 -10
View File
@@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize};
use crate::algorithm::sort::quick_sort::QuickArgSort;
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct DecisionTreeClassifierParameters {
@@ -18,7 +18,7 @@ pub struct DecisionTreeClassifierParameters {
}
#[derive(Serialize, Deserialize, Debug)]
pub struct DecisionTreeClassifier<T: FloatExt> {
pub struct DecisionTreeClassifier<T: RealNumber> {
nodes: Vec<Node<T>>,
parameters: DecisionTreeClassifierParameters,
num_classes: usize,
@@ -34,7 +34,7 @@ pub enum SplitCriterion {
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Node<T: FloatExt> {
pub struct Node<T: RealNumber> {
index: usize,
output: usize,
split_feature: usize,
@@ -44,7 +44,7 @@ pub struct Node<T: FloatExt> {
false_child: Option<usize>,
}
impl<T: FloatExt> PartialEq for DecisionTreeClassifier<T> {
impl<T: RealNumber> PartialEq for DecisionTreeClassifier<T> {
fn eq(&self, other: &Self) -> bool {
if self.depth != other.depth
|| self.num_classes != other.num_classes
@@ -67,7 +67,7 @@ impl<T: FloatExt> PartialEq for DecisionTreeClassifier<T> {
}
}
impl<T: FloatExt> PartialEq for Node<T> {
impl<T: RealNumber> PartialEq for Node<T> {
fn eq(&self, other: &Self) -> bool {
self.output == other.output
&& self.split_feature == other.split_feature
@@ -95,7 +95,7 @@ impl Default for DecisionTreeClassifierParameters {
}
}
impl<T: FloatExt> Node<T> {
impl<T: RealNumber> Node<T> {
fn new(index: usize, output: usize) -> Self {
Node {
index: index,
@@ -109,7 +109,7 @@ impl<T: FloatExt> Node<T> {
}
}
struct NodeVisitor<'a, T: FloatExt, M: Matrix<T>> {
struct NodeVisitor<'a, T: RealNumber, M: Matrix<T>> {
x: &'a M,
y: &'a Vec<usize>,
node: usize,
@@ -121,7 +121,7 @@ struct NodeVisitor<'a, T: FloatExt, M: Matrix<T>> {
phantom: PhantomData<&'a T>,
}
fn impurity<T: FloatExt>(criterion: &SplitCriterion, count: &Vec<usize>, n: usize) -> T {
fn impurity<T: RealNumber>(criterion: &SplitCriterion, count: &Vec<usize>, n: usize) -> T {
let mut impurity = T::zero();
match criterion {
@@ -156,7 +156,7 @@ fn impurity<T: FloatExt>(criterion: &SplitCriterion, count: &Vec<usize>, n: usiz
return impurity;
}
impl<'a, T: FloatExt, M: Matrix<T>> NodeVisitor<'a, T, M> {
impl<'a, T: RealNumber, M: Matrix<T>> NodeVisitor<'a, T, M> {
fn new(
node_id: usize,
samples: Vec<usize>,
@@ -193,7 +193,7 @@ pub(in crate) fn which_max(x: &Vec<usize>) -> usize {
return which;
}
impl<T: FloatExt> DecisionTreeClassifier<T> {
impl<T: RealNumber> DecisionTreeClassifier<T> {
pub fn fit<M: Matrix<T>>(
x: &M,
y: &M::RowVector,
+9 -9
View File
@@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize};
use crate::algorithm::sort::quick_sort::QuickArgSort;
use crate::linalg::Matrix;
use crate::math::num::FloatExt;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
pub struct DecisionTreeRegressorParameters {
@@ -16,14 +16,14 @@ pub struct DecisionTreeRegressorParameters {
}
#[derive(Serialize, Deserialize, Debug)]
pub struct DecisionTreeRegressor<T: FloatExt> {
pub struct DecisionTreeRegressor<T: RealNumber> {
nodes: Vec<Node<T>>,
parameters: DecisionTreeRegressorParameters,
depth: u16,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Node<T: FloatExt> {
pub struct Node<T: RealNumber> {
index: usize,
output: T,
split_feature: usize,
@@ -43,7 +43,7 @@ impl Default for DecisionTreeRegressorParameters {
}
}
impl<T: FloatExt> Node<T> {
impl<T: RealNumber> Node<T> {
fn new(index: usize, output: T) -> Self {
Node {
index: index,
@@ -57,7 +57,7 @@ impl<T: FloatExt> Node<T> {
}
}
impl<T: FloatExt> PartialEq for Node<T> {
impl<T: RealNumber> PartialEq for Node<T> {
fn eq(&self, other: &Self) -> bool {
(self.output - other.output).abs() < T::epsilon()
&& self.split_feature == other.split_feature
@@ -74,7 +74,7 @@ impl<T: FloatExt> PartialEq for Node<T> {
}
}
impl<T: FloatExt> PartialEq for DecisionTreeRegressor<T> {
impl<T: RealNumber> PartialEq for DecisionTreeRegressor<T> {
fn eq(&self, other: &Self) -> bool {
if self.depth != other.depth || self.nodes.len() != other.nodes.len() {
return false;
@@ -89,7 +89,7 @@ impl<T: FloatExt> PartialEq for DecisionTreeRegressor<T> {
}
}
struct NodeVisitor<'a, T: FloatExt, M: Matrix<T>> {
struct NodeVisitor<'a, T: RealNumber, M: Matrix<T>> {
x: &'a M,
y: &'a M,
node: usize,
@@ -100,7 +100,7 @@ struct NodeVisitor<'a, T: FloatExt, M: Matrix<T>> {
level: u16,
}
impl<'a, T: FloatExt, M: Matrix<T>> NodeVisitor<'a, T, M> {
impl<'a, T: RealNumber, M: Matrix<T>> NodeVisitor<'a, T, M> {
fn new(
node_id: usize,
samples: Vec<usize>,
@@ -122,7 +122,7 @@ impl<'a, T: FloatExt, M: Matrix<T>> NodeVisitor<'a, T, M> {
}
}
impl<T: FloatExt> DecisionTreeRegressor<T> {
impl<T: RealNumber> DecisionTreeRegressor<T> {
pub fn fit<M: Matrix<T>>(
x: &M,
y: &M::RowVector,