refactor: drop redundant `Debug` bound from `FloatExt` generic parameters

This commit is contained in:
Volodymyr Orlov
2020-03-31 18:24:01 -07:00
parent 8bb6013430
commit 5766364311
15 changed files with 49 additions and 52 deletions
+4 -4
View File
@@ -5,14 +5,14 @@ use crate::linalg::Matrix;
use crate::math::distance::euclidian;
#[derive(Debug)]
pub struct BBDTree<T: FloatExt + Debug> {
pub struct BBDTree<T: FloatExt> {
nodes: Vec<BBDTreeNode<T>>,
index: Vec<usize>,
root: usize
}
#[derive(Debug)]
struct BBDTreeNode<T: FloatExt + Debug> {
struct BBDTreeNode<T: FloatExt> {
count: usize,
index: usize,
center: Vec<T>,
@@ -23,7 +23,7 @@ struct BBDTreeNode<T: FloatExt + Debug> {
upper: Option<usize>
}
impl<T: FloatExt + Debug> BBDTreeNode<T> {
impl<T: FloatExt> BBDTreeNode<T> {
fn new(d: usize) -> BBDTreeNode<T> {
BBDTreeNode {
count: 0,
@@ -38,7 +38,7 @@ impl<T: FloatExt + Debug> BBDTreeNode<T> {
}
}
impl<T: FloatExt + Debug> BBDTree<T> {
impl<T: FloatExt> BBDTree<T> {
pub fn new<M: Matrix<T>>(data: &M) -> BBDTree<T> {
let nodes = Vec::new();
+1 -1
View File
@@ -56,7 +56,7 @@ impl Default for KMeansParameters {
}
}
impl<T: FloatExt + Debug + Sum> KMeans<T>{
impl<T: FloatExt + Sum> KMeans<T>{
pub fn new<M: Matrix<T>>(data: &M, k: usize, parameters: KMeansParameters) -> KMeans<T> {
let bbd = BBDTree::new(data);
+2 -2
View File
@@ -3,7 +3,7 @@ use crate::math::num::FloatExt;
use crate::linalg::{Matrix};
#[derive(Debug)]
pub struct PCA<T: FloatExt + Debug, M: Matrix<T>> {
pub struct PCA<T: FloatExt, M: Matrix<T>> {
eigenvectors: M,
eigenvalues: Vec<T>,
projection: M,
@@ -24,7 +24,7 @@ impl Default for PCAParameters {
}
}
impl<T: FloatExt + Debug, M: Matrix<T>> PCA<T, M> {
impl<T: FloatExt, M: Matrix<T>> PCA<T, M> {
pub fn new(data: &M, n_components: usize, parameters: PCAParameters) -> PCA<T, M> {
+1 -1
View File
@@ -39,7 +39,7 @@ impl Default for RandomForestClassifierParameters {
}
}
impl<T: FloatExt + Debug> RandomForestClassifier<T> {
impl<T: FloatExt> RandomForestClassifier<T> {
pub fn fit<M: Matrix<T>>(x: &M, y: &M::RowVector, parameters: RandomForestClassifierParameters) -> RandomForestClassifier<T> {
let (_, num_attributes) = x.shape();
+1 -1
View File
@@ -36,7 +36,7 @@ impl Default for RandomForestRegressorParameters {
}
}
impl<T: FloatExt + Debug> RandomForestRegressor<T> {
impl<T: FloatExt> RandomForestRegressor<T> {
pub fn fit<M: Matrix<T>>(x: &M, y: &M::RowVector, parameters: RandomForestRegressorParameters) -> RandomForestRegressor<T> {
let (n_rows, num_attributes) = x.shape();
+11 -11
View File
@@ -6,13 +6,13 @@ use crate::math::num::FloatExt;
use std::fmt::Debug;
#[derive(Debug, Clone)]
pub struct EVD<T: FloatExt + Debug, M: BaseMatrix<T>> {
pub struct EVD<T: FloatExt, M: BaseMatrix<T>> {
pub d: Vec<T>,
pub e: Vec<T>,
pub V: M
}
impl<T: FloatExt + Debug, M: BaseMatrix<T>> EVD<T, M> {
impl<T: FloatExt, M: BaseMatrix<T>> EVD<T, M> {
pub fn new(V: M, d: Vec<T>, e: Vec<T>) -> EVD<T, M> {
EVD {
d: d,
@@ -22,7 +22,7 @@ impl<T: FloatExt + Debug, M: BaseMatrix<T>> EVD<T, M> {
}
}
pub trait EVDDecomposableMatrix<T: FloatExt + Debug>: BaseMatrix<T> {
pub trait EVDDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
fn evd(&self, symmetric: bool) -> EVD<T, Self>{
self.clone().evd_mut(symmetric)
@@ -68,7 +68,7 @@ pub trait EVDDecomposableMatrix<T: FloatExt + Debug>: BaseMatrix<T> {
}
}
fn tred2<T: FloatExt + Debug, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
fn tred2<T: FloatExt, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
let (n, _) = V.shape();
for i in 0..n {
@@ -172,7 +172,7 @@ fn tred2<T: FloatExt + Debug, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &m
e[0] = T::zero();
}
fn tql2<T: FloatExt + Debug, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
fn tql2<T: FloatExt, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
let (n, _) = V.shape();
for i in 1..n {
e[i - 1] = e[i];
@@ -288,7 +288,7 @@ fn tql2<T: FloatExt + Debug, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mu
}
}
fn balance<T: FloatExt + Debug, M: BaseMatrix<T>>(A: &mut M) -> Vec<T> {
fn balance<T: FloatExt, M: BaseMatrix<T>>(A: &mut M) -> Vec<T> {
let radix = T::two();
let sqrdx = radix * radix;
@@ -341,7 +341,7 @@ fn balance<T: FloatExt + Debug, M: BaseMatrix<T>>(A: &mut M) -> Vec<T> {
return scale;
}
fn elmhes<T: FloatExt + Debug, M: BaseMatrix<T>>(A: &mut M) -> Vec<usize> {
fn elmhes<T: FloatExt, M: BaseMatrix<T>>(A: &mut M) -> Vec<usize> {
let (n, _) = A.shape();
let mut perm = vec![0; n];
@@ -387,7 +387,7 @@ fn elmhes<T: FloatExt + Debug, M: BaseMatrix<T>>(A: &mut M) -> Vec<usize> {
return perm;
}
fn eltran<T: FloatExt + Debug, M: BaseMatrix<T>>(A: &M, V: &mut M, perm: &Vec<usize>) {
fn eltran<T: FloatExt, M: BaseMatrix<T>>(A: &M, V: &mut M, perm: &Vec<usize>) {
let (n, _) = A.shape();
for mp in (1..n - 1).rev() {
for k in mp + 1..n {
@@ -404,7 +404,7 @@ fn eltran<T: FloatExt + Debug, M: BaseMatrix<T>>(A: &M, V: &mut M, perm: &Vec<us
}
}
fn hqr2<T: FloatExt + Debug, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
fn hqr2<T: FloatExt, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
let (n, _) = A.shape();
let mut z = T::zero();
let mut s = T::zero();
@@ -742,7 +742,7 @@ fn hqr2<T: FloatExt + Debug, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec
}
}
fn balbak<T: FloatExt + Debug, M: BaseMatrix<T>>(V: &mut M, scale: &Vec<T>) {
fn balbak<T: FloatExt, M: BaseMatrix<T>>(V: &mut M, scale: &Vec<T>) {
let (n, _) = V.shape();
for i in 0..n {
for j in 0..n {
@@ -751,7 +751,7 @@ fn balbak<T: FloatExt + Debug, M: BaseMatrix<T>>(V: &mut M, scale: &Vec<T>) {
}
}
fn sort<T: FloatExt + Debug, M: BaseMatrix<T>>(d: &mut Vec<T>, e: &mut Vec<T>, V: &mut M) {
fn sort<T: FloatExt, M: BaseMatrix<T>>(d: &mut Vec<T>, e: &mut Vec<T>, V: &mut M) {
let n = d.len();
let mut temp = vec![T::zero(); n];
for j in 1..n {
+10 -10
View File
@@ -16,7 +16,7 @@ use crate::linalg::qr::QRDecomposableMatrix;
use crate::math::num::FloatExt;
#[derive(Debug, Clone)]
pub struct DenseMatrix<T: FloatExt + Debug> {
pub struct DenseMatrix<T: FloatExt> {
ncols: usize,
nrows: usize,
@@ -24,7 +24,7 @@ pub struct DenseMatrix<T: FloatExt + Debug> {
}
impl<T: FloatExt + Debug> fmt::Display for DenseMatrix<T> {
impl<T: FloatExt> fmt::Display for DenseMatrix<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut rows: Vec<Vec<f64>> = Vec::new();
for r in 0..self.nrows {
@@ -34,7 +34,7 @@ impl<T: FloatExt + Debug> fmt::Display for DenseMatrix<T> {
}
}
impl<T: FloatExt + Debug> DenseMatrix<T> {
impl<T: FloatExt> DenseMatrix<T> {
fn new(nrows: usize, ncols: usize, values: Vec<T>) -> Self {
DenseMatrix {
@@ -182,15 +182,15 @@ impl<T: FloatExt + fmt::Debug + Serialize> Serialize for DenseMatrix<T> {
}
}
impl<T: FloatExt + Debug> SVDDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> SVDDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt + Debug> EVDDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> EVDDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt + Debug> QRDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> QRDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt + Debug> Matrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> Matrix<T> for DenseMatrix<T> {}
impl<T: FloatExt + Debug> PartialEq for DenseMatrix<T> {
impl<T: FloatExt> PartialEq for DenseMatrix<T> {
fn eq(&self, other: &Self) -> bool {
if self.ncols != other.ncols || self.nrows != other.nrows {
return false
@@ -213,13 +213,13 @@ impl<T: FloatExt + Debug> PartialEq for DenseMatrix<T> {
}
}
impl<T: FloatExt + Debug> Into<Vec<T>> for DenseMatrix<T> {
impl<T: FloatExt> Into<Vec<T>> for DenseMatrix<T> {
fn into(self) -> Vec<T> {
self.values
}
}
impl<T: FloatExt + Debug> BaseMatrix<T> for DenseMatrix<T> {
impl<T: FloatExt> BaseMatrix<T> for DenseMatrix<T> {
type RowVector = Vec<T>;
+3 -3
View File
@@ -6,13 +6,13 @@ use crate::math::num::FloatExt;
use crate::linalg::BaseMatrix;
#[derive(Debug, Clone)]
pub struct QR<T: FloatExt + Debug, M: BaseMatrix<T>> {
pub struct QR<T: FloatExt, M: BaseMatrix<T>> {
QR: M,
tau: Vec<T>,
singular: bool
}
impl<T: FloatExt + Debug, M: BaseMatrix<T>> QR<T, M> {
impl<T: FloatExt, M: BaseMatrix<T>> QR<T, M> {
pub fn new(QR: M, tau: Vec<T>) -> QR<T, M> {
let mut singular = false;
@@ -112,7 +112,7 @@ impl<T: FloatExt + Debug, M: BaseMatrix<T>> QR<T, M> {
}
}
pub trait QRDecomposableMatrix<T: FloatExt + Debug>: BaseMatrix<T> {
pub trait QRDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
fn qr(&self) -> QR<T, Self> {
self.clone().qr_mut()
+3 -3
View File
@@ -5,7 +5,7 @@ use crate::math::num::FloatExt;
use std::fmt::Debug;
#[derive(Debug, Clone)]
pub struct SVD<T: FloatExt + Debug, M: SVDDecomposableMatrix<T>> {
pub struct SVD<T: FloatExt, M: SVDDecomposableMatrix<T>> {
pub U: M,
pub V: M,
pub s: Vec<T>,
@@ -15,7 +15,7 @@ pub struct SVD<T: FloatExt + Debug, M: SVDDecomposableMatrix<T>> {
tol: T
}
pub trait SVDDecomposableMatrix<T: FloatExt + Debug>: BaseMatrix<T> {
pub trait SVDDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
fn svd_solve_mut(self, b: Self) -> Self {
self.svd_mut().solve(b)
@@ -373,7 +373,7 @@ pub trait SVDDecomposableMatrix<T: FloatExt + Debug>: BaseMatrix<T> {
}
}
impl<T: FloatExt + Debug, M: SVDDecomposableMatrix<T>> SVD<T, M> {
impl<T: FloatExt, M: SVDDecomposableMatrix<T>> SVD<T, M> {
pub fn new(U: M, V: M, s: Vec<T>) -> SVD<T, M> {
let m = U.shape().0;
let n = V.shape().0;
+1 -3
View File
@@ -1,5 +1,3 @@
use std::fmt::Debug;
use crate::math::num::FloatExt;
use crate::linalg::{Matrix, row_iter};
use crate::algorithm::neighbour::{KNNAlgorithm, KNNAlgorithmName};
@@ -13,7 +11,7 @@ pub struct KNNClassifier<'a, T: FloatExt> {
k: usize,
}
impl<'a, T: FloatExt + Debug> KNNClassifier<'a, T> {
impl<'a, T: FloatExt> KNNClassifier<'a, T> {
pub fn fit<M: Matrix<T>>(x: &M, y: &M::RowVector, k: usize, distance: &'a dyn Fn(&Vec<T>, &Vec<T>) -> T, algorithm: KNNAlgorithmName) -> KNNClassifier<'a, T> {
@@ -1,5 +1,4 @@
use std::default::Default;
use std::fmt::Debug;
use crate::math::num::FloatExt;
use crate::linalg::Matrix;
@@ -23,7 +22,7 @@ impl<T: FloatExt> Default for GradientDescent<T> {
}
}
impl<T: FloatExt + Debug> FirstOrderOptimizer<T> for GradientDescent<T>
impl<T: FloatExt> FirstOrderOptimizer<T> for GradientDescent<T>
{
fn optimize<'a, X: Matrix<T>, LS: LineSearchMethod<T>>(&self, f: &'a F<T, X>, df: &'a DF<X>, x0: &X, ls: &'a LS) -> OptimizerResult<T, X> {
+3 -3
View File
@@ -35,7 +35,7 @@ impl<T: FloatExt> Default for LBFGS<T> {
}
}
impl<T: FloatExt + Debug> LBFGS<T> {
impl<T: FloatExt> LBFGS<T> {
fn two_loops<X: Matrix<T>>(&self, state: &mut LBFGSState<T, X>) {
@@ -169,7 +169,7 @@ impl<T: FloatExt + Debug> LBFGS<T> {
}
#[derive(Debug)]
struct LBFGSState<T: FloatExt + Debug, X: Matrix<T>> {
struct LBFGSState<T: FloatExt, X: Matrix<T>> {
x: X,
x_prev: X,
x_f: T,
@@ -189,7 +189,7 @@ struct LBFGSState<T: FloatExt + Debug, X: Matrix<T>> {
alpha: T
}
impl<T: FloatExt + Debug> FirstOrderOptimizer<T> for LBFGS<T> {
impl<T: FloatExt> FirstOrderOptimizer<T> for LBFGS<T> {
fn optimize<'a, X: Matrix<T>, LS: LineSearchMethod<T>>(&self, f: &F<T, X>, df: &'a DF<X>, x0: &X, ls: &'a LS) -> OptimizerResult<T, X> {
+2 -2
View File
@@ -9,12 +9,12 @@ use crate::linalg::Matrix;
use crate::optimization::line_search::LineSearchMethod;
use crate::optimization::{F, DF};
pub trait FirstOrderOptimizer<T: FloatExt + Debug> {
pub trait FirstOrderOptimizer<T: FloatExt> {
fn optimize<'a, X: Matrix<T>, LS: LineSearchMethod<T>>(&self, f: &F<T, X>, df: &'a DF<X>, x0: &X, ls: &'a LS) -> OptimizerResult<T, X>;
}
#[derive(Debug, Clone)]
pub struct OptimizerResult<T: FloatExt + Debug, X: Matrix<T>>
pub struct OptimizerResult<T: FloatExt, X: Matrix<T>>
{
pub x: X,
pub f_x: T,
+3 -3
View File
@@ -68,7 +68,7 @@ impl<T: FloatExt> Node<T> {
}
}
struct NodeVisitor<'a, T: FloatExt + Debug, M: Matrix<T>> {
struct NodeVisitor<'a, T: FloatExt, M: Matrix<T>> {
x: &'a M,
y: &'a Vec<usize>,
node: usize,
@@ -115,7 +115,7 @@ fn impurity<T: FloatExt>(criterion: &SplitCriterion, count: &Vec<usize>, n: usiz
return impurity;
}
impl<'a, T: FloatExt + Debug, M: Matrix<T>> NodeVisitor<'a, T, M> {
impl<'a, T: FloatExt, M: Matrix<T>> NodeVisitor<'a, T, M> {
fn new(node_id: usize, samples: Vec<usize>, order: &'a Vec<Vec<usize>>, x: &'a M, y: &'a Vec<usize>, level: u16) -> Self {
NodeVisitor {
@@ -147,7 +147,7 @@ pub(in crate) fn which_max(x: &Vec<usize>) -> usize {
return which;
}
impl<T: FloatExt + Debug> DecisionTreeClassifier<T> {
impl<T: FloatExt> DecisionTreeClassifier<T> {
pub fn fit<M: Matrix<T>>(x: &M, y: &M::RowVector, parameters: DecisionTreeClassifierParameters) -> DecisionTreeClassifier<T> {
let (x_nrows, num_attributes) = x.shape();
+3 -3
View File
@@ -56,7 +56,7 @@ impl<T: FloatExt> Node<T> {
}
}
struct NodeVisitor<'a, T: FloatExt + Debug, M: Matrix<T>> {
struct NodeVisitor<'a, T: FloatExt, M: Matrix<T>> {
x: &'a M,
y: &'a M,
node: usize,
@@ -67,7 +67,7 @@ struct NodeVisitor<'a, T: FloatExt + Debug, M: Matrix<T>> {
level: u16
}
impl<'a, T: FloatExt + Debug, M: Matrix<T>> NodeVisitor<'a, T, M> {
impl<'a, T: FloatExt, M: Matrix<T>> NodeVisitor<'a, T, M> {
fn new(node_id: usize, samples: Vec<usize>, order: &'a Vec<Vec<usize>>, x: &'a M, y: &'a M, level: u16) -> Self {
NodeVisitor {
@@ -84,7 +84,7 @@ impl<'a, T: FloatExt + Debug, M: Matrix<T>> NodeVisitor<'a, T, M> {
}
impl<T: FloatExt + Debug> DecisionTreeRegressor<T> {
impl<T: FloatExt> DecisionTreeRegressor<T> {
pub fn fit<M: Matrix<T>>(x: &M, y: &M::RowVector, parameters: DecisionTreeRegressorParameters) -> DecisionTreeRegressor<T> {
let (x_nrows, num_attributes) = x.shape();