Solve conflict with num-traits (#130)

* Solve conflict with num-traits

* Fix clippy warnings

Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
This commit is contained in:
morenol
2022-05-05 10:39:18 -04:00
committed by GitHub
parent 12c102d02b
commit 820201e920
23 changed files with 58 additions and 65 deletions
+5 -5
View File
@@ -65,7 +65,7 @@ struct Node<F: RealNumber> {
max_dist: F,
parent_dist: F,
children: Vec<Node<F>>,
scale: i64,
_scale: i64,
}
#[derive(Debug)]
@@ -85,7 +85,7 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
max_dist: F::zero(),
parent_dist: F::zero(),
children: Vec::new(),
scale: 0,
_scale: 0,
};
let mut tree = CoverTree {
base,
@@ -243,7 +243,7 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
max_dist: F::zero(),
parent_dist: F::zero(),
children: Vec::new(),
scale: 100,
_scale: 100,
}
}
@@ -304,7 +304,7 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
max_dist: F::zero(),
parent_dist: F::zero(),
children,
scale: 100,
_scale: 100,
}
} else {
let mut far: Vec<DistanceSet<F>> = Vec::new();
@@ -373,7 +373,7 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
max_dist: self.max(consumed_set),
parent_dist: F::zero(),
children,
scale: (top_scale - max_scale),
_scale: (top_scale - max_scale),
}
}
}
+2 -2
View File
@@ -155,11 +155,11 @@ impl<T: RealNumber + Sum, D: Distance<Vec<T>, T>> DBSCAN<T, D> {
parameters: DBSCANParameters<T, D>,
) -> Result<DBSCAN<T, D>, Failed> {
if parameters.min_samples < 1 {
return Err(Failed::fit(&"Invalid minPts".to_string()));
return Err(Failed::fit("Invalid minPts"));
}
if parameters.eps <= T::zero() {
return Err(Failed::fit(&"Invalid radius: ".to_string()));
return Err(Failed::fit("Invalid radius: "));
}
let mut k = 0;
+5 -5
View File
@@ -71,9 +71,9 @@ use crate::math::num::RealNumber;
#[derive(Debug)]
pub struct KMeans<T: RealNumber> {
k: usize,
y: Vec<usize>,
_y: Vec<usize>,
size: Vec<usize>,
distortion: T,
_distortion: T,
centroids: Vec<Vec<T>>,
}
@@ -208,9 +208,9 @@ impl<T: RealNumber + Sum> KMeans<T> {
Ok(KMeans {
k: parameters.k,
y,
_y: y,
size,
distortion,
_distortion: distortion,
centroids,
})
}
@@ -344,7 +344,7 @@ mod tests {
let y = kmeans.predict(&x).unwrap();
for i in 0..y.len() {
assert_eq!(y[i] as usize, kmeans.y[i]);
assert_eq!(y[i] as usize, kmeans._y[i]);
}
}
+2 -2
View File
@@ -67,14 +67,14 @@ pub(crate) fn serialize_data<X: RealNumber, Y: RealNumber>(
.data
.iter()
.copied()
.flat_map(|f| f.to_f32_bits().to_le_bytes().to_vec().into_iter())
.flat_map(|f| f.to_f32_bits().to_le_bytes().to_vec())
.collect();
file.write_all(&x)?;
let y: Vec<u8> = dataset
.target
.iter()
.copied()
.flat_map(|f| f.to_f32_bits().to_le_bytes().to_vec().into_iter())
.flat_map(|f| f.to_f32_bits().to_le_bytes().to_vec())
.collect();
file.write_all(&y)?;
}
+2 -2
View File
@@ -88,7 +88,7 @@ pub struct RandomForestClassifierParameters {
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
pub struct RandomForestClassifier<T: RealNumber> {
parameters: RandomForestClassifierParameters,
_parameters: RandomForestClassifierParameters,
trees: Vec<DecisionTreeClassifier<T>>,
classes: Vec<T>,
samples: Option<Vec<Vec<bool>>>,
@@ -249,7 +249,7 @@ impl<T: RealNumber> RandomForestClassifier<T> {
}
Ok(RandomForestClassifier {
parameters,
_parameters: parameters,
trees,
classes,
samples: maybe_all_samples,
+2 -2
View File
@@ -84,7 +84,7 @@ pub struct RandomForestRegressorParameters {
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
pub struct RandomForestRegressor<T: RealNumber> {
parameters: RandomForestRegressorParameters,
_parameters: RandomForestRegressorParameters,
trees: Vec<DecisionTreeRegressor<T>>,
samples: Option<Vec<Vec<bool>>>,
}
@@ -215,7 +215,7 @@ impl<T: RealNumber> RandomForestRegressor<T> {
}
Ok(RandomForestRegressor {
parameters,
_parameters: parameters,
trees,
samples: maybe_all_samples,
})
+3 -4
View File
@@ -87,8 +87,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> Cholesky<T, M> {
if bn != rn {
return Err(Failed::because(
FailedError::SolutionFailed,
&"Can\'t solve Ax = b for x. Number of rows in b != number of rows in R."
.to_string(),
"Can\'t solve Ax = b for x. Number of rows in b != number of rows in R.",
));
}
@@ -128,7 +127,7 @@ pub trait CholeskyDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
if m != n {
return Err(Failed::because(
FailedError::DecompositionFailed,
&"Can\'t do Cholesky decomposition on a non-square matrix".to_string(),
"Can\'t do Cholesky decomposition on a non-square matrix",
));
}
@@ -148,7 +147,7 @@ pub trait CholeskyDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
if d < T::zero() {
return Err(Failed::because(
FailedError::DecompositionFailed,
&"The matrix is not positive definite.".to_string(),
"The matrix is not positive definite.",
));
}
+7 -12
View File
@@ -97,7 +97,7 @@ pub trait EVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
}
}
fn tred2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
fn tred2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut [T], e: &mut [T]) {
let (n, _) = V.shape();
for (i, d_i) in d.iter_mut().enumerate().take(n) {
*d_i = V.get(n - 1, i);
@@ -195,7 +195,7 @@ fn tred2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec
e[0] = T::zero();
}
fn tql2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
fn tql2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut [T], e: &mut [T]) {
let (n, _) = V.shape();
for i in 1..n {
e[i - 1] = e[i];
@@ -419,7 +419,7 @@ fn eltran<T: RealNumber, M: BaseMatrix<T>>(A: &M, V: &mut M, perm: &[usize]) {
}
}
fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e: &mut Vec<T>) {
fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut [T], e: &mut [T]) {
let (n, _) = A.shape();
let mut z = T::zero();
let mut s = T::zero();
@@ -471,7 +471,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
A.set(nn, nn, x);
A.set(nn - 1, nn - 1, y + t);
if q >= T::zero() {
z = p + z.copysign(p);
z = p + RealNumber::copysign(z, p);
d[nn - 1] = x + z;
d[nn] = x + z;
if z != T::zero() {
@@ -570,7 +570,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
r /= x;
}
}
let s = (p * p + q * q + r * r).sqrt().copysign(p);
let s = RealNumber::copysign((p * p + q * q + r * r).sqrt(), p);
if s != T::zero() {
if k == m {
if l != m {
@@ -594,12 +594,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
A.sub_element_mut(k + 1, j, p * y);
A.sub_element_mut(k, j, p * x);
}
let mmin;
if nn < k + 3 {
mmin = nn;
} else {
mmin = k + 3;
}
let mmin = if nn < k + 3 { nn } else { k + 3 };
for i in 0..mmin + 1 {
p = x * A.get(i, k) + y * A.get(i, k + 1);
if k + 1 != nn {
@@ -783,7 +778,7 @@ fn balbak<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, scale: &[T]) {
}
}
fn sort<T: RealNumber, M: BaseMatrix<T>>(d: &mut Vec<T>, e: &mut Vec<T>, V: &mut M) {
fn sort<T: RealNumber, M: BaseMatrix<T>>(d: &mut [T], e: &mut [T], V: &mut M) {
let n = d.len();
let mut temp = vec![T::zero(); n];
for j in 1..n {
+3 -3
View File
@@ -46,13 +46,13 @@ use crate::math::num::RealNumber;
pub struct LU<T: RealNumber, M: BaseMatrix<T>> {
LU: M,
pivot: Vec<usize>,
pivot_sign: i8,
_pivot_sign: i8,
singular: bool,
phantom: PhantomData<T>,
}
impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
pub(crate) fn new(LU: M, pivot: Vec<usize>, pivot_sign: i8) -> LU<T, M> {
pub(crate) fn new(LU: M, pivot: Vec<usize>, _pivot_sign: i8) -> LU<T, M> {
let (_, n) = LU.shape();
let mut singular = false;
@@ -66,7 +66,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
LU {
LU,
pivot,
pivot_sign,
_pivot_sign,
singular,
phantom: PhantomData,
}
+4 -5
View File
@@ -689,12 +689,11 @@ impl<'a, T: RealNumber, M: BaseMatrix<T>> Iterator for RowIter<'a, T, M> {
type Item = Vec<T>;
fn next(&mut self) -> Option<Vec<T>> {
let res;
if self.pos < self.max_pos {
res = Some(self.m.get_row_as_vec(self.pos))
let res = if self.pos < self.max_pos {
Some(self.m.get_row_as_vec(self.pos))
} else {
res = None
}
None
};
self.pos += 1;
res
}
-1
View File
@@ -523,7 +523,6 @@ impl<T: RealNumber> PartialEq for DenseMatrix<T> {
true
}
}
impl<T: RealNumber> From<DenseMatrix<T>> for Vec<T> {
fn from(dense_matrix: DenseMatrix<T>) -> Vec<T> {
dense_matrix.values
+6 -6
View File
@@ -47,7 +47,7 @@ pub struct SVD<T: RealNumber, M: SVDDecomposableMatrix<T>> {
pub V: M,
/// Singular values of the original matrix
pub s: Vec<T>,
full: bool,
_full: bool,
m: usize,
n: usize,
tol: T,
@@ -116,7 +116,7 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
}
let mut f = U.get(i, i);
g = -s.sqrt().copysign(f);
g = -RealNumber::copysign(s.sqrt(), f);
let h = f * g - s;
U.set(i, i, f - g);
for j in l - 1..n {
@@ -152,7 +152,7 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
}
let f = U.get(i, l - 1);
g = -s.sqrt().copysign(f);
g = -RealNumber::copysign(s.sqrt(), f);
let h = f * g - s;
U.set(i, l - 1, f - g);
@@ -299,7 +299,7 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
let mut h = rv1[k];
let mut f = ((y - z) * (y + z) + (g - h) * (g + h)) / (T::two() * h * y);
g = f.hypot(T::one());
f = ((x - z) * (x + z) + h * ((y / (f + g.copysign(f))) - h)) / x;
f = ((x - z) * (x + z) + h * ((y / (f + RealNumber::copysign(g, f))) - h)) / x;
let mut c = T::one();
let mut s = T::one();
@@ -428,13 +428,13 @@ impl<T: RealNumber, M: SVDDecomposableMatrix<T>> SVD<T, M> {
pub(crate) fn new(U: M, V: M, s: Vec<T>) -> SVD<T, M> {
let m = U.shape().0;
let n = V.shape().0;
let full = s.len() == m.min(n);
let _full = s.len() == m.min(n);
let tol = T::half() * (T::from(m + n).unwrap() + T::one()).sqrt() * s[0] * T::epsilon();
SVD {
U,
V,
s,
full,
_full,
m,
n,
tol,
+3 -3
View File
@@ -94,7 +94,7 @@ pub struct LinearRegressionParameters {
pub struct LinearRegression<T: RealNumber, M: Matrix<T>> {
coefficients: M,
intercept: T,
solver: LinearRegressionSolverName,
_solver: LinearRegressionSolverName,
}
impl LinearRegressionParameters {
@@ -155,7 +155,7 @@ impl<T: RealNumber, M: Matrix<T>> LinearRegression<T, M> {
if x_nrows != y_nrows {
return Err(Failed::fit(
&"Number of rows of X doesn\'t match number of rows of Y".to_string(),
"Number of rows of X doesn\'t match number of rows of Y",
));
}
@@ -171,7 +171,7 @@ impl<T: RealNumber, M: Matrix<T>> LinearRegression<T, M> {
Ok(LinearRegression {
intercept: w.get(num_attributes, 0),
coefficients: wights,
solver: parameters.solver,
_solver: parameters.solver,
})
}
+1 -1
View File
@@ -321,7 +321,7 @@ impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
if x_nrows != y_nrows {
return Err(Failed::fit(
&"Number of rows of X doesn\'t match number of rows of Y".to_string(),
"Number of rows of X doesn\'t match number of rows of Y",
));
}
+2 -2
View File
@@ -96,7 +96,7 @@ pub struct RidgeRegressionParameters<T: RealNumber> {
pub struct RidgeRegression<T: RealNumber, M: Matrix<T>> {
coefficients: M,
intercept: T,
solver: RidgeRegressionSolverName,
_solver: RidgeRegressionSolverName,
}
impl<T: RealNumber> RidgeRegressionParameters<T> {
@@ -226,7 +226,7 @@ impl<T: RealNumber, M: Matrix<T>> RidgeRegression<T, M> {
Ok(RidgeRegression {
intercept: b,
coefficients: w,
solver: parameters.solver,
_solver: parameters.solver,
})
}
+1 -1
View File
@@ -161,7 +161,7 @@ impl<T: RealNumber> CategoricalNBDistribution<T> {
let y_max = y
.iter()
.max()
.ok_or_else(|| Failed::fit(&"Failed to get the labels of y.".to_string()))?;
.ok_or_else(|| Failed::fit("Failed to get the labels of y."))?;
let class_labels: Vec<T> = (0..*y_max + 1)
.map(|label| T::from(label).unwrap())
+1
View File
@@ -8,6 +8,7 @@ use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult};
use crate::optimization::line_search::LineSearchMethod;
use crate::optimization::{DF, F};
#[allow(clippy::upper_case_acronyms)]
pub struct LBFGS<T: RealNumber> {
pub max_iter: usize,
pub g_rtol: T,
+1
View File
@@ -4,6 +4,7 @@ pub mod line_search;
pub type F<'a, T, X> = dyn for<'b> Fn(&'b X) -> T + 'a;
pub type DF<'a, X> = dyn for<'b> Fn(&'b mut X, &'b X) + 'a;
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq)]
pub enum FunctionOrder {
SECOND,
+1 -2
View File
@@ -78,8 +78,7 @@ fn find_new_idxs(num_params: usize, cat_sizes: &[usize], cat_idxs: &[usize]) ->
.zip(
repeats
.zip(offset)
.map(|(r, o)| iter::repeat(o).take(r))
.flatten(),
.flat_map(|(r, o)| iter::repeat(o).take(r)),
)
.map(|(idx, ofst)| idx + ofst)
.collect();
+1 -1
View File
@@ -222,7 +222,7 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> SVC<T, M, K> {
if n != y.len() {
return Err(Failed::fit(
&"Number of rows of X doesn\'t match number of rows of Y".to_string(),
"Number of rows of X doesn\'t match number of rows of Y",
));
}
+1 -1
View File
@@ -212,7 +212,7 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> SVR<T, M, K> {
if n != y.len() {
return Err(Failed::fit(
&"Number of rows of X doesn\'t match number of rows of Y".to_string(),
"Number of rows of X doesn\'t match number of rows of Y",
));
}
+3 -3
View File
@@ -118,7 +118,7 @@ pub enum SplitCriterion {
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
struct Node<T: RealNumber> {
index: usize,
_index: usize,
output: usize,
split_feature: usize,
split_value: Option<T>,
@@ -204,7 +204,7 @@ impl Default for DecisionTreeClassifierParameters {
impl<T: RealNumber> Node<T> {
fn new(index: usize, output: usize) -> Self {
Node {
index,
_index: index,
output,
split_feature: 0,
split_value: Option::None,
@@ -514,7 +514,7 @@ impl<T: RealNumber> DecisionTreeClassifier<T> {
visitor: &mut NodeVisitor<'_, T, M>,
n: usize,
count: &[usize],
false_count: &mut Vec<usize>,
false_count: &mut [usize],
parent_impurity: T,
j: usize,
) {
+2 -2
View File
@@ -97,7 +97,7 @@ pub struct DecisionTreeRegressor<T: RealNumber> {
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
struct Node<T: RealNumber> {
index: usize,
_index: usize,
output: T,
split_feature: usize,
split_value: Option<T>,
@@ -137,7 +137,7 @@ impl Default for DecisionTreeRegressorParameters {
impl<T: RealNumber> Node<T> {
fn new(index: usize, output: T) -> Self {
Node {
index,
_index: index,
output,
split_feature: 0,
split_value: Option::None,