Run: cargo clippy --fix -Z unstable-options and cargo fmt

Luis Moreno
2020-11-08 19:39:11 -04:00
parent 8281a1620e
commit 860056c3ba
48 changed files with 367 additions and 395 deletions
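The changes below are mechanical rewrites applied by clippy and rustfmt: struct field init shorthand (nodes: nodes, becomes nodes,), compound assignment (a = a + b becomes a += b), dropping needless return statements, and replacing redundant closures with function references. A minimal sketch of the pattern, using a hypothetical Point struct for illustration rather than code from this repository:

// Hypothetical example of the lints fixed in this commit, not part of the repository.
struct Point {
    x: f64,
    y: f64,
}

fn translate(x: f64, y: f64, dx: f64) -> Point {
    let mut p = Point { x, y }; // was: Point { x: x, y: y }
    p.x += dx;                  // was: p.x = p.x + dx;
    p                           // was: return p;
}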
+10 -12
@@ -50,8 +50,8 @@ impl<T: RealNumber> BBDTree<T> {
}
let mut tree = BBDTree {
nodes: nodes,
index: index,
nodes,
index,
root: 0,
};
@@ -113,7 +113,7 @@ impl<T: RealNumber> BBDTree<T> {
}
}
if !self.nodes[node].lower.is_none() {
if self.nodes[node].lower.is_some() {
let mut new_candidates = vec![0; k];
let mut newk = 0;
@@ -152,7 +152,7 @@ impl<T: RealNumber> BBDTree<T> {
}
for i in 0..d {
sums[closest][i] = sums[closest][i] + self.nodes[node].sum[i];
sums[closest][i] += self.nodes[node].sum[i];
}
counts[closest] += self.nodes[node].count;
@@ -184,11 +184,11 @@ impl<T: RealNumber> BBDTree<T> {
let mut rhs = T::zero();
for i in 0..d {
let diff = test[i] - best[i];
lhs = lhs + diff * diff;
lhs += diff * diff;
if diff > T::zero() {
rhs = rhs + (center[i] + radius[i] - best[i]) * diff;
rhs += (center[i] + radius[i] - best[i]) * diff;
} else {
rhs = rhs + (center[i] - radius[i] - best[i]) * diff;
rhs += (center[i] - radius[i] - best[i]) * diff;
}
}
@@ -244,7 +244,7 @@ impl<T: RealNumber> BBDTree<T> {
if end > begin + 1 {
let len = end - begin;
for i in 0..d {
node.sum[i] = node.sum[i] * T::from(len).unwrap();
node.sum[i] *= T::from(len).unwrap();
}
}
@@ -261,9 +261,7 @@ impl<T: RealNumber> BBDTree<T> {
let mut i2_good = data.get(self.index[i2], split_index) >= split_cutoff;
if !i1_good && !i2_good {
let temp = self.index[i1];
self.index[i1] = self.index[i2];
self.index[i2] = temp;
self.index.swap(i1, i2);
i1_good = true;
i2_good = true;
}
@@ -302,7 +300,7 @@ impl<T: RealNumber> BBDTree<T> {
let mut scatter = T::zero();
for i in 0..d {
let x = (node.sum[i] / T::from(node.count).unwrap()) - center[i];
scatter = scatter + x * x;
scatter += x * x;
}
node.cost + T::from(node.count).unwrap() * scatter
}
+13 -12
@@ -51,7 +51,7 @@ impl<T, F: RealNumber, D: Distance<T, F>> PartialEq for CoverTree<T, F, D> {
return false;
}
}
return true;
true
}
}
@@ -84,11 +84,11 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
scale: 0,
};
let mut tree = CoverTree {
base: base,
base,
inv_log_base: F::one() / base.ln(),
distance: distance,
root: root,
data: data,
distance,
root,
data,
identical_excluded: false,
};
@@ -147,11 +147,12 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
*heap.peek()
};
if d <= (upper_bound + child.max_dist) {
if c > 0 && d < upper_bound {
if !self.identical_excluded || self.get_data_value(child.idx) != p {
if c > 0
&& d < upper_bound
&& (!self.identical_excluded || self.get_data_value(child.idx) != p)
{
heap.add(d);
}
}
if !child.children.is_empty() {
next_cover_set.push((d, child));
@@ -234,7 +235,7 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
fn new_leaf(&self, idx: usize) -> Node<F> {
Node {
idx: idx,
idx,
max_dist: F::zero(),
parent_dist: F::zero(),
children: Vec::new(),
@@ -298,7 +299,7 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
idx: p,
max_dist: F::zero(),
parent_dist: F::zero(),
children: children,
children,
scale: 100,
}
} else {
@@ -368,7 +369,7 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
idx: p,
max_dist: self.max(consumed_set),
parent_dist: F::zero(),
children: children,
children,
scale: (top_scale - max_scale),
}
}
@@ -442,7 +443,7 @@ impl<T: Debug + PartialEq, F: RealNumber, D: Distance<T, F>> CoverTree<T, F, D>
max = n.dist[n.dist.len() - 1];
}
}
return max;
max
}
}
+5 -5
@@ -44,8 +44,8 @@ impl<T, F: RealNumber, D: Distance<T, F>> LinearKNNSearch<T, F, D> {
/// * `distance` - distance metric to use for searching. This function should extend [`Distance`](../../../math/distance/index.html) interface.
pub fn new(data: Vec<T>, distance: D) -> Result<LinearKNNSearch<T, F, D>, Failed> {
Ok(LinearKNNSearch {
data: data,
distance: distance,
data,
distance,
f: PhantomData,
})
}
@@ -157,7 +157,7 @@ mod tests {
.iter()
.map(|v| v.0)
.collect();
found_idxs1.sort();
found_idxs1.sort_unstable();
assert_eq!(vec!(0, 1, 2), found_idxs1);
@@ -167,7 +167,7 @@ mod tests {
.iter()
.map(|v| *v.2)
.collect();
found_idxs1.sort();
found_idxs1.sort_unstable();
assert_eq!(vec!(2, 3, 4, 5, 6, 7, 8), found_idxs1);
@@ -187,7 +187,7 @@ mod tests {
.iter()
.map(|v| v.0)
.collect();
found_idxs2.sort();
found_idxs2.sort_unstable();
assert_eq!(vec!(1, 2, 3), found_idxs2);
}
+2 -2
@@ -66,10 +66,10 @@ impl KNNAlgorithmName {
) -> Result<KNNAlgorithm<T, D>, Failed> {
match *self {
KNNAlgorithmName::LinearSearch => {
LinearKNNSearch::new(data, distance).map(|a| KNNAlgorithm::LinearSearch(a))
LinearKNNSearch::new(data, distance).map(KNNAlgorithm::LinearSearch)
}
KNNAlgorithmName::CoverTree => {
CoverTree::new(data, distance).map(|a| KNNAlgorithm::CoverTree(a))
CoverTree::new(data, distance).map(KNNAlgorithm::CoverTree)
}
}
}
+4 -4
@@ -15,7 +15,7 @@ pub struct HeapSelection<T: PartialOrd + Debug> {
impl<'a, T: PartialOrd + Debug> HeapSelection<T> {
pub fn with_capacity(k: usize) -> HeapSelection<T> {
HeapSelection {
k: k,
k,
n: 0,
sorted: false,
heap: Vec::new(),
@@ -51,7 +51,7 @@ impl<'a, T: PartialOrd + Debug> HeapSelection<T> {
pub fn peek(&self) -> &T {
if self.sorted {
return &self.heap[0];
&self.heap[0]
} else {
&self
.heap
@@ -62,11 +62,11 @@ impl<'a, T: PartialOrd + Debug> HeapSelection<T> {
}
pub fn peek_mut(&mut self) -> &mut T {
return &mut self.heap[0];
&mut self.heap[0]
}
pub fn get(self) -> Vec<T> {
return self.heap;
self.heap
}
fn sift_down(&mut self, k: usize, n: usize) {
+2 -2
@@ -93,11 +93,11 @@ impl<T: RealNumber + Sum, D: Distance<Vec<T>, T>> DBSCAN<T, D> {
parameters: DBSCANParameters<T>,
) -> Result<DBSCAN<T, D>, Failed> {
if parameters.min_samples < 1 {
return Err(Failed::fit(&format!("Invalid minPts")));
return Err(Failed::fit(&"Invalid minPts".to_string()));
}
if parameters.eps <= T::zero() {
return Err(Failed::fit(&format!("Invalid radius: ")));
return Err(Failed::fit(&"Invalid radius: ".to_string()));
}
let mut k = 0;
+9 -9
@@ -149,13 +149,13 @@ impl<T: RealNumber + Sum> KMeans<T> {
for i in 0..n {
for j in 0..d {
centroids[y[i]][j] = centroids[y[i]][j] + data.get(i, j);
centroids[y[i]][j] += data.get(i, j);
}
}
for i in 0..k {
for j in 0..d {
centroids[i][j] = centroids[i][j] / T::from(size[i]).unwrap();
centroids[i][j] /= T::from(size[i]).unwrap();
}
}
@@ -178,11 +178,11 @@ impl<T: RealNumber + Sum> KMeans<T> {
}
Ok(KMeans {
k: k,
y: y,
size: size,
distortion: distortion,
centroids: centroids,
k,
y,
size,
distortion,
centroids,
})
}
@@ -235,13 +235,13 @@ impl<T: RealNumber + Sum> KMeans<T> {
let mut sum: T = T::zero();
for i in d.iter() {
sum = sum + *i;
sum += *i;
}
let cutoff = T::from(rng.gen::<f64>()).unwrap() * sum;
let mut cost = T::zero();
let mut index = 0;
while index < n {
cost = cost + d[index];
cost += d[index];
if cost >= cutoff {
break;
}
+2 -2
@@ -38,8 +38,8 @@ pub fn load_dataset() -> Dataset<f32, f32> {
Dataset {
data: x,
target: y,
num_samples: num_samples,
num_features: num_features,
num_samples,
num_features,
feature_names: vec![
"CRIM", "ZN", "INDUS", "CHAS", "NOX", "RM", "AGE", "DIS", "RAD", "TAX", "PTRATIO", "B",
"LSTAT",
+2 -2
@@ -40,8 +40,8 @@ pub fn load_dataset() -> Dataset<f32, f32> {
Dataset {
data: x,
target: y,
num_samples: num_samples,
num_features: num_features,
num_samples,
num_features,
feature_names: vec![
"mean radius", "mean texture", "mean perimeter", "mean area",
"mean smoothness", "mean compactness", "mean concavity",
+2 -2
@@ -33,8 +33,8 @@ pub fn load_dataset() -> Dataset<f32, f32> {
Dataset {
data: x,
target: y,
num_samples: num_samples,
num_features: num_features,
num_samples,
num_features,
feature_names: vec![
"Age", "Sex", "BMI", "BP", "S1", "S2", "S3", "S4", "S5", "S6",
]
+2 -2
@@ -23,8 +23,8 @@ pub fn load_dataset() -> Dataset<f32, f32> {
Dataset {
data: x,
target: y,
num_samples: num_samples,
num_features: num_features,
num_samples,
num_features,
feature_names: vec![
"sepal length (cm)",
"sepal width (cm)",
+3 -3
@@ -39,8 +39,8 @@ pub fn make_blobs(
Dataset {
data: x,
target: y,
num_samples: num_samples,
num_features: num_features,
num_samples,
num_features,
feature_names: (0..num_features).map(|n| n.to_string()).collect(),
target_names: vec!["label".to_string()],
description: "Isotropic Gaussian blobs".to_string(),
@@ -82,7 +82,7 @@ pub fn make_circles(num_samples: usize, factor: f32, noise: f32) -> Dataset<f32,
Dataset {
data: x,
target: y,
num_samples: num_samples,
num_samples,
num_features: 2,
feature_names: (0..2).map(|n| n.to_string()).collect(),
target_names: vec!["label".to_string()],
+2 -2
@@ -28,8 +28,8 @@ pub fn load_dataset() -> Dataset<f32, f32> {
Dataset {
data: x,
target: y,
num_samples: num_samples,
num_features: num_features,
num_samples,
num_features,
feature_names: vec![
"sepal length (cm)",
"sepal width (cm)",
+2 -2
@@ -61,14 +61,14 @@ pub(crate) fn serialize_data<X: RealNumber, Y: RealNumber>(
let x: Vec<u8> = dataset
.data
.iter()
.map(|v| *v)
.copied()
.flat_map(|f| f.to_f32_bits().to_le_bytes().to_vec().into_iter())
.collect();
file.write_all(&x)?;
let y: Vec<u8> = dataset
.target
.iter()
.map(|v| *v)
.copied()
.flat_map(|f| f.to_f32_bits().to_le_bytes().to_vec().into_iter())
.collect();
file.write_all(&y)?;
+7 -7
@@ -68,14 +68,14 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for PCA<T, M> {
if self.eigenvectors != other.eigenvectors
|| self.eigenvalues.len() != other.eigenvalues.len()
{
return false;
false
} else {
for i in 0..self.eigenvalues.len() {
if (self.eigenvalues[i] - other.eigenvalues[i]).abs() > T::epsilon() {
return false;
}
}
return true;
true
}
}
}
@@ -190,16 +190,16 @@ impl<T: RealNumber, M: Matrix<T>> PCA<T, M> {
let mut pmu = vec![T::zero(); n_components];
for k in 0..n {
for i in 0..n_components {
pmu[i] = pmu[i] + projection.get(i, k) * mu[k];
pmu[i] += projection.get(i, k) * mu[k];
}
}
Ok(PCA {
eigenvectors: eigenvectors,
eigenvalues: eigenvalues,
eigenvectors,
eigenvalues,
projection: projection.transpose(),
mu: mu,
pmu: pmu,
mu,
pmu,
})
}
+4 -4
@@ -89,7 +89,7 @@ pub struct RandomForestClassifier<T: RealNumber> {
impl<T: RealNumber> PartialEq for RandomForestClassifier<T> {
fn eq(&self, other: &Self) -> bool {
if self.classes.len() != other.classes.len() || self.trees.len() != other.trees.len() {
return false;
false
} else {
for i in 0..self.classes.len() {
if (self.classes[i] - other.classes[i]).abs() > T::epsilon() {
@@ -164,8 +164,8 @@ impl<T: RealNumber> RandomForestClassifier<T> {
}
Ok(RandomForestClassifier {
parameters: parameters,
trees: trees,
parameters,
trees,
classes,
})
}
@@ -191,7 +191,7 @@ impl<T: RealNumber> RandomForestClassifier<T> {
result[tree.predict_for_row(x, row)] += 1;
}
return which_max(&result);
which_max(&result)
}
fn sample_with_replacement(y: &Vec<usize>, num_classes: usize) -> Vec<usize> {
+3 -6
@@ -95,7 +95,7 @@ impl Default for RandomForestRegressorParameters {
impl<T: RealNumber> PartialEq for RandomForestRegressor<T> {
fn eq(&self, other: &Self) -> bool {
if self.trees.len() != other.trees.len() {
return false;
false
} else {
for i in 0..self.trees.len() {
if self.trees[i] != other.trees[i] {
@@ -135,10 +135,7 @@ impl<T: RealNumber> RandomForestRegressor<T> {
trees.push(tree);
}
Ok(RandomForestRegressor {
parameters: parameters,
trees: trees,
})
Ok(RandomForestRegressor { parameters, trees })
}
/// Predict class for `x`
@@ -161,7 +158,7 @@ impl<T: RealNumber> RandomForestRegressor<T> {
let mut result = T::zero();
for tree in self.trees.iter() {
result = result + tree.predict_for_row(x, row);
result += tree.predict_for_row(x, row);
}
result / T::from(n_trees).unwrap()
+1 -1
@@ -61,7 +61,7 @@ impl Failed {
/// new instance of `err`
pub fn because(err: FailedError, msg: &str) -> Self {
Failed {
err: err,
err,
msg: msg.to_string(),
}
}
+6 -8
@@ -46,10 +46,7 @@ pub struct Cholesky<T: RealNumber, M: BaseMatrix<T>> {
impl<T: RealNumber, M: BaseMatrix<T>> Cholesky<T, M> {
pub(crate) fn new(R: M) -> Cholesky<T, M> {
Cholesky {
R: R,
t: PhantomData,
}
Cholesky { R, t: PhantomData }
}
/// Get lower triangular matrix.
@@ -90,7 +87,8 @@ impl<T: RealNumber, M: BaseMatrix<T>> Cholesky<T, M> {
if bn != rn {
return Err(Failed::because(
FailedError::SolutionFailed,
&format!("Can't solve Ax = b for x. Number of rows in b != number of rows in R."),
&"Can\'t solve Ax = b for x. Number of rows in b != number of rows in R."
.to_string(),
));
}
@@ -130,7 +128,7 @@ pub trait CholeskyDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
if m != n {
return Err(Failed::because(
FailedError::DecompositionFailed,
&format!("Can't do Cholesky decomposition on a non-square matrix"),
&"Can\'t do Cholesky decomposition on a non-square matrix".to_string(),
));
}
@@ -143,14 +141,14 @@ pub trait CholeskyDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
}
s = (self.get(j, k) - s) / self.get(k, k);
self.set(j, k, s);
d = d + s * s;
d += s * s;
}
d = self.get(j, j) - d;
if d < T::zero() {
return Err(Failed::because(
FailedError::DecompositionFailed,
&format!("The matrix is not positive definite."),
&"The matrix is not positive definite.".to_string(),
));
}
+45 -45
@@ -93,7 +93,7 @@ pub trait EVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
sort(&mut d, &mut e, &mut V);
}
Ok(EVD { V: V, d: d, e: e })
Ok(EVD { V, d, e })
}
}
@@ -107,7 +107,7 @@ fn tred2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec
let mut scale = T::zero();
let mut h = T::zero();
for k in 0..i {
scale = scale + d[k].abs();
scale += d[k].abs();
}
if scale == T::zero() {
e[i] = d[i - 1];
@@ -118,8 +118,8 @@ fn tred2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec
}
} else {
for k in 0..i {
d[k] = d[k] / scale;
h = h + d[k] * d[k];
d[k] /= scale;
h += d[k] * d[k];
}
let mut f = d[i - 1];
let mut g = h.sqrt();
@@ -127,7 +127,7 @@ fn tred2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec
g = -g;
}
e[i] = scale * g;
h = h - f * g;
h -= f * g;
d[i - 1] = f - g;
for j in 0..i {
e[j] = T::zero();
@@ -138,19 +138,19 @@ fn tred2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec
V.set(j, i, f);
g = e[j] + V.get(j, j) * f;
for k in j + 1..=i - 1 {
g = g + V.get(k, j) * d[k];
e[k] = e[k] + V.get(k, j) * f;
g += V.get(k, j) * d[k];
e[k] += V.get(k, j) * f;
}
e[j] = g;
}
f = T::zero();
for j in 0..i {
e[j] = e[j] / h;
f = f + e[j] * d[j];
e[j] /= h;
f += e[j] * d[j];
}
let hh = f / (h + h);
for j in 0..i {
e[j] = e[j] - hh * d[j];
e[j] -= hh * d[j];
}
for j in 0..i {
f = d[j];
@@ -176,7 +176,7 @@ fn tred2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec
for j in 0..=i {
let mut g = T::zero();
for k in 0..=i {
g = g + V.get(k, i + 1) * V.get(k, j);
g += V.get(k, i + 1) * V.get(k, j);
}
for k in 0..=i {
V.sub_element_mut(k, j, g * d[k]);
@@ -239,9 +239,9 @@ fn tql2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<
let dl1 = d[l + 1];
let mut h = g - d[l];
for i in l + 2..n {
d[i] = d[i] - h;
d[i] -= h;
}
f = f + h;
f += h;
p = d[m];
let mut c = T::one();
@@ -278,7 +278,7 @@ fn tql2<T: RealNumber, M: BaseMatrix<T>>(V: &mut M, d: &mut Vec<T>, e: &mut Vec<
}
}
}
d[l] = d[l] + f;
d[l] += f;
e[l] = T::zero();
}
@@ -321,8 +321,8 @@ fn balance<T: RealNumber, M: BaseMatrix<T>>(A: &mut M) -> Vec<T> {
let mut c = T::zero();
for j in 0..n {
if j != i {
c = c + A.get(j, i).abs();
r = r + A.get(i, j).abs();
c += A.get(j, i).abs();
r += A.get(i, j).abs();
}
}
if c != T::zero() && r != T::zero() {
@@ -330,18 +330,18 @@ fn balance<T: RealNumber, M: BaseMatrix<T>>(A: &mut M) -> Vec<T> {
let mut f = T::one();
let s = c + r;
while c < g {
f = f * radix;
c = c * sqrdx;
f *= radix;
c *= sqrdx;
}
g = r * radix;
while c > g {
f = f / radix;
c = c / sqrdx;
f /= radix;
c /= sqrdx;
}
if (c + r) / f < t * s {
done = false;
g = T::one() / f;
scale[i] = scale[i] * f;
scale[i] *= f;
for j in 0..n {
A.mul_element_mut(i, j, g);
}
@@ -353,7 +353,7 @@ fn balance<T: RealNumber, M: BaseMatrix<T>>(A: &mut M) -> Vec<T> {
}
}
return scale;
scale
}
fn elmhes<T: RealNumber, M: BaseMatrix<T>>(A: &mut M) -> Vec<usize> {
@@ -386,7 +386,7 @@ fn elmhes<T: RealNumber, M: BaseMatrix<T>>(A: &mut M) -> Vec<usize> {
for i in (m + 1)..n {
let mut y = A.get(i, m - 1);
if y != T::zero() {
y = y / x;
y /= x;
A.set(i, m - 1, y);
for j in m..n {
A.sub_element_mut(i, j, y * A.get(m, j));
@@ -399,7 +399,7 @@ fn elmhes<T: RealNumber, M: BaseMatrix<T>>(A: &mut M) -> Vec<usize> {
}
}
return perm;
perm
}
fn eltran<T: RealNumber, M: BaseMatrix<T>>(A: &M, V: &mut M, perm: &Vec<usize>) {
@@ -430,7 +430,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
for i in 0..n {
for j in i32::max(i as i32 - 1, 0)..n as i32 {
anorm = anorm + A.get(i, j as usize).abs();
anorm += A.get(i, j as usize).abs();
}
}
@@ -467,7 +467,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
p = T::half() * (y - x);
q = p * p + w;
z = q.abs().sqrt();
x = x + t;
x += t;
A.set(nn, nn, x);
A.set(nn - 1, nn - 1, y + t);
if q >= T::zero() {
@@ -482,8 +482,8 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
p = x / s;
q = z / s;
r = (p * p + q * q).sqrt();
p = p / r;
q = q / r;
p /= r;
q /= r;
for j in nn - 1..n {
z = A.get(nn - 1, j);
A.set(nn - 1, j, q * z + p * A.get(nn, j));
@@ -516,7 +516,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
panic!("Too many iterations in hqr");
}
if its == 10 || its == 20 {
t = t + x;
t += x;
for i in 0..nn + 1 {
A.sub_element_mut(i, i, x);
}
@@ -535,9 +535,9 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
q = A.get(m + 1, m + 1) - z - r - s;
r = A.get(m + 2, m + 1);
s = p.abs() + q.abs() + r.abs();
p = p / s;
q = q / s;
r = r / s;
p /= s;
q /= s;
r /= s;
if m == l {
break;
}
@@ -565,9 +565,9 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
}
x = p.abs() + q.abs() + r.abs();
if x != T::zero() {
p = p / x;
q = q / x;
r = r / x;
p /= x;
q /= x;
r /= x;
}
}
let s = (p * p + q * q + r * r).sqrt().copysign(p);
@@ -579,16 +579,16 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
} else {
A.set(k, k - 1, -s * x);
}
p = p + s;
p += s;
x = p / s;
y = q / s;
z = r / s;
q = q / p;
r = r / p;
q /= p;
r /= p;
for j in k..n {
p = A.get(k, j) + q * A.get(k + 1, j);
if k + 1 != nn {
p = p + r * A.get(k + 2, j);
p += r * A.get(k + 2, j);
A.sub_element_mut(k + 2, j, p * z);
}
A.sub_element_mut(k + 1, j, p * y);
@@ -603,7 +603,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
for i in 0..mmin + 1 {
p = x * A.get(i, k) + y * A.get(i, k + 1);
if k + 1 != nn {
p = p + z * A.get(i, k + 2);
p += z * A.get(i, k + 2);
A.sub_element_mut(i, k + 2, p * r);
}
A.sub_element_mut(i, k + 1, p * q);
@@ -612,7 +612,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
for i in 0..n {
p = x * V.get(i, k) + y * V.get(i, k + 1);
if k + 1 != nn {
p = p + z * V.get(i, k + 2);
p += z * V.get(i, k + 2);
V.sub_element_mut(i, k + 2, p * r);
}
V.sub_element_mut(i, k + 1, p * q);
@@ -642,7 +642,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
let w = A.get(i, i) - p;
r = T::zero();
for j in m..=nn {
r = r + A.get(i, j) * A.get(j, nn);
r += A.get(i, j) * A.get(j, nn);
}
if e[i] < T::zero() {
z = w;
@@ -701,8 +701,8 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
let mut ra = T::zero();
let mut sa = T::zero();
for j in m..=nn {
ra = ra + A.get(i, j) * A.get(j, na);
sa = sa + A.get(i, j) * A.get(j, nn);
ra += A.get(i, j) * A.get(j, na);
sa += A.get(i, j) * A.get(j, nn);
}
if e[i] < T::zero() {
z = w;
@@ -766,7 +766,7 @@ fn hqr2<T: RealNumber, M: BaseMatrix<T>>(A: &mut M, V: &mut M, d: &mut Vec<T>, e
for i in 0..n {
z = T::zero();
for k in 0..=j {
z = z + V.get(i, k) * A.get(k, j);
z += V.get(i, k) * A.get(k, j);
}
V.set(i, j, z);
}
+7 -9
@@ -63,10 +63,10 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
}
LU {
LU: LU,
pivot: pivot,
pivot_sign: pivot_sign,
singular: singular,
LU,
pivot,
pivot_sign,
singular,
phantom: PhantomData,
}
}
@@ -220,10 +220,10 @@ pub trait LUDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
let kmax = usize::min(i, j);
let mut s = T::zero();
for k in 0..kmax {
s = s + self.get(i, k) * LUcolj[k];
s += self.get(i, k) * LUcolj[k];
}
LUcolj[i] = LUcolj[i] - s;
LUcolj[i] -= s;
self.set(i, j, LUcolj[i]);
}
@@ -239,9 +239,7 @@ pub trait LUDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
self.set(p, k, self.get(j, k));
self.set(j, k, t);
}
let k = piv[p];
piv[p] = piv[j];
piv[j] = k;
piv.swap(p, j);
pivsign = -pivsign;
}
+1 -1
@@ -517,7 +517,7 @@ pub trait Matrix<T: RealNumber>:
pub(crate) fn row_iter<F: RealNumber, M: BaseMatrix<F>>(m: &M) -> RowIter<F, M> {
RowIter {
m: m,
m,
pos: 0,
max_pos: m.shape().0,
phantom: PhantomData,
+40 -40
@@ -53,7 +53,7 @@ impl<T: RealNumber> BaseVector<T> for Vec<T> {
let mut result = T::zero();
for i in 0..self.len() {
result = result + self[i] * other[i];
result += self[i] * other[i];
}
result
@@ -63,7 +63,7 @@ impl<T: RealNumber> BaseVector<T> for Vec<T> {
let mut norm = T::zero();
for xi in self.iter() {
norm = norm + *xi * *xi;
norm += *xi * *xi;
}
norm.sqrt()
@@ -82,7 +82,7 @@ impl<T: RealNumber> BaseVector<T> for Vec<T> {
let mut norm = T::zero();
for xi in self.iter() {
norm = norm + xi.abs().powf(p);
norm += xi.abs().powf(p);
}
norm.powf(T::one() / p)
@@ -90,19 +90,19 @@ impl<T: RealNumber> BaseVector<T> for Vec<T> {
}
fn div_element_mut(&mut self, pos: usize, x: T) {
self[pos] = self[pos] / x;
self[pos] /= x;
}
fn mul_element_mut(&mut self, pos: usize, x: T) {
self[pos] = self[pos] * x;
self[pos] *= x;
}
fn add_element_mut(&mut self, pos: usize, x: T) {
self[pos] = self[pos] + x
self[pos] += x
}
fn sub_element_mut(&mut self, pos: usize, x: T) {
self[pos] = self[pos] - x;
self[pos] -= x;
}
fn add_mut(&mut self, other: &Self) -> &Self {
@@ -165,7 +165,7 @@ impl<T: RealNumber> BaseVector<T> for Vec<T> {
fn sum(&self) -> T {
let mut sum = T::zero();
for i in 0..self.len() {
sum = sum + self[i];
sum += self[i];
}
sum
}
@@ -216,15 +216,15 @@ impl<T: RealNumber> DenseMatrix<T> {
/// `values` should be in column-major order.
pub fn new(nrows: usize, ncols: usize, values: Vec<T>) -> Self {
DenseMatrix {
ncols: ncols,
nrows: nrows,
values: values,
ncols,
nrows,
values,
}
}
/// New instance of `DenseMatrix` from 2d array.
pub fn from_2d_array(values: &[&[T]]) -> Self {
DenseMatrix::from_2d_vec(&values.into_iter().map(|row| Vec::from(*row)).collect())
DenseMatrix::from_2d_vec(&values.iter().map(|row| Vec::from(*row)).collect())
}
/// New instance of `DenseMatrix` from 2d vector.
@@ -235,8 +235,8 @@ impl<T: RealNumber> DenseMatrix<T> {
.unwrap_or_else(|| panic!("Cannot create 2d matrix from an empty vector"))
.len();
let mut m = DenseMatrix {
ncols: ncols,
nrows: nrows,
ncols,
nrows,
values: vec![T::zero(); ncols * nrows],
};
for row in 0..nrows {
@@ -261,8 +261,8 @@ impl<T: RealNumber> DenseMatrix<T> {
/// * `values` - values to initialize the matrix.
pub fn from_vec(nrows: usize, ncols: usize, values: &Vec<T>) -> DenseMatrix<T> {
let mut m = DenseMatrix {
ncols: ncols,
nrows: nrows,
ncols,
nrows,
values: vec![T::zero(); ncols * nrows],
};
for row in 0..nrows {
@@ -285,7 +285,7 @@ impl<T: RealNumber> DenseMatrix<T> {
DenseMatrix {
ncols: values.len(),
nrows: 1,
values: values,
values,
}
}
@@ -301,7 +301,7 @@ impl<T: RealNumber> DenseMatrix<T> {
DenseMatrix {
ncols: 1,
nrows: values.len(),
values: values,
values,
}
}
@@ -412,7 +412,7 @@ impl<'de, T: RealNumber + fmt::Debug + Deserialize<'de>> Deserialize<'de> for De
}
}
const FIELDS: &'static [&'static str] = &["nrows", "ncols", "values"];
const FIELDS: &[&str] = &["nrows", "ncols", "values"];
deserializer.deserialize_struct(
"DenseMatrix",
FIELDS,
@@ -562,7 +562,7 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
matrix.set(i, i, T::one());
}
return matrix;
matrix
}
fn shape(&self) -> (usize, usize) {
@@ -614,7 +614,7 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
for c in 0..other.ncols {
let mut s = T::zero();
for i in 0..inner_d {
s = s + self.get(r, i) * other.get(i, c);
s += self.get(r, i) * other.get(i, c);
}
result.set(r, c, s);
}
@@ -633,7 +633,7 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
let mut result = T::zero();
for i in 0..(self.nrows * self.ncols) {
result = result + self.values[i] * other.values[i];
result += self.values[i] * other.values[i];
}
result
@@ -727,19 +727,19 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
}
fn div_element_mut(&mut self, row: usize, col: usize, x: T) {
self.values[col * self.nrows + row] = self.values[col * self.nrows + row] / x;
self.values[col * self.nrows + row] /= x;
}
fn mul_element_mut(&mut self, row: usize, col: usize, x: T) {
self.values[col * self.nrows + row] = self.values[col * self.nrows + row] * x;
self.values[col * self.nrows + row] *= x;
}
fn add_element_mut(&mut self, row: usize, col: usize, x: T) {
self.values[col * self.nrows + row] = self.values[col * self.nrows + row] + x
self.values[col * self.nrows + row] += x
}
fn sub_element_mut(&mut self, row: usize, col: usize, x: T) {
self.values[col * self.nrows + row] = self.values[col * self.nrows + row] - x;
self.values[col * self.nrows + row] -= x;
}
fn transpose(&self) -> Self {
@@ -759,9 +759,9 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
fn rand(nrows: usize, ncols: usize) -> Self {
let values: Vec<T> = (0..nrows * ncols).map(|_| T::rand()).collect();
DenseMatrix {
ncols: ncols,
nrows: nrows,
values: values,
ncols,
nrows,
values,
}
}
@@ -769,7 +769,7 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
let mut norm = T::zero();
for xi in self.values.iter() {
norm = norm + *xi * *xi;
norm += *xi * *xi;
}
norm.sqrt()
@@ -790,7 +790,7 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
let mut norm = T::zero();
for xi in self.values.iter() {
norm = norm + xi.abs().powf(p);
norm += xi.abs().powf(p);
}
norm.powf(T::one() / p)
@@ -802,12 +802,12 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
for r in 0..self.nrows {
for c in 0..self.ncols {
mean[c] = mean[c] + self.get(r, c);
mean[c] += self.get(r, c);
}
}
for i in 0..mean.len() {
mean[i] = mean[i] / T::from(self.nrows).unwrap();
mean[i] /= T::from(self.nrows).unwrap();
}
mean
@@ -815,28 +815,28 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
fn add_scalar_mut(&mut self, scalar: T) -> &Self {
for i in 0..self.values.len() {
self.values[i] = self.values[i] + scalar;
self.values[i] += scalar;
}
self
}
fn sub_scalar_mut(&mut self, scalar: T) -> &Self {
for i in 0..self.values.len() {
self.values[i] = self.values[i] - scalar;
self.values[i] -= scalar;
}
self
}
fn mul_scalar_mut(&mut self, scalar: T) -> &Self {
for i in 0..self.values.len() {
self.values[i] = self.values[i] * scalar;
self.values[i] *= scalar;
}
self
}
fn div_scalar_mut(&mut self, scalar: T) -> &Self {
for i in 0..self.values.len() {
self.values[i] = self.values[i] / scalar;
self.values[i] /= scalar;
}
self
}
@@ -902,7 +902,7 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
fn sum(&self) -> T {
let mut sum = T::zero();
for i in 0..self.values.len() {
sum = sum + self.values[i];
sum += self.values[i];
}
sum
}
@@ -934,7 +934,7 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
for c in 0..self.ncols {
let p = (self.get(r, c) - max).exp();
self.set(r, c, p);
z = z + p;
z += p;
}
}
for r in 0..self.nrows {
@@ -1058,7 +1058,7 @@ mod tests {
DenseMatrix::new(1, 3, vec![1., 2., 3.])
);
assert_eq!(
DenseMatrix::from_row_vector(vec.clone()).to_row_vector(),
DenseMatrix::from_row_vector(vec).to_row_vector(),
vec![1., 2., 3.]
);
}
+6 -10
@@ -51,11 +51,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
}
}
QR {
QR: QR,
tau: tau,
singular: singular,
}
QR { QR, tau, singular }
}
/// Get upper triangular matrix.
@@ -68,7 +64,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
R.set(i, j, self.QR.get(i, j));
}
}
return R;
R
}
/// Get an orthogonal matrix.
@@ -82,7 +78,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
if self.QR.get(k, k) != T::zero() {
let mut s = T::zero();
for i in k..m {
s = s + self.QR.get(i, k) * Q.get(i, j);
s += self.QR.get(i, k) * Q.get(i, j);
}
s = -s / self.QR.get(k, k);
for i in k..m {
@@ -96,7 +92,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
k -= 1;
}
}
return Q;
Q
}
fn solve(&self, mut b: M) -> Result<M, Failed> {
@@ -118,7 +114,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
for j in 0..b_ncols {
let mut s = T::zero();
for i in k..m {
s = s + self.QR.get(i, k) * b.get(i, j);
s += self.QR.get(i, k) * b.get(i, j);
}
s = -s / self.QR.get(k, k);
for i in k..m {
@@ -175,7 +171,7 @@ pub trait QRDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
for j in k + 1..n {
let mut s = T::zero();
for i in k..m {
s = s + self.get(i, k) * self.get(i, j);
s += self.get(i, k) * self.get(i, j);
}
s = -s / self.get(k, k);
for i in k..m {
+20 -20
@@ -106,13 +106,13 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
if i < m {
for k in i..m {
scale = scale + U.get(k, i).abs();
scale += U.get(k, i).abs();
}
if scale.abs() > T::epsilon() {
for k in i..m {
U.div_element_mut(k, i, scale);
s = s + U.get(k, i) * U.get(k, i);
s += U.get(k, i) * U.get(k, i);
}
let mut f = U.get(i, i);
@@ -122,7 +122,7 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
for j in l - 1..n {
s = T::zero();
for k in i..m {
s = s + U.get(k, i) * U.get(k, j);
s += U.get(k, i) * U.get(k, j);
}
f = s / h;
for k in i..m {
@@ -140,15 +140,15 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
let mut s = T::zero();
scale = T::zero();
if i + 1 <= m && i + 1 != n {
if i < m && i + 1 != n {
for k in l - 1..n {
scale = scale + U.get(i, k).abs();
scale += U.get(i, k).abs();
}
if scale.abs() > T::epsilon() {
for k in l - 1..n {
U.div_element_mut(i, k, scale);
s = s + U.get(i, k) * U.get(i, k);
s += U.get(i, k) * U.get(i, k);
}
let f = U.get(i, l - 1);
@@ -163,7 +163,7 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
for j in l - 1..m {
s = T::zero();
for k in l - 1..n {
s = s + U.get(j, k) * U.get(i, k);
s += U.get(j, k) * U.get(i, k);
}
for k in l - 1..n {
@@ -189,7 +189,7 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
for j in l..n {
let mut s = T::zero();
for k in l..n {
s = s + U.get(i, k) * v.get(k, j);
s += U.get(i, k) * v.get(k, j);
}
for k in l..n {
v.add_element_mut(k, j, s * v.get(k, i));
@@ -218,7 +218,7 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
for j in l..n {
let mut s = T::zero();
for k in l..m {
s = s + U.get(k, i) * U.get(k, j);
s += U.get(k, i) * U.get(k, j);
}
let f = (s / U.get(i, i)) * g;
for k in i..m {
@@ -316,7 +316,7 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
f = x * c + g * s;
g = g * c - x * s;
h = y * s;
y = y * c;
y *= c;
for jj in 0..n {
x = v.get(jj, j);
@@ -431,13 +431,13 @@ impl<T: RealNumber, M: SVDDecomposableMatrix<T>> SVD<T, M> {
let full = s.len() == m.min(n);
let tol = T::half() * (T::from(m + n).unwrap() + T::one()).sqrt() * s[0] * T::epsilon();
SVD {
U: U,
V: V,
s: s,
full: full,
m: m,
n: n,
tol: tol,
U,
V,
s,
full,
m,
n,
tol,
}
}
@@ -458,9 +458,9 @@ impl<T: RealNumber, M: SVDDecomposableMatrix<T>> SVD<T, M> {
let mut r = T::zero();
if self.s[j] > self.tol {
for i in 0..self.m {
r = r + self.U.get(i, j) * b.get(i, k);
r += self.U.get(i, j) * b.get(i, k);
}
r = r / self.s[j];
r /= self.s[j];
}
tmp[j] = r;
}
@@ -468,7 +468,7 @@ impl<T: RealNumber, M: SVDDecomposableMatrix<T>> SVD<T, M> {
for j in 0..self.n {
let mut r = T::zero();
for jj in 0..self.n {
r = r + self.V.get(j, jj) * tmp[jj];
r += self.V.get(j, jj) * tmp[jj];
}
b.set(j, k, r);
}
+3 -3
@@ -123,9 +123,9 @@ impl<T: RealNumber, M: Matrix<T>> LinearRegression<T, M> {
let (y_nrows, _) = b.shape();
if x_nrows != y_nrows {
return Err(Failed::fit(&format!(
"Number of rows of X doesn't match number of rows of Y"
)));
return Err(Failed::fit(
&"Number of rows of X doesn\'t match number of rows of Y".to_string(),
));
}
let a = x.h_stack(&M::ones(x_nrows, 1));
+18 -18
@@ -82,7 +82,7 @@ trait ObjectiveFunction<T: RealNumber, M: Matrix<T>> {
let mut sum = T::zero();
let p = x.shape().1;
for i in 0..p {
sum = sum + x.get(m_row, i) * w.get(0, i + v_col);
sum += x.get(m_row, i) * w.get(0, i + v_col);
}
sum + w.get(0, p + v_col)
@@ -101,7 +101,7 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for LogisticRegression<T, M> {
|| self.num_attributes != other.num_attributes
|| self.classes.len() != other.classes.len()
{
return false;
false
} else {
for i in 0..self.classes.len() {
if (self.classes[i] - other.classes[i]).abs() > T::epsilon() {
@@ -109,7 +109,7 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for LogisticRegression<T, M> {
}
}
return self.weights == other.weights;
self.weights == other.weights
}
}
}
@@ -123,7 +123,7 @@ impl<'a, T: RealNumber, M: Matrix<T>> ObjectiveFunction<T, M>
for i in 0..n {
let wx = BinaryObjectiveFunction::partial_dot(w_bias, self.x, 0, i);
f = f + (wx.ln_1pe() - (T::from(self.y[i]).unwrap()) * wx);
f += wx.ln_1pe() - (T::from(self.y[i]).unwrap()) * wx;
}
f
@@ -169,7 +169,7 @@ impl<'a, T: RealNumber, M: Matrix<T>> ObjectiveFunction<T, M>
);
}
prob.softmax_mut();
f = f - prob.get(0, self.y[i]).ln();
f -= prob.get(0, self.y[i]).ln();
}
f
@@ -215,9 +215,9 @@ impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
let (_, y_nrows) = y_m.shape();
if x_nrows != y_nrows {
return Err(Failed::fit(&format!(
"Number of rows of X doesn't match number of rows of Y"
)));
return Err(Failed::fit(
&"Number of rows of X doesn\'t match number of rows of Y".to_string(),
));
}
let classes = y_m.unique();
@@ -240,7 +240,7 @@ impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
let x0 = M::zeros(1, num_attributes + 1);
let objective = BinaryObjectiveFunction {
x: x,
x,
y: yi,
phantom: PhantomData,
};
@@ -249,17 +249,17 @@ impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
Ok(LogisticRegression {
weights: result.x,
classes: classes,
num_attributes: num_attributes,
classes,
num_attributes,
num_classes: k,
})
} else {
let x0 = M::zeros(1, (num_attributes + 1) * k);
let objective = MultiClassObjectiveFunction {
x: x,
x,
y: yi,
k: k,
k,
phantom: PhantomData,
};
@@ -268,9 +268,9 @@ impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
let weights = result.x.reshape(k, num_attributes + 1);
Ok(LogisticRegression {
weights: weights,
classes: classes,
num_attributes: num_attributes,
weights,
classes,
num_attributes,
num_classes: k,
})
}
@@ -362,7 +362,7 @@ mod tests {
let objective = MultiClassObjectiveFunction {
x: &x,
y: y,
y,
k: 3,
phantom: PhantomData,
};
@@ -411,7 +411,7 @@ mod tests {
let objective = BinaryObjectiveFunction {
x: &x,
y: y,
y,
phantom: PhantomData,
};
+1 -1
@@ -38,7 +38,7 @@ impl Euclidian {
let mut sum = T::zero();
for i in 0..x.len() {
let d = x[i] - y[i];
sum = sum + d * d;
sum += d * d;
}
sum
+5 -5
@@ -68,8 +68,8 @@ impl<T: RealNumber, M: Matrix<T>> Mahalanobis<T, M> {
let sigma = data.cov();
let sigmaInv = sigma.lu().and_then(|lu| lu.inverse()).unwrap();
Mahalanobis {
sigma: sigma,
sigmaInv: sigmaInv,
sigma,
sigmaInv,
t: PhantomData,
}
}
@@ -80,8 +80,8 @@ impl<T: RealNumber, M: Matrix<T>> Mahalanobis<T, M> {
let sigma = cov.clone();
let sigmaInv = sigma.lu().and_then(|lu| lu.inverse()).unwrap();
Mahalanobis {
sigma: sigma,
sigmaInv: sigmaInv,
sigma,
sigmaInv,
t: PhantomData,
}
}
@@ -118,7 +118,7 @@ impl<T: RealNumber, M: Matrix<T>> Distance<Vec<T>, T> for Mahalanobis<T, M> {
let mut s = T::zero();
for j in 0..n {
for i in 0..n {
s = s + self.sigmaInv.get(i, j) * z[i] * z[j];
s += self.sigmaInv.get(i, j) * z[i] * z[j];
}
}
+1 -1
@@ -35,7 +35,7 @@ impl<T: RealNumber> Distance<Vec<T>, T> for Manhattan {
let mut dist = T::zero();
for i in 0..x.len() {
dist = dist + (x[i] - y[i]).abs();
dist += (x[i] - y[i]).abs();
}
dist
+1 -1
@@ -48,7 +48,7 @@ impl<T: RealNumber> Distance<Vec<T>, T> for Minkowski {
for i in 0..x.len() {
let d = (x[i] - y[i]).abs();
dist = dist + d.powf(p_t);
dist += d.powf(p_t);
}
dist.powf(T::one() / p_t)
+1 -1
@@ -45,7 +45,7 @@ impl Distances {
/// Minkowski distance, see [`Minkowski`](minkowski/index.html)
/// * `p` - function order. Should be >= 1
pub fn minkowski(p: u16) -> minkowski::Minkowski {
minkowski::Minkowski { p: p }
minkowski::Minkowski { p }
}
/// Manhattan distance, see [`Manhattan`](manhattan/index.html)
+10 -10
@@ -57,19 +57,19 @@ impl RealNumber for f64 {
fn ln_1pe(self) -> f64 {
if self > 15. {
return self;
self
} else {
return self.exp().ln_1p();
self.exp().ln_1p()
}
}
fn sigmoid(self) -> f64 {
if self < -40. {
return 0.;
0.
} else if self > 40. {
return 1.;
1.
} else {
return 1. / (1. + f64::exp(-self));
1. / (1. + f64::exp(-self))
}
}
@@ -98,19 +98,19 @@ impl RealNumber for f32 {
fn ln_1pe(self) -> f32 {
if self > 15. {
return self;
self
} else {
return self.exp().ln_1p();
self.exp().ln_1p()
}
}
fn sigmoid(self) -> f32 {
if self < -40. {
return 0.;
0.
} else if self > 40. {
return 1.;
1.
} else {
return 1. / (1. + f32::exp(-self));
1. / (1. + f32::exp(-self))
}
}
+3 -3
@@ -42,9 +42,9 @@ impl AUC {
for i in 0..n {
if y_true.get(i) == T::zero() {
neg = neg + T::one();
neg += T::one();
} else if y_true.get(i) == T::one() {
pos = pos + T::one();
pos += T::one();
} else {
panic!(
"AUC is only for binary classification. Invalid label: {}",
@@ -79,7 +79,7 @@ impl AUC {
let mut auc = T::zero();
for i in 0..n {
if y_true.get(label_idx[i]) == T::one() {
auc = auc + rank[i];
auc += rank[i];
}
}
+3 -4
@@ -37,7 +37,7 @@ pub fn entropy<T: RealNumber>(data: &Vec<T>) -> Option<T> {
for &c in bincounts.values() {
if c > 0 {
let pi = T::from_usize(c).unwrap();
entropy = entropy - (pi / sum) * (pi.ln() - sum.ln());
entropy -= (pi / sum) * (pi.ln() - sum.ln());
}
}
@@ -89,9 +89,8 @@ pub fn mutual_info_score<T: RealNumber>(contingency: &Vec<Vec<usize>>) -> T {
let mut result = T::zero();
for i in 0..log_outer.len() {
result = result
+ ((contingency_nm[i] * (log_contingency_nm[i] - contingency_sum_ln))
+ contingency_nm[i] * log_outer[i])
result += (contingency_nm[i] * (log_contingency_nm[i] - contingency_sum_ln))
+ contingency_nm[i] * log_outer[i]
}
result.max(T::zero())
+1 -1
@@ -43,7 +43,7 @@ impl MeanAbsoluteError {
let n = y_true.len();
let mut ras = T::zero();
for i in 0..n {
ras = ras + (y_true.get(i) - y_pred.get(i)).abs();
ras += (y_true.get(i) - y_pred.get(i)).abs();
}
ras / T::from_usize(n).unwrap()
+1 -1
@@ -43,7 +43,7 @@ impl MeanSquareError {
let n = y_true.len();
let mut rss = T::zero();
for i in 0..n {
rss = rss + (y_true.get(i) - y_pred.get(i)).square();
rss += (y_true.get(i) - y_pred.get(i)).square();
}
rss / T::from_usize(n).unwrap()
+1 -1
@@ -101,7 +101,7 @@ impl ClassificationMetrics {
/// F1 score, also known as balanced F-score or F-measure, see [F1](f1/index.html).
pub fn f1<T: RealNumber>(beta: T) -> f1::F1<T> {
f1::F1 { beta: beta }
f1::F1 { beta }
}
/// Area Under the Receiver Operating Characteristic Curve (ROC AUC), see [AUC](auc/index.html).
+4 -4
@@ -45,10 +45,10 @@ impl R2 {
let mut mean = T::zero();
for i in 0..n {
mean = mean + y_true.get(i);
mean += y_true.get(i);
}
mean = mean / T::from_usize(n).unwrap();
mean /= T::from_usize(n).unwrap();
let mut ss_tot = T::zero();
let mut ss_res = T::zero();
@@ -56,8 +56,8 @@ impl R2 {
for i in 0..n {
let y_i = y_true.get(i);
let f_i = y_pred.get(i);
ss_tot = ss_tot + (y_i - mean).square();
ss_res = ss_res + (y_i - f_i).square();
ss_tot += (y_i - mean).square();
ss_res += (y_i - f_i).square();
}
T::one() - (ss_res / ss_tot)
+4 -4
@@ -127,7 +127,7 @@ impl BaseKFold for KFold {
// initialise indices
let mut indices: Vec<usize> = (0..n_samples).collect();
if self.shuffle == true {
if self.shuffle {
indices.shuffle(&mut thread_rng());
}
// return a new array of given shape n_split, filled with each element of n_samples divided by n_splits.
@@ -135,7 +135,7 @@ impl BaseKFold for KFold {
// increment by one if odd
for i in 0..(n_samples % self.n_splits) {
fold_sizes[i] = fold_sizes[i] + 1;
fold_sizes[i] += 1;
}
// generate the right array of arrays for test indices
@@ -175,13 +175,13 @@ impl BaseKFold for KFold {
.clone()
.iter()
.enumerate()
.filter(|&(idx, _)| test_index[idx] == false)
.filter(|&(idx, _)| !test_index[idx])
.map(|(idx, _)| idx)
.collect::<Vec<usize>>(); // filter train indices out according to mask
let test_index = indices
.iter()
.enumerate()
.filter(|&(idx, _)| test_index[idx] == true)
.filter(|&(idx, _)| test_index[idx])
.map(|(idx, _)| idx)
.collect::<Vec<usize>>(); // filter tests indices out according to mask
return_values.push((train_index, test_index))
+4 -4
@@ -78,7 +78,7 @@ impl<T: RealNumber, D: Distance<Vec<T>, T>> PartialEq for KNNClassifier<T, D> {
|| self.k != other.k
|| self.y.len() != other.y.len()
{
return false;
false
} else {
for i in 0..self.classes.len() {
if (self.classes[i] - other.classes[i]).abs() > T::epsilon() {
@@ -139,7 +139,7 @@ impl<T: RealNumber, D: Distance<Vec<T>, T>> KNNClassifier<T, D> {
}
Ok(KNNClassifier {
classes: classes,
classes,
y: yi,
k: parameters.k,
knn_algorithm: parameters.algorithm.fit(data, distance)?,
@@ -166,13 +166,13 @@ impl<T: RealNumber, D: Distance<Vec<T>, T>> KNNClassifier<T, D> {
let weights = self
.weight
.calc_weights(search_result.iter().map(|v| v.1).collect());
let w_sum = weights.iter().map(|w| *w).sum();
let w_sum = weights.iter().copied().sum();
let mut c = vec![T::zero(); self.classes.len()];
let mut max_c = T::zero();
let mut max_i = 0;
for (r, w) in search_result.iter().zip(weights.iter()) {
c[self.y[r.0]] = c[self.y[r.0]] + (*w / w_sum);
c[self.y[r.0]] += *w / w_sum;
if c[self.y[r.0]] > max_c {
max_c = c[self.y[r.0]];
max_i = self.y[r.0];
+3 -3
@@ -76,7 +76,7 @@ impl Default for KNNRegressorParameters {
impl<T: RealNumber, D: Distance<Vec<T>, T>> PartialEq for KNNRegressor<T, D> {
fn eq(&self, other: &Self) -> bool {
if self.k != other.k || self.y.len() != other.y.len() {
return false;
false
} else {
for i in 0..self.y.len() {
if (self.y[i] - other.y[i]).abs() > T::epsilon() {
@@ -151,10 +151,10 @@ impl<T: RealNumber, D: Distance<Vec<T>, T>> KNNRegressor<T, D> {
let weights = self
.weight
.calc_weights(search_result.iter().map(|v| v.1).collect());
let w_sum = weights.iter().map(|w| *w).sum();
let w_sum = weights.iter().copied().sum();
for (r, w) in search_result.iter().zip(weights.iter()) {
result = result + self.y[r.0] * (*w / w_sum);
result += self.y[r.0] * (*w / w_sum);
}
Ok(result)
@@ -74,8 +74,8 @@ impl<T: RealNumber> FirstOrderOptimizer<T> for GradientDescent<T> {
let f_x = f(&x);
OptimizerResult {
x: x,
f_x: f_x,
x,
f_x,
iterations: iter,
}
}
+6 -9
@@ -48,7 +48,7 @@ impl Kernels {
/// Radial basis function kernel (Gaussian)
pub fn rbf<T: RealNumber>(gamma: T) -> RBFKernel<T> {
RBFKernel { gamma: gamma }
RBFKernel { gamma }
}
/// Polynomial kernel
@@ -57,9 +57,9 @@ impl Kernels {
/// * `coef0` - independent term in kernel function
pub fn polynomial<T: RealNumber>(degree: T, gamma: T, coef0: T) -> PolynomialKernel<T> {
PolynomialKernel {
degree: degree,
gamma: gamma,
coef0: coef0,
degree,
gamma,
coef0,
}
}
@@ -79,17 +79,14 @@ impl Kernels {
/// * `gamma` - kernel coefficient
/// * `coef0` - independent term in kernel function
pub fn sigmoid<T: RealNumber>(gamma: T, coef0: T) -> SigmoidKernel<T> {
SigmoidKernel {
gamma: gamma,
coef0: coef0,
}
SigmoidKernel { gamma, coef0 }
}
/// Sigmoid kernel
/// * `gamma` - kernel coefficient
pub fn sigmoid_with_gamma<T: RealNumber>(gamma: T) -> SigmoidKernel<T> {
SigmoidKernel {
gamma: gamma,
gamma,
coef0: T::one(),
}
}
+26 -26
@@ -173,9 +173,9 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> SVC<T, M, K> {
let (n, _) = x.shape();
if n != y.len() {
return Err(Failed::fit(&format!(
"Number of rows of X doesn't match number of rows of Y"
)));
return Err(Failed::fit(
&"Number of rows of X doesn\'t match number of rows of Y".to_string(),
));
}
let classes = y.unique();
@@ -204,11 +204,11 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> SVC<T, M, K> {
let (support_vectors, weight, b) = optimizer.optimize();
Ok(SVC {
classes: classes,
kernel: kernel,
classes,
kernel,
instances: support_vectors,
w: weight,
b: b,
b,
})
}
@@ -251,7 +251,7 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> PartialEq for SVC<
|| self.w.len() != other.w.len()
|| self.instances.len() != other.instances.len()
{
return false;
false
} else {
for i in 0..self.w.len() {
if (self.w[i] - other.w[i]).abs() > T::epsilon() {
@@ -263,7 +263,7 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> PartialEq for SVC<
return false;
}
}
return true;
true
}
}
}
@@ -278,12 +278,12 @@ impl<T: RealNumber, V: BaseVector<T>> SupportVector<T, V> {
};
SupportVector {
index: i,
x: x,
x,
grad: g,
k: k_v,
alpha: T::zero(),
cmin: cmin,
cmax: cmax,
cmin,
cmax,
}
}
}
@@ -291,7 +291,7 @@ impl<T: RealNumber, V: BaseVector<T>> SupportVector<T, V> {
impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Cache<'a, T, M, K> {
fn new(kernel: &'a K) -> Cache<'a, T, M, K> {
Cache {
kernel: kernel,
kernel,
data: HashMap::new(),
phantom: PhantomData,
}
@@ -326,8 +326,8 @@ impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Optimizer<'a,
let (n, _) = x.shape();
Optimizer {
x: x,
y: y,
x,
y,
parameters: &parameters,
svmin: 0,
svmax: 0,
@@ -335,7 +335,7 @@ impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Optimizer<'a,
gmax: T::min_value(),
tau: T::from_f64(1e-12).unwrap(),
sv: Vec::with_capacity(n),
kernel: kernel,
kernel,
recalculate_minmax_grad: true,
}
}
@@ -389,11 +389,12 @@ impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Optimizer<'a,
if self.process(i, self.x.get_row(i), self.y.get(i), cache) {
cp += 1;
}
} else if self.y.get(i) == -T::one() && cn < few {
if self.process(i, self.x.get_row(i), self.y.get(i), cache) {
} else if self.y.get(i) == -T::one()
&& cn < few
&& self.process(i, self.x.get_row(i), self.y.get(i), cache)
{
cn += 1;
}
}
if cp >= few && cn >= few {
break;
@@ -420,11 +421,11 @@ impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Optimizer<'a,
self.find_min_max_gradient();
if self.gmin < self.gmax {
if (y > T::zero() && g < self.gmin) || (y < T::zero() && g > self.gmax) {
if self.gmin < self.gmax
&& ((y > T::zero() && g < self.gmin) || (y < T::zero() && g > self.gmax))
{
return false;
}
}
for v in cache_values {
cache.insert(v.0, v.1);
@@ -494,13 +495,12 @@ impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Optimizer<'a,
let mut idxs_to_drop: HashSet<usize> = HashSet::new();
self.sv.retain(|v| {
if v.alpha == T::zero() {
if (v.grad >= gmax && T::zero() >= v.cmax)
|| (v.grad <= gmin && T::zero() <= v.cmin)
if v.alpha == T::zero()
&& ((v.grad >= gmax && T::zero() >= v.cmax)
|| (v.grad <= gmin && T::zero() <= v.cmin))
{
idxs_to_drop.insert(v.index);
return false;
}
};
true
});
@@ -647,7 +647,7 @@ impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Optimizer<'a,
self.update(idx_1, idx_2, step, cache);
return self.gmax - self.gmin > tol;
self.gmax - self.gmin > tol
}
None => false,
}
+14 -22
@@ -160,9 +160,9 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> SVR<T, M, K> {
let (n, _) = x.shape();
if n != y.len() {
return Err(Failed::fit(&format!(
"Number of rows of X doesn't match number of rows of Y"
)));
return Err(Failed::fit(
&"Number of rows of X doesn\'t match number of rows of Y".to_string(),
));
}
let optimizer = Optimizer::new(x, y, &kernel, &parameters);
@@ -170,10 +170,10 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> SVR<T, M, K> {
let (support_vectors, weight, b) = optimizer.smo();
Ok(SVR {
kernel: kernel,
kernel,
instances: support_vectors,
w: weight,
b: b,
b,
})
}
@@ -198,7 +198,7 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> SVR<T, M, K> {
f += self.w[i] * self.kernel.apply(&x, &self.instances[i]);
}
return f;
f
}
}
@@ -208,7 +208,7 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> PartialEq for SVR<
|| self.w.len() != other.w.len()
|| self.instances.len() != other.instances.len()
{
return false;
false
} else {
for i in 0..self.w.len() {
if (self.w[i] - other.w[i]).abs() > T::epsilon() {
@@ -220,7 +220,7 @@ impl<T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> PartialEq for SVR<
return false;
}
}
return true;
true
}
}
}
@@ -230,7 +230,7 @@ impl<T: RealNumber, V: BaseVector<T>> SupportVector<T, V> {
let k_v = k.apply(&x, &x);
SupportVector {
index: i,
x: x,
x,
grad: [eps + y, eps - y],
k: k_v,
alpha: [T::zero(), T::zero()],
@@ -270,7 +270,7 @@ impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Optimizer<'a,
gmaxindex: 0,
tau: T::from_f64(1e-12).unwrap(),
sv: support_vectors,
kernel: kernel,
kernel,
}
}
@@ -392,24 +392,20 @@ impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Optimizer<'a,
self.sv[v2].alpha[j] = T::zero();
self.sv[v1].alpha[i] = diff;
}
} else {
if self.sv[v1].alpha[i] < T::zero() {
} else if self.sv[v1].alpha[i] < T::zero() {
self.sv[v1].alpha[i] = T::zero();
self.sv[v2].alpha[j] = -diff;
}
}
if diff > T::zero() {
if self.sv[v1].alpha[i] > self.c {
self.sv[v1].alpha[i] = self.c;
self.sv[v2].alpha[j] = self.c - diff;
}
} else {
if self.sv[v2].alpha[j] > self.c {
} else if self.sv[v2].alpha[j] > self.c {
self.sv[v2].alpha[j] = self.c;
self.sv[v1].alpha[i] = self.c + diff;
}
}
} else {
let delta = (self.sv[v1].grad[i] - self.sv[v2].grad[j]) / curv;
let sum = self.sv[v1].alpha[i] + self.sv[v2].alpha[j];
@@ -421,25 +417,21 @@ impl<'a, T: RealNumber, M: Matrix<T>, K: Kernel<T, M::RowVector>> Optimizer<'a,
self.sv[v1].alpha[i] = self.c;
self.sv[v2].alpha[j] = sum - self.c;
}
} else {
if self.sv[v2].alpha[j] < T::zero() {
} else if self.sv[v2].alpha[j] < T::zero() {
self.sv[v2].alpha[j] = T::zero();
self.sv[v1].alpha[i] = sum;
}
}
if sum > self.c {
if self.sv[v2].alpha[j] > self.c {
self.sv[v2].alpha[j] = self.c;
self.sv[v1].alpha[i] = sum - self.c;
}
} else {
if self.sv[v1].alpha[i] < T::zero() {
} else if self.sv[v1].alpha[i] < T::zero() {
self.sv[v1].alpha[i] = T::zero();
self.sv[v2].alpha[j] = sum;
}
}
}
let delta_alpha_i = self.sv[v1].alpha[i] - old_alpha_i;
let delta_alpha_j = self.sv[v2].alpha[j] - old_alpha_j;
+19 -20
@@ -126,7 +126,7 @@ impl<T: RealNumber> PartialEq for DecisionTreeClassifier<T> {
|| self.num_classes != other.num_classes
|| self.nodes.len() != other.nodes.len()
{
return false;
false
} else {
for i in 0..self.classes.len() {
if (self.classes[i] - other.classes[i]).abs() > T::epsilon() {
@@ -138,7 +138,7 @@ impl<T: RealNumber> PartialEq for DecisionTreeClassifier<T> {
return false;
}
}
return true;
true
}
}
}
@@ -174,8 +174,8 @@ impl Default for DecisionTreeClassifierParameters {
impl<T: RealNumber> Node<T> {
fn new(index: usize, output: usize) -> Self {
Node {
index: index,
output: output,
index,
output,
split_feature: 0,
split_value: Option::None,
split_score: Option::None,
@@ -206,7 +206,7 @@ fn impurity<T: RealNumber>(criterion: &SplitCriterion, count: &Vec<usize>, n: us
for i in 0..count.len() {
if count[i] > 0 {
let p = T::from(count[i]).unwrap() / T::from(n).unwrap();
impurity = impurity - p * p;
impurity -= p * p;
}
}
}
@@ -215,7 +215,7 @@ fn impurity<T: RealNumber>(criterion: &SplitCriterion, count: &Vec<usize>, n: us
for i in 0..count.len() {
if count[i] > 0 {
let p = T::from(count[i]).unwrap() / T::from(n).unwrap();
impurity = impurity - p * p.log2();
impurity -= p * p.log2();
}
}
}
@@ -229,7 +229,7 @@ fn impurity<T: RealNumber>(criterion: &SplitCriterion, count: &Vec<usize>, n: us
}
}
return impurity;
impurity
}
impl<'a, T: RealNumber, M: Matrix<T>> NodeVisitor<'a, T, M> {
@@ -242,14 +242,14 @@ impl<'a, T: RealNumber, M: Matrix<T>> NodeVisitor<'a, T, M> {
level: u16,
) -> Self {
NodeVisitor {
x: x,
y: y,
x,
y,
node: node_id,
samples: samples,
order: order,
samples,
order,
true_child_output: 0,
false_child_output: 0,
level: level,
level,
phantom: PhantomData,
}
}
@@ -266,7 +266,7 @@ pub(in crate) fn which_max(x: &Vec<usize>) -> usize {
}
}
return which;
which
}
impl<T: RealNumber> DecisionTreeClassifier<T> {
@@ -325,10 +325,10 @@ impl<T: RealNumber> DecisionTreeClassifier<T> {
}
let mut tree = DecisionTreeClassifier {
nodes: nodes,
parameters: parameters,
nodes,
parameters,
num_classes: k,
classes: classes,
classes,
depth: 0,
};
@@ -376,19 +376,18 @@ impl<T: RealNumber> DecisionTreeClassifier<T> {
let node = &self.nodes[node_id];
if node.true_child == None && node.false_child == None {
result = node.output;
} else {
if x.get(row, node.split_feature) <= node.split_value.unwrap_or(T::nan()) {
} else if x.get(row, node.split_feature) <= node.split_value.unwrap_or(T::nan())
{
queue.push_back(node.true_child.unwrap());
} else {
queue.push_back(node.false_child.unwrap());
}
}
}
None => break,
};
}
return result;
result
}
fn find_best_cutoff<M: Matrix<T>>(
+18 -21
@@ -113,8 +113,8 @@ impl Default for DecisionTreeRegressorParameters {
impl<T: RealNumber> Node<T> {
fn new(index: usize, output: T) -> Self {
Node {
index: index,
output: output,
index,
output,
split_feature: 0,
split_value: Option::None,
split_score: Option::None,
@@ -144,14 +144,14 @@ impl<T: RealNumber> PartialEq for Node<T> {
impl<T: RealNumber> PartialEq for DecisionTreeRegressor<T> {
fn eq(&self, other: &Self) -> bool {
if self.depth != other.depth || self.nodes.len() != other.nodes.len() {
return false;
false
} else {
for i in 0..self.nodes.len() {
if self.nodes[i] != other.nodes[i] {
return false;
}
}
return true;
true
}
}
}
@@ -177,14 +177,14 @@ impl<'a, T: RealNumber, M: Matrix<T>> NodeVisitor<'a, T, M> {
level: u16,
) -> Self {
NodeVisitor {
x: x,
y: y,
x,
y,
node: node_id,
samples: samples,
order: order,
samples,
order,
true_child_output: T::zero(),
false_child_output: T::zero(),
level: level,
level,
}
}
}
@@ -221,7 +221,7 @@ impl<T: RealNumber> DecisionTreeRegressor<T> {
let mut sum = T::zero();
for i in 0..y_ncols {
n += samples[i];
sum = sum + T::from(samples[i]).unwrap() * y_m.get(0, i);
sum += T::from(samples[i]).unwrap() * y_m.get(0, i);
}
let root = Node::new(0, sum / T::from(n).unwrap());
@@ -233,8 +233,8 @@ impl<T: RealNumber> DecisionTreeRegressor<T> {
}
let mut tree = DecisionTreeRegressor {
nodes: nodes,
parameters: parameters,
nodes,
parameters,
depth: 0,
};
@@ -282,19 +282,18 @@ impl<T: RealNumber> DecisionTreeRegressor<T> {
let node = &self.nodes[node_id];
if node.true_child == None && node.false_child == None {
result = node.output;
} else {
if x.get(row, node.split_feature) <= node.split_value.unwrap_or(T::nan()) {
} else if x.get(row, node.split_feature) <= node.split_value.unwrap_or(T::nan())
{
queue.push_back(node.true_child.unwrap());
} else {
queue.push_back(node.false_child.unwrap());
}
}
}
None => break,
};
}
return result;
result
}
fn find_best_cutoff<M: Matrix<T>>(
@@ -348,8 +347,7 @@ impl<T: RealNumber> DecisionTreeRegressor<T> {
if prevx.is_nan() || visitor.x.get(*i, j) == prevx {
prevx = visitor.x.get(*i, j);
true_count += visitor.samples[*i];
true_sum =
true_sum + T::from(visitor.samples[*i]).unwrap() * visitor.y.get(0, *i);
true_sum += T::from(visitor.samples[*i]).unwrap() * visitor.y.get(0, *i);
continue;
}
@@ -360,8 +358,7 @@ impl<T: RealNumber> DecisionTreeRegressor<T> {
{
prevx = visitor.x.get(*i, j);
true_count += visitor.samples[*i];
true_sum =
true_sum + T::from(visitor.samples[*i]).unwrap() * visitor.y.get(0, *i);
true_sum += T::from(visitor.samples[*i]).unwrap() * visitor.y.get(0, *i);
continue;
}
@@ -384,7 +381,7 @@ impl<T: RealNumber> DecisionTreeRegressor<T> {
}
prevx = visitor.x.get(*i, j);
true_sum = true_sum + T::from(visitor.samples[*i]).unwrap() * visitor.y.get(0, *i);
true_sum += T::from(visitor.samples[*i]).unwrap() * visitor.y.get(0, *i);
true_count += visitor.samples[*i];
}
}