chore: fix clippy (#283)
* chore: fix clippy

Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
--- a/src/linear/bg_solver.rs
+++ b/src/linear/bg_solver.rs
@@ -27,9 +27,9 @@ use crate::error::Failed;
 use crate::linalg::basic::arrays::{Array, Array1, Array2, ArrayView1, MutArrayView1};
 use crate::numbers::floatnum::FloatNumber;
 
 ///
 /// Trait for Biconjugate Gradient Solver
 pub trait BiconjugateGradientSolver<'a, T: FloatNumber, X: Array2<T>> {
     ///
     /// Solve Ax = b
     fn solve_mut(
         &self,
         a: &'a X,
@@ -109,7 +109,7 @@ pub trait BiconjugateGradientSolver<'a, T: FloatNumber, X: Array2<T>> {
         Ok(err)
     }
 
     ///
     /// solve preconditioner
     fn solve_preconditioner(&self, a: &'a X, b: &[T], x: &mut [T]) {
         let diag = Self::diag(a);
         let n = diag.len();
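Review note: `solve_preconditioner` pairs with the `diag` helper below, which suggests a diagonal (Jacobi) preconditioner, where the solve step reduces to an element-wise division. A minimal sketch of that step under that assumption (illustrative only, the trait's actual loop may differ):

    // Illustrative Jacobi (diagonal) preconditioner step: solving M * x = b
    // with M = diag(A) means x[i] = b[i] / A[i][i].
    fn jacobi_solve(diag: &[f64], b: &[f64], x: &mut [f64]) {
        for i in 0..diag.len() {
            // Assumes a nonzero diagonal; real code should guard division by zero.
            x[i] = b[i] / diag[i];
        }
    }

This is a library-style sketch; it compiles as-is and is meant only to show the shape of the computation.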
@@ -133,7 +133,7 @@ pub trait BiconjugateGradientSolver<'a, T: FloatNumber, X: Array2<T>> {
         y.copy_from(&x.xa(true, a));
     }
 
     ///
     /// Extract the diagonal from a matrix
     fn diag(a: &X) -> Vec<T> {
         let (nrows, ncols) = a.shape();
         let n = nrows.min(ncols);
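Review note: for a rectangular matrix, `diag` takes the leading `min(nrows, ncols)` entries. The same idea over a plain `Vec<Vec<f64>>`, with illustrative names rather than the crate's API:

    // Illustrative: main diagonal of a (possibly rectangular) matrix.
    fn diag(a: &[Vec<f64>]) -> Vec<f64> {
        let nrows = a.len();
        let ncols = if nrows == 0 { 0 } else { a[0].len() };
        let n = nrows.min(ncols);
        (0..n).map(|i| a[i][i]).collect()
    }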
--- a/src/linear/lasso_optimizer.rs
+++ b/src/linear/lasso_optimizer.rs
@@ -16,7 +16,7 @@ use crate::linalg::basic::arrays::{Array1, Array2, ArrayView1, MutArray, MutArra
 use crate::linear::bg_solver::BiconjugateGradientSolver;
 use crate::numbers::floatnum::FloatNumber;
 
 ///
 /// Interior Point Optimizer
 pub struct InteriorPointOptimizer<T: FloatNumber, X: Array2<T>> {
     ata: X,
     d1: Vec<T>,
@@ -25,9 +25,8 @@ pub struct InteriorPointOptimizer<T: FloatNumber, X: Array2<T>> {
     prs: Vec<T>,
 }
 
-///
 impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
     ///
     /// Initialize a new Interior Point Optimizer
     pub fn new(a: &X, n: usize) -> InteriorPointOptimizer<T, X> {
         InteriorPointOptimizer {
             ata: a.ab(true, a, false),
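Review note: `a.ab(true, a, false)` appears to precompute the Gram matrix AᵀA once at construction, so later matrix-vector products in the solver can reuse it (the `true`/`false` transpose-flag reading is an assumption about the crate's `ab` signature). A rough sketch of the same precomputation with plain vectors, illustrative names only:

    // Illustrative: Gram matrix G = Aᵀ·A of an m×n matrix A, so that each
    // later product G·x costs O(n²) instead of O(m·n).
    fn gram(a: &[Vec<f64>]) -> Vec<Vec<f64>> {
        let m = a.len();
        let n = if m == 0 { 0 } else { a[0].len() };
        let mut g = vec![vec![0.0; n]; n];
        for i in 0..n {
            for j in 0..n {
                g[i][j] = (0..m).map(|k| a[k][i] * a[k][j]).sum();
            }
        }
        g
    }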
@@ -38,7 +37,7 @@ impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
         }
     }
 
     ///
     /// Run the optimization
     pub fn optimize(
         &mut self,
         x: &X,
@@ -101,7 +100,7 @@ impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
 
             // CALCULATE DUALITY GAP
             let xnu = nu.xa(false, x);
-            let max_xnu = xnu.norm(std::f64::INFINITY);
+            let max_xnu = xnu.norm(f64::INFINITY);
             if max_xnu > lambda_f64 {
                 let lnu = T::from_f64(lambda_f64 / max_xnu).unwrap();
                 nu.mul_scalar_mut(lnu);
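Review note: the lint fixed here is the preference for the primitive's associated constant (`f64::INFINITY`) over the soft-deprecated module constant (`std::f64::INFINITY`); the value is identical. The surrounding logic keeps the dual point feasible: when the infinity norm of xᵀν exceeds λ, ν is shrunk by λ / ‖xᵀν‖∞. A self-contained sketch of that rescaling over a plain slice (illustrative, not the crate's API):

    // Illustrative: shrink a dual iterate back into the feasible region
    // when its infinity norm exceeds lambda, as the hunk above does for nu.
    fn rescale_dual(nu: &mut [f64], lambda: f64) {
        // f64::NEG_INFINITY is the associated-constant form clippy prefers.
        let max = nu.iter().map(|v| v.abs()).fold(f64::NEG_INFINITY, f64::max);
        if max > lambda {
            let s = lambda / max;
            for v in nu.iter_mut() {
                *v *= s;
            }
        }
    }

    fn main() {
        let mut nu = vec![3.0, -4.0, 1.0];
        rescale_dual(&mut nu, 2.0);
        assert_eq!(nu, [1.5, -2.0, 0.5]);
    }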
@@ -208,7 +207,6 @@ impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
         Ok(w)
     }
 
-    ///
     fn sumlogneg(f: &X) -> T {
         let (n, _) = f.shape();
         let mut sum = T::zero();
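Review note: judging by its name and its role in an interior-point method, `sumlogneg` accumulates the log-barrier term Σ ln(−fᵢ) over constraint values fᵢ < 0; this reading is an assumption, since the loop body is not shown in the hunk. A hedged sketch:

    // Illustrative: log-barrier accumulator, sum of ln(-f[i]).
    // Every entry must be negative for the logarithm to be defined.
    fn sumlogneg(f: &[f64]) -> f64 {
        f.iter().map(|&v| (-v).ln()).sum()
    }

    fn main() {
        // Constraint values strictly inside the feasible region are negative.
        let f = [-1.0, -std::f64::consts::E];
        assert!((sumlogneg(&f) - 1.0).abs() < 1e-12);
    }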
@@ -220,11 +218,9 @@ impl<T: FloatNumber, X: Array2<T>> InteriorPointOptimizer<T, X> {
         }
     }
 }
 
-///
 impl<'a, T: FloatNumber, X: Array2<T>> BiconjugateGradientSolver<'a, T, X>
     for InteriorPointOptimizer<T, X>
 {
-    ///
     fn solve_preconditioner(&self, a: &'a X, b: &[T], x: &mut [T]) {
         let (_, p) = a.shape();
 
@@ -234,7 +230,6 @@ impl<'a, T: FloatNumber, X: Array2<T>> BiconjugateGradientSolver<'a, T, X>
         }
     }
 
-    ///
     fn mat_vec_mul(&self, _: &X, x: &Vec<T>, y: &mut Vec<T>) {
         let (_, p) = self.ata.shape();
         let x_slice = Vec::from_slice(x.slice(0..p).as_ref());
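Review note: `mat_vec_mul` ignores the matrix argument (`_`) and multiplies against the cached `ata` instead, which is why `mat_t_vec_mul` in the next hunk can simply delegate to it: AᵀA is symmetric, so the transposed product is the same. A sketch of that symmetry argument with illustrative names:

    // Illustrative: for symmetric G (here G = AᵀA), Gᵀx == Gx, so one
    // matrix-vector kernel can serve both mat_vec_mul and mat_t_vec_mul.
    fn sym_mat_vec_mul(g: &[Vec<f64>], x: &[f64], y: &mut [f64]) {
        for (i, row) in g.iter().enumerate() {
            y[i] = row.iter().zip(x).map(|(a, b)| a * b).sum();
        }
    }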
@@ -246,7 +241,6 @@ impl<'a, T: FloatNumber, X: Array2<T>> BiconjugateGradientSolver<'a, T, X>
         }
     }
 
-    ///
     fn mat_t_vec_mul(&self, a: &X, x: &Vec<T>, y: &mut Vec<T>) {
         self.mat_vec_mul(a, x, y);
     }
 
--- a/src/linear/logistic_regression.rs
+++ b/src/linear/logistic_regression.rs
@@ -183,14 +183,11 @@ pub struct LogisticRegression<
 }
 
 trait ObjectiveFunction<T: Number + FloatNumber, X: Array2<T>> {
-    ///
     fn f(&self, w_bias: &[T]) -> T;
 
-    ///
     #[allow(clippy::ptr_arg)]
     fn df(&self, g: &mut Vec<T>, w_bias: &Vec<T>);
 
-    ///
     #[allow(clippy::ptr_arg)]
     fn partial_dot(w: &[T], x: &X, v_col: usize, m_row: usize) -> T {
         let mut sum = T::zero();
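Review note: `clippy::ptr_arg` asks for `&[T]` instead of `&Vec<T>` in argument position, because a slice accepts strictly more callers at no cost; the `#[allow]` stays here presumably because the trait signature was kept stable. A minimal pair showing what the lint wants (illustrative functions, not from the crate):

    // What clippy::ptr_arg flags: &Vec<f64> forces callers to have a Vec.
    #[allow(clippy::ptr_arg)]
    fn sum_vec(v: &Vec<f64>) -> f64 {
        v.iter().sum()
    }

    // Preferred: &[f64] accepts a Vec, an array, or any sub-slice alike.
    fn sum_slice(v: &[f64]) -> f64 {
        v.iter().sum()
    }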
@@ -629,11 +626,11 @@ mod tests {
         objective.df(&mut g, &vec![1., 2., 3., 4., 5., 6., 7., 8., 9.]);
 
-        assert!((g[0] + 33.000068218163484).abs() < std::f64::EPSILON);
+        assert!((g[0] + 33.000068218163484).abs() < f64::EPSILON);
 
         let f = objective.f(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]);
 
-        assert!((f - 408.0052230582765).abs() < std::f64::EPSILON);
+        assert!((f - 408.0052230582765).abs() < f64::EPSILON);
 
         let objective_reg = MultiClassObjectiveFunction {
             x: &x,
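Review note: same mechanical change as the INFINITY one above: `f64::EPSILON` is the associated constant, `std::f64::EPSILON` the soft-deprecated module constant, and both name the same machine epsilon, so the asserts are unaffected. A one-liner to convince yourself:

    #[allow(deprecated)] // std::f64::EPSILON is the soft-deprecated module path
    fn main() {
        assert_eq!(f64::EPSILON, std::f64::EPSILON);
        println!("machine epsilon: {:e}", f64::EPSILON); // 2.220446049250313e-16
    }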
@@ -689,13 +686,13 @@ mod tests {
         objective.df(&mut g, &vec![1., 2., 3.]);
 
-        assert!((g[0] - 26.051064349381285).abs() < std::f64::EPSILON);
-        assert!((g[1] - 10.239000702928523).abs() < std::f64::EPSILON);
-        assert!((g[2] - 3.869294270156324).abs() < std::f64::EPSILON);
+        assert!((g[0] - 26.051064349381285).abs() < f64::EPSILON);
+        assert!((g[1] - 10.239000702928523).abs() < f64::EPSILON);
+        assert!((g[2] - 3.869294270156324).abs() < f64::EPSILON);
 
         let f = objective.f(&[1., 2., 3.]);
 
-        assert!((f - 59.76994756647412).abs() < std::f64::EPSILON);
+        assert!((f - 59.76994756647412).abs() < f64::EPSILON);
 
         let objective_reg = BinaryObjectiveFunction {
             x: &x,
@@ -916,7 +913,7 @@ mod tests {
         let x: DenseMatrix<f32> = DenseMatrix::rand(52181, 94);
         let y1: Vec<i32> = vec![1; 2181];
         let y2: Vec<i32> = vec![0; 50000];
-        let y: Vec<i32> = y1.into_iter().chain(y2.into_iter()).collect();
+        let y: Vec<i32> = y1.into_iter().chain(y2).collect();
 
         let lr = LogisticRegression::fit(&x, &y, Default::default()).unwrap();
         let lr_reg = LogisticRegression::fit(
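Review note: `Iterator::chain` takes any `impl IntoIterator`, so wrapping the argument in `.into_iter()` first is a no-op that `clippy::useless_conversion` flags. Self-contained demonstration:

    // chain() accepts an IntoIterator, so passing the Vec directly is enough.
    fn main() {
        let y1: Vec<i32> = vec![1; 3];
        let y2: Vec<i32> = vec![0; 2];
        let y: Vec<i32> = y1.into_iter().chain(y2).collect();
        assert_eq!(y, [1, 1, 1, 0, 0]);
    }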
@@ -938,12 +935,12 @@ mod tests {
         let x: &DenseMatrix<f64> = &DenseMatrix::rand(52181, 94);
         let y1: Vec<u32> = vec![1; 2181];
         let y2: Vec<u32> = vec![0; 50000];
-        let y: &Vec<u32> = &(y1.into_iter().chain(y2.into_iter()).collect());
+        let y: &Vec<u32> = &(y1.into_iter().chain(y2).collect());
         println!("y vec height: {:?}", y.len());
         println!("x matrix shape: {:?}", x.shape());
 
         let lr = LogisticRegression::fit(x, y, Default::default()).unwrap();
-        let y_hat = lr.predict(&x).unwrap();
+        let y_hat = lr.predict(x).unwrap();
 
         println!("y_hat shape: {:?}", y_hat.shape());
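Review note: here `x` is already a `&DenseMatrix<f64>`, so `&x` produces a `&&DenseMatrix<f64>` that the compiler has to auto-dereference; `clippy::needless_borrow` asks for the plain `x`. Minimal reproduction with illustrative types:

    fn len_of(v: &[i32]) -> usize {
        v.len()
    }

    fn main() {
        let data = vec![1, 2, 3];
        let x: &Vec<i32> = &data;
        // Before the fix this read len_of(&x), an extra borrow (&&Vec<i32>);
        // passing `x` directly is what clippy::needless_borrow suggests.
        assert_eq!(len_of(x), 3);
    }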