chore: fix clippy (#283)

* chore: fix clippy


Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
This commit is contained in:
morenol
2024-11-25 10:34:29 -05:00
committed by GitHub
parent 239c00428f
commit ba75f9ffad
29 changed files with 194 additions and 236 deletions
@@ -1,5 +1,3 @@
// TODO: missing documentation
use std::default::Default;
use crate::linalg::basic::arrays::Array1;
@@ -8,30 +6,27 @@ use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult};
use crate::optimization::line_search::LineSearchMethod;
use crate::optimization::{DF, F};
/// Gradient Descent optimization algorithm.
pub struct GradientDescent {
    /// Maximum number of iterations
    pub max_iter: usize,
    /// Relative tolerance for the gradient norm
    pub g_rtol: f64,
    /// Absolute tolerance for the gradient norm
    pub g_atol: f64,
}

impl Default for GradientDescent {
    /// Defaults: 10 000 iterations, `g_rtol` = sqrt(machine epsilon),
    /// `g_atol` = machine epsilon. Uses the associated constant
    /// `f64::EPSILON` (the `std::f64::EPSILON` path is deprecated),
    /// and each field is initialized exactly once.
    fn default() -> Self {
        GradientDescent {
            max_iter: 10000,
            g_rtol: f64::EPSILON.sqrt(),
            g_atol: f64::EPSILON,
        }
    }
}
///
impl<T: FloatNumber> FirstOrderOptimizer<T> for GradientDescent {
///
fn optimize<'a, X: Array1<T>, LS: LineSearchMethod<T>>(
&self,
f: &'a F<'_, T, X>,
+14 -25
View File
@@ -11,31 +11,29 @@ use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult};
use crate::optimization::line_search::LineSearchMethod;
use crate::optimization::{DF, F};
/// Limited-memory BFGS optimization algorithm
pub struct LBFGS {
    /// Maximum number of iterations
    pub max_iter: usize,
    /// Relative tolerance for the gradient norm — presumably scaled by the
    /// initial gradient norm; TODO confirm against the convergence check
    pub g_rtol: f64,
    /// Absolute tolerance for the gradient norm: converged when
    /// `||df(x)||_inf <= g_atol`
    pub g_atol: f64,
    /// Absolute tolerance on the change in `x` between iterations
    pub x_atol: f64,
    /// Relative tolerance on the change in `x`, scaled by `||x||_inf`
    pub x_rtol: f64,
    /// Absolute tolerance on the change in `f(x)` — TODO confirm exact use
    pub f_abstol: f64,
    /// Relative tolerance on the change in `f(x)` — TODO confirm exact use
    pub f_reltol: f64,
    /// Number of successive iterations satisfying the f-tolerance required
    /// before the run is declared converged
    pub successive_f_tol: usize,
    /// History size: number of past curvature pairs kept for the
    /// limited-memory Hessian approximation (the two-loop recursion window)
    pub m: usize,
}
///
impl Default for LBFGS {
///
fn default() -> Self {
LBFGS {
max_iter: 1000,
@@ -51,9 +49,7 @@ impl Default for LBFGS {
}
}
///
impl LBFGS {
///
fn two_loops<T: FloatNumber + RealNumber, X: Array1<T>>(&self, state: &mut LBFGSState<T, X>) {
let lower = state.iteration.max(self.m) - self.m;
let upper = state.iteration;
@@ -95,7 +91,6 @@ impl LBFGS {
state.s.mul_scalar_mut(-T::one());
}
///
fn init_state<T: FloatNumber + RealNumber, X: Array1<T>>(&self, x: &X) -> LBFGSState<T, X> {
LBFGSState {
x: x.clone(),
@@ -119,7 +114,6 @@ impl LBFGS {
}
}
///
fn update_state<'a, T: FloatNumber + RealNumber, X: Array1<T>, LS: LineSearchMethod<T>>(
&self,
f: &'a F<'_, T, X>,
@@ -161,7 +155,6 @@ impl LBFGS {
df(&mut state.x_df, &state.x);
}
///
fn assess_convergence<T: FloatNumber, X: Array1<T>>(
&self,
state: &mut LBFGSState<T, X>,
@@ -173,7 +166,7 @@ impl LBFGS {
}
if state.x.max_diff(&state.x_prev)
<= T::from_f64(self.x_rtol * state.x.norm(std::f64::INFINITY)).unwrap()
<= T::from_f64(self.x_rtol * state.x.norm(f64::INFINITY)).unwrap()
{
x_converged = true;
}
@@ -188,14 +181,13 @@ impl LBFGS {
state.counter_f_tol += 1;
}
if state.x_df.norm(std::f64::INFINITY) <= self.g_atol {
if state.x_df.norm(f64::INFINITY) <= self.g_atol {
g_converged = true;
}
g_converged || x_converged || state.counter_f_tol > self.successive_f_tol
}
///
fn update_hessian<T: FloatNumber, X: Array1<T>>(
&self,
_: &DF<'_, X>,
@@ -212,7 +204,6 @@ impl LBFGS {
}
}
///
#[derive(Debug)]
struct LBFGSState<T: FloatNumber, X: Array1<T>> {
x: X,
@@ -234,9 +225,7 @@ struct LBFGSState<T: FloatNumber, X: Array1<T>> {
alpha: T,
}
///
impl<T: FloatNumber + RealNumber> FirstOrderOptimizer<T> for LBFGS {
///
fn optimize<'a, X: Array1<T>, LS: LineSearchMethod<T>>(
&self,
f: &F<'_, T, X>,
@@ -248,7 +237,7 @@ impl<T: FloatNumber + RealNumber> FirstOrderOptimizer<T> for LBFGS {
df(&mut state.x_df, x0);
let g_converged = state.x_df.norm(std::f64::INFINITY) < self.g_atol;
let g_converged = state.x_df.norm(f64::INFINITY) < self.g_atol;
let mut converged = g_converged;
let stopped = false;
@@ -299,7 +288,7 @@ mod tests {
let result = optimizer.optimize(&f, &df, &x0, &ls);
assert!((result.f_x - 0.0).abs() < std::f64::EPSILON);
assert!((result.f_x - 0.0).abs() < f64::EPSILON);
assert!((result.x[0] - 1.0).abs() < 1e-8);
assert!((result.x[1] - 1.0).abs() < 1e-8);
assert!(result.iterations <= 24);
+8 -8
View File
@@ -1,6 +1,6 @@
/// Gradient descent optimization algorithm
pub mod gradient_descent;
/// Limited-memory BFGS optimization algorithm
pub mod lbfgs;
use std::clone::Clone;
@@ -11,9 +11,9 @@ use crate::numbers::floatnum::FloatNumber;
use crate::optimization::line_search::LineSearchMethod;
use crate::optimization::{DF, F};
///
/// First-order optimization is a class of algorithms that use the first derivative of a function to find optimal solutions.
pub trait FirstOrderOptimizer<T: FloatNumber> {
///
/// run first order optimization
fn optimize<'a, X: Array1<T>, LS: LineSearchMethod<T>>(
&self,
f: &F<'_, T, X>,
@@ -23,13 +23,13 @@ pub trait FirstOrderOptimizer<T: FloatNumber> {
) -> OptimizerResult<T, X>;
}
/// Result of optimization
#[derive(Debug, Clone)]
pub struct OptimizerResult<T: FloatNumber, X: Array1<T>> {
    /// Solution: the point the optimizer converged to (or stopped at)
    pub x: X,
    /// Objective value `f(x)` at the solution
    pub f_x: T,
    /// Number of iterations performed
    pub iterations: usize,
}
+12 -17
View File
@@ -1,11 +1,9 @@
// TODO: missing documentation
use crate::optimization::FunctionOrder;
use num_traits::Float;
///
/// Line search optimization.
pub trait LineSearchMethod<T: Float> {
///
/// Find alpha that satisfies strong Wolfe conditions.
fn search(
&self,
f: &(dyn Fn(T) -> T),
@@ -16,32 +14,31 @@ pub trait LineSearchMethod<T: Float> {
) -> LineSearchResult<T>;
}
/// Line search result
#[derive(Debug, Clone)]
pub struct LineSearchResult<T: Float> {
    /// Accepted step length alpha
    pub alpha: T,
    /// Objective value at the accepted step, `f(alpha)`
    pub f_x: T,
}
/// Backtracking line search method.
pub struct Backtracking<T: Float> {
    /// Sufficient-decrease constant — presumably the usual Armijo/Wolfe
    /// `c1`; TODO confirm against the search implementation
    pub c1: T,
    /// Maximum number of iterations for Backtracking single run
    pub max_iterations: usize,
    /// Maximum iterations allowed while the objective is non-finite —
    /// TODO confirm semantics
    pub max_infinity_iterations: usize,
    /// Upper step-scaling factor used during backtracking — TODO confirm
    pub phi: T,
    /// Lower step-scaling factor used during backtracking — TODO confirm
    pub plo: T,
    /// function order (polynomial model used for interpolation)
    pub order: FunctionOrder,
}
///
impl<T: Float> Default for Backtracking<T> {
fn default() -> Self {
Backtracking {
@@ -55,9 +52,7 @@ impl<T: Float> Default for Backtracking<T> {
}
}
///
impl<T: Float> LineSearchMethod<T> for Backtracking<T> {
///
fn search(
&self,
f: &(dyn Fn(T) -> T),
+7 -9
View File
@@ -1,21 +1,19 @@
// TODO: missing documentation
/// First order optimization algorithms
pub mod first_order;
/// Line search algorithms
pub mod line_search;
/// Objective function type: `f(x) = y`
pub type F<'a, T, X> = dyn for<'b> Fn(&'b X) -> T + 'a;
/// Derivative function type `df(g, x)`: called with a mutable output
/// buffer and the point `x`, and writes the derivative values into `g`
pub type DF<'a, X> = dyn for<'b> Fn(&'b mut X, &'b X) + 'a;
/// Function order: degree of the interpolation model used by line search
// Upper-case variant names predate the UpperCamelCase convention; the
// allow keeps clippy quiet without breaking the public API.
#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, PartialEq, Eq)]
pub enum FunctionOrder {
    /// Second order
    SECOND,
    /// Third order
    THIRD,
}