From 583284e66f3e27c03a393f630bee0b677f05b706 Mon Sep 17 00:00:00 2001 From: Volodymyr Orlov Date: Tue, 24 Nov 2020 19:12:53 -0800 Subject: [PATCH 1/3] feat: adds LASSO --- src/linalg/high_order.rs | 28 ++ src/linalg/mod.rs | 63 ++++ src/linalg/naive/dense_matrix.rs | 60 +++- src/linalg/nalgebra_bindings.rs | 6 + src/linalg/ndarray_bindings.rs | 6 + src/linear/bg_solver.rs | 146 +++++++++ src/linear/lasso.rs | 509 ++++++++++++++++++++++++++++++ src/linear/logistic_regression.rs | 2 +- src/linear/mod.rs | 2 + 9 files changed, 819 insertions(+), 3 deletions(-) create mode 100644 src/linalg/high_order.rs create mode 100644 src/linear/bg_solver.rs create mode 100644 src/linear/lasso.rs diff --git a/src/linalg/high_order.rs b/src/linalg/high_order.rs new file mode 100644 index 0000000..359c4a1 --- /dev/null +++ b/src/linalg/high_order.rs @@ -0,0 +1,28 @@ +//! In this module you will find composite of matrix operations that are used elsewhere +//! for improved efficiency. + +use crate::linalg::BaseMatrix; +use crate::math::num::RealNumber; + +/// High order matrix operations. 
+pub trait HighOrderOperations: BaseMatrix { + /// Y = AB + /// ``` + /// use smartcore::linalg::naive::dense_matrix::*; + /// use smartcore::linalg::high_order::HighOrderOperations; + /// + /// let a = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]); + /// let b = DenseMatrix::from_2d_array(&[&[5., 6.], &[7., 8.], &[9., 10.]]); + /// let expected = DenseMatrix::from_2d_array(&[&[71., 80.], &[92., 104.]]); + /// + /// assert_eq!(a.ab(true, &b, false), expected); + /// ``` + fn ab(&self, a_transpose: bool, b: &Self, b_transpose: bool) -> Self { + match (a_transpose, b_transpose) { + (true, true) => self.transpose().matmul(&b.transpose()), + (false, true) => self.matmul(&b.transpose()), + (true, false) => self.transpose().matmul(b), + (false, false) => self.matmul(b), + } + } +} diff --git a/src/linalg/mod.rs b/src/linalg/mod.rs index c560b78..1be2e75 100644 --- a/src/linalg/mod.rs +++ b/src/linalg/mod.rs @@ -36,6 +36,7 @@ pub mod cholesky; /// The matrix is represented in terms of its eigenvalues and eigenvectors. pub mod evd; +pub mod high_order; /// Factors a matrix as the product of a lower triangular matrix and an upper triangular matrix. pub mod lu; /// Dense matrix with column-major order that wraps [Vec](https://doc.rust-lang.org/std/vec/struct.Vec.html). @@ -59,6 +60,7 @@ use std::ops::Range; use crate::math::num::RealNumber; use cholesky::CholeskyDecomposableMatrix; use evd::EVDDecomposableMatrix; +use high_order::HighOrderOperations; use lu::LUDecomposableMatrix; use qr::QRDecomposableMatrix; use stats::MatrixStats; @@ -134,6 +136,66 @@ pub trait BaseVector: Clone + Debug { /// Subtract `x` from single element of the vector, write result to original vector. 
fn sub_element_mut(&mut self, pos: usize, x: T); + /// Subtract scalar + fn sub_scalar_mut(&mut self, x: T) -> &Self { + for i in 0..self.len() { + self.set(i, self.get(i) - x); + } + self + } + + /// Add scalar + fn add_scalar_mut(&mut self, x: T) -> &Self { + for i in 0..self.len() { + self.set(i, self.get(i) + x); + } + self + } + + /// Multiply by scalar + fn mul_scalar_mut(&mut self, x: T) -> &Self { + for i in 0..self.len() { + self.set(i, self.get(i) * x); + } + self + } + + /// Divide by scalar + fn div_scalar_mut(&mut self, x: T) -> &Self { + for i in 0..self.len() { + self.set(i, self.get(i) / x); + } + self + } + + /// Add scalar to each element, returning a new vector + fn add_scalar(&self, x: T) -> Self { + let mut r = self.clone(); + r.add_scalar_mut(x); + r + } + + /// Subtract scalar from each element, returning a new vector + fn sub_scalar(&self, x: T) -> Self { + let mut r = self.clone(); + r.sub_scalar_mut(x); + r + } + + /// Multiply each element by scalar, returning a new vector + fn mul_scalar(&self, x: T) -> Self { + let mut r = self.clone(); + r.mul_scalar_mut(x); + r + } + + /// Divide each element by scalar, returning a new vector + fn div_scalar(&self, x: T) -> Self { + let mut r = self.clone(); + r.div_scalar_mut(x); + r + } + /// Add vectors, element-wise, overriding original vector with result. 
fn add_mut(&mut self, other: &Self) -> &Self; @@ -557,6 +619,7 @@ pub trait Matrix: + LUDecomposableMatrix + CholeskyDecomposableMatrix + MatrixStats + + HighOrderOperations + PartialEq + Display { diff --git a/src/linalg/naive/dense_matrix.rs b/src/linalg/naive/dense_matrix.rs index 7486329..f4c8a97 100644 --- a/src/linalg/naive/dense_matrix.rs +++ b/src/linalg/naive/dense_matrix.rs @@ -9,6 +9,7 @@ use serde::{Deserialize, Serialize}; use crate::linalg::cholesky::CholeskyDecomposableMatrix; use crate::linalg::evd::EVDDecomposableMatrix; +use crate::linalg::high_order::HighOrderOperations; use crate::linalg::lu::LUDecomposableMatrix; use crate::linalg::qr::QRDecomposableMatrix; use crate::linalg::stats::MatrixStats; @@ -444,6 +445,38 @@ impl LUDecomposableMatrix for DenseMatrix {} impl CholeskyDecomposableMatrix for DenseMatrix {} +impl HighOrderOperations for DenseMatrix { + fn ab(&self, a_transpose: bool, b: &Self, b_transpose: bool) -> Self { + if !a_transpose && !b_transpose { + self.matmul(b) + } else { + let (d1, d2, d3, d4) = match (a_transpose, b_transpose) { + (true, false) => (self.nrows, self.ncols, b.ncols, b.nrows), + (false, true) => (self.ncols, self.nrows, b.nrows, b.ncols), + _ => (self.nrows, self.ncols, b.nrows, b.ncols), + }; + if d1 != d4 { + panic!("Can not multiply {}x{} by {}x{} matrices", d2, d1, d4, d3); + } + let mut result = Self::zeros(d2, d3); + for r in 0..d2 { + for c in 0..d3 { + let mut s = T::zero(); + for i in 0..d1 { + match (a_transpose, b_transpose) { + (true, false) => s += self.get(i, r) * b.get(i, c), + (false, true) => s += self.get(r, i) * b.get(c, i), + _ => s += self.get(i, r) * b.get(c, i), + } + } + result.set(r, c, s); + } + } + result + } + } +} + impl MatrixStats for DenseMatrix {} impl Matrix for DenseMatrix {} @@ -625,8 +658,8 @@ impl BaseMatrix for DenseMatrix { } fn dot(&self, other: &Self) -> T { - if self.nrows != 1 && other.nrows != 1 { - panic!("A and B should both be 1-dimentional vectors."); + if 
(self.nrows != 1 && other.nrows != 1) && (self.ncols != 1 && other.ncols != 1) { + panic!("A and B should both be either a row or a column vector."); } if self.nrows * self.ncols != other.nrows * other.ncols { panic!("A and B should have the same size"); @@ -1114,6 +1147,29 @@ mod tests { assert_eq!(result, expected); } + #[test] + fn ab() { + let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]); + let b = DenseMatrix::from_2d_array(&[&[5., 6.], &[7., 8.], &[9., 10.]]); + let c = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]); + assert_eq!( + a.ab(false, &b, false), + DenseMatrix::from_2d_array(&[&[46., 52.], &[109., 124.]]) + ); + assert_eq!( + c.ab(true, &b, false), + DenseMatrix::from_2d_array(&[&[71., 80.], &[92., 104.]]) + ); + assert_eq!( + b.ab(false, &c, true), + DenseMatrix::from_2d_array(&[&[17., 39., 61.], &[23., 53., 83.,], &[29., 67., 105.]]) + ); + assert_eq!( + a.ab(true, &b, true), + DenseMatrix::from_2d_array(&[&[29., 39., 49.], &[40., 54., 68.,], &[51., 69., 87.]]) + ); + } + #[test] fn dot() { let a = DenseMatrix::from_array(1, 3, &[1., 2., 3.]); diff --git a/src/linalg/nalgebra_bindings.rs b/src/linalg/nalgebra_bindings.rs index 8ddfdb6..8f504c6 100644 --- a/src/linalg/nalgebra_bindings.rs +++ b/src/linalg/nalgebra_bindings.rs @@ -44,6 +44,7 @@ use nalgebra::{DMatrix, Dynamic, Matrix, MatrixMN, RowDVector, Scalar, VecStorag use crate::linalg::cholesky::CholeskyDecomposableMatrix; use crate::linalg::evd::EVDDecomposableMatrix; +use crate::linalg::high_order::HighOrderOperations; use crate::linalg::lu::LUDecomposableMatrix; use crate::linalg::qr::QRDecomposableMatrix; use crate::linalg::stats::MatrixStats; @@ -552,6 +553,11 @@ impl + HighOrderOperations for Matrix> +{ +} + impl SmartCoreMatrix for Matrix> { diff --git a/src/linalg/ndarray_bindings.rs b/src/linalg/ndarray_bindings.rs index b5058ab..9945c5f 100644 --- a/src/linalg/ndarray_bindings.rs +++ b/src/linalg/ndarray_bindings.rs @@ -51,6 +51,7 @@ use 
ndarray::{s, stack, Array, ArrayBase, Axis, Ix1, Ix2, OwnedRepr}; use crate::linalg::cholesky::CholeskyDecomposableMatrix; use crate::linalg::evd::EVDDecomposableMatrix; +use crate::linalg::high_order::HighOrderOperations; use crate::linalg::lu::LUDecomposableMatrix; use crate::linalg::qr::QRDecomposableMatrix; use crate::linalg::stats::MatrixStats; @@ -502,6 +503,11 @@ impl + HighOrderOperations for ArrayBase, Ix2> +{ +} + impl Matrix for ArrayBase, Ix2> { diff --git a/src/linear/bg_solver.rs b/src/linear/bg_solver.rs new file mode 100644 index 0000000..b299623 --- /dev/null +++ b/src/linear/bg_solver.rs @@ -0,0 +1,146 @@ +//! This is a generic solver for Ax = b type of equation +//! +//! for more information take a look at [this Wikipedia article](https://en.wikipedia.org/wiki/Biconjugate_gradient_method) +//! and [this paper](https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf) +use crate::error::Failed; +use crate::linalg::Matrix; +use crate::math::num::RealNumber; + +pub trait BiconjugateGradientSolver> { + fn solve_mut(&self, a: &M, b: &M, x: &mut M, tol: T, max_iter: usize) -> Result { + if tol <= T::zero() { + return Err(Failed::fit("tolerance should be > 0")); + } + + if max_iter == 0 { + return Err(Failed::fit("maximum number of iterations should be > 0")); + } + + let (n, _) = b.shape(); + + let mut r = M::zeros(n, 1); + let mut rr = M::zeros(n, 1); + let mut z = M::zeros(n, 1); + let mut zz = M::zeros(n, 1); + + self.mat_vec_mul(a, x, &mut r); + + for j in 0..n { + r.set(j, 0, b.get(j, 0) - r.get(j, 0)); + rr.set(j, 0, r.get(j, 0)); + } + + let bnrm = b.norm(T::two()); + self.solve_preconditioner(a, &r, &mut z); + + let mut p = M::zeros(n, 1); + let mut pp = M::zeros(n, 1); + let mut bkden = T::zero(); + let mut err = T::zero(); + + for iter in 1..max_iter { + let mut bknum = T::zero(); + + self.solve_preconditioner(a, &rr, &mut zz); + for j in 0..n { + bknum += z.get(j, 0) * rr.get(j, 0); + } + if iter == 1 { + for j in 0..n { + 
p.set(j, 0, z.get(j, 0)); + pp.set(j, 0, zz.get(j, 0)); + } + } else { + let bk = bknum / bkden; + for j in 0..n { + p.set(j, 0, bk * p.get(j, 0) + z.get(j, 0)); + pp.set(j, 0, bk * pp.get(j, 0) + zz.get(j, 0)); + } + } + bkden = bknum; + self.mat_vec_mul(a, &p, &mut z); + let mut akden = T::zero(); + for j in 0..n { + akden += z.get(j, 0) * pp.get(j, 0); + } + let ak = bknum / akden; + self.mat_t_vec_mul(a, &pp, &mut zz); + for j in 0..n { + x.set(j, 0, x.get(j, 0) + ak * p.get(j, 0)); + r.set(j, 0, r.get(j, 0) - ak * z.get(j, 0)); + rr.set(j, 0, rr.get(j, 0) - ak * zz.get(j, 0)); + } + self.solve_preconditioner(a, &r, &mut z); + err = r.norm(T::two()) / bnrm; + + if err <= tol { + break; + } + } + + Ok(err) + } + + fn solve_preconditioner(&self, a: &M, b: &M, x: &mut M) { + let diag = Self::diag(a); + let n = diag.len(); + + for i in 0..n { + if diag[i] != T::zero() { + x.set(i, 0, b.get(i, 0) / diag[i]); + } else { + x.set(i, 0, b.get(i, 0)); + } + } + } + + // y = Ax + fn mat_vec_mul(&self, a: &M, x: &M, y: &mut M) { + y.copy_from(&a.matmul(x)); + } + + // y = Atx + fn mat_t_vec_mul(&self, a: &M, x: &M, y: &mut M) { + y.copy_from(&a.ab(true, x, false)); + } + + fn diag(a: &M) -> Vec { + let (nrows, ncols) = a.shape(); + let n = nrows.min(ncols); + + let mut d = Vec::with_capacity(n); + for i in 0..n { + d.push(a.get(i, i)); + } + + d + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::linalg::naive::dense_matrix::*; + + pub struct BGSolver {} + + impl> BiconjugateGradientSolver for BGSolver {} + + #[test] + fn bg_solver() { + let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]]); + let b = DenseMatrix::from_2d_array(&[&[40., 51., 28.]]); + let expected = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0]]); + + let mut x = DenseMatrix::zeros(3, 1); + + let solver = BGSolver {}; + + let err: f64 = solver + .solve_mut(&a, &b.transpose(), &mut x, 1e-6, 6) + .unwrap(); + + assert!(x.transpose().approximate_eq(&expected, 
1e-4)); + assert!((err - 0.0).abs() < 1e-4); + } +} diff --git a/src/linear/lasso.rs b/src/linear/lasso.rs new file mode 100644 index 0000000..306b1aa --- /dev/null +++ b/src/linear/lasso.rs @@ -0,0 +1,509 @@ +//! # Lasso +//! +//! [Linear regression](../linear_regression/index.html) is the standard algorithm for predicting a quantitative response \\(y\\) on the basis of a linear combination of explanatory variables \\(X\\) +//! that assumes that there is approximately a linear relationship between \\(X\\) and \\(y\\). +//! Lasso is an extension to linear regression that adds an L1 regularization term to the loss function during training. +//! +//! Similar to [ridge regression](../ridge_regression/index.html), the lasso shrinks the coefficient estimates towards zero. However, in the case of the lasso, the l1 penalty has the effect of +//! forcing some of the coefficient estimates to be exactly equal to zero when the tuning parameter \\(\alpha\\) is sufficiently large. +//! +//! Lasso coefficient estimates solve the problem: +//! +//! \\[\underset{\beta}{minimize} \space \space \sum_{i=1}^n \left( y_i - \beta_0 - \sum_{j=1}^p \beta_jx_{ij} \right)^2 + \alpha \sum_{j=1}^p \lVert \beta_j \rVert_1\\] +//! +//! This problem is solved with an interior-point method that is comparable to coordinate descent in solving large problems with modest accuracy, +//! but is able to solve them with high accuracy with relatively small additional computational cost. +//! +//! ## References: +//! +//! * ["An Introduction to Statistical Learning", James G., Witten D., Hastie T., Tibshirani R., 6.2. Shrinkage Methods](http://faculty.marshall.usc.edu/gareth-james/ISL/) +//! * ["An Interior-Point Method for Large-Scale l1-Regularized Least Squares", K. Koh, M. Lustig, S. Boyd, D. Gorinevsky](https://web.stanford.edu/~boyd/papers/pdf/l1_ls.pdf) +//! * [Simple Matlab Solver for l1-regularized Least Squares Problems](https://web.stanford.edu/~boyd/l1_ls/) +//! +//! +//! 
+use std::fmt::Debug; + +use serde::{Deserialize, Serialize}; + +use crate::error::Failed; +use crate::linalg::BaseVector; +use crate::linalg::Matrix; +use crate::linear::bg_solver::BiconjugateGradientSolver; +use crate::math::num::RealNumber; + +/// Lasso regression parameters +#[derive(Serialize, Deserialize, Debug)] +pub struct LassoParameters { + /// Controls the strength of the penalty to the loss function. + pub alpha: T, + /// If true the regressors X will be normalized before regression + /// by subtracting the mean and dividing by the standard deviation. + pub normalize: bool, + /// The tolerance for the optimization + pub tol: T, + /// The maximum number of iterations + pub max_iter: usize, +} + +#[derive(Serialize, Deserialize, Debug)] +/// Lasso regressor +pub struct Lasso> { + coefficients: M, + intercept: T, +} + +struct InteriorPointOptimizer> { + ata: M, + d1: Vec, + d2: Vec, + prb: Vec, + prs: Vec, +} + +impl Default for LassoParameters { + fn default() -> Self { + LassoParameters { + alpha: T::one(), + normalize: true, + tol: T::from_f64(1e-4).unwrap(), + max_iter: 1000, + } + } +} + +impl> PartialEq for Lasso { + fn eq(&self, other: &Self) -> bool { + self.coefficients == other.coefficients + && (self.intercept - other.intercept).abs() <= T::epsilon() + } +} + +impl> Lasso { + /// Fits Lasso regression to your data. + /// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation. + /// * `y` - target values + /// * `parameters` - other parameters, use `Default::default()` to set parameters to default values. 
+ pub fn fit( + x: &M, + y: &M::RowVector, + parameters: LassoParameters, + ) -> Result, Failed> { + let (n, p) = x.shape(); + + if n <= p { + return Err(Failed::fit( + "Number of rows in X should be >= number of columns in X", + )); + } + + if parameters.alpha < T::zero() { + return Err(Failed::fit("alpha should be >= 0")); + } + + if parameters.tol <= T::zero() { + return Err(Failed::fit("tol should be > 0")); + } + + if parameters.max_iter == 0 { + return Err(Failed::fit("max_iter should be > 0")); + } + + if y.len() != n { + return Err(Failed::fit("Number of rows in X should = len(y)")); + } + + let (w, b) = if parameters.normalize { + let (scaled_x, col_mean, col_std) = Self::rescale_x(x)?; + + let mut optimizer = InteriorPointOptimizer::new(&scaled_x, p); + + let mut w = optimizer.optimize(&scaled_x, y, ¶meters)?; + + for j in 0..p { + w.set(j, 0, w.get(j, 0) / col_std[j]); + } + + let mut b = T::zero(); + + for i in 0..p { + b += w.get(i, 0) * col_mean[i]; + } + + b = y.mean() - b; + (w, b) + } else { + let mut optimizer = InteriorPointOptimizer::new(x, p); + + let w = optimizer.optimize(x, y, ¶meters)?; + + (w, y.mean()) + }; + + Ok(Lasso { + intercept: b, + coefficients: w, + }) + } + + /// Predict target values from `x` + /// * `x` - _KxM_ data where _K_ is number of observations and _M_ is number of features. 
+ pub fn predict(&self, x: &M) -> Result { + let (nrows, _) = x.shape(); + let mut y_hat = x.matmul(&self.coefficients); + y_hat.add_mut(&M::fill(nrows, 1, self.intercept)); + Ok(y_hat.transpose().to_row_vector()) + } + + /// Get estimates regression coefficients + pub fn coefficients(&self) -> &M { + &self.coefficients + } + + /// Get estimate of intercept + pub fn intercept(&self) -> T { + self.intercept + } + + fn rescale_x(x: &M) -> Result<(M, Vec, Vec), Failed> { + let col_mean = x.mean(0); + let col_std = x.std(0); + + for i in 0..col_std.len() { + if (col_std[i] - T::zero()).abs() < T::epsilon() { + return Err(Failed::fit(&format!( + "Cannot rescale constant column {}", + i + ))); + } + } + + let mut scaled_x = x.clone(); + scaled_x.scale_mut(&col_mean, &col_std, 0); + Ok((scaled_x, col_mean, col_std)) + } +} + +impl> InteriorPointOptimizer { + fn new(a: &M, n: usize) -> InteriorPointOptimizer { + InteriorPointOptimizer { + ata: a.ab(true, a, false), + d1: vec![T::zero(); n], + d2: vec![T::zero(); n], + prb: vec![T::zero(); n], + prs: vec![T::zero(); n], + } + } + + fn optimize( + &mut self, + x: &M, + y: &M::RowVector, + parameters: &LassoParameters, + ) -> Result { + let (n, p) = x.shape(); + let p_f64 = T::from_usize(p).unwrap(); + + //parameters + let pcgmaxi = 5000; + let min_pcgtol = T::from_f64(0.1).unwrap(); + let eta = T::from_f64(1E-3).unwrap(); + let alpha = T::from_f64(0.01).unwrap(); + let beta = T::from_f64(0.5).unwrap(); + let gamma = T::from_f64(-0.25).unwrap(); + let mu = T::two(); + + let y = M::from_row_vector(y.sub_scalar(y.mean())).transpose(); + + let mut max_ls_iter = 100; + let mut pitr = 0; + let mut w = M::zeros(p, 1); + let mut neww = w.clone(); + let mut u = M::ones(p, 1); + let mut newu = u.clone(); + + let mut f = M::fill(p, 2, -T::one()); + let mut newf = f.clone(); + + let mut q1 = vec![T::zero(); p]; + let mut q2 = vec![T::zero(); p]; + + let mut dx = M::zeros(p, 1); + let mut du = M::zeros(p, 1); + let mut dxu = M::zeros(2 * 
p, 1); + let mut grad = M::zeros(2 * p, 1); + + let mut nu = M::zeros(n, 1); + let mut dobj = T::zero(); + let mut s = T::infinity(); + let mut t = T::one() + .max(T::one() / parameters.alpha) + .min(T::two() * p_f64 / T::from(1e-3).unwrap()); + + for ntiter in 0..parameters.max_iter { + let mut z = x.matmul(&w); + + for i in 0..n { + z.set(i, 0, z.get(i, 0) - y.get(i, 0)); + nu.set(i, 0, T::two() * z.get(i, 0)); + } + + // CALCULATE DUALITY GAP + let xnu = x.ab(true, &nu, false); + let max_xnu = xnu.norm(T::infinity()); + if max_xnu > parameters.alpha { + let lnu = parameters.alpha / max_xnu; + nu.mul_scalar_mut(lnu); + } + + let pobj = z.dot(&z) + parameters.alpha * w.norm(T::one()); + dobj = dobj.max(gamma * nu.dot(&nu) - nu.dot(&y)); + + let gap = pobj - dobj; + + // STOPPING CRITERION + if gap / dobj < parameters.tol { + break; + } + + // UPDATE t + if s >= T::half() { + t = t.max((T::two() * p_f64 * mu / gap).min(mu * t)); + } + + // CALCULATE NEWTON STEP + for i in 0..p { + let q1i = T::one() / (u.get(i, 0) + w.get(i, 0)); + let q2i = T::one() / (u.get(i, 0) - w.get(i, 0)); + q1[i] = q1i; + q2[i] = q2i; + self.d1[i] = (q1i * q1i + q2i * q2i) / t; + self.d2[i] = (q1i * q1i - q2i * q2i) / t; + } + + let mut gradphi = x.ab(true, &z, false); + + for i in 0..p { + let g1 = T::two() * gradphi.get(i, 0) - (q1[i] - q2[i]) / t; + let g2 = parameters.alpha - (q1[i] + q2[i]) / t; + gradphi.set(i, 0, g1); + grad.set(i, 0, -g1); + grad.set(i + p, 0, -g2); + } + + for i in 0..p { + self.prb[i] = T::two() + self.d1[i]; + self.prs[i] = self.prb[i] * self.d1[i] - self.d2[i] * self.d2[i]; + } + + let normg = grad.norm2(); + let mut pcgtol = min_pcgtol.min(eta * gap / T::one().min(normg)); + if ntiter != 0 && pitr == 0 { + pcgtol *= min_pcgtol; + } + + let error = self.solve_mut(x, &grad, &mut dxu, pcgtol, pcgmaxi)?; + if error > pcgtol { + pitr = pcgmaxi; + } + + for i in 0..p { + dx.set(i, 0, dxu.get(i, 0)); + du.set(i, 0, dxu.get(i + p, 0)); + } + + // BACKTRACKING LINE 
SEARCH + let phi = z.dot(&z) + parameters.alpha * u.sum() - Self::sumlogneg(&f) / t; + s = T::one(); + let gdx = grad.dot(&dxu); + + let lsiter = 0; + while lsiter < max_ls_iter { + for i in 0..p { + neww.set(i, 0, w.get(i, 0) + s * dx.get(i, 0)); + newu.set(i, 0, u.get(i, 0) + s * du.get(i, 0)); + newf.set(i, 0, neww.get(i, 0) - newu.get(i, 0)); + newf.set(i, 1, -neww.get(i, 0) - newu.get(i, 0)); + } + + if newf.max() < T::zero() { + let mut newz = x.matmul(&neww); + for i in 0..n { + newz.set(i, 0, newz.get(i, 0) - y.get(i, 0)); + } + + let newphi = newz.dot(&newz) + parameters.alpha * newu.sum() + - Self::sumlogneg(&newf) / t; + if newphi - phi <= alpha * s * gdx { + break; + } + } + s = beta * s; + max_ls_iter += 1; + } + + if lsiter == max_ls_iter { + return Err(Failed::fit( + "Exceeded maximum number of iteration for interior point optimizer", + )); + } + + w.copy_from(&neww); + u.copy_from(&newu); + f.copy_from(&newf); + } + + Ok(w) + } + + fn sumlogneg(f: &M) -> T { + let (n, _) = f.shape(); + let mut sum = T::zero(); + for i in 0..n { + sum += (-f.get(i, 0)).ln(); + sum += (-f.get(i, 1)).ln(); + } + sum + } +} + +impl<'a, T: RealNumber, M: Matrix> BiconjugateGradientSolver + for InteriorPointOptimizer +{ + fn solve_preconditioner(&self, a: &M, b: &M, x: &mut M) { + let (_, p) = a.shape(); + + for i in 0..p { + x.set( + i, + 0, + (self.d1[i] * b.get(i, 0) - self.d2[i] * b.get(i + p, 0)) / self.prs[i], + ); + x.set( + i + p, + 0, + (-self.d2[i] * b.get(i, 0) + self.prb[i] * b.get(i + p, 0)) / self.prs[i], + ); + } + } + + fn mat_vec_mul(&self, _: &M, x: &M, y: &mut M) { + let (_, p) = self.ata.shape(); + let atax = self.ata.matmul(&x.slice(0..p, 0..1)); + + for i in 0..p { + y.set( + i, + 0, + T::two() * atax.get(i, 0) + self.d1[i] * x.get(i, 0) + self.d2[i] * x.get(i + p, 0), + ); + y.set( + i + p, + 0, + self.d2[i] * x.get(i, 0) + self.d1[i] * x.get(i + p, 0), + ); + } + } + + fn mat_t_vec_mul(&self, a: &M, x: &M, y: &mut M) { + self.mat_vec_mul(a, x, y); 
+ } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::linalg::naive::dense_matrix::*; + use crate::metrics::mean_absolute_error; + + #[test] + fn lasso_fit_predict() { + let x = DenseMatrix::from_2d_array(&[ + &[234.289, 235.6, 159.0, 107.608, 1947., 60.323], + &[259.426, 232.5, 145.6, 108.632, 1948., 61.122], + &[258.054, 368.2, 161.6, 109.773, 1949., 60.171], + &[284.599, 335.1, 165.0, 110.929, 1950., 61.187], + &[328.975, 209.9, 309.9, 112.075, 1951., 63.221], + &[346.999, 193.2, 359.4, 113.270, 1952., 63.639], + &[365.385, 187.0, 354.7, 115.094, 1953., 64.989], + &[363.112, 357.8, 335.0, 116.219, 1954., 63.761], + &[397.469, 290.4, 304.8, 117.388, 1955., 66.019], + &[419.180, 282.2, 285.7, 118.734, 1956., 67.857], + &[442.769, 293.6, 279.8, 120.445, 1957., 68.169], + &[444.546, 468.1, 263.7, 121.950, 1958., 66.513], + &[482.704, 381.3, 255.2, 123.366, 1959., 68.655], + &[502.601, 393.1, 251.4, 125.368, 1960., 69.564], + &[518.173, 480.6, 257.2, 127.852, 1961., 69.331], + &[554.894, 400.7, 282.7, 130.081, 1962., 70.551], + ]); + + let y: Vec = vec![ + 83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6, + 114.2, 115.7, 116.9, + ]; + + let y_hat = Lasso::fit( + &x, + &y, + LassoParameters { + alpha: 0.1, + normalize: false, + tol: 1e-4, + max_iter: 1000, + }, + ) + .and_then(|lr| lr.predict(&x)) + .unwrap(); + + assert!(mean_absolute_error(&y_hat, &y) < 2.0); + + let y_hat = Lasso::fit( + &x, + &y, + LassoParameters { + alpha: 0.1, + normalize: false, + tol: 1e-4, + max_iter: 1000, + }, + ) + .and_then(|lr| lr.predict(&x)) + .unwrap(); + + assert!(mean_absolute_error(&y_hat, &y) < 2.0); + } + + #[test] + fn serde() { + let x = DenseMatrix::from_2d_array(&[ + &[234.289, 235.6, 159.0, 107.608, 1947., 60.323], + &[259.426, 232.5, 145.6, 108.632, 1948., 61.122], + &[258.054, 368.2, 161.6, 109.773, 1949., 60.171], + &[284.599, 335.1, 165.0, 110.929, 1950., 61.187], + &[328.975, 209.9, 309.9, 112.075, 1951., 63.221], + 
&[346.999, 193.2, 359.4, 113.270, 1952., 63.639], + &[365.385, 187.0, 354.7, 115.094, 1953., 64.989], + &[363.112, 357.8, 335.0, 116.219, 1954., 63.761], + &[397.469, 290.4, 304.8, 117.388, 1955., 66.019], + &[419.180, 282.2, 285.7, 118.734, 1956., 67.857], + &[442.769, 293.6, 279.8, 120.445, 1957., 68.169], + &[444.546, 468.1, 263.7, 121.950, 1958., 66.513], + &[482.704, 381.3, 255.2, 123.366, 1959., 68.655], + &[502.601, 393.1, 251.4, 125.368, 1960., 69.564], + &[518.173, 480.6, 257.2, 127.852, 1961., 69.331], + &[554.894, 400.7, 282.7, 130.081, 1962., 70.551], + ]); + + let y = vec![ + 83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6, + 114.2, 115.7, 116.9, + ]; + + let lr = Lasso::fit(&x, &y, Default::default()).unwrap(); + + let deserialized_lr: Lasso> = + serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap(); + + assert_eq!(lr, deserialized_lr); + } +} diff --git a/src/linear/logistic_regression.rs b/src/linear/logistic_regression.rs index 4b52529..a3674b3 100644 --- a/src/linear/logistic_regression.rs +++ b/src/linear/logistic_regression.rs @@ -289,7 +289,7 @@ impl> LogisticRegression { let n = x.shape().0; let mut result = M::zeros(1, n); if self.num_classes == 2 { - let y_hat: Vec = x.matmul(&self.coefficients.transpose()).get_col_as_vec(0); + let y_hat: Vec = x.ab(false, &self.coefficients, true).get_col_as_vec(0); let intercept = self.intercept.get(0, 0); for i in 0..n { result.set( diff --git a/src/linear/mod.rs b/src/linear/mod.rs index fef7070..edaea4f 100644 --- a/src/linear/mod.rs +++ b/src/linear/mod.rs @@ -20,6 +20,8 @@ //! //! 
+pub(crate) mod bg_solver; +pub mod lasso; pub mod linear_regression; pub mod logistic_regression; pub mod ridge_regression; From f9056f716ad1296c335d816b5c3e15c1823dd174 Mon Sep 17 00:00:00 2001 From: Volodymyr Orlov Date: Tue, 24 Nov 2020 19:21:27 -0800 Subject: [PATCH 2/3] lasso: minor change in unit test --- src/linear/lasso.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/linear/lasso.rs b/src/linear/lasso.rs index 306b1aa..965c1c4 100644 --- a/src/linear/lasso.rs +++ b/src/linear/lasso.rs @@ -447,7 +447,7 @@ mod tests { &y, LassoParameters { alpha: 0.1, - normalize: false, + normalize: true, tol: 1e-4, max_iter: 1000, }, From 67e582987792166ca15a8e0303968e78b5160626 Mon Sep 17 00:00:00 2001 From: Volodymyr Orlov Date: Wed, 25 Nov 2020 12:23:04 -0800 Subject: [PATCH 3/3] simplifies generic matrix.ab implementation --- src/linalg/high_order.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/linalg/high_order.rs b/src/linalg/high_order.rs index 359c4a1..493c737 100644 --- a/src/linalg/high_order.rs +++ b/src/linalg/high_order.rs @@ -19,7 +19,7 @@ pub trait HighOrderOperations: BaseMatrix { /// ``` fn ab(&self, a_transpose: bool, b: &Self, b_transpose: bool) -> Self { match (a_transpose, b_transpose) { - (true, true) => self.transpose().matmul(&b.transpose()), + (true, true) => b.matmul(self).transpose(), (false, true) => self.matmul(&b.transpose()), (true, false) => self.transpose().matmul(b), (false, false) => self.matmul(b),