Merge potential next release v0.4 (#187) Breaking Changes

* First draft of the new n-dimensional arrays + NB use case
* Improves default implementation of multiple Array methods
* Refactors tree methods
* Adds matrix decomposition routines
* Adds matrix decomposition methods to ndarray and nalgebra bindings
* Refactoring + linear regression now uses array2
* Ridge & Linear regression
* LBFGS optimizer & logistic regression
* LBFGS optimizer & logistic regression
* Changes linear methods, metrics and model selection methods to new n-dimensional arrays
* Switches KNN and clustering algorithms to new n-d array layer
* Refactors distance metrics
* Optimizes knn and clustering methods
* Refactors metrics module
* Switches decomposition methods to n-dimensional arrays
* Linalg refactoring - cleanup rng merge (#172)
* Remove legacy DenseMatrix and BaseMatrix implementation. Port the new Number, FloatNumber and Array implementation into module structure.
* Exclude AUC metrics. Needs reimplementation
* Improve developers walkthrough

New traits system in place at `src/numbers` and `src/linalg`
Co-authored-by: Lorenzo <tunedconsulting@gmail.com>

* Provide SupervisedEstimator with a constructor to avoid explicit dynamical box allocation in 'cross_validate' and 'cross_validate_predict' as required by the use of 'dyn' as per Rust 2021
* Implement getters to use as_ref() in src/neighbors
* Implement getters to use as_ref() in src/naive_bayes
* Implement getters to use as_ref() in src/linear
* Add Clone to src/naive_bayes
* Change signature for cross_validate and other model_selection functions to abide to use of dyn in Rust 2021
* Implement ndarray-bindings. Remove FloatNumber from implementations
* Drop nalgebra-bindings support (as decided in conf-call to go for ndarray)
* Remove benches. Benches will have their own repo at smartcore-benches
* Implement SVC
* Implement SVC serialization. Move search parameters in dedicated module
* Implement SVR. Definitely too slow
* Fix compilation issues for wasm (#202)

Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
* Fix tests (#203)

* Port linalg/traits/stats.rs
* Improve methods naming
* Improve Display for DenseMatrix

Co-authored-by: Montana Low <montanalow@users.noreply.github.com>
Co-authored-by: VolodymyrOrlov <volodymyr.orlov@gmail.com>
This commit is contained in:
Lorenzo
2022-10-31 10:44:57 +00:00
committed by GitHub
parent bb71656137
commit 52eb6ce023
110 changed files with 10327 additions and 9107 deletions
+206
View File
@@ -0,0 +1,206 @@
//! # Cholesky Decomposition
//!
//! Every positive definite matrix \\(A \in R^{n \times n}\\) can be factored as
//!
//! \\[A = R^TR\\]
//!
//! where \\(R\\) is upper triangular matrix with positive diagonal elements
//!
//! Example:
//! ```
//! use smartcore::linalg::basic::matrix::DenseMatrix;
//! use smartcore::linalg::traits::cholesky::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[25., 15., -5.],
//! &[15., 18., 0.],
//! &[-5., 0., 11.]
//! ]);
//!
//! let cholesky = A.cholesky().unwrap();
//! let lower_triangular: DenseMatrix<f64> = cholesky.L();
//! let upper_triangular: DenseMatrix<f64> = cholesky.U();
//! ```
//!
//! ## References:
//! * ["No bullshit guide to linear algebra", Ivan Savov, 2016, 7.6 Matrix decompositions](https://minireference.com/)
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., 2.9 Cholesky Decomposition](http://numerical.recipes/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
#![allow(non_snake_case)]
use std::fmt::Debug;
use std::marker::PhantomData;
use crate::error::{Failed, FailedError};
use crate::linalg::basic::arrays::Array2;
use crate::numbers::basenum::Number;
use crate::numbers::realnum::RealNumber;
#[derive(Debug, Clone)]
/// Results of Cholesky decomposition.
pub struct Cholesky<T: Number + RealNumber, M: Array2<T>> {
    /// Factored matrix: the lower triangle (diagonal included) holds the
    /// Cholesky factor written in place by `cholesky_mut`; entries above the
    /// diagonal are leftovers from the input and are never read back.
    R: M,
    /// Marker tying the element type `T` to the struct (no runtime data).
    t: PhantomData<T>,
}
impl<T: Number + RealNumber, M: Array2<T>> Cholesky<T, M> {
    /// Wraps the factored matrix produced by `cholesky_mut`.
    pub(crate) fn new(R: M) -> Cholesky<T, M> {
        Cholesky { R, t: PhantomData }
    }

    /// Get lower triangular matrix.
    pub fn L(&self) -> M {
        let (n, _) = self.R.shape();
        let mut R = M::zeros(n, n);
        for i in 0..n {
            for j in 0..n {
                // Copy only the stored lower triangle; the rest stays zero.
                if j <= i {
                    R.set((i, j), *self.R.get((i, j)));
                }
            }
        }
        R
    }

    /// Get upper triangular matrix.
    pub fn U(&self) -> M {
        let (n, _) = self.R.shape();
        let mut R = M::zeros(n, n);
        for i in 0..n {
            for j in 0..n {
                // Mirror the stored lower triangle into the upper triangle.
                if j <= i {
                    R.set((j, i), *self.R.get((i, j)));
                }
            }
        }
        R
    }

    /// Solves Ax = b using the stored factor: forward substitution followed
    /// by back substitution. `b` may have multiple columns; it is
    /// overwritten with the solution and returned.
    pub(crate) fn solve(&self, mut b: M) -> Result<M, Failed> {
        let (bn, m) = b.shape();
        let (rn, _) = self.R.shape();
        if bn != rn {
            // Fixed message: a refactor had mangled "Number" into "FloatNumber".
            return Err(Failed::because(
                FailedError::SolutionFailed,
                "Can't solve Ax = b for x. Number of rows in b != number of rows in R.",
            ));
        }
        // Forward substitution.
        for k in 0..bn {
            for j in 0..m {
                for i in 0..k {
                    b.sub_element_mut((k, j), *b.get((i, j)) * *self.R.get((k, i)));
                }
                b.div_element_mut((k, j), *self.R.get((k, k)));
            }
        }
        // Back substitution against the transposed factor.
        for k in (0..bn).rev() {
            for j in 0..m {
                for i in k + 1..bn {
                    b.sub_element_mut((k, j), *b.get((i, j)) * *self.R.get((i, k)));
                }
                b.div_element_mut((k, j), *self.R.get((k, k)));
            }
        }
        Ok(b)
    }
}
/// Trait that implements Cholesky decomposition routine for any matrix.
pub trait CholeskyDecomposable<T: Number + RealNumber>: Array2<T> {
    /// Compute the Cholesky decomposition of a matrix.
    fn cholesky(&self) -> Result<Cholesky<T, Self>, Failed> {
        self.clone().cholesky_mut()
    }

    /// Compute the Cholesky decomposition of a matrix. The input matrix
    /// will be used for factorization.
    fn cholesky_mut(mut self) -> Result<Cholesky<T, Self>, Failed> {
        let (m, n) = self.shape();
        if m != n {
            return Err(Failed::because(
                FailedError::DecompositionFailed,
                "Can't do Cholesky decomposition on a non-square matrix",
            ));
        }
        // Column-by-column factorization; the factor is written in place
        // into the lower triangle of `self`.
        for j in 0..n {
            let mut d = T::zero();
            for k in 0..j {
                let mut s = T::zero();
                for i in 0..k {
                    s += *self.get((k, i)) * *self.get((j, i));
                }
                s = (*self.get((j, k)) - s) / *self.get((k, k));
                self.set((j, k), s);
                d += s * s;
            }
            // Remaining diagonal mass; it must be non-negative for a
            // positive definite input.
            d = *self.get((j, j)) - d;
            if d < T::zero() {
                return Err(Failed::because(
                    FailedError::DecompositionFailed,
                    "The matrix is not positive definite.",
                ));
            }
            self.set((j, j), d.sqrt());
        }
        Ok(Cholesky::new(self))
    }

    /// Solves Ax = b
    fn cholesky_solve_mut(self, b: Self) -> Result<Self, Failed> {
        // Renamed the binding from `qr` — this is a Cholesky factor, not QR.
        self.cholesky_mut().and_then(|chol| chol.solve(b))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::basic::matrix::DenseMatrix;
    use approx::relative_eq;

    /// Factor a known SPD matrix and compare both triangular factors, and
    /// their product, against hand-computed references.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn cholesky_decompose() {
        let input =
            DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]]);
        let expected_lower =
            DenseMatrix::from_2d_array(&[&[5.0, 0.0, 0.0], &[3.0, 3.0, 0.0], &[-1.0, 1.0, 3.0]]);
        let expected_upper =
            DenseMatrix::from_2d_array(&[&[5.0, 3.0, -1.0], &[0.0, 3.0, 1.0], &[0.0, 0.0, 3.0]]);
        let factorization = input.cholesky().unwrap();
        assert!(relative_eq!(
            factorization.L().abs(),
            expected_lower.abs(),
            epsilon = 1e-4
        ));
        assert!(relative_eq!(
            factorization.U().abs(),
            expected_upper.abs(),
            epsilon = 1e-4
        ));
        // L * U must reconstruct the original matrix.
        assert!(relative_eq!(
            factorization.L().matmul(&factorization.U()).abs(),
            input.abs(),
            epsilon = 1e-4
        ));
    }

    /// Solve A x = b through the factorization and check the solution.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn cholesky_solve_mut() {
        let coefficients =
            DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]]);
        let rhs = DenseMatrix::from_2d_array(&[&[40., 51., 28.]]);
        let expected = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0]]);
        let factorization = coefficients.cholesky().unwrap();
        assert!(relative_eq!(
            factorization.solve(rhs.transpose()).unwrap().transpose(),
            expected,
            epsilon = 1e-4
        ));
    }
}
+909
View File
@@ -0,0 +1,909 @@
//! # Eigen Decomposition
//!
//! Eigendecomposition is one of the most useful matrix factorization methods in machine learning that decomposes a matrix into eigenvectors and eigenvalues.
//! This decomposition plays an important role in the [Principal Component Analysis (PCA)](../../decomposition/pca/index.html).
//!
//! Eigendecomposition decomposes a square matrix into a set of eigenvectors and eigenvalues.
//!
//! \\[A = Q \Lambda Q^{-1}\\]
//!
//! where \\(Q\\) is a matrix comprised of the eigenvectors, \\(\Lambda\\) is a diagonal matrix comprised of the eigenvalues along the diagonal,
//! and \\(Q^{-1}\\) is the inverse of the matrix comprised of the eigenvectors.
//!
//! Example:
//! ```
//! use smartcore::linalg::basic::matrix::DenseMatrix;
//! use smartcore::linalg::traits::evd::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[0.9000, 0.4000, 0.7000],
//! &[0.4000, 0.5000, 0.3000],
//! &[0.7000, 0.3000, 0.8000],
//! ]);
//!
//! let evd = A.evd(true).unwrap();
//! let eigenvectors: DenseMatrix<f64> = evd.V;
//! let eigenvalues: Vec<f64> = evd.d;
//! ```
//!
//! ## References:
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., Section 11 Eigensystems](http://numerical.recipes/)
//! * ["Introduction to Linear Algebra", Gilbert Strang, 5th ed., ch. 6 Eigenvalues and Eigenvectors](https://math.mit.edu/~gs/linearalgebra/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
#![allow(non_snake_case)]
use crate::error::Failed;
use crate::linalg::basic::arrays::Array2;
use crate::numbers::basenum::Number;
use crate::numbers::realnum::RealNumber;
use num::complex::Complex;
use std::fmt::Debug;
#[derive(Debug, Clone)]
/// Results of eigen decomposition
pub struct EVD<T: Number + RealNumber, M: Array2<T>> {
    /// Real part of eigenvalues.
    pub d: Vec<T>,
    /// Imaginary part of eigenvalues.
    /// All zeros when the decomposed matrix was symmetric.
    pub e: Vec<T>,
    /// Eigenvectors
    /// (stored as the columns of this matrix).
    pub V: M,
}
/// Trait that implements EVD decomposition routine for any matrix.
pub trait EVDDecomposable<T: Number + RealNumber>: Array2<T> {
/// Compute the eigen decomposition of a square matrix.
/// * `symmetric` - whether the matrix is symmetric
fn evd(&self, symmetric: bool) -> Result<EVD<T, Self>, Failed> {
self.clone().evd_mut(symmetric)
}
/// Compute the eigen decomposition of a square matrix. The input matrix
/// will be used for factorization.
/// * `symmetric` - whether the matrix is symmetric
fn evd_mut(mut self, symmetric: bool) -> Result<EVD<T, Self>, Failed> {
let (nrows, ncols) = self.shape();
if ncols != nrows {
panic!("Matrix is not square: {} x {}", nrows, ncols);
}
let n = nrows;
let mut d = vec![T::zero(); n];
let mut e = vec![T::zero(); n];
let mut V;
if symmetric {
V = self;
// Tridiagonalize.
tred2(&mut V, &mut d, &mut e);
// Diagonalize.
tql2(&mut V, &mut d, &mut e);
} else {
let scale = balance(&mut self);
let perm = elmhes(&mut self);
V = Self::eye(n);
eltran(&self, &mut V, &perm);
hqr2(&mut self, &mut V, &mut d, &mut e);
balbak(&mut V, &scale);
sort(&mut d, &mut e, &mut V);
}
Ok(EVD { V, d, e })
}
}
/// Householder reduction of a real symmetric matrix (stored in `V`) to
/// tridiagonal form: `d` receives the diagonal, `e` the subdiagonal, and
/// `V` the accumulated orthogonal transformation. Corresponds to the
/// classic EISPACK/Numerical Recipes `tred2` routine (see module
/// references).
fn tred2<T: Number + RealNumber, M: Array2<T>>(V: &mut M, d: &mut [T], e: &mut [T]) {
    let (n, _) = V.shape();
    // Seed d with the last row of V.
    for (i, d_i) in d.iter_mut().enumerate().take(n) {
        *d_i = *V.get((n - 1, i));
    }
    // Householder reduction, working upwards from the last row.
    for i in (1..n).rev() {
        // Scale to avoid under/overflow when forming the Householder vector.
        let mut scale = T::zero();
        let mut h = T::zero();
        for d_k in d.iter().take(i) {
            scale += d_k.abs();
        }
        if scale == T::zero() {
            // Row is already in reduced form; just shift values along.
            e[i] = d[i - 1];
            for (j, d_j) in d.iter_mut().enumerate().take(i) {
                *d_j = *V.get((i - 1, j));
                V.set((i, j), T::zero());
                V.set((j, i), T::zero());
            }
        } else {
            // Generate the Householder vector.
            for d_k in d.iter_mut().take(i) {
                *d_k /= scale;
                h += (*d_k) * (*d_k);
            }
            let mut f = d[i - 1];
            let mut g = h.sqrt();
            if f > T::zero() {
                g = -g;
            }
            e[i] = scale * g;
            h -= f * g;
            d[i - 1] = f - g;
            for e_j in e.iter_mut().take(i) {
                *e_j = T::zero();
            }
            // Apply the similarity transformation to the remaining
            // rows/columns.
            for j in 0..i {
                f = d[j];
                V.set((j, i), f);
                g = e[j] + *V.get((j, j)) * f;
                for k in j + 1..=i - 1 {
                    g += *V.get((k, j)) * d[k];
                    e[k] += *V.get((k, j)) * f;
                }
                e[j] = g;
            }
            f = T::zero();
            for j in 0..i {
                e[j] /= h;
                f += e[j] * d[j];
            }
            let hh = f / (h + h);
            for j in 0..i {
                e[j] -= hh * d[j];
            }
            for j in 0..i {
                f = d[j];
                g = e[j];
                for k in j..=i - 1 {
                    V.sub_element_mut((k, j), f * e[k] + g * d[k]);
                }
                d[j] = *V.get((i - 1, j));
                V.set((i, j), T::zero());
            }
        }
        d[i] = h;
    }
    // Accumulate the Householder transformations into V.
    for i in 0..n - 1 {
        V.set((n - 1, i), *V.get((i, i)));
        V.set((i, i), T::one());
        let h = d[i + 1];
        if h != T::zero() {
            for (k, d_k) in d.iter_mut().enumerate().take(i + 1) {
                *d_k = *V.get((k, i + 1)) / h;
            }
            for j in 0..=i {
                let mut g = T::zero();
                for k in 0..=i {
                    g += *V.get((k, i + 1)) * *V.get((k, j));
                }
                for (k, d_k) in d.iter().enumerate().take(i + 1) {
                    V.sub_element_mut((k, j), g * (*d_k));
                }
            }
        }
        for k in 0..=i {
            V.set((k, i + 1), T::zero());
        }
    }
    // Extract the final diagonal into d and clear the last row of V.
    for (j, d_j) in d.iter_mut().enumerate().take(n) {
        *d_j = *V.get((n - 1, j));
        V.set((n - 1, j), T::zero());
    }
    V.set((n - 1, n - 1), T::one());
    e[0] = T::zero();
}
/// Diagonalizes a symmetric tridiagonal matrix (diagonal `d`, subdiagonal
/// `e`) by the QL algorithm with implicit shifts, accumulating rotations
/// into `V`. Corresponds to the EISPACK/Numerical Recipes `tql2` routine
/// (see module references). On return `d` holds the eigenvalues sorted in
/// descending order, with the matching eigenvectors in the columns of `V`.
///
/// # Panics
/// Panics with "Too many iterations" if an eigenvalue has not converged
/// after 30 iterations.
fn tql2<T: Number + RealNumber, M: Array2<T>>(V: &mut M, d: &mut [T], e: &mut [T]) {
    let (n, _) = V.shape();
    // Shift the subdiagonal up by one position (EISPACK convention).
    for i in 1..n {
        e[i - 1] = e[i];
    }
    e[n - 1] = T::zero();
    let mut f = T::zero();
    let mut tst1 = T::zero();
    for l in 0..n {
        // Find a negligible subdiagonal element to split the matrix at.
        tst1 = T::max(tst1, d[l].abs() + e[l].abs());
        let mut m = l;
        loop {
            if m < n {
                if e[m].abs() <= tst1 * T::epsilon() {
                    break;
                }
                m += 1;
            } else {
                break;
            }
        }
        // If m == l, d[l] is already an eigenvalue; otherwise iterate.
        if m > l {
            let mut iter = 0;
            loop {
                iter += 1;
                if iter >= 30 {
                    panic!("Too many iterations");
                }
                // Compute the implicit shift.
                let mut g = d[l];
                let mut p = (d[l + 1] - g) / (T::two() * e[l]);
                let mut r = p.hypot(T::one());
                if p < T::zero() {
                    r = -r;
                }
                d[l] = e[l] / (p + r);
                d[l + 1] = e[l] * (p + r);
                let dl1 = d[l + 1];
                let mut h = g - d[l];
                for d_i in d.iter_mut().take(n).skip(l + 2) {
                    *d_i -= h;
                }
                f += h;
                // Implicit QL sweep: a sequence of plane rotations.
                p = d[m];
                let mut c = T::one();
                let mut c2 = c;
                let mut c3 = c;
                let el1 = e[l + 1];
                let mut s = T::zero();
                let mut s2 = T::zero();
                for i in (l..m).rev() {
                    c3 = c2;
                    c2 = c;
                    s2 = s;
                    g = c * e[i];
                    h = c * p;
                    r = p.hypot(e[i]);
                    e[i + 1] = s * r;
                    s = e[i] / r;
                    c = p / r;
                    p = c * d[i] - s * g;
                    d[i + 1] = h + s * (c * g + s * d[i]);
                    // Accumulate the rotation into the eigenvector matrix.
                    for k in 0..n {
                        h = *V.get((k, i + 1));
                        V.set((k, i + 1), s * *V.get((k, i)) + c * h);
                        V.set((k, i), c * *V.get((k, i)) - s * h);
                    }
                }
                p = -s * s2 * c3 * el1 * e[l] / dl1;
                e[l] = s * p;
                d[l] = c * p;
                // Convergence check.
                if e[l].abs() <= tst1 * T::epsilon() {
                    break;
                }
            }
        }
        d[l] += f;
        e[l] = T::zero();
    }
    // Selection sort of eigenvalues (descending) with matching eigenvector
    // column swaps.
    for i in 0..n - 1 {
        let mut k = i;
        let mut p = d[i];
        for (j, d_j) in d.iter().enumerate().take(n).skip(i + 1) {
            if *d_j > p {
                k = j;
                p = *d_j;
            }
        }
        if k != i {
            d[k] = d[i];
            d[i] = p;
            for j in 0..n {
                p = *V.get((j, i));
                V.set((j, i), *V.get((j, k)));
                V.set((j, k), p);
            }
        }
    }
}
/// Balances a general matrix in place via diagonal similarity
/// transformations so that corresponding row and column norms become
/// comparable, improving the accuracy of the subsequent eigenvalue
/// computation. Returns the per-row scale factors, which `balbak` uses
/// later to undo the scaling on the eigenvectors.
fn balance<T: Number + RealNumber, M: Array2<T>>(A: &mut M) -> Vec<T> {
    // Scaling is restricted to powers of the radix (2 here), so the
    // transformation introduces no rounding error.
    let radix = T::two();
    let sqrdx = radix * radix;
    let (n, _) = A.shape();
    let mut scale = vec![T::one(); n];
    // Threshold: only rescale when it shrinks the combined norm by >5%.
    let t = T::from(0.95).unwrap();
    let mut done = false;
    while !done {
        done = true;
        for (i, scale_i) in scale.iter_mut().enumerate().take(n) {
            // Off-diagonal column norm (c) and row norm (r) for index i.
            let mut r = T::zero();
            let mut c = T::zero();
            for j in 0..n {
                if j != i {
                    c += A.get((j, i)).abs();
                    r += A.get((i, j)).abs();
                }
            }
            if c != T::zero() && r != T::zero() {
                // Find the power of the radix closest to balancing c and r.
                let mut g = r / radix;
                let mut f = T::one();
                let s = c + r;
                while c < g {
                    f *= radix;
                    c *= sqrdx;
                }
                g = r * radix;
                while c > g {
                    f /= radix;
                    c /= sqrdx;
                }
                if (c + r) / f < t * s {
                    done = false;
                    g = T::one() / f;
                    *scale_i *= f;
                    // Apply the similarity transformation D^-1 A D.
                    for j in 0..n {
                        A.mul_element_mut((i, j), g);
                    }
                    for j in 0..n {
                        A.mul_element_mut((j, i), f);
                    }
                }
            }
        }
    }
    scale
}
/// Reduces a general matrix to upper Hessenberg form in place by Gaussian
/// elimination with partial pivoting. The multipliers are stored below the
/// subdiagonal of `A`, and the returned vector records the row interchange
/// made at each step, which `eltran` needs to rebuild the accumulated
/// transformation.
fn elmhes<T: Number + RealNumber, M: Array2<T>>(A: &mut M) -> Vec<usize> {
    let (n, _) = A.shape();
    let mut perm = vec![0; n];
    for (m, perm_m) in perm.iter_mut().enumerate().take(n - 1).skip(1) {
        // Find the pivot: the largest element in column m-1 at or below row m.
        let mut x = T::zero();
        let mut i = m;
        for j in m..n {
            if A.get((j, m - 1)).abs() > x.abs() {
                x = *A.get((j, m - 1));
                i = j;
            }
        }
        *perm_m = i;
        // Interchange rows and columns to move the pivot into place.
        if i != m {
            for j in (m - 1)..n {
                A.swap((i, j), (m, j));
            }
            for j in 0..n {
                A.swap((j, i), (j, m));
            }
        }
        // Eliminate below the pivot, storing each multiplier in place.
        if x != T::zero() {
            for i in (m + 1)..n {
                let mut y = *A.get((i, m - 1));
                if y != T::zero() {
                    y /= x;
                    A.set((i, m - 1), y);
                    for j in m..n {
                        A.sub_element_mut((i, j), y * *A.get((m, j)));
                    }
                    for j in 0..n {
                        A.add_element_mut((j, m), y * *A.get((j, i)));
                    }
                }
            }
        }
    }
    perm
}
/// Accumulates the stabilized elementary transformations recorded by
/// `elmhes` (multipliers stored below the subdiagonal of `A`, row
/// interchanges in `perm`) into the matrix `V`.
fn eltran<T: Number + RealNumber, M: Array2<T>>(A: &M, V: &mut M, perm: &[usize]) {
    let (n, _) = A.shape();
    // Replay the reduction steps in reverse column order.
    for col in (1..n - 1).rev() {
        // Copy the stored multipliers into this column of V.
        for row in col + 1..n {
            V.set((row, col), *A.get((row, col - 1)));
        }
        let swapped = perm[col];
        // Undo the row interchange performed during the reduction.
        if swapped != col {
            for j in col..n {
                V.set((col, j), *V.get((swapped, j)));
                V.set((swapped, j), T::zero());
            }
            V.set((swapped, col), T::one());
        }
    }
}
/// Shifted QR iteration on an upper Hessenberg matrix — the classic
/// EISPACK/Numerical Recipes `hqr2` routine (see module references).
/// Computes all eigenvalues (`d` = real parts, `e` = imaginary parts) and,
/// by accumulating every transformation into `V` and back-substituting,
/// the eigenvectors of the balanced matrix.
///
/// # Panics
/// Panics with "Too many iterations in hqr" if an eigenvalue has not
/// converged after 30 QR sweeps.
fn hqr2<T: Number + RealNumber, M: Array2<T>>(A: &mut M, V: &mut M, d: &mut [T], e: &mut [T]) {
    let (n, _) = A.shape();
    let mut z = T::zero();
    let mut s = T::zero();
    let mut r = T::zero();
    let mut q = T::zero();
    let mut p = T::zero();
    let mut anorm = T::zero();
    // Norm of the Hessenberg part of A, used in the convergence tests.
    for i in 0..n {
        for j in i32::max(i as i32 - 1, 0)..n as i32 {
            anorm += A.get((i, j as usize)).abs();
        }
    }
    let mut nn = n - 1;
    // Accumulated exceptional shifts.
    let mut t = T::zero();
    'outer: loop {
        let mut its = 0;
        loop {
            // Look for a single small subdiagonal element to split at.
            let mut l = nn;
            while l > 0 {
                s = A.get((l - 1, l - 1)).abs() + A.get((l, l)).abs();
                if s == T::zero() {
                    s = anorm;
                }
                if A.get((l, l - 1)).abs() <= T::epsilon() * s {
                    A.set((l, l - 1), T::zero());
                    break;
                }
                l -= 1;
            }
            let mut x = *A.get((nn, nn));
            if l == nn {
                // One (real) root found.
                d[nn] = x + t;
                A.set((nn, nn), x + t);
                if nn == 0 {
                    break 'outer;
                } else {
                    nn -= 1;
                }
            } else {
                let mut y = *A.get((nn - 1, nn - 1));
                let mut w = *A.get((nn, nn - 1)) * *A.get((nn - 1, nn));
                if l == nn - 1 {
                    // Two roots found: solve the trailing 2x2 block directly.
                    p = T::half() * (y - x);
                    q = p * p + w;
                    z = q.abs().sqrt();
                    x += t;
                    A.set((nn, nn), x);
                    A.set((nn - 1, nn - 1), y + t);
                    if q >= T::zero() {
                        // ...a real pair.
                        z = p + <T as RealNumber>::copysign(z, p);
                        d[nn - 1] = x + z;
                        d[nn] = x + z;
                        if z != T::zero() {
                            d[nn] = x - w / z;
                        }
                        x = *A.get((nn, nn - 1));
                        s = x.abs() + z.abs();
                        p = x / s;
                        q = z / s;
                        r = (p * p + q * q).sqrt();
                        p /= r;
                        q /= r;
                        // Apply the rotation to rows, columns and V.
                        for j in nn - 1..n {
                            z = *A.get((nn - 1, j));
                            A.set((nn - 1, j), q * z + p * *A.get((nn, j)));
                            A.set((nn, j), q * *A.get((nn, j)) - p * z);
                        }
                        for i in 0..=nn {
                            z = *A.get((i, nn - 1));
                            A.set((i, nn - 1), q * z + p * *A.get((i, nn)));
                            A.set((i, nn), q * *A.get((i, nn)) - p * z);
                        }
                        for i in 0..n {
                            z = *V.get((i, nn - 1));
                            V.set((i, nn - 1), q * z + p * *V.get((i, nn)));
                            V.set((i, nn), q * *V.get((i, nn)) - p * z);
                        }
                    } else {
                        // ...a complex conjugate pair.
                        d[nn] = x + p;
                        e[nn] = -z;
                        d[nn - 1] = d[nn];
                        e[nn - 1] = -e[nn];
                    }
                    if nn <= 1 {
                        break 'outer;
                    } else {
                        nn -= 2;
                    }
                } else {
                    // No roots found yet: continue iterating.
                    if its == 30 {
                        panic!("Too many iterations in hqr");
                    }
                    if its == 10 || its == 20 {
                        // Exceptional shift to break convergence stagnation.
                        t += x;
                        for i in 0..nn + 1 {
                            A.sub_element_mut((i, i), x);
                        }
                        s = A.get((nn, nn - 1)).abs() + A.get((nn - 1, nn - 2)).abs();
                        y = T::from_f64(0.75).unwrap() * s;
                        x = T::from_f64(0.75).unwrap() * s;
                        w = T::from_f64(-0.4375).unwrap() * s * s;
                    }
                    its += 1;
                    // Look for two consecutive small subdiagonal elements.
                    let mut m = nn - 2;
                    while m >= l {
                        z = *A.get((m, m));
                        r = x - z;
                        s = y - z;
                        p = (r * s - w) / *A.get((m + 1, m)) + *A.get((m, m + 1));
                        q = *A.get((m + 1, m + 1)) - z - r - s;
                        r = *A.get((m + 2, m + 1));
                        s = p.abs() + q.abs() + r.abs();
                        p /= s;
                        q /= s;
                        r /= s;
                        if m == l {
                            break;
                        }
                        let u = A.get((m, m - 1)).abs() * (q.abs() + r.abs());
                        let v = p.abs()
                            * (A.get((m - 1, m - 1)).abs() + z.abs() + A.get((m + 1, m + 1)).abs());
                        if u <= T::epsilon() * v {
                            break;
                        }
                        m -= 1;
                    }
                    for i in m..nn - 1 {
                        A.set((i + 2, i), T::zero());
                        if i != m {
                            A.set((i + 2, i - 1), T::zero());
                        }
                    }
                    // Double QR step on rows l..=nn and columns m..=nn.
                    for k in m..nn {
                        if k != m {
                            p = *A.get((k, k - 1));
                            q = *A.get((k + 1, k - 1));
                            r = T::zero();
                            if k + 1 != nn {
                                r = *A.get((k + 2, k - 1));
                            }
                            x = p.abs() + q.abs() + r.abs();
                            if x != T::zero() {
                                p /= x;
                                q /= x;
                                r /= x;
                            }
                        }
                        let s = <T as RealNumber>::copysign((p * p + q * q + r * r).sqrt(), p);
                        if s != T::zero() {
                            if k == m {
                                if l != m {
                                    A.set((k, k - 1), -*A.get((k, k - 1)));
                                }
                            } else {
                                A.set((k, k - 1), -s * x);
                            }
                            p += s;
                            x = p / s;
                            y = q / s;
                            z = r / s;
                            q /= p;
                            r /= p;
                            // Row modification.
                            for j in k..n {
                                p = *A.get((k, j)) + q * *A.get((k + 1, j));
                                if k + 1 != nn {
                                    p += r * *A.get((k + 2, j));
                                    A.sub_element_mut((k + 2, j), p * z);
                                }
                                A.sub_element_mut((k + 1, j), p * y);
                                A.sub_element_mut((k, j), p * x);
                            }
                            // Column modification.
                            let mmin = if nn < k + 3 { nn } else { k + 3 };
                            for i in 0..(mmin + 1) {
                                p = x * *A.get((i, k)) + y * *A.get((i, k + 1));
                                if k + 1 != nn {
                                    p += z * *A.get((i, k + 2));
                                    A.sub_element_mut((i, k + 2), p * r);
                                }
                                A.sub_element_mut((i, k + 1), p * q);
                                A.sub_element_mut((i, k), p);
                            }
                            // Accumulate the transformation into V.
                            for i in 0..n {
                                p = x * *V.get((i, k)) + y * *V.get((i, k + 1));
                                if k + 1 != nn {
                                    p += z * *V.get((i, k + 2));
                                    V.sub_element_mut((i, k + 2), p * r);
                                }
                                V.sub_element_mut((i, k + 1), p * q);
                                V.sub_element_mut((i, k), p);
                            }
                        }
                    }
                }
            }
            if l + 1 >= nn {
                break;
            }
        }
    }
    // All roots found; back-substitute to obtain the eigenvectors.
    if anorm != T::zero() {
        for nn in (0..n).rev() {
            p = d[nn];
            q = e[nn];
            // NOTE(review): wrapping_sub keeps nn = 0 from panicking here.
            // `na` is only read in the complex branches, which presumably
            // cannot be reached at nn = 0 since complex eigenvalues come in
            // pairs — confirm against the reference implementation.
            let na = nn.wrapping_sub(1);
            if q == T::zero() {
                // Real eigenvector.
                let mut m = nn;
                A.set((nn, nn), T::one());
                if nn > 0 {
                    let mut i = nn - 1;
                    loop {
                        let w = *A.get((i, i)) - p;
                        r = T::zero();
                        for j in m..=nn {
                            r += *A.get((i, j)) * *A.get((j, nn));
                        }
                        if e[i] < T::zero() {
                            z = w;
                            s = r;
                        } else {
                            m = i;
                            if e[i] == T::zero() {
                                t = w;
                                if t == T::zero() {
                                    // Guard against division by zero.
                                    t = T::epsilon() * anorm;
                                }
                                A.set((i, nn), -r / t);
                            } else {
                                // Solve the 2x2 block of a complex pair.
                                let x = *A.get((i, i + 1));
                                let y = *A.get((i + 1, i));
                                q = (d[i] - p).powf(T::two()) + e[i].powf(T::two());
                                t = (x * s - z * r) / q;
                                A.set((i, nn), t);
                                if x.abs() > z.abs() {
                                    A.set((i + 1, nn), (-r - w * t) / x);
                                } else {
                                    A.set((i + 1, nn), (-s - y * t) / z);
                                }
                            }
                            // Overflow control.
                            t = A.get((i, nn)).abs();
                            if T::epsilon() * t * t > T::one() {
                                for j in i..=nn {
                                    A.div_element_mut((j, nn), t);
                                }
                            }
                        }
                        if i == 0 {
                            break;
                        } else {
                            i -= 1;
                        }
                    }
                }
            } else if q < T::zero() {
                // Complex eigenvector; only the member of the pair with
                // negative imaginary part is processed.
                let mut m = na;
                if A.get((nn, na)).abs() > A.get((na, nn)).abs() {
                    A.set((na, na), q / *A.get((nn, na)));
                    A.set((na, nn), -(*A.get((nn, nn)) - p) / *A.get((nn, na)));
                } else {
                    let temp = Complex::new(T::zero(), -*A.get((na, nn)))
                        / Complex::new(*A.get((na, na)) - p, q);
                    A.set((na, na), temp.re);
                    A.set((na, nn), temp.im);
                }
                A.set((nn, na), T::zero());
                A.set((nn, nn), T::one());
                if nn >= 2 {
                    for i in (0..nn - 1).rev() {
                        let w = *A.get((i, i)) - p;
                        let mut ra = T::zero();
                        let mut sa = T::zero();
                        for j in m..=nn {
                            ra += *A.get((i, j)) * *A.get((j, na));
                            sa += *A.get((i, j)) * *A.get((j, nn));
                        }
                        if e[i] < T::zero() {
                            z = w;
                            r = ra;
                            s = sa;
                        } else {
                            m = i;
                            if e[i] == T::zero() {
                                let temp = Complex::new(-ra, -sa) / Complex::new(w, q);
                                A.set((i, na), temp.re);
                                A.set((i, nn), temp.im);
                            } else {
                                // Solve the complex 2x2 system.
                                let x = *A.get((i, i + 1));
                                let y = *A.get((i + 1, i));
                                let mut vr =
                                    (d[i] - p).powf(T::two()) + (e[i]).powf(T::two()) - q * q;
                                let vi = T::two() * q * (d[i] - p);
                                if vr == T::zero() && vi == T::zero() {
                                    // Guard against a vanishing denominator.
                                    vr = T::epsilon()
                                        * anorm
                                        * (w.abs() + q.abs() + x.abs() + y.abs() + z.abs());
                                }
                                let temp =
                                    Complex::new(x * r - z * ra + q * sa, x * s - z * sa - q * ra)
                                        / Complex::new(vr, vi);
                                A.set((i, na), temp.re);
                                A.set((i, nn), temp.im);
                                if x.abs() > z.abs() + q.abs() {
                                    A.set(
                                        (i + 1, na),
                                        (-ra - w * *A.get((i, na)) + q * *A.get((i, nn))) / x,
                                    );
                                    A.set(
                                        (i + 1, nn),
                                        (-sa - w * *A.get((i, nn)) - q * *A.get((i, na))) / x,
                                    );
                                } else {
                                    let temp = Complex::new(
                                        -r - y * *A.get((i, na)),
                                        -s - y * *A.get((i, nn)),
                                    ) / Complex::new(z, q);
                                    A.set((i + 1, na), temp.re);
                                    A.set((i + 1, nn), temp.im);
                                }
                            }
                        }
                        // Overflow control.
                        t = T::max(A.get((i, na)).abs(), A.get((i, nn)).abs());
                        if T::epsilon() * t * t > T::one() {
                            for j in i..=nn {
                                A.div_element_mut((j, na), t);
                                A.div_element_mut((j, nn), t);
                            }
                        }
                    }
                }
            }
        }
        // Multiply by the accumulated transformation to obtain the vectors
        // of the full matrix: V := V * (upper triangle of A).
        for j in (0..n).rev() {
            for i in 0..n {
                z = T::zero();
                for k in 0..=j {
                    z += *V.get((i, k)) * *A.get((k, j));
                }
                V.set((i, j), z);
            }
        }
    }
}
/// Undoes the `balance` transformation on the eigenvectors: multiplies each
/// row of `V` by the scale factor recorded for that row.
fn balbak<T: Number + RealNumber, M: Array2<T>>(V: &mut M, scale: &[T]) {
    let (n, _) = V.shape();
    for row in 0..n {
        let factor = scale[row];
        for col in 0..n {
            V.mul_element_mut((row, col), factor);
        }
    }
}
/// Insertion sort of eigenvalues in `d` by real part, descending, carrying
/// the imaginary parts in `e` and the eigenvector columns of `V` along with
/// each move.
fn sort<T: Number + RealNumber, M: Array2<T>>(d: &mut [T], e: &mut [T], V: &mut M) {
    let n = d.len();
    // Scratch column for the eigenvector being repositioned.
    let mut temp = vec![T::zero(); n];
    for j in 1..n {
        let real = d[j];
        let img = e[j];
        for (k, temp_k) in temp.iter_mut().enumerate().take(n) {
            *temp_k = *V.get((k, j));
        }
        let mut i = j as i32 - 1;
        while i >= 0 {
            // NOTE(review): after the first shift (which writes d[j]), this
            // compares against the shifted value rather than the saved
            // `real` — confirm against the reference implementation whether
            // `real` was intended here.
            if d[i as usize] >= d[j] {
                break;
            }
            d[i as usize + 1] = d[i as usize];
            e[i as usize + 1] = e[i as usize];
            for k in 0..n {
                V.set((k, i as usize + 1), *V.get((k, i as usize)));
            }
            i -= 1;
        }
        // NOTE(review): if the loop runs down to i == -1, `i as usize + 1`
        // overflows (panics in debug builds) instead of yielding index 0;
        // `(i + 1) as usize` would be safe — verify reachability.
        d[i as usize + 1] = real;
        e[i as usize + 1] = img;
        for (k, temp_k) in temp.iter().enumerate().take(n) {
            V.set((k, i as usize + 1), *temp_k);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::basic::matrix::DenseMatrix;
    use approx::relative_eq;

    /// Symmetric input: all eigenvalues real, imaginary parts exactly zero.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn decompose_symmetric() {
        let input = DenseMatrix::from_2d_array(&[
            &[0.9000, 0.4000, 0.7000],
            &[0.4000, 0.5000, 0.3000],
            &[0.7000, 0.3000, 0.8000],
        ]);
        let expected_values: Vec<f64> = vec![1.7498382, 0.3165784, 0.1335834];
        let expected_vectors = DenseMatrix::from_2d_array(&[
            &[0.6881997, -0.07121225, 0.7220180],
            &[0.3700456, 0.89044952, -0.2648886],
            &[0.6240573, -0.44947578, -0.6391588],
        ]);
        let evd = input.evd(true).unwrap();
        assert!(relative_eq!(
            expected_vectors.abs(),
            evd.V.abs(),
            epsilon = 1e-4
        ));
        for (expected, actual) in expected_values.iter().zip(evd.d.iter()) {
            assert!((expected - actual).abs() < 1e-4);
        }
        for actual in evd.e.iter().take(expected_values.len()) {
            assert!((0f64 - actual).abs() < std::f64::EPSILON);
        }
    }

    /// Asymmetric input with purely real spectrum.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn decompose_asymmetric() {
        let input = DenseMatrix::from_2d_array(&[
            &[0.9000, 0.4000, 0.7000],
            &[0.4000, 0.5000, 0.3000],
            &[0.8000, 0.3000, 0.8000],
        ]);
        let expected_values: Vec<f64> = vec![1.79171122, 0.31908143, 0.08920735];
        let expected_vectors = DenseMatrix::from_2d_array(&[
            &[0.7178958, 0.05322098, 0.6812010],
            &[0.3837711, -0.84702111, -0.1494582],
            &[0.6952105, 0.43984484, -0.7036135],
        ]);
        let evd = input.evd(false).unwrap();
        assert!(relative_eq!(
            expected_vectors.abs(),
            evd.V.abs(),
            epsilon = 1e-4
        ));
        for (expected, actual) in expected_values.iter().zip(evd.d.iter()) {
            assert!((expected - actual).abs() < 1e-4);
        }
        for actual in evd.e.iter().take(expected_values.len()) {
            assert!((0f64 - actual).abs() < std::f64::EPSILON);
        }
    }

    /// Input with complex conjugate eigenvalue pairs.
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn decompose_complex() {
        let input = DenseMatrix::from_2d_array(&[
            &[3.0, -2.0, 1.0, 1.0],
            &[4.0, -1.0, 1.0, 1.0],
            &[1.0, 1.0, 3.0, -2.0],
            &[1.0, 1.0, 4.0, -1.0],
        ]);
        let expected_real: Vec<f64> = vec![0.0, 2.0, 2.0, 0.0];
        let expected_imag: Vec<f64> = vec![2.2361, 0.9999, -0.9999, -2.2361];
        let expected_vectors = DenseMatrix::from_2d_array(&[
            &[-0.9159, -0.1378, 0.3816, -0.0806],
            &[-0.6707, 0.1059, 0.901, 0.6289],
            &[0.9159, -0.1378, 0.3816, 0.0806],
            &[0.6707, 0.1059, 0.901, -0.6289],
        ]);
        let evd = input.evd(false).unwrap();
        assert!(relative_eq!(
            expected_vectors.abs(),
            evd.V.abs(),
            epsilon = 1e-4
        ));
        for (expected, actual) in expected_real.iter().zip(evd.d.iter()) {
            assert!((expected - actual).abs() < 1e-4);
        }
        for (expected, actual) in expected_imag.iter().zip(evd.e.iter()) {
            assert!((expected - actual).abs() < 1e-4);
        }
    }
}
+33
View File
@@ -0,0 +1,33 @@
//! In this module you will find composite of matrix operations that are used elsewhere
//! for improved efficiency.
use crate::linalg::basic::arrays::Array2;
use crate::numbers::basenum::Number;
/// High order matrix operations.
pub trait HighOrderOperations<T: Number>: Array2<T> {
    /// Computes the matrix product of `self` and `b`, optionally
    /// transposing either operand first:
    ///
    /// * `a_transpose` - if `true`, multiply by `self`ᵀ instead of `self`
    /// * `b_transpose` - if `true`, multiply by `b`ᵀ instead of `b`
    ///
    /// The example below computes `aᵀ * b`:
    ///
    /// ```
    /// use smartcore::linalg::basic::matrix::*;
    /// use smartcore::linalg::traits::high_order::HighOrderOperations;
    /// use smartcore::linalg::basic::arrays::Array2;
    ///
    /// let a = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]);
    /// let b = DenseMatrix::from_2d_array(&[&[5., 6.], &[7., 8.], &[9., 10.]]);
    /// let expected = DenseMatrix::from_2d_array(&[&[71., 80.], &[92., 104.]]);
    ///
    /// assert_eq!(a.ab(true, &b, false), expected);
    /// ```
    fn ab(&self, a_transpose: bool, b: &Self, b_transpose: bool) -> Self {
        match (a_transpose, b_transpose) {
            // AᵀBᵀ = (BA)ᵀ: multiply in swapped order and transpose once,
            // instead of materializing two transposed operands.
            (true, true) => b.matmul(self).transpose(),
            (false, true) => self.matmul(&b.transpose()),
            (true, false) => self.transpose().matmul(b),
            (false, false) => self.matmul(b),
        }
    }
}
// Gate the (currently empty) test module out of non-test builds, matching
// the other test modules in this crate.
#[cfg(test)]
mod tests {
    /* TODO: Add tests */
}
+287
View File
@@ -0,0 +1,287 @@
//! # LU Decomposition
//!
//! Decomposes a square matrix into a product of two triangular matrices:
//!
//! \\[A = LU\\]
//!
//! where \\(U\\) is an upper triangular matrix and \\(L\\) is a lower triangular matrix.
//! The LU decomposition is used to obtain more efficient solutions to equations of the form
//!
//! \\[Ax = b\\]
//!
//! Example:
//! ```
//! use smartcore::linalg::basic::matrix::DenseMatrix;
//! use smartcore::linalg::traits::lu::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[1., 2., 3.],
//! &[0., 1., 5.],
//! &[5., 6., 0.]
//! ]);
//!
//! let lu = A.lu().unwrap();
//! let lower: DenseMatrix<f64> = lu.L();
//! let upper: DenseMatrix<f64> = lu.U();
//! ```
//!
//! ## References:
//! * ["No bullshit guide to linear algebra", Ivan Savov, 2016, 7.6 Matrix decompositions](https://minireference.com/)
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., 2.3.1 Performing the LU Decomposition](http://numerical.recipes/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
#![allow(non_snake_case)]
use std::cmp::Ordering;
use std::fmt::Debug;
use std::marker::PhantomData;
use crate::error::Failed;
use crate::linalg::basic::arrays::Array2;
use crate::numbers::basenum::Number;
use crate::numbers::realnum::RealNumber;
#[derive(Debug, Clone)]
/// Result of LU decomposition.
pub struct LU<T: Number + RealNumber, M: Array2<T>> {
    /// Combined storage for both factors: the strict lower triangle holds L
    /// (its unit diagonal is implicit), the upper triangle holds U.
    LU: M,
    /// Row permutation from partial pivoting: `pivot[i]` is the original
    /// row index now located at row `i`.
    pivot: Vec<usize>,
    /// +1 or -1 depending on the parity of the row interchanges; currently
    /// unused (would give the sign of the determinant).
    #[allow(dead_code)]
    pivot_sign: i8,
    /// True when U has a zero on its diagonal; `solve`/`inverse` cannot
    /// proceed in that case.
    singular: bool,
    /// Marker tying the element type `T` to the struct (no runtime data).
    phantom: PhantomData<T>,
}
impl<T: Number + RealNumber, M: Array2<T>> LU<T, M> {
    /// Wraps a factored matrix; scans the diagonal of U to flag singularity.
    pub(crate) fn new(LU: M, pivot: Vec<usize>, pivot_sign: i8) -> LU<T, M> {
        let (_, n) = LU.shape();
        let mut singular = false;
        for j in 0..n {
            if LU.get((j, j)) == &T::zero() {
                singular = true;
                break;
            }
        }
        LU {
            LU,
            pivot,
            pivot_sign,
            singular,
            phantom: PhantomData,
        }
    }

    /// Get lower triangular matrix
    pub fn L(&self) -> M {
        let (n_rows, n_cols) = self.LU.shape();
        let mut L = M::zeros(n_rows, n_cols);
        for i in 0..n_rows {
            for j in 0..n_cols {
                // Strict lower triangle comes from storage; the diagonal of
                // L is implicitly all ones.
                match i.cmp(&j) {
                    Ordering::Greater => L.set((i, j), *self.LU.get((i, j))),
                    Ordering::Equal => L.set((i, j), T::one()),
                    Ordering::Less => L.set((i, j), T::zero()),
                }
            }
        }
        L
    }

    /// Get upper triangular matrix
    pub fn U(&self) -> M {
        let (n_rows, n_cols) = self.LU.shape();
        let mut U = M::zeros(n_rows, n_cols);
        for i in 0..n_rows {
            for j in 0..n_cols {
                if i <= j {
                    U.set((i, j), *self.LU.get((i, j)));
                } else {
                    U.set((i, j), T::zero());
                }
            }
        }
        U
    }

    /// Pivot vector
    /// Returns the row permutation as a square 0/1 matrix `P` with
    /// `P[i][pivot[i]] = 1`.
    pub fn pivot(&self) -> M {
        let (_, n) = self.LU.shape();
        let mut piv = M::zeros(n, n);
        for i in 0..n {
            piv.set((i, self.pivot[i]), T::one());
        }
        piv
    }

    /// Returns matrix inverse
    ///
    /// # Panics
    /// Panics if the factored matrix is not square, or (via `solve`) if it
    /// is singular.
    pub fn inverse(&self) -> Result<M, Failed> {
        let (m, n) = self.LU.shape();
        if m != n {
            panic!("Matrix is not square: {}x{}", m, n);
        }
        // Solve A * X = I column by column.
        let mut inv = M::zeros(n, n);
        for i in 0..n {
            inv.set((i, i), T::one());
        }
        self.solve(inv)
    }

    /// Solves `A * X = B` for each column of `b` using the stored permuted
    /// factors; `b` is overwritten with the solution and returned.
    ///
    /// # Panics
    /// Panics if `b` has a different number of rows than the factored
    /// matrix, or if the matrix is singular.
    fn solve(&self, mut b: M) -> Result<M, Failed> {
        let (m, n) = self.LU.shape();
        let (b_m, b_n) = b.shape();
        if b_m != m {
            panic!(
                "Row dimensions do not agree: A is {} x {}, but B is {} x {}",
                m, n, b_m, b_n
            );
        }
        if self.singular {
            panic!("Matrix is singular.");
        }
        // Apply the row permutation: X = P * B.
        let mut X = M::zeros(b_m, b_n);
        for j in 0..b_n {
            for i in 0..m {
                X.set((i, j), *b.get((self.pivot[i], j)));
            }
        }
        // Forward substitution: solve L * Y = X (unit diagonal of L implied).
        for k in 0..n {
            for i in k + 1..n {
                for j in 0..b_n {
                    X.sub_element_mut((i, j), *X.get((k, j)) * *self.LU.get((i, k)));
                }
            }
        }
        // Back substitution: solve U * X = Y.
        for k in (0..n).rev() {
            for j in 0..b_n {
                X.div_element_mut((k, j), *self.LU.get((k, k)));
            }
            for i in 0..k {
                for j in 0..b_n {
                    X.sub_element_mut((i, j), *X.get((k, j)) * *self.LU.get((i, k)));
                }
            }
        }
        // Copy the solution back into b so the caller's buffer is reused.
        for j in 0..b_n {
            for i in 0..m {
                b.set((i, j), *X.get((i, j)));
            }
        }
        Ok(b)
    }
}
/// Trait that implements LU decomposition routine for any matrix.
pub trait LUDecomposable<T: Number + RealNumber>: Array2<T> {
    /// Compute the LU decomposition of a square matrix.
    fn lu(&self) -> Result<LU<T, Self>, Failed> {
        self.clone().lu_mut()
    }

    /// Compute the LU decomposition of a square matrix. The input matrix
    /// will be used for factorization.
    fn lu_mut(mut self) -> Result<LU<T, Self>, Failed> {
        let (m, n) = self.shape();
        // Row permutation record; starts as the identity permutation.
        let mut piv = (0..m).collect::<Vec<_>>();
        // Tracks the parity of the permutation (flipped on each swap).
        let mut pivsign = 1;
        // Scratch copy of the current column.
        let mut LUcolj = vec![T::zero(); m];
        for j in 0..n {
            for (i, LUcolj_i) in LUcolj.iter_mut().enumerate().take(m) {
                *LUcolj_i = *self.get((i, j));
            }
            // Apply the previously computed transformations to this column.
            for i in 0..m {
                let kmax = usize::min(i, j);
                let mut s = T::zero();
                for (k, LUcolj_k) in LUcolj.iter().enumerate().take(kmax) {
                    s += *self.get((i, k)) * (*LUcolj_k);
                }
                LUcolj[i] -= s;
                self.set((i, j), LUcolj[i]);
            }
            // Partial pivoting: pick the largest remaining element of the
            // column and swap its row into the pivot position.
            let mut p = j;
            for i in j + 1..m {
                if LUcolj[i].abs() > LUcolj[p].abs() {
                    p = i;
                }
            }
            if p != j {
                for k in 0..n {
                    self.swap((p, k), (j, k));
                }
                piv.swap(p, j);
                pivsign = -pivsign;
            }
            // Compute the multipliers below the pivot.
            if j < m && self.get((j, j)) != &T::zero() {
                for i in j + 1..m {
                    self.div_element_mut((i, j), *self.get((j, j)));
                }
            }
        }
        Ok(LU::new(self, piv, pivsign))
    }

    /// Solves Ax = b
    fn lu_solve_mut(self, b: Self) -> Result<Self, Failed> {
        self.lu_mut().and_then(|lu| lu.solve(b))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::basic::matrix::DenseMatrix;
    use approx::relative_eq;
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn decompose() {
        // Checks the individual factors: P * A = L * U with row pivoting.
        let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
        let expected_L =
            DenseMatrix::from_2d_array(&[&[1., 0., 0.], &[0., 1., 0.], &[0.2, 0.8, 1.]]);
        let expected_U =
            DenseMatrix::from_2d_array(&[&[5., 6., 0.], &[0., 1., 5.], &[0., 0., -1.]]);
        let expected_pivot =
            DenseMatrix::from_2d_array(&[&[0., 0., 1.], &[0., 1., 0.], &[1., 0., 0.]]);
        let lu = a.lu().unwrap();
        assert!(relative_eq!(lu.L(), expected_L, epsilon = 1e-4));
        assert!(relative_eq!(lu.U(), expected_U, epsilon = 1e-4));
        assert!(relative_eq!(lu.pivot(), expected_pivot, epsilon = 1e-4));
    }
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn inverse() {
        // A * A^-1 = I; compares against a precomputed inverse.
        let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
        let expected =
            DenseMatrix::from_2d_array(&[&[-6.0, 3.6, 1.4], &[5.0, -3.0, -1.0], &[-1.0, 0.8, 0.2]]);
        let a_inv = a.lu().and_then(|lu| lu.inverse()).unwrap();
        assert!(relative_eq!(a_inv, expected, epsilon = 1e-4));
    }
}
+15
View File
@@ -0,0 +1,15 @@
#![allow(clippy::wrong_self_convention)]
/// Cholesky matrix decomposition.
pub mod cholesky;
/// The matrix is represented in terms of its eigenvalues and eigenvectors.
pub mod evd;
/// High-order operations on matrices.
pub mod high_order;
/// Factors a matrix as the product of a lower triangular matrix and an upper triangular matrix.
pub mod lu;
/// QR factorization that factors a matrix into a product of an orthogonal matrix and an upper triangular matrix.
pub mod qr;
/// Statistical tools for DenseMatrix.
pub mod stats;
/// Singular value decomposition.
pub mod svd;
+233
View File
@@ -0,0 +1,233 @@
//! # QR Decomposition
//!
//! Any real square matrix \\(A \in R^{n \times n}\\) can be decomposed as a product of an orthogonal matrix \\(Q\\) and an upper triangular matrix \\(R\\):
//!
//! \\[A = QR\\]
//!
//! Example:
//! ```
//! use smartcore::linalg::basic::matrix::DenseMatrix;
//! use smartcore::linalg::traits::qr::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[0.9, 0.4, 0.7],
//! &[0.4, 0.5, 0.3],
//! &[0.7, 0.3, 0.8]
//! ]);
//!
//! let qr = A.qr().unwrap();
//! let orthogonal: DenseMatrix<f64> = qr.Q();
//! let triangular: DenseMatrix<f64> = qr.R();
//! ```
//!
//! ## References:
//! * ["No bullshit guide to linear algebra", Ivan Savov, 2016, 7.6 Matrix decompositions](https://minireference.com/)
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., 2.10 QR Decomposition](http://numerical.recipes/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
#![allow(non_snake_case)]
use std::fmt::Debug;
use crate::error::Failed;
use crate::linalg::basic::arrays::Array2;
use crate::numbers::basenum::Number;
use crate::numbers::realnum::RealNumber;
#[derive(Debug, Clone)]
/// Results of QR decomposition.
pub struct QR<T: Number + RealNumber, M: Array2<T>> {
    // Packed factorization: Householder vectors in the lower part,
    // off-diagonal entries of R in the strictly upper part.
    QR: M,
    // Diagonal entries of R, stored separately from the packed matrix.
    tau: Vec<T>,
    // True when any diagonal entry of R is zero, i.e. A is rank deficient.
    singular: bool,
}
impl<T: Number + RealNumber, M: Array2<T>> QR<T, M> {
    /// Wraps the packed factorization `QR` and the diagonal of `R` (`tau`).
    /// The decomposition is flagged singular when any diagonal entry of `R`
    /// is zero (the matrix is rank deficient).
    pub(crate) fn new(QR: M, tau: Vec<T>) -> QR<T, M> {
        let singular = tau.iter().any(|tau_elem| *tau_elem == T::zero());
        QR { QR, tau, singular }
    }
    /// Get upper triangular matrix.
    pub fn R(&self) -> M {
        let (_, n) = self.QR.shape();
        let mut R = M::zeros(n, n);
        for i in 0..n {
            // The diagonal of R is stored separately in `tau`; the entries
            // above the diagonal live in the packed `QR` matrix.
            R.set((i, i), self.tau[i]);
            for j in i + 1..n {
                R.set((i, j), *self.QR.get((i, j)));
            }
        }
        R
    }
    /// Get an orthogonal matrix.
    pub fn Q(&self) -> M {
        let (m, n) = self.QR.shape();
        let mut Q = M::zeros(m, n);
        // Accumulate the Householder reflectors from the last column backwards.
        // Using a reverse range instead of the previous manual `k -= 1` loop
        // avoids a usize underflow (`n - 1`) when the matrix has zero columns.
        for k in (0..n).rev() {
            Q.set((k, k), T::one());
            for j in k..n {
                if self.QR.get((k, k)) != &T::zero() {
                    let mut s = T::zero();
                    for i in k..m {
                        s += *self.QR.get((i, k)) * *Q.get((i, j));
                    }
                    s = -s / *self.QR.get((k, k));
                    for i in k..m {
                        Q.add_element_mut((i, j), s * *self.QR.get((i, k)));
                    }
                }
            }
        }
        Q
    }
    /// Solves `A * X = B` in place: applies the Householder reflectors to
    /// compute `Q^T * B`, then back-substitutes with `R`. Returns `b` holding
    /// the solution.
    ///
    /// # Panics
    /// Panics if `B` has a different number of rows than `A`, or if the
    /// decomposition is rank deficient.
    fn solve(&self, mut b: M) -> Result<M, Failed> {
        let (m, n) = self.QR.shape();
        let (b_nrows, b_ncols) = b.shape();
        if b_nrows != m {
            panic!(
                "Row dimensions do not agree: A is {} x {}, but B is {} x {}",
                m, n, b_nrows, b_ncols
            );
        }
        if self.singular {
            panic!("Matrix is rank deficient.");
        }
        // Compute Q^T * B in place by applying each Householder reflector.
        for k in 0..n {
            for j in 0..b_ncols {
                let mut s = T::zero();
                for i in k..m {
                    s += *self.QR.get((i, k)) * *b.get((i, j));
                }
                s = -s / *self.QR.get((k, k));
                for i in k..m {
                    b.add_element_mut((i, j), s * *self.QR.get((i, k)));
                }
            }
        }
        // Back substitution to solve R * X = Q^T * B; `tau` holds R's diagonal.
        for k in (0..n).rev() {
            for j in 0..b_ncols {
                b.set((k, j), *b.get((k, j)) / self.tau[k]);
            }
            for i in 0..k {
                for j in 0..b_ncols {
                    b.sub_element_mut((i, j), *b.get((k, j)) * *self.QR.get((i, k)));
                }
            }
        }
        Ok(b)
    }
}
/// Trait that implements QR decomposition routine for any matrix.
pub trait QRDecomposable<T: Number + RealNumber>: Array2<T> {
    /// Compute the QR decomposition of a matrix.
    /// The input matrix is cloned; see [`QRDecomposable::qr_mut`] for the
    /// in-place variant.
    fn qr(&self) -> Result<QR<T, Self>, Failed> {
        self.clone().qr_mut()
    }
    /// Compute the QR decomposition of a matrix. The input matrix
    /// will be used for factorization.
    ///
    /// Uses Householder reflections: after the loop the matrix holds the
    /// Householder vectors (lower part) and the strictly upper part of `R`;
    /// the diagonal of `R` is returned separately in `r_diagonal`.
    fn qr_mut(mut self) -> Result<QR<T, Self>, Failed> {
        let (m, n) = self.shape();
        let mut r_diagonal: Vec<T> = vec![T::zero(); n];
        for (k, r_diagonal_k) in r_diagonal.iter_mut().enumerate().take(n) {
            // 2-norm of the k-th column below the diagonal, accumulated with
            // hypot to avoid overflow/underflow.
            let mut nrm = T::zero();
            for i in k..m {
                nrm = nrm.hypot(*self.get((i, k)));
            }
            if nrm.abs() > T::epsilon() {
                // Form the k-th Householder vector in place; the sign choice
                // avoids cancellation.
                if self.get((k, k)) < &T::zero() {
                    nrm = -nrm;
                }
                for i in k..m {
                    self.div_element_mut((i, k), nrm);
                }
                self.add_element_mut((k, k), T::one());
                // Apply the reflector to the remaining columns.
                for j in k + 1..n {
                    let mut s = T::zero();
                    for i in k..m {
                        s += *self.get((i, k)) * *self.get((i, j));
                    }
                    s = -s / *self.get((k, k));
                    for i in k..m {
                        self.add_element_mut((i, j), s * *self.get((i, k)));
                    }
                }
            }
            // A (near-)zero diagonal entry here marks the matrix rank deficient.
            *r_diagonal_k = -nrm;
        }
        Ok(QR::new(self, r_diagonal))
    }
    /// Solves Ax = b
    fn qr_solve_mut(self, b: Self) -> Result<Self, Failed> {
        self.qr_mut().and_then(|qr| qr.solve(b))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::basic::matrix::DenseMatrix;
    use approx::relative_eq;
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn decompose() {
        let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
        let q = DenseMatrix::from_2d_array(&[
            &[-0.7448, 0.2436, 0.6212],
            &[-0.331, -0.9432, -0.027],
            &[-0.5793, 0.2257, -0.7832],
        ]);
        let r = DenseMatrix::from_2d_array(&[
            &[-1.2083, -0.6373, -1.0842],
            &[0.0, -0.3064, 0.0682],
            &[0.0, 0.0, -0.1999],
        ]);
        let qr = a.qr().unwrap();
        // Q and R are unique only up to a sign, so compare absolute values.
        assert!(relative_eq!(qr.Q().abs(), q.abs(), epsilon = 1e-4));
        assert!(relative_eq!(qr.R().abs(), r.abs(), epsilon = 1e-4));
    }
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn qr_solve_mut() {
        // Solves A * W = B for two right-hand-side columns at once.
        let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
        let b = DenseMatrix::from_2d_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]);
        let expected_w = DenseMatrix::from_2d_array(&[
            &[-0.2027027, -1.2837838],
            &[0.8783784, 2.2297297],
            &[0.4729730, 0.6621622],
        ]);
        let w = a.qr_solve_mut(b).unwrap();
        assert!(relative_eq!(w, expected_w, epsilon = 1e-2));
    }
}
+294
View File
@@ -0,0 +1,294 @@
//! # Various Statistical Methods
//!
//! This module provides reference implementations for various statistical functions.
//! Concrete implementations of the `BaseMatrix` trait are free to override these methods for better performance.
//! These methods should be used when dealing with `DenseMatrix`. Use the ones in `linalg::arrays` for `Array` types.
use crate::linalg::basic::arrays::{Array2, ArrayView2, MutArrayView2};
use crate::numbers::realnum::RealNumber;
/// Defines baseline implementations for various statistical functions
pub trait MatrixStats<T: RealNumber>: ArrayView2<T> + Array2<T> {
    /// Computes the arithmetic mean along the specified axis.
    /// `axis == 0` averages each column; any other value averages each row.
    fn mean(&self, axis: u8) -> Vec<T> {
        // `n` is the length of the output: number of columns for axis 0,
        // number of rows otherwise.
        let (n, _m) = match axis {
            0 => {
                let (n, m) = self.shape();
                (m, n)
            }
            _ => self.shape(),
        };
        let mut x: Vec<T> = vec![T::zero(); n];
        for (i, x_i) in x.iter_mut().enumerate().take(n) {
            let vec = match axis {
                0 => self.get_col(i).iterator(0).copied().collect::<Vec<T>>(),
                _ => self.get_row(i).iterator(0).copied().collect::<Vec<T>>(),
            };
            *x_i = Self::_mean_of_vector(&vec[..]);
        }
        x
    }
    /// Computes variance along the specified axis.
    /// Note: this is the population variance (division by `n`, not `n - 1`).
    fn var(&self, axis: u8) -> Vec<T> {
        let (n, _m) = match axis {
            0 => {
                let (n, m) = self.shape();
                (m, n)
            }
            _ => self.shape(),
        };
        let mut x: Vec<T> = vec![T::zero(); n];
        for (i, x_i) in x.iter_mut().enumerate().take(n) {
            let vec = match axis {
                0 => self.get_col(i).iterator(0).copied().collect::<Vec<T>>(),
                _ => self.get_row(i).iterator(0).copied().collect::<Vec<T>>(),
            };
            *x_i = Self::_var_of_vec(&vec[..], Option::None);
        }
        x
    }
    /// Computes the standard deviation along the specified axis
    /// (square root of the population variance).
    fn std(&self, axis: u8) -> Vec<T> {
        let mut x = Self::var(self, axis);
        let n = match axis {
            0 => self.shape().1,
            _ => self.shape().0,
        };
        for x_i in x.iter_mut().take(n) {
            *x_i = x_i.sqrt();
        }
        x
    }
    /// Arithmetic mean of a slice.
    /// (reference)[http://en.wikipedia.org/wiki/Arithmetic_mean]
    /// Taken from statistical
    /// The MIT License (MIT)
    /// Copyright (c) 2015 Jeff Belgum
    fn _mean_of_vector(v: &[T]) -> T {
        let len = num::cast(v.len()).unwrap();
        v.iter().fold(T::zero(), |acc: T, elem| acc + *elem) / len
    }
    /// Sum of squared deviations from `c` (or from the mean when `c` is `None`).
    /// Taken from statistical
    /// The MIT License (MIT)
    /// Copyright (c) 2015 Jeff Belgum
    fn _sum_square_deviations_vec(v: &[T], c: Option<T>) -> T {
        let c = match c {
            Some(c) => c,
            None => Self::_mean_of_vector(v),
        };
        let sum = v
            .iter()
            .map(|x| (*x - c) * (*x - c))
            .fold(T::zero(), |acc, elem| acc + elem);
        assert!(sum >= T::zero(), "negative sum of square root deviations");
        sum
    }
    /// Variance of a slice around `xbar` (or around its mean when `None`).
    /// NOTE(review): despite the link below, this divides by `n` and is the
    /// population variance, not the Bessel-corrected sample variance.
    /// (Sample variance)[http://en.wikipedia.org/wiki/Variance#Sample_variance]
    /// Taken from statistical
    /// The MIT License (MIT)
    /// Copyright (c) 2015 Jeff Belgum
    fn _var_of_vec(v: &[T], xbar: Option<T>) -> T {
        assert!(v.len() > 1, "variance requires at least two data points");
        let len: T = num::cast(v.len()).unwrap();
        let sum = Self::_sum_square_deviations_vec(v, xbar);
        sum / len
    }
    /// standardize values by removing the mean and scaling to unit variance
    /// `mean[i]`/`std[i]` apply per column for `axis == 0`, per row otherwise.
    fn standard_scale_mut(&mut self, mean: &[T], std: &[T], axis: u8) {
        let (n, m) = match axis {
            0 => {
                let (n, m) = self.shape();
                (m, n)
            }
            _ => self.shape(),
        };
        for i in 0..n {
            for j in 0..m {
                match axis {
                    0 => self.set((j, i), (*self.get((j, i)) - mean[i]) / std[i]),
                    _ => self.set((i, j), (*self.get((i, j)) - mean[i]) / std[i]),
                }
            }
        }
    }
}
//TODO: this is processing. Should have its own "processing.rs" module
/// Defines baseline implementations for various matrix processing functions
pub trait MatrixPreprocessing<T: RealNumber>: MutArrayView2<T> + Clone {
    /// Each element of the matrix greater than the threshold becomes 1, while values less than or equal to the threshold become 0
    /// ```rust
    /// use smartcore::linalg::basic::matrix::DenseMatrix;
    /// use smartcore::linalg::traits::stats::MatrixPreprocessing;
    /// let mut a = DenseMatrix::from_2d_array(&[&[0., 2., 3.], &[-5., -6., -7.]]);
    /// let expected = DenseMatrix::from_2d_array(&[&[0., 1., 1.],&[0., 0., 0.]]);
    /// a.binarize_mut(0.);
    ///
    /// assert_eq!(a, expected);
    /// ```
    fn binarize_mut(&mut self, threshold: T) {
        let (nrows, ncols) = self.shape();
        for row in 0..nrows {
            for col in 0..ncols {
                // Strictly-greater comparison: values equal to the threshold map to 0.
                let binarized = if *self.get((row, col)) > threshold {
                    T::one()
                } else {
                    T::zero()
                };
                self.set((row, col), binarized);
            }
        }
    }
    /// Returns new matrix where elements are binarized according to a given threshold.
    /// ```rust
    /// use smartcore::linalg::basic::matrix::DenseMatrix;
    /// use smartcore::linalg::traits::stats::MatrixPreprocessing;
    /// let a = DenseMatrix::from_2d_array(&[&[0., 2., 3.], &[-5., -6., -7.]]);
    /// let expected = DenseMatrix::from_2d_array(&[&[0., 1., 1.],&[0., 0., 0.]]);
    ///
    /// assert_eq!(a.binarize(0.), expected);
    /// ```
    fn binarize(self, threshold: T) -> Self
    where
        Self: Sized,
    {
        // Consuming variant: delegate to the in-place implementation.
        let mut binarized = self;
        binarized.binarize_mut(threshold);
        binarized
    }
}
#[cfg(test)]
mod tests {
    use crate::linalg::basic::arrays::Array1;
    use crate::linalg::basic::matrix::DenseMatrix;
    use crate::linalg::traits::stats::MatrixStats;
    #[test]
    fn test_mean() {
        // axis 0 -> per-column means, axis 1 -> per-row means.
        let m = DenseMatrix::from_2d_array(&[
            &[1., 2., 3., 1., 2.],
            &[4., 5., 6., 3., 4.],
            &[7., 8., 9., 5., 6.],
        ]);
        let expected_0 = vec![4., 5., 6., 3., 4.];
        let expected_1 = vec![1.8, 4.4, 7.];
        assert_eq!(m.mean(0), expected_0);
        assert_eq!(m.mean(1), expected_1);
    }
    #[test]
    fn test_var() {
        // Expected values are population variances (division by n).
        let m = DenseMatrix::from_2d_array(&[&[1., 2., 3., 4.], &[5., 6., 7., 8.]]);
        let expected_0 = vec![4., 4., 4., 4.];
        let expected_1 = vec![1.25, 1.25];
        assert!(m.var(0).approximate_eq(&expected_0, 1e-6));
        assert!(m.var(1).approximate_eq(&expected_1, 1e-6));
        assert_eq!(m.mean(0), vec![3.0, 4.0, 5.0, 6.0]);
        assert_eq!(m.mean(1), vec![2.5, 6.5]);
    }
    #[test]
    fn test_var_other() {
        // Identical rows: zero variance per column, equal variance per row.
        let m = DenseMatrix::from_2d_array(&[
            &[0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25],
            &[0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25],
        ]);
        let expected_0 = vec![0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
        let expected_1 = vec![1.25, 1.25];
        assert!(m.var(0).approximate_eq(&expected_0, std::f64::EPSILON));
        assert!(m.var(1).approximate_eq(&expected_1, std::f64::EPSILON));
        assert_eq!(
            m.mean(0),
            vec![0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
        );
        assert_eq!(m.mean(1), vec![1.375, 1.375]);
    }
    #[test]
    fn test_std() {
        // std is the square root of the population variance.
        let m = DenseMatrix::from_2d_array(&[
            &[1., 2., 3., 1., 2.],
            &[4., 5., 6., 3., 4.],
            &[7., 8., 9., 5., 6.],
        ]);
        let expected_0 = vec![
            2.449489742783178,
            2.449489742783178,
            2.449489742783178,
            1.632993161855452,
            1.632993161855452,
        ];
        let expected_1 = vec![0.7483314773547883, 1.019803902718557, 1.4142135623730951];
        println!("{:?}", m.var(0));
        assert!(m.std(0).approximate_eq(&expected_0, f64::EPSILON));
        assert!(m.std(1).approximate_eq(&expected_1, f64::EPSILON));
        assert_eq!(m.mean(0), vec![4.0, 5.0, 6.0, 3.0, 4.0]);
        assert_eq!(m.mean(1), vec![1.8, 4.4, 7.0]);
    }
    #[test]
    fn test_scale() {
        // standard_scale_mut should produce zero-mean, unit-variance data
        // along the chosen axis.
        let m: DenseMatrix<f64> =
            DenseMatrix::from_2d_array(&[&[1., 2., 3., 4.], &[5., 6., 7., 8.]]);
        let expected_0: DenseMatrix<f64> =
            DenseMatrix::from_2d_array(&[&[-1., -1., -1., -1.], &[1., 1., 1., 1.]]);
        let expected_1: DenseMatrix<f64> = DenseMatrix::from_2d_array(&[
            &[
                -1.3416407864998738,
                -0.4472135954999579,
                0.4472135954999579,
                1.3416407864998738,
            ],
            &[
                -1.3416407864998738,
                -0.4472135954999579,
                0.4472135954999579,
                1.3416407864998738,
            ],
        ]);
        assert_eq!(m.mean(0), vec![3.0, 4.0, 5.0, 6.0]);
        assert_eq!(m.mean(1), vec![2.5, 6.5]);
        assert_eq!(m.var(0), vec![4., 4., 4., 4.]);
        assert_eq!(m.var(1), vec![1.25, 1.25]);
        assert_eq!(m.std(0), vec![2., 2., 2., 2.]);
        assert_eq!(m.std(1), vec![1.118033988749895, 1.118033988749895]);
        {
            let mut m = m.clone();
            m.standard_scale_mut(&m.mean(0), &m.std(0), 0);
            assert_eq!(&m, &expected_0);
        }
        {
            let mut m = m.clone();
            m.standard_scale_mut(&m.mean(1), &m.std(1), 1);
            assert_eq!(&m, &expected_1);
        }
    }
}
+738
View File
@@ -0,0 +1,738 @@
//! # SVD Decomposition
//!
//! Any _m_ by _n_ matrix \\(A\\) can be factored into:
//!
//! \\[A = U \Sigma V^T\\]
//!
//! Where columns of \\(U\\) are eigenvectors of \\(AA^T\\) (left-singular vectors of _A_),
//! \\(V\\) are eigenvectors of \\(A^TA\\) (right-singular vectors of _A_),
//! and the diagonal values in the \\(\Sigma\\) matrix are known as the singular values of the original matrix.
//!
//! Example:
//! ```
//! use smartcore::linalg::basic::matrix::DenseMatrix;
//! use smartcore::linalg::traits::svd::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[0.9, 0.4, 0.7],
//! &[0.4, 0.5, 0.3],
//! &[0.7, 0.3, 0.8]
//! ]);
//!
//! let svd = A.svd().unwrap();
//! let u: DenseMatrix<f64> = svd.U;
//! let v: DenseMatrix<f64> = svd.V;
//! let s: Vec<f64> = svd.s;
//! ```
//!
//! ## References:
//! * ["Linear Algebra and Its Applications", Gilbert Strang, 5th ed., 6.3 Singular Value Decomposition](https://www.academia.edu/32459792/_Strang_G_Linear_algebra_and_its_applications_4_5881001_PDF)
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., 2.6 Singular Value Decomposition](http://numerical.recipes/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
#![allow(non_snake_case)]
use crate::error::Failed;
use crate::linalg::basic::arrays::Array2;
use crate::numbers::basenum::Number;
use crate::numbers::realnum::RealNumber;
use std::fmt::Debug;
/// Results of SVD decomposition
#[derive(Debug, Clone)]
pub struct SVD<T: Number + RealNumber, M: SVDDecomposable<T>> {
    /// Left-singular vectors of _A_
    pub U: M,
    /// Right-singular vectors of _A_
    pub V: M,
    /// Singular values of the original matrix
    pub s: Vec<T>,
    /// Number of rows of `U` (rows of the original matrix).
    m: usize,
    /// Number of rows of `V` (columns of the original matrix).
    n: usize,
    /// Threshold below which a singular value is treated as zero in `solve`.
    tol: T,
}
impl<T: Number + RealNumber, M: SVDDecomposable<T>> SVD<T, M> {
    /// Builds the diagonal matrix `Sigma` whose diagonal holds the singular values.
    pub fn S(&self) -> M {
        let (nrows, ncols) = (self.U.shape().1, self.V.shape().0);
        let mut sigma = M::zeros(nrows, ncols);
        for (i, value) in self.s.iter().enumerate() {
            sigma.set((i, i), *value);
        }
        sigma
    }
}
/// Trait that implements SVD decomposition routine for any matrix.
pub trait SVDDecomposable<T: Number + RealNumber>: Array2<T> {
    /// Solves Ax = b. Overrides original matrix in the process.
    fn svd_solve_mut(self, b: Self) -> Result<Self, Failed> {
        self.svd_mut().and_then(|svd| svd.solve(b))
    }
    /// Solves Ax = b
    fn svd_solve(&self, b: Self) -> Result<Self, Failed> {
        self.svd().and_then(|svd| svd.solve(b))
    }
    /// Compute the SVD decomposition of a matrix.
    fn svd(&self) -> Result<SVD<T, Self>, Failed> {
        self.clone().svd_mut()
    }
    /// Compute the SVD decomposition of a matrix. The input matrix
    /// will be used for factorization.
    ///
    /// Follows the classic `svdcmp` algorithm (see Numerical Recipes §2.6,
    /// referenced in the module docs): Householder reduction to bidiagonal
    /// form, QR iterations with implicit shifts to diagonalize, then sorting
    /// of the singular values in descending order.
    fn svd_mut(self) -> Result<SVD<T, Self>, Failed> {
        // The input matrix is overwritten in place and becomes U.
        let mut U = self;
        let (m, n) = U.shape();
        let (mut l, mut nm) = (0usize, 0usize);
        let (mut anorm, mut g, mut scale) = (T::zero(), T::zero(), T::zero());
        let mut v = Self::zeros(n, n);
        // `w` accumulates the diagonal (future singular values),
        // `rv1` the superdiagonal of the bidiagonal form.
        let mut w = vec![T::zero(); n];
        let mut rv1 = vec![T::zero(); n];
        // Phase 1: Householder reduction to bidiagonal form.
        for i in 0..n {
            l = i + 2;
            rv1[i] = scale * g;
            g = T::zero();
            let mut s = T::zero();
            scale = T::zero();
            if i < m {
                // Left-hand (column) Householder transformation.
                for k in i..m {
                    scale += U.get((k, i)).abs();
                }
                if scale.abs() > T::epsilon() {
                    for k in i..m {
                        U.div_element_mut((k, i), scale);
                        s += *U.get((k, i)) * *U.get((k, i));
                    }
                    let mut f = *U.get((i, i));
                    g = -<T as RealNumber>::copysign(s.sqrt(), f);
                    let h = f * g - s;
                    U.set((i, i), f - g);
                    for j in l - 1..n {
                        s = T::zero();
                        for k in i..m {
                            s += *U.get((k, i)) * *U.get((k, j));
                        }
                        f = s / h;
                        for k in i..m {
                            U.add_element_mut((k, j), f * *U.get((k, i)));
                        }
                    }
                    for k in i..m {
                        U.mul_element_mut((k, i), scale);
                    }
                }
            }
            w[i] = scale * g;
            g = T::zero();
            let mut s = T::zero();
            scale = T::zero();
            if i < m && i + 1 != n {
                // Right-hand (row) Householder transformation.
                for k in l - 1..n {
                    scale += U.get((i, k)).abs();
                }
                if scale.abs() > T::epsilon() {
                    for k in l - 1..n {
                        U.div_element_mut((i, k), scale);
                        s += *U.get((i, k)) * *U.get((i, k));
                    }
                    let f = *U.get((i, l - 1));
                    g = -<T as RealNumber>::copysign(s.sqrt(), f);
                    let h = f * g - s;
                    U.set((i, l - 1), f - g);
                    for (k, rv1_k) in rv1.iter_mut().enumerate().take(n).skip(l - 1) {
                        *rv1_k = *U.get((i, k)) / h;
                    }
                    for j in l - 1..m {
                        s = T::zero();
                        for k in l - 1..n {
                            s += *U.get((j, k)) * *U.get((i, k));
                        }
                        for (k, rv1_k) in rv1.iter().enumerate().take(n).skip(l - 1) {
                            U.add_element_mut((j, k), s * (*rv1_k));
                        }
                    }
                    for k in l - 1..n {
                        U.mul_element_mut((i, k), scale);
                    }
                }
            }
            anorm = T::max(anorm, w[i].abs() + rv1[i].abs());
        }
        // Phase 2: accumulation of right-hand transformations into `v`.
        for i in (0..n).rev() {
            if i < n - 1 {
                if g != T::zero() {
                    // Double division avoids possible underflow.
                    for j in l..n {
                        v.set((j, i), (*U.get((i, j)) / *U.get((i, l))) / g);
                    }
                    for j in l..n {
                        let mut s = T::zero();
                        for k in l..n {
                            s += *U.get((i, k)) * *v.get((k, j));
                        }
                        for k in l..n {
                            v.add_element_mut((k, j), s * *v.get((k, i)));
                        }
                    }
                }
                for j in l..n {
                    v.set((i, j), T::zero());
                    v.set((j, i), T::zero());
                }
            }
            v.set((i, i), T::one());
            g = rv1[i];
            l = i;
        }
        // Phase 3: accumulation of left-hand transformations into `U`.
        for i in (0..usize::min(m, n)).rev() {
            l = i + 1;
            g = w[i];
            for j in l..n {
                U.set((i, j), T::zero());
            }
            if g.abs() > T::epsilon() {
                g = T::one() / g;
                for j in l..n {
                    let mut s = T::zero();
                    for k in l..m {
                        s += *U.get((k, i)) * *U.get((k, j));
                    }
                    let f = (s / *U.get((i, i))) * g;
                    for k in i..m {
                        U.add_element_mut((k, j), f * *U.get((k, i)));
                    }
                }
                for j in i..m {
                    U.mul_element_mut((j, i), g);
                }
            } else {
                for j in i..m {
                    U.set((j, i), T::zero());
                }
            }
            U.add_element_mut((i, i), T::one());
        }
        // Phase 4: diagonalization of the bidiagonal form. QR iterations with
        // implicit shifts, at most 30 iterations per singular value.
        for k in (0..n).rev() {
            for iteration in 0..30 {
                let mut flag = true;
                // Test for splitting: find the largest l with a negligible
                // superdiagonal element.
                l = k;
                while l != 0 {
                    if l == 0 || rv1[l].abs() <= T::epsilon() * anorm {
                        flag = false;
                        break;
                    }
                    nm = l - 1;
                    if w[nm].abs() <= T::epsilon() * anorm {
                        break;
                    }
                    l -= 1;
                }
                if flag {
                    // Cancellation of rv1[l] if l > 0.
                    let mut c = T::zero();
                    let mut s = T::one();
                    for i in l..k + 1 {
                        let f = s * rv1[i];
                        rv1[i] = c * rv1[i];
                        if f.abs() <= T::epsilon() * anorm {
                            break;
                        }
                        g = w[i];
                        let mut h = f.hypot(g);
                        w[i] = h;
                        h = T::one() / h;
                        c = g * h;
                        s = -f * h;
                        for j in 0..m {
                            let y = *U.get((j, nm));
                            let z = *U.get((j, i));
                            U.set((j, nm), y * c + z * s);
                            U.set((j, i), z * c - y * s);
                        }
                    }
                }
                let z = w[k];
                if l == k {
                    // Convergence: make the singular value non-negative.
                    if z < T::zero() {
                        w[k] = -z;
                        for j in 0..n {
                            v.set((j, k), -*v.get((j, k)));
                        }
                    }
                    break;
                }
                if iteration == 29 {
                    panic!("no convergence in 30 iterations");
                }
                // Shift from bottom 2x2 minor.
                let mut x = w[l];
                nm = k - 1;
                let mut y = w[nm];
                g = rv1[nm];
                let mut h = rv1[k];
                let mut f = ((y - z) * (y + z) + (g - h) * (g + h)) / (T::two() * h * y);
                g = f.hypot(T::one());
                f = ((x - z) * (x + z) + h * ((y / (f + <T as RealNumber>::copysign(g, f))) - h))
                    / x;
                // Next QR transformation (Givens rotations).
                let mut c = T::one();
                let mut s = T::one();
                for j in l..=nm {
                    let i = j + 1;
                    g = rv1[i];
                    y = w[i];
                    h = s * g;
                    g = c * g;
                    let mut z = f.hypot(h);
                    rv1[j] = z;
                    c = f / z;
                    s = h / z;
                    f = x * c + g * s;
                    g = g * c - x * s;
                    h = y * s;
                    y *= c;
                    for jj in 0..n {
                        x = *v.get((jj, j));
                        z = *v.get((jj, i));
                        v.set((jj, j), x * c + z * s);
                        v.set((jj, i), z * c - x * s);
                    }
                    z = f.hypot(h);
                    w[j] = z;
                    // Rotation can be arbitrary if z is (near) zero.
                    if z.abs() > T::epsilon() {
                        z = T::one() / z;
                        c = f * z;
                        s = h * z;
                    }
                    f = c * g + s * y;
                    x = c * y - s * g;
                    for jj in 0..m {
                        y = *U.get((jj, j));
                        z = *U.get((jj, i));
                        U.set((jj, j), y * c + z * s);
                        U.set((jj, i), z * c - y * s);
                    }
                }
                rv1[l] = T::zero();
                rv1[k] = f;
                w[k] = x;
            }
        }
        // Phase 5: sort the singular values (and the corresponding columns of
        // U and V) into descending order using Shell sort.
        let mut inc = 1usize;
        let mut su = vec![T::zero(); m];
        let mut sv = vec![T::zero(); n];
        loop {
            inc *= 3;
            inc += 1;
            if inc > n {
                break;
            }
        }
        loop {
            inc /= 3;
            for i in inc..n {
                let sw = w[i];
                for (k, su_k) in su.iter_mut().enumerate().take(m) {
                    *su_k = *U.get((k, i));
                }
                for (k, sv_k) in sv.iter_mut().enumerate().take(n) {
                    *sv_k = *v.get((k, i));
                }
                let mut j = i;
                while w[j - inc] < sw {
                    w[j] = w[j - inc];
                    for k in 0..m {
                        U.set((k, j), *U.get((k, j - inc)));
                    }
                    for k in 0..n {
                        v.set((k, j), *v.get((k, j - inc)));
                    }
                    j -= inc;
                    if j < inc {
                        break;
                    }
                }
                w[j] = sw;
                for (k, su_k) in su.iter().enumerate().take(m) {
                    U.set((k, j), *su_k);
                }
                for (k, sv_k) in sv.iter().enumerate().take(n) {
                    v.set((k, j), *sv_k);
                }
            }
            if inc <= 1 {
                break;
            }
        }
        // Phase 6: flip the sign of each singular vector pair so that most of
        // its entries are non-negative (a sign convention).
        for k in 0..n {
            let mut s = 0.;
            for i in 0..m {
                if U.get((i, k)) < &T::zero() {
                    s += 1.;
                }
            }
            for j in 0..n {
                if v.get((j, k)) < &T::zero() {
                    s += 1.;
                }
            }
            if s > (m + n) as f64 / 2. {
                for i in 0..m {
                    U.set((i, k), -*U.get((i, k)));
                }
                for j in 0..n {
                    v.set((j, k), -*v.get((j, k)));
                }
            }
        }
        Ok(SVD::new(U, v, w))
    }
}
impl<T: Number + RealNumber, M: SVDDecomposable<T>> SVD<T, M> {
    /// Wraps the factors. `tol` is a machine-epsilon based threshold below
    /// which singular values are treated as zero in [`SVD::solve`]; `s[0]` is
    /// assumed to be the largest singular value (`svd_mut` sorts descending).
    pub(crate) fn new(U: M, V: M, s: Vec<T>) -> SVD<T, M> {
        let m = U.shape().0;
        let n = V.shape().0;
        let tol = T::half() * (T::from(m + n).unwrap() + T::one()).sqrt() * s[0] * T::epsilon();
        SVD { U, V, s, m, n, tol }
    }
    /// Solves `A * X = B` in the least-squares sense via the pseudo-inverse:
    /// `X = V * diag(1/s) * U^T * B`, skipping components whose singular value
    /// is at or below `tol`. The solution is written back into `b`.
    ///
    /// # Panics
    /// Panics if `B` has a different number of rows than `U`.
    pub(crate) fn solve(&self, mut b: M) -> Result<M, Failed> {
        let p = b.shape().1;
        if self.U.shape().0 != b.shape().0 {
            panic!(
                "Dimensions do not agree. U.nrows should equal b.nrows but is {}, {}",
                self.U.shape().0,
                b.shape().0
            );
        }
        // Process one right-hand-side column at a time.
        for k in 0..p {
            // tmp = diag(1/s) * U^T * b_k; tiny singular values contribute zero.
            let mut tmp = vec![T::zero(); self.n];
            for (j, tmp_j) in tmp.iter_mut().enumerate().take(self.n) {
                let mut r = T::zero();
                if self.s[j] > self.tol {
                    for i in 0..self.m {
                        r += *self.U.get((i, j)) * *b.get((i, k));
                    }
                    r /= self.s[j];
                }
                *tmp_j = r;
            }
            // b_k = V * tmp, written back in place.
            for j in 0..self.n {
                let mut r = T::zero();
                for (jj, tmp_jj) in tmp.iter().enumerate().take(self.n) {
                    r += *self.V.get((j, jj)) * (*tmp_jj);
                }
                b.set((j, k), r);
            }
        }
        Ok(b)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::basic::matrix::DenseMatrix;
    use approx::relative_eq;
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn decompose_symmetric() {
        // For a symmetric matrix U and V coincide (up to sign), hence the
        // abs() comparison below.
        let A = DenseMatrix::from_2d_array(&[
            &[0.9000, 0.4000, 0.7000],
            &[0.4000, 0.5000, 0.3000],
            &[0.7000, 0.3000, 0.8000],
        ]);
        let s: Vec<f64> = vec![1.7498382, 0.3165784, 0.1335834];
        let U = DenseMatrix::from_2d_array(&[
            &[0.6881997, -0.07121225, 0.7220180],
            &[0.3700456, 0.89044952, -0.2648886],
            &[0.6240573, -0.44947578, -0.639158],
        ]);
        let V = DenseMatrix::from_2d_array(&[
            &[0.6881997, -0.07121225, 0.7220180],
            &[0.3700456, 0.89044952, -0.2648886],
            &[0.6240573, -0.44947578, -0.6391588],
        ]);
        let svd = A.svd().unwrap();
        assert!(relative_eq!(V.abs(), svd.V.abs(), epsilon = 1e-4));
        assert!(relative_eq!(U.abs(), svd.U.abs(), epsilon = 1e-4));
        for i in 0..s.len() {
            assert!((s[i] - svd.s[i]).abs() < 1e-4);
        }
    }
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn decompose_asymmetric() {
        // 7x7 general matrix; expected factors precomputed externally.
        let A = DenseMatrix::from_2d_array(&[
            &[
                1.19720880,
                -1.8391378,
                0.3019585,
                -1.1165701,
                -1.7210814,
                0.4918882,
                -0.04247433,
            ],
            &[
                0.06605075,
                1.0315583,
                0.8294362,
                -0.3646043,
                -1.6038017,
                -0.9188110,
                -0.63760340,
            ],
            &[
                -1.02637715,
                1.0747931,
                -0.8089055,
                -0.4726863,
                -0.2064826,
                -0.3325532,
                0.17966051,
            ],
            &[
                -1.45817729,
                -0.8942353,
                0.3459245,
                1.5068363,
                -2.0180708,
                -0.3696350,
                -1.19575563,
            ],
            &[
                -0.07318103,
                -0.2783787,
                1.2237598,
                0.1995332,
                0.2545336,
                -0.1392502,
                -1.88207227,
            ],
            &[
                0.88248425, -0.9360321, 0.1393172, 0.1393281, -0.3277873, -0.5553013, 1.63805985,
            ],
            &[
                0.12641406,
                -0.8710055,
                -0.2712301,
                0.2296515,
                1.1781535,
                -0.2158704,
                -0.27529472,
            ],
        ]);
        let s: Vec<f64> = vec![
            3.8589375, 3.4396766, 2.6487176, 2.2317399, 1.5165054, 0.8109055, 0.2706515,
        ];
        let U = DenseMatrix::from_2d_array(&[
            &[
                -0.3082776,
                0.77676231,
                0.01330514,
                0.23231424,
                -0.47682758,
                0.13927109,
                0.02640713,
            ],
            &[
                -0.4013477,
                -0.09112050,
                0.48754440,
                0.47371793,
                0.40636608,
                0.24600706,
                -0.37796295,
            ],
            &[
                0.0599719,
                -0.31406586,
                0.45428229,
                -0.08071283,
                -0.38432597,
                0.57320261,
                0.45673993,
            ],
            &[
                -0.7694214,
                -0.12681435,
                -0.05536793,
                -0.62189972,
                -0.02075522,
                -0.01724911,
                -0.03681864,
            ],
            &[
                -0.3319069,
                -0.17984404,
                -0.54466777,
                0.45335157,
                0.19377726,
                0.12333423,
                0.55003852,
            ],
            &[
                0.1259351,
                0.49087824,
                0.16349687,
                -0.32080176,
                0.64828744,
                0.20643772,
                0.38812467,
            ],
            &[
                0.1491884,
                0.01768604,
                -0.47884363,
                -0.14108924,
                0.03922507,
                0.73034065,
                -0.43965505,
            ],
        ]);
        let V = DenseMatrix::from_2d_array(&[
            &[
                -0.2122609,
                -0.54650056,
                0.08071332,
                -0.43239135,
                -0.2925067,
                0.1414550,
                0.59769207,
            ],
            &[
                -0.1943605,
                0.63132116,
                -0.54059857,
                -0.37089970,
                -0.1363031,
                0.2892641,
                0.17774114,
            ],
            &[
                0.3031265,
                -0.06182488,
                0.18579097,
                -0.38606409,
                -0.5364911,
                0.2983466,
                -0.58642548,
            ],
            &[
                0.1844063, 0.24425278, 0.25923756, 0.59043765, -0.4435443, 0.3959057, 0.37019098,
            ],
            &[
                -0.7164205,
                0.30694911,
                0.58264743,
                -0.07458095,
                -0.1142140,
                -0.1311972,
                -0.13124764,
            ],
            &[
                -0.1103067,
                -0.10633600,
                0.18257905,
                -0.03638501,
                0.5722925,
                0.7784398,
                -0.09153611,
            ],
            &[
                -0.5156083,
                -0.36573746,
                -0.47613340,
                0.41342817,
                -0.2659765,
                0.1654796,
                -0.32346758,
            ],
        ]);
        let svd = A.svd().unwrap();
        // Singular vectors are unique only up to sign, hence abs().
        assert!(relative_eq!(V.abs(), svd.V.abs(), epsilon = 1e-4));
        assert!(relative_eq!(U.abs(), svd.U.abs(), epsilon = 1e-4));
        for i in 0..s.len() {
            assert!((s[i] - svd.s[i]).abs() < 1e-4);
        }
    }
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn solve() {
        // Solves A * W = B via the pseudo-inverse for two right-hand sides.
        let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
        let b = DenseMatrix::from_2d_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]);
        let expected_w =
            DenseMatrix::from_2d_array(&[&[-0.20, -1.28], &[0.87, 2.22], &[0.47, 0.66]]);
        let w = a.svd_solve_mut(b).unwrap();
        assert!(relative_eq!(w, expected_w, epsilon = 1e-2));
    }
    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
    #[test]
    fn decompose_restore() {
        // Round trip: A should be reconstructed from U * Sigma * V^T.
        let a = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0, 4.0], &[5.0, 6.0, 7.0, 8.0]]);
        let svd = a.svd().unwrap();
        let u: &DenseMatrix<f32> = &svd.U; // left-singular vectors
        let v: &DenseMatrix<f32> = &svd.V; // right-singular vectors
        let s: &DenseMatrix<f32> = &svd.S(); // diagonal matrix of singular values
        let a_hat = u.matmul(s).matmul(&v.transpose());
        assert!(relative_eq!(a, a_hat, epsilon = 1e-3));
    }
}