feat: documents matrix decomposition methods

This commit is contained in:
Volodymyr Orlov
2020-09-07 16:28:52 -07:00
parent bbe810d164
commit cc1f84e81f
6 changed files with 250 additions and 10 deletions
+44 -6
View File
@@ -1,3 +1,37 @@
//! # Eigen Decomposition
//!
//! Eigendecomposition is one of the most useful matrix factorization methods in machine learning that decomposes a matrix into eigenvectors and eigenvalues.
//! This decomposition plays an important role in the [Principal Component Analysis (PCA)](../../decomposition/pca/index.html).
//!
//! Eigendecomposition decomposes a square matrix into a set of eigenvectors and eigenvalues.
//!
//! \\[A = Q \Lambda Q^{-1}\\]
//!
//! where \\(Q\\) is a matrix comprised of the eigenvectors, \\(\Lambda\\) is a diagonal matrix comprised of the eigenvalues along the diagonal,
//! and \\(Q^{-1}\\) is the inverse of the matrix comprised of the eigenvectors.
//!
//! Example:
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//! use smartcore::linalg::evd::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[0.9000, 0.4000, 0.7000],
//! &[0.4000, 0.5000, 0.3000],
//! &[0.7000, 0.3000, 0.8000],
//! ]);
//!
//! let evd = A.evd(true);
//! let eigenvectors: DenseMatrix<f64> = evd.V;
//! let eigenvalues: Vec<f64> = evd.d;
//! ```
//!
//! ## References:
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., Section 11 Eigensystems](http://numerical.recipes/)
//! * ["Introduction to Linear Algebra", Gilbert Strang, 5th ed., ch. 6 Eigenvalues and Eigenvectors](https://math.mit.edu/~gs/linearalgebra/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
#![allow(non_snake_case)]
use crate::linalg::BaseMatrix;
@@ -6,23 +40,27 @@ use num::complex::Complex;
use std::fmt::Debug;
#[derive(Debug, Clone)]
/// Results of eigen decomposition.
///
/// Eigenvalues are returned split into real and imaginary parts, since a
/// non-symmetric real matrix may have complex eigenvalues. `d[i]` and `e[i]`
/// together form the i-th eigenvalue.
pub struct EVD<T: RealNumber, M: BaseMatrix<T>> {
    /// Real part of eigenvalues.
    pub d: Vec<T>,
    /// Imaginary part of eigenvalues.
    pub e: Vec<T>,
    /// Eigenvectors (presumably stored as the columns of `V`, matching the
    /// usual \\(A = Q \Lambda Q^{-1}\\) convention — confirm against the
    /// factorization routine).
    pub V: M,
}
impl<T: RealNumber, M: BaseMatrix<T>> EVD<T, M> {
    /// Bundles the results of an eigen decomposition.
    /// * `V` - matrix of eigenvectors
    /// * `d` - real parts of the eigenvalues
    /// * `e` - imaginary parts of the eigenvalues
    ///
    /// No validation is performed; callers are expected to pass `d` and `e`
    /// of equal length, as produced by the decomposition routine.
    pub fn new(V: M, d: Vec<T>, e: Vec<T>) -> EVD<T, M> {
        // Field-init shorthand replaces the redundant `d: d, e: e, V: V`
        // (clippy::redundant_field_names).
        EVD { d, e, V }
    }
}
/// Trait that implements EVD decomposition routine for any matrix.
pub trait EVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
/// Compute the eigen decomposition of a square matrix.
/// * `symmetric` - whether the matrix is symmetric
fn evd(&self, symmetric: bool) -> EVD<T, Self> {
self.clone().evd_mut(symmetric)
}
/// Compute the eigen decomposition of a square matrix. The input matrix
/// will be used for factorization.
/// * `symmetric` - whether the matrix is symmetric
fn evd_mut(mut self, symmetric: bool) -> EVD<T, Self> {
let (nrows, ncols) = self.shape();
if ncols != nrows {
+44 -1
View File
@@ -1,3 +1,36 @@
//! # LU Decomposition
//!
//! Decomposes a square matrix into a product of two triangular matrices:
//!
//! \\[A = LU\\]
//!
//! where \\(U\\) is an upper triangular matrix and \\(L\\) is a lower triangular matrix.
//! The LU decomposition is used to obtain more efficient solutions to equations of the form
//!
//! \\[Ax = b\\]
//!
//! Example:
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//! use smartcore::linalg::lu::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[1., 2., 3.],
//! &[0., 1., 5.],
//! &[5., 6., 0.]
//! ]);
//!
//! let lu = A.lu();
//! let lower: DenseMatrix<f64> = lu.L();
//! let upper: DenseMatrix<f64> = lu.U();
//! ```
//!
//! ## References:
//! * ["No bullshit guide to linear algebra", Ivan Savov, 2016, 7.6 Matrix decompositions](https://minireference.com/)
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., 2.3.1 Performing the LU Decomposition](http://numerical.recipes/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
#![allow(non_snake_case)]
use std::fmt::Debug;
@@ -7,6 +40,7 @@ use crate::linalg::BaseMatrix;
use crate::math::num::RealNumber;
#[derive(Debug, Clone)]
/// Result of LU decomposition.
pub struct LU<T: RealNumber, M: BaseMatrix<T>> {
LU: M,
pivot: Vec<usize>,
@@ -16,7 +50,7 @@ pub struct LU<T: RealNumber, M: BaseMatrix<T>> {
}
impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
pub fn new(LU: M, pivot: Vec<usize>, pivot_sign: i8) -> LU<T, M> {
pub(crate) fn new(LU: M, pivot: Vec<usize>, pivot_sign: i8) -> LU<T, M> {
let (_, n) = LU.shape();
let mut singular = false;
@@ -36,6 +70,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
}
}
/// Get lower triangular matrix
pub fn L(&self) -> M {
let (n_rows, n_cols) = self.LU.shape();
let mut L = M::zeros(n_rows, n_cols);
@@ -55,6 +90,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
L
}
/// Get upper triangular matrix
pub fn U(&self) -> M {
let (n_rows, n_cols) = self.LU.shape();
let mut U = M::zeros(n_rows, n_cols);
@@ -72,6 +108,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
U
}
/// Pivot vector
pub fn pivot(&self) -> M {
let (_, n) = self.LU.shape();
let mut piv = M::zeros(n, n);
@@ -83,6 +120,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
piv
}
/// Returns matrix inverse
pub fn inverse(&self) -> M {
let (m, n) = self.LU.shape();
@@ -153,11 +191,15 @@ impl<T: RealNumber, M: BaseMatrix<T>> LU<T, M> {
}
}
/// Trait that implements LU decomposition routine for any matrix.
pub trait LUDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
/// Compute the LU decomposition of a square matrix.
fn lu(&self) -> LU<T, Self> {
self.clone().lu_mut()
}
/// Compute the LU decomposition of a square matrix. The input matrix
/// will be used for factorization.
fn lu_mut(mut self) -> LU<T, Self> {
let (m, n) = self.shape();
@@ -213,6 +255,7 @@ pub trait LUDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
LU::new(self, piv, pivsign)
}
/// Solves Ax = b
fn lu_solve_mut(self, b: Self) -> Self {
self.lu_mut().solve(b)
}
+39
View File
@@ -1,3 +1,42 @@
//! # Connector for nalgebra
//!
//! If you want to use [nalgebra](https://docs.rs/nalgebra/) matrices and vectors with SmartCore:
//!
//! ```
//! use nalgebra::{DMatrix, RowDVector};
//! use smartcore::linear::linear_regression::*;
//! // Enable nalgebra connector
//! use smartcore::linalg::nalgebra_bindings::*;
//!
//! // Longley dataset (https://www.statsmodels.org/stable/datasets/generated/longley.html)
//! let x = DMatrix::from_row_slice(16, 6, &[
//! 234.289, 235.6, 159.0, 107.608, 1947., 60.323,
//! 259.426, 232.5, 145.6, 108.632, 1948., 61.122,
//! 258.054, 368.2, 161.6, 109.773, 1949., 60.171,
//! 284.599, 335.1, 165.0, 110.929, 1950., 61.187,
//! 328.975, 209.9, 309.9, 112.075, 1951., 63.221,
//! 346.999, 193.2, 359.4, 113.270, 1952., 63.639,
//! 365.385, 187.0, 354.7, 115.094, 1953., 64.989,
//! 363.112, 357.8, 335.0, 116.219, 1954., 63.761,
//! 397.469, 290.4, 304.8, 117.388, 1955., 66.019,
//! 419.180, 282.2, 285.7, 118.734, 1956., 67.857,
//! 442.769, 293.6, 279.8, 120.445, 1957., 68.169,
//! 444.546, 468.1, 263.7, 121.950, 1958., 66.513,
//! 482.704, 381.3, 255.2, 123.366, 1959., 68.655,
//! 502.601, 393.1, 251.4, 125.368, 1960., 69.564,
//! 518.173, 480.6, 257.2, 127.852, 1961., 69.331,
//! 554.894, 400.7, 282.7, 130.081, 1962., 70.551
//! ]);
//!
//! let y: RowDVector<f64> = RowDVector::from_vec(vec![
//! 83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0,
//! 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7,
//! 116.9,
//! ]);
//!
//! let lr = LinearRegression::fit(&x, &y, Default::default());
//! let y_hat = lr.predict(&x);
//! ```
use std::iter::Sum;
use std::ops::{AddAssign, DivAssign, MulAssign, Range, SubAssign};
+41
View File
@@ -1,3 +1,44 @@
//! # Connector for ndarray
//!
//! If you want to use [ndarray](https://docs.rs/ndarray) matrices and vectors with SmartCore:
//!
//! ```
//! use ndarray::{arr1, arr2};
//! use smartcore::linear::logistic_regression::*;
//! // Enable ndarray connector
//! use smartcore::linalg::ndarray_bindings::*;
//!
//! // Iris dataset
//! let x = arr2(&[
//! [5.1, 3.5, 1.4, 0.2],
//! [4.9, 3.0, 1.4, 0.2],
//! [4.7, 3.2, 1.3, 0.2],
//! [4.6, 3.1, 1.5, 0.2],
//! [5.0, 3.6, 1.4, 0.2],
//! [5.4, 3.9, 1.7, 0.4],
//! [4.6, 3.4, 1.4, 0.3],
//! [5.0, 3.4, 1.5, 0.2],
//! [4.4, 2.9, 1.4, 0.2],
//! [4.9, 3.1, 1.5, 0.1],
//! [7.0, 3.2, 4.7, 1.4],
//! [6.4, 3.2, 4.5, 1.5],
//! [6.9, 3.1, 4.9, 1.5],
//! [5.5, 2.3, 4.0, 1.3],
//! [6.5, 2.8, 4.6, 1.5],
//! [5.7, 2.8, 4.5, 1.3],
//! [6.3, 3.3, 4.7, 1.6],
//! [4.9, 2.4, 3.3, 1.0],
//! [6.6, 2.9, 4.6, 1.3],
//! [5.2, 2.7, 3.9, 1.4],
//! ]);
//! let y = arr1(&[
//! 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
//! 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.
//! ]);
//!
//! let lr = LogisticRegression::fit(&x, &y);
//! let y_hat = lr.predict(&x);
//! ```
use std::iter::Sum;
use std::ops::AddAssign;
use std::ops::DivAssign;
+37 -1
View File
@@ -1,3 +1,31 @@
//! # QR Decomposition
//!
//! Any real square matrix \\(A \in R^{n \times n}\\) can be decomposed as a product of an orthogonal matrix \\(Q\\) and an upper triangular matrix \\(R\\):
//!
//! \\[A = QR\\]
//!
//! Example:
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//! use smartcore::linalg::qr::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[0.9, 0.4, 0.7],
//! &[0.4, 0.5, 0.3],
//! &[0.7, 0.3, 0.8]
//! ]);
//!
//! let qr = A.qr();
//! let orthogonal: DenseMatrix<f64> = qr.Q();
//! let triangular: DenseMatrix<f64> = qr.R();
//! ```
//!
//! ## References:
//! * ["No bullshit guide to linear algebra", Ivan Savov, 2016, 7.6 Matrix decompositions](https://minireference.com/)
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., 2.10 QR Decomposition](http://numerical.recipes/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
#![allow(non_snake_case)]
use std::fmt::Debug;
@@ -6,6 +34,7 @@ use crate::linalg::BaseMatrix;
use crate::math::num::RealNumber;
#[derive(Debug, Clone)]
/// Results of QR decomposition.
pub struct QR<T: RealNumber, M: BaseMatrix<T>> {
QR: M,
tau: Vec<T>,
@@ -13,7 +42,7 @@ pub struct QR<T: RealNumber, M: BaseMatrix<T>> {
}
impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
pub fn new(QR: M, tau: Vec<T>) -> QR<T, M> {
pub(crate) fn new(QR: M, tau: Vec<T>) -> QR<T, M> {
let mut singular = false;
for j in 0..tau.len() {
if tau[j] == T::zero() {
@@ -29,6 +58,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
}
}
/// Get upper triangular matrix.
pub fn R(&self) -> M {
let (_, n) = self.QR.shape();
let mut R = M::zeros(n, n);
@@ -41,6 +71,7 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
return R;
}
/// Get an orthogonal matrix.
pub fn Q(&self) -> M {
let (m, n) = self.QR.shape();
let mut Q = M::zeros(m, n);
@@ -112,11 +143,15 @@ impl<T: RealNumber, M: BaseMatrix<T>> QR<T, M> {
}
}
/// Trait that implements QR decomposition routine for any matrix.
pub trait QRDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
/// Compute the QR decomposition of a matrix.
fn qr(&self) -> QR<T, Self> {
self.clone().qr_mut()
}
/// Compute the QR decomposition of a matrix. The input matrix
/// will be used for factorization.
fn qr_mut(mut self) -> QR<T, Self> {
let (m, n) = self.shape();
@@ -154,6 +189,7 @@ pub trait QRDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
QR::new(self, r_diagonal)
}
/// Solves Ax = b
fn qr_solve_mut(self, b: Self) -> Self {
self.qr_mut().solve(b)
}
+45 -2
View File
@@ -1,13 +1,50 @@
//! # SVD Decomposition
//!
//! Any _m_ by _n_ matrix \\(A\\) can be factored into:
//!
//! \\[A = U \Sigma V^T\\]
//!
//! Where columns of \\(U\\) are eigenvectors of \\(AA^T\\) (left-singular vectors of _A_),
//! \\(V\\) are eigenvectors of \\(A^TA\\) (right-singular vectors of _A_),
//! and the diagonal values in the \\(\Sigma\\) matrix are known as the singular values of the original matrix.
//!
//! Example:
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//! use smartcore::linalg::svd::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[0.9, 0.4, 0.7],
//! &[0.4, 0.5, 0.3],
//! &[0.7, 0.3, 0.8]
//! ]);
//!
//! let svd = A.svd();
//! let u: DenseMatrix<f64> = svd.U;
//! let v: DenseMatrix<f64> = svd.V;
//! let s: Vec<f64> = svd.s;
//! ```
//!
//! ## References:
//! * ["Linear Algebra and Its Applications", Gilbert Strang, 5th ed., 6.3 Singular Value Decomposition](https://www.academia.edu/32459792/_Strang_G_Linear_algebra_and_its_applications_4_5881001_PDF)
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., 2.6 Singular Value Decomposition](http://numerical.recipes/)
//!
//! <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
//! <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
#![allow(non_snake_case)]
use crate::linalg::BaseMatrix;
use crate::math::num::RealNumber;
use std::fmt::Debug;
/// Results of SVD decomposition
#[derive(Debug, Clone)]
pub struct SVD<T: RealNumber, M: SVDDecomposableMatrix<T>> {
/// Left-singular vectors of _A_
pub U: M,
/// Right-singular vectors of _A_
pub V: M,
/// Singular values of the original matrix
pub s: Vec<T>,
full: bool,
m: usize,
@@ -15,19 +52,25 @@ pub struct SVD<T: RealNumber, M: SVDDecomposableMatrix<T>> {
tol: T,
}
/// Trait that implements SVD decomposition routine for any matrix.
pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
/// Solves Ax = b. Overrides original matrix in the process.
fn svd_solve_mut(self, b: Self) -> Self {
self.svd_mut().solve(b)
}
/// Solves Ax = b
fn svd_solve(&self, b: Self) -> Self {
self.svd().solve(b)
}
/// Compute the SVD decomposition of a matrix.
fn svd(&self) -> SVD<T, Self> {
self.clone().svd_mut()
}
/// Compute the SVD decomposition of a matrix. The input matrix
/// will be used for factorization.
fn svd_mut(self) -> SVD<T, Self> {
let mut U = self;
@@ -368,7 +411,7 @@ pub trait SVDDecomposableMatrix<T: RealNumber>: BaseMatrix<T> {
}
impl<T: RealNumber, M: SVDDecomposableMatrix<T>> SVD<T, M> {
pub fn new(U: M, V: M, s: Vec<T>) -> SVD<T, M> {
pub(crate) fn new(U: M, V: M, s: Vec<T>) -> SVD<T, M> {
let m = U.shape().0;
let n = V.shape().0;
let full = s.len() == m.min(n);
@@ -384,7 +427,7 @@ impl<T: RealNumber, M: SVDDecomposableMatrix<T>> SVD<T, M> {
}
}
pub fn solve(&self, mut b: M) -> M {
pub(crate) fn solve(&self, mut b: M) -> M {
let p = b.shape().1;
if self.U.shape().0 != b.shape().0 {