feat: adds new distance measures + LU decomposition

This commit is contained in:
Volodymyr Orlov
2020-06-05 10:40:17 -07:00
parent f8f1e75fe2
commit e20e9ca6e0
16 changed files with 594 additions and 28 deletions
+1 -3
View File
@@ -829,9 +829,7 @@ mod tests {
&[0.6952105, 0.43984484, -0.7036135]
]);
let evd = A.evd(false);
println!("{}", &evd.V.abs());
let evd = A.evd(false);
assert!(eigen_vectors.abs().approximate_eq(&evd.V.abs(), 1e-4));
for i in 0..eigen_values.len() {
+254
View File
@@ -0,0 +1,254 @@
#![allow(non_snake_case)]
use std::fmt::Debug;
use std::marker::PhantomData;
use crate::math::num::FloatExt;
use crate::linalg::BaseMatrix;
#[derive(Debug, Clone)]
pub struct LU<T: FloatExt, M: BaseMatrix<T>> {
LU: M,
pivot: Vec<usize>,
pivot_sign: i8,
singular: bool,
phantom: PhantomData<T>
}
impl<T: FloatExt, M: BaseMatrix<T>> LU<T, M> {
pub fn new(LU: M, pivot: Vec<usize>, pivot_sign: i8) -> LU<T, M> {
let (_, n) = LU.shape();
let mut singular = false;
for j in 0..n {
if LU.get(j, j) == T::zero() {
singular = true;
break;
}
}
LU {
LU: LU,
pivot: pivot,
pivot_sign: pivot_sign,
singular: singular,
phantom: PhantomData
}
}
pub fn L(&self) -> M {
let (n_rows, n_cols) = self.LU.shape();
let mut L = M::zeros(n_rows, n_cols);
for i in 0..n_rows {
for j in 0..n_cols {
if i > j {
L.set(i, j, self.LU.get(i, j));
} else if i == j {
L.set(i, j, T::one());
} else {
L.set(i, j, T::zero());
}
}
}
L
}
pub fn U(&self) -> M {
let (n_rows, n_cols) = self.LU.shape();
let mut U = M::zeros(n_rows, n_cols);
for i in 0..n_rows {
for j in 0..n_cols {
if i <= j {
U.set(i, j, self.LU.get(i, j));
} else {
U.set(i, j, T::zero());
}
}
}
U
}
pub fn pivot(&self) -> M {
let (_, n) = self.LU.shape();
let mut piv = M::zeros(n, n);
for i in 0..n {
piv.set(i, self.pivot[i], T::one());
}
piv
}
pub fn inverse(&self) -> M {
let (m, n) = self.LU.shape();
if m != n {
panic!("Matrix is not square: {}x{}", m, n);
}
let mut inv = M::zeros(n, n);
for i in 0..n {
inv.set(i, i, T::one());
}
inv = self.solve(inv);
return inv;
}
fn solve(&self, mut b: M) -> M {
let (m, n) = self.LU.shape();
let (b_m, b_n) = b.shape();
if b_m != m {
panic!("Row dimensions do not agree: A is {} x {}, but B is {} x {}", m, n, b_m, b_n);
}
if self.singular {
panic!("Matrix is singular.");
}
let mut X = M::zeros(b_m, b_n);
for j in 0..b_n {
for i in 0..m {
X.set(i, j, b.get(self.pivot[i], j));
}
}
for k in 0..n {
for i in k+1..n {
for j in 0..b_n {
X.sub_element_mut(i, j, X.get(k, j) * self.LU.get(i, k));
}
}
}
for k in (0..n).rev() {
for j in 0..b_n {
X.div_element_mut(k, j, self.LU.get(k, k));
}
for i in 0..k {
for j in 0..b_n {
X.sub_element_mut(i, j, X.get(k, j) * self.LU.get(i, k));
}
}
}
for j in 0..b_n {
for i in 0..m {
b.set(i, j, X.get(i, j));
}
}
b
}
}
pub trait LUDecomposableMatrix<T: FloatExt>: BaseMatrix<T> {
fn lu(&self) -> LU<T, Self> {
self.clone().lu_mut()
}
fn lu_mut(mut self) -> LU<T, Self> {
let (m, n) = self.shape();
let mut piv = vec![0; m];
for i in 0..m {
piv[i] = i;
}
let mut pivsign = 1;
let mut LUcolj = vec![T::zero(); m];
for j in 0..n {
for i in 0..m {
LUcolj[i] = self.get(i, j);
}
for i in 0..m {
let kmax = usize::min(i, j);
let mut s = T::zero();
for k in 0..kmax {
s = s + self.get(i, k) * LUcolj[k];
}
LUcolj[i] = LUcolj[i] - s;
self.set(i, j, LUcolj[i]);
}
let mut p = j;
for i in j+1..m {
if LUcolj[i].abs() > LUcolj[p].abs() {
p = i;
}
}
if p != j {
for k in 0..n {
let t = self.get(p, k);
self.set(p, k, self.get(j, k));
self.set(j, k, t);
}
let k = piv[p];
piv[p] = piv[j];
piv[j] = k;
pivsign = -pivsign;
}
if j < m && self.get(j, j) != T::zero() {
for i in j+1..m {
self.div_element_mut(i, j, self.get(j, j));
}
}
}
LU::new(self, piv, pivsign)
}
fn lu_solve_mut(self, b: Self) -> Self {
self.lu_mut().solve(b)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::linalg::naive::dense_matrix::*;
#[test]
fn decompose() {
let a = DenseMatrix::from_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
let expected_L = DenseMatrix::from_array(&[&[1. , 0. , 0. ], &[0. , 1. , 0. ], &[0.2, 0.8, 1. ]]);
let expected_U = DenseMatrix::from_array(&[&[ 5., 6., 0.], &[ 0., 1., 5.], &[ 0., 0., -1.]]);
let expected_pivot = DenseMatrix::from_array(&[&[0., 0., 1.], &[0., 1., 0.], &[1., 0., 0.]]);
let lu = a.lu();
assert!(lu.L().approximate_eq(&expected_L, 1e-4));
assert!(lu.U().approximate_eq(&expected_U, 1e-4));
assert!(lu.pivot().approximate_eq(&expected_pivot, 1e-4));
}
#[test]
fn inverse() {
let a = DenseMatrix::from_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
let expected = DenseMatrix::from_array(&[&[-6.0, 3.6, 1.4], &[5.0, -3.0, -1.0], &[-1.0, 0.8, 0.2]]);
let a_inv = a.lu().inverse();
println!("{}", a_inv);
assert!(a_inv.approximate_eq(&expected, 1e-4));
}
}
+6 -2
View File
@@ -2,6 +2,7 @@ pub mod naive;
pub mod qr;
pub mod svd;
pub mod evd;
pub mod lu;
pub mod ndarray_bindings;
pub mod nalgebra_bindings;
@@ -13,6 +14,7 @@ use crate::math::num::FloatExt;
use svd::SVDDecomposableMatrix;
use evd::EVDDecomposableMatrix;
use qr::QRDecomposableMatrix;
use lu::LUDecomposableMatrix;
pub trait BaseMatrix<T: FloatExt>: Clone + Debug {
@@ -172,11 +174,13 @@ pub trait BaseMatrix<T: FloatExt>: Clone + Debug {
fn argmax(&self) -> Vec<usize>;
fn unique(&self) -> Vec<T>;
fn unique(&self) -> Vec<T>;
fn cov(&self) -> Self;
}
pub trait Matrix<T: FloatExt>: BaseMatrix<T> + SVDDecomposableMatrix<T> + EVDDecomposableMatrix<T> + QRDecomposableMatrix<T> + PartialEq + Display {}
pub trait Matrix<T: FloatExt>: BaseMatrix<T> + SVDDecomposableMatrix<T> + EVDDecomposableMatrix<T> + QRDecomposableMatrix<T> + LUDecomposableMatrix<T> + PartialEq + Display {}
pub fn row_iter<F: FloatExt, M: BaseMatrix<F>>(m: &M) -> RowIter<F, M> {
RowIter{
+38
View File
@@ -13,6 +13,7 @@ pub use crate::linalg::BaseMatrix;
use crate::linalg::svd::SVDDecomposableMatrix;
use crate::linalg::evd::EVDDecomposableMatrix;
use crate::linalg::qr::QRDecomposableMatrix;
use crate::linalg::lu::LUDecomposableMatrix;
use crate::math::num::FloatExt;
#[derive(Debug, Clone)]
@@ -188,6 +189,8 @@ impl<T: FloatExt> EVDDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> QRDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> LUDecomposableMatrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> Matrix<T> for DenseMatrix<T> {}
impl<T: FloatExt> PartialEq for DenseMatrix<T> {
@@ -679,6 +682,34 @@ impl<T: FloatExt> BaseMatrix<T> for DenseMatrix<T> {
result
}
fn cov(&self) -> Self {
let (m, n) = self.shape();
let mu = self.column_mean();
let mut cov = Self::zeros(n, n);
for k in 0..m {
for i in 0..n {
for j in 0..=i {
cov.add_element_mut(i, j, (self.get(k, i) - mu[i]) * (self.get(k, j) - mu[j]));
}
}
}
let m_t = T::from(m - 1).unwrap();
for i in 0..n {
for j in 0..=i {
cov.div_element_mut(i, j, m_t);
cov.set(j, i, cov.get(i, j));
}
}
cov
}
}
#[cfg(test)]
@@ -887,4 +918,11 @@ mod tests {
assert_eq!(format!("{}", a), "[[0.9, 0.4, 0.7], [0.4, 0.5, 0.3], [0.7, 0.3, 0.8]]");
}
#[test]
fn cov() {
let a = DenseMatrix::from_array(&[&[64.0, 580.0, 29.0], &[66.0, 570.0, 33.0], &[68.0, 590.0, 37.0], &[69.0, 660.0, 46.0], &[73.0, 600.0, 55.0]]);
let expected = DenseMatrix::from_array(&[&[11.5, 50.0, 34.75], &[50.0, 1250.0, 205.0], &[34.75, 205.0, 110.0]]);
assert_eq!(a.cov(), expected);
}
}
+7
View File
@@ -9,6 +9,7 @@ use crate::linalg::Matrix as SmartCoreMatrix;
use crate::linalg::svd::SVDDecomposableMatrix;
use crate::linalg::evd::EVDDecomposableMatrix;
use crate::linalg::qr::QRDecomposableMatrix;
use crate::linalg::lu::LUDecomposableMatrix;
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static> BaseMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>>
{
@@ -318,6 +319,10 @@ impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum
result
}
fn cov(&self) -> Self {
panic!("Not implemented");
}
}
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static> SVDDecomposableMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>> {}
@@ -326,6 +331,8 @@ impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static> QRDecomposableMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>> {}
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static> LUDecomposableMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>> {}
impl<T: FloatExt + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Sum + 'static> SmartCoreMatrix<T> for Matrix<T, Dynamic, Dynamic, VecStorage<T, Dynamic, Dynamic>> {}
#[cfg(test)]
+7
View File
@@ -14,6 +14,7 @@ use crate::linalg::Matrix;
use crate::linalg::svd::SVDDecomposableMatrix;
use crate::linalg::evd::EVDDecomposableMatrix;
use crate::linalg::qr::QRDecomposableMatrix;
use crate::linalg::lu::LUDecomposableMatrix;
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum> BaseMatrix<T> for ArrayBase<OwnedRepr<T>, Ix2>
@@ -286,6 +287,10 @@ impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign
result
}
fn cov(&self) -> Self {
panic!("Not implemented");
}
}
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum> SVDDecomposableMatrix<T> for ArrayBase<OwnedRepr<T>, Ix2> {}
@@ -294,6 +299,8 @@ impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum> QRDecomposableMatrix<T> for ArrayBase<OwnedRepr<T>, Ix2> {}
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum> LUDecomposableMatrix<T> for ArrayBase<OwnedRepr<T>, Ix2> {}
impl<T: FloatExt + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssign + Sum> Matrix<T> for ArrayBase<OwnedRepr<T>, Ix2> {}
#[cfg(test)]