feat: documents matrix methods

Volodymyr Orlov
2020-09-06 18:27:11 -07:00
parent 1e3ed4c924
commit bbe810d164
25 changed files with 587 additions and 245 deletions
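At a glance, the commit renames the `DenseMatrix` constructors (`from_array` becomes `from_2d_array`, with `from_array` reused for flat row-major input), splits `vector_from_*` into `row_vector_from_*` and `column_vector_from_*`, renames the matrix product `dot` to `matmul` so that `dot` can take over the vector dot product, swaps the meanings of `v_stack` and `h_stack`, and documents the `linalg` traits. A minimal sketch of the renamed API, assembled from the hunks below:
```
use smartcore::linalg::naive::dense_matrix::*;

// 2x3 matrix built from rows (previously `from_array`)
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
// the same matrix from a flat row-major slice (the new `from_array`)
let b = DenseMatrix::from_array(2, 3, &[1., 2., 3., 4., 5., 6.]);
assert_eq!(a, b);

// matrix product (previously `dot`)
let c = a.matmul(&b.transpose());
assert_eq!(c.get(0, 0), 14.);

// dot product of two 1xM vectors (previously `vector_dot`)
let v = DenseMatrix::from_array(1, 3, &[1., 2., 3.]);
let w = DenseMatrix::from_array(1, 3, &[4., 5., 6.]);
assert_eq!(v.dot(&w), 32.);
```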
+1 -1
@@ -337,7 +337,7 @@ mod tests {
#[test]
fn fit_predict_iris() {
let data = DenseMatrix::from_array(&[
let data = DenseMatrix::from_2d_array(&[
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
+3 -3
@@ -20,7 +20,7 @@
//! use smartcore::cluster::kmeans::*;
//!
//! // Iris data
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[5.1, 3.5, 1.4, 0.2],
//! &[4.9, 3.0, 1.4, 0.2],
//! &[4.7, 3.2, 1.3, 0.2],
@@ -264,7 +264,7 @@ mod tests {
#[test]
fn fit_predict_iris() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
@@ -298,7 +298,7 @@ mod tests {
#[test]
fn serde() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
+8 -8
@@ -14,7 +14,7 @@
//! use smartcore::decomposition::pca::*;
//!
//! // Iris data
//! let iris = DenseMatrix::from_array(&[
//! let iris = DenseMatrix::from_2d_array(&[
//! &[5.1, 3.5, 1.4, 0.2],
//! &[4.9, 3.0, 1.4, 0.2],
//! &[4.7, 3.2, 1.3, 0.2],
@@ -211,7 +211,7 @@ impl<T: RealNumber, M: Matrix<T>> PCA<T, M> {
);
}
let mut x_transformed = x.dot(&self.projection);
let mut x_transformed = x.matmul(&self.projection);
for r in 0..nrows {
for c in 0..n_components {
x_transformed.sub_element_mut(r, c, self.pmu[c]);
@@ -227,7 +227,7 @@ mod tests {
use crate::linalg::naive::dense_matrix::*;
fn us_arrests_data() -> DenseMatrix<f64> {
DenseMatrix::from_array(&[
DenseMatrix::from_2d_array(&[
&[13.2, 236.0, 58.0, 21.2],
&[10.0, 263.0, 48.0, 44.5],
&[8.1, 294.0, 80.0, 31.0],
@@ -285,7 +285,7 @@ mod tests {
fn decompose_covariance() {
let us_arrests = us_arrests_data();
let expected_eigenvectors = DenseMatrix::from_array(&[
let expected_eigenvectors = DenseMatrix::from_2d_array(&[
&[
-0.0417043206282872,
-0.0448216562696701,
@@ -312,7 +312,7 @@ mod tests {
],
]);
let expected_projection = DenseMatrix::from_array(&[
let expected_projection = DenseMatrix::from_2d_array(&[
&[-64.8022, -11.448, 2.4949, -2.4079],
&[-92.8275, -17.9829, -20.1266, 4.094],
&[-124.0682, 8.8304, 1.6874, 4.3537],
@@ -394,7 +394,7 @@ mod tests {
fn decompose_correlation() {
let us_arrests = us_arrests_data();
let expected_eigenvectors = DenseMatrix::from_array(&[
let expected_eigenvectors = DenseMatrix::from_2d_array(&[
&[
0.124288601688222,
-0.0969866877028367,
@@ -421,7 +421,7 @@ mod tests {
],
]);
let expected_projection = DenseMatrix::from_array(&[
let expected_projection = DenseMatrix::from_2d_array(&[
&[0.9856, -1.1334, 0.4443, -0.1563],
&[1.9501, -1.0732, -2.04, 0.4386],
&[1.7632, 0.746, -0.0548, 0.8347],
@@ -507,7 +507,7 @@ mod tests {
#[test]
fn serde() {
let iris = DenseMatrix::from_array(&[
let iris = DenseMatrix::from_2d_array(&[
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
+3 -3
@@ -12,7 +12,7 @@
//! use smartcore::ensemble::random_forest_classifier::*;
//!
//! // Iris dataset
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[5.1, 3.5, 1.4, 0.2],
//! &[4.9, 3.0, 1.4, 0.2],
//! &[4.7, 3.2, 1.3, 0.2],
@@ -226,7 +226,7 @@ mod tests {
#[test]
fn fit_predict_iris() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
@@ -270,7 +270,7 @@ mod tests {
#[test]
fn serde() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
+3 -3
@@ -12,7 +12,7 @@
//! use smartcore::ensemble::random_forest_regressor::*;
//!
//! // Longley dataset (https://www.statsmodels.org/stable/datasets/generated/longley.html)
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[234.289, 235.6, 159., 107.608, 1947., 60.323],
//! &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
//! &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
@@ -184,7 +184,7 @@ mod tests {
#[test]
fn fit_longley() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[234.289, 235.6, 159., 107.608, 1947., 60.323],
&[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
&[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
@@ -231,7 +231,7 @@ mod tests {
#[test]
fn serde() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[234.289, 235.6, 159., 107.608, 1947., 60.323],
&[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
&[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
+1 -1
@@ -48,7 +48,7 @@
//! use smartcore::math::distance::*;
//!
//! // Turn Rust vectors with samples into a matrix
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[1., 2.],
//! &[3., 4.],
//! &[5., 6.],
+6 -6
@@ -793,7 +793,7 @@ mod tests {
#[test]
fn decompose_symmetric() {
let A = DenseMatrix::from_array(&[
let A = DenseMatrix::from_2d_array(&[
&[0.9000, 0.4000, 0.7000],
&[0.4000, 0.5000, 0.3000],
&[0.7000, 0.3000, 0.8000],
@@ -801,7 +801,7 @@ mod tests {
let eigen_values: Vec<f64> = vec![1.7498382, 0.3165784, 0.1335834];
let eigen_vectors = DenseMatrix::from_array(&[
let eigen_vectors = DenseMatrix::from_2d_array(&[
&[0.6881997, -0.07121225, 0.7220180],
&[0.3700456, 0.89044952, -0.2648886],
&[0.6240573, -0.44947578, -0.6391588],
@@ -820,7 +820,7 @@ mod tests {
#[test]
fn decompose_asymmetric() {
let A = DenseMatrix::from_array(&[
let A = DenseMatrix::from_2d_array(&[
&[0.9000, 0.4000, 0.7000],
&[0.4000, 0.5000, 0.3000],
&[0.8000, 0.3000, 0.8000],
@@ -828,7 +828,7 @@ mod tests {
let eigen_values: Vec<f64> = vec![1.79171122, 0.31908143, 0.08920735];
let eigen_vectors = DenseMatrix::from_array(&[
let eigen_vectors = DenseMatrix::from_2d_array(&[
&[0.7178958, 0.05322098, 0.6812010],
&[0.3837711, -0.84702111, -0.1494582],
&[0.6952105, 0.43984484, -0.7036135],
@@ -847,7 +847,7 @@ mod tests {
#[test]
fn decompose_complex() {
let A = DenseMatrix::from_array(&[
let A = DenseMatrix::from_2d_array(&[
&[3.0, -2.0, 1.0, 1.0],
&[4.0, -1.0, 1.0, 1.0],
&[1.0, 1.0, 3.0, -2.0],
@@ -857,7 +857,7 @@ mod tests {
let eigen_values_d: Vec<f64> = vec![0.0, 2.0, 2.0, 0.0];
let eigen_values_e: Vec<f64> = vec![2.2361, 0.9999, -0.9999, -2.2361];
let eigen_vectors = DenseMatrix::from_array(&[
let eigen_vectors = DenseMatrix::from_2d_array(&[
&[-0.9159, -0.1378, 0.3816, -0.0806],
&[-0.6707, 0.1059, 0.901, 0.6289],
&[0.9159, -0.1378, 0.3816, 0.0806],
+8 -6
@@ -225,11 +225,13 @@ mod tests {
#[test]
fn decompose() {
let a = DenseMatrix::from_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
let expected_L = DenseMatrix::from_array(&[&[1., 0., 0.], &[0., 1., 0.], &[0.2, 0.8, 1.]]);
let expected_U = DenseMatrix::from_array(&[&[5., 6., 0.], &[0., 1., 5.], &[0., 0., -1.]]);
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
let expected_L =
DenseMatrix::from_2d_array(&[&[1., 0., 0.], &[0., 1., 0.], &[0.2, 0.8, 1.]]);
let expected_U =
DenseMatrix::from_2d_array(&[&[5., 6., 0.], &[0., 1., 5.], &[0., 0., -1.]]);
let expected_pivot =
DenseMatrix::from_array(&[&[0., 0., 1.], &[0., 1., 0.], &[1., 0., 0.]]);
DenseMatrix::from_2d_array(&[&[0., 0., 1.], &[0., 1., 0.], &[1., 0., 0.]]);
let lu = a.lu();
assert!(lu.L().approximate_eq(&expected_L, 1e-4));
assert!(lu.U().approximate_eq(&expected_U, 1e-4));
@@ -238,9 +240,9 @@ mod tests {
#[test]
fn inverse() {
let a = DenseMatrix::from_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
let expected =
DenseMatrix::from_array(&[&[-6.0, 3.6, 1.4], &[5.0, -3.0, -1.0], &[-1.0, 0.8, 0.2]]);
DenseMatrix::from_2d_array(&[&[-6.0, 3.6, 1.4], &[5.0, -3.0, -1.0], &[-1.0, 0.8, 0.2]]);
let a_inv = a.lu().inverse();
println!("{}", a_inv);
assert!(a_inv.approximate_eq(&expected, 1e-4));
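Since the pivoted factorization satisfies P·A = L·U, the `decompose` test above can be cross-checked with the renamed `matmul`; a sketch with P written out from `expected_pivot`:
```
use smartcore::linalg::naive::dense_matrix::*;
use smartcore::linalg::lu::*;

let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
let p = DenseMatrix::from_2d_array(&[&[0., 0., 1.], &[0., 1., 0.], &[1., 0., 0.]]);
let lu = a.lu();
// P * A == L * U up to rounding
assert!(p.matmul(&a).approximate_eq(&lu.L().matmul(&lu.U()), 1e-4));
```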
+221 -9
@@ -1,11 +1,53 @@
//! # Linear Algebra and Matrix Decomposition
//!
//! Most machine learning algorithms in SmartCore depend on linear algebra and matrix decomposition methods from this module.
//!
//! Traits [`BaseMatrix`](trait.BaseMatrix.html), [`Matrix`](trait.Matrix.html) and [`BaseVector`](trait.BaseVector.html) define
//! abstract methods that can be implemented for any two-dimensional and one-dimensional arrays (matrix and vector).
//! Functions from these traits are designed for SmartCore machine learning algorithms and should not be used directly in your code.
//! If you still want to use functions from `BaseMatrix`, `Matrix` and `BaseVector` please be aware that methods defined in these
//! traits might change in the future.
//!
//! One reason why linear algebra traits are public is to allow for different types of matrices and vectors to be plugged into SmartCore.
//! Once all methods defined in `BaseMatrix`, `Matrix` and `BaseVector` are implemented for your favourite type of matrix and vector you
//! should be able to run SmartCore algorithms on it. Please see `nalgebra_bindings` and `ndarray_bindings` modules for an example of how
//! it is done for other libraries.
//!
//! You will also find various matrix decomposition methods that work for any matrix that extends [`Matrix`](trait.Matrix.html).
//! For example, to decompose a matrix defined as [Vec](https://doc.rust-lang.org/std/vec/struct.Vec.html):
//!
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//! use smartcore::linalg::svd::*;
//!
//! let A = DenseMatrix::from_2d_array(&[
//! &[0.9000, 0.4000, 0.7000],
//! &[0.4000, 0.5000, 0.3000],
//! &[0.7000, 0.3000, 0.8000],
//! ]);
//!
//! let svd = A.svd();
//!
//! let s: Vec<f64> = svd.s;
//! let v: DenseMatrix<f64> = svd.V;
//! let u: DenseMatrix<f64> = svd.U;
//! ```
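The same trait methods run unchanged on foreign matrix types once the corresponding feature is enabled; for instance, a minimal sketch with `ndarray` (mirroring the bindings tests further down in this diff, `ndarray-bindings` feature required):
```
use ndarray::arr2;
use smartcore::linalg::BaseMatrix;

let a = arr2(&[[1., 2., 3.], [4., 5., 6.]]);
let b = arr2(&[[1., 2.], [3., 4.], [5., 6.]]);
assert_eq!(BaseMatrix::matmul(&a, &b), arr2(&[[22., 28.], [49., 64.]]));
```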
/// The matrix is represented in terms of its eigenvalues and eigenvectors.
pub mod evd;
/// Factors a matrix as the product of a lower triangular matrix and an upper triangular matrix.
pub mod lu;
/// Dense matrix with column-major order that wraps [Vec](https://doc.rust-lang.org/std/vec/struct.Vec.html).
pub mod naive;
/// [nalgebra](https://docs.rs/nalgebra/) bindings.
#[cfg(feature = "nalgebra-bindings")]
pub mod nalgebra_bindings;
/// [ndarray](https://docs.rs/ndarray) bindings.
#[cfg(feature = "ndarray-bindings")]
pub mod ndarray_bindings;
/// QR factorization that factors a matrix into a product of an orthogonal matrix and an upper triangular matrix.
pub mod qr;
/// Singular value decomposition.
pub mod svd;
use std::fmt::{Debug, Display};
@@ -18,178 +60,348 @@ use lu::LUDecomposableMatrix;
use qr::QRDecomposableMatrix;
use svd::SVDDecomposableMatrix;
/// Column or row vector
pub trait BaseVector<T: RealNumber>: Clone + Debug {
/// Get an element of a vector
/// * `i` - index of an element
fn get(&self, i: usize) -> T;
/// Set an element at `i` to `x`
/// * `i` - index of an element
/// * `x` - new value
fn set(&mut self, i: usize, x: T);
/// Get the number of elements in the vector
fn len(&self) -> usize;
/// Return a vector with the elements of the one-dimensional array.
fn to_vec(&self) -> Vec<T>;
}
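`Vec<T>` implements this trait (see the `naive` module below), so a plain Rust vector can be passed wherever a `BaseVector` is expected; a minimal sketch:
```
use smartcore::linalg::BaseVector;

let mut v: Vec<f64> = vec![1., 2., 3.];
v.set(0, 10.);
assert_eq!(BaseVector::get(&v, 0), 10.);
assert_eq!(BaseVector::to_vec(&v), vec![10., 2., 3.]);
```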
/// Generic matrix type.
pub trait BaseMatrix<T: RealNumber>: Clone + Debug {
/// Row vector that is associated with this matrix type,
/// e.g. if we have an implementation of sparse matrix
/// we should have an associated sparse vector type that
/// represents a row in this matrix.
type RowVector: BaseVector<T> + Clone + Debug;
/// Transforms row vector `vec` into a 1xM matrix.
fn from_row_vector(vec: Self::RowVector) -> Self;
/// Transforms a 1xM matrix into a row vector.
fn to_row_vector(self) -> Self::RowVector;
/// Get an element of the matrix.
/// * `row` - row number
/// * `col` - column number
fn get(&self, row: usize, col: usize) -> T;
/// Get a vector with elements of the `row`'th row
/// * `row` - row number
fn get_row_as_vec(&self, row: usize) -> Vec<T>;
/// Get a vector with elements of the `col`'th column
/// * `col` - column number
fn get_col_as_vec(&self, col: usize) -> Vec<T>;
/// Set an element at `row`, `col` to `x`
fn set(&mut self, row: usize, col: usize, x: T);
/// Create an identity matrix of size `size`
fn eye(size: usize) -> Self;
/// Create new matrix with zeros of size `nrows` by `ncols`.
fn zeros(nrows: usize, ncols: usize) -> Self;
/// Create new matrix with ones of size `nrows` by `ncols`.
fn ones(nrows: usize, ncols: usize) -> Self;
fn to_raw_vector(&self) -> Vec<T>;
/// Create new matrix of size `nrows` by `ncols` where each element is set to `value`.
fn fill(nrows: usize, ncols: usize, value: T) -> Self;
/// Return the shape of an array.
fn shape(&self) -> (usize, usize);
fn v_stack(&self, other: &Self) -> Self;
/// Stack arrays in sequence horizontally (column wise).
/// ```
/// use smartcore::linalg::naive::dense_matrix::*;
///
/// let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
/// let b = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.]]);
/// let expected = DenseMatrix::from_2d_array(&[
/// &[1., 2., 3., 1., 2.],
/// &[4., 5., 6., 3., 4.]
/// ]);
///
/// assert_eq!(a.h_stack(&b), expected);
/// ```
fn h_stack(&self, other: &Self) -> Self;
fn dot(&self, other: &Self) -> Self;
/// Stack arrays in sequence vertically (row wise).
/// ```
/// use smartcore::linalg::naive::dense_matrix::*;
///
/// let a = DenseMatrix::from_array(1, 3, &[1., 2., 3.]);
/// let b = DenseMatrix::from_array(1, 3, &[4., 5., 6.]);
/// let expected = DenseMatrix::from_2d_array(&[
/// &[1., 2., 3.],
/// &[4., 5., 6.]
/// ]);
///
/// assert_eq!(a.v_stack(&b), expected);
/// ```
fn v_stack(&self, other: &Self) -> Self;
fn vector_dot(&self, other: &Self) -> T;
/// Matrix product.
/// ```
/// use smartcore::linalg::naive::dense_matrix::*;
///
/// let a = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.]]);
/// let expected = DenseMatrix::from_2d_array(&[
/// &[7., 10.],
/// &[15., 22.]
/// ]);
///
/// assert_eq!(a.matmul(&a), expected);
/// ```
fn matmul(&self, other: &Self) -> Self;
/// Vector dot product
/// Both matrices should be of size _1xM_
/// ```
/// use smartcore::linalg::naive::dense_matrix::*;
///
/// let a = DenseMatrix::from_array(1, 3, &[1., 2., 3.]);
/// let b = DenseMatrix::from_array(1, 3, &[4., 5., 6.]);
///
/// assert_eq!(a.dot(&b), 32.);
/// ```
fn dot(&self, other: &Self) -> T;
/// Return a slice of the matrix.
/// * `rows` - range of rows to return
/// * `cols` - range of columns to return
/// ```
/// use smartcore::linalg::naive::dense_matrix::*;
///
/// let m = DenseMatrix::from_2d_array(&[
/// &[1., 2., 3., 1.],
/// &[4., 5., 6., 3.],
/// &[7., 8., 9., 5.]
/// ]);
/// let expected = DenseMatrix::from_2d_array(&[&[2., 3.], &[5., 6.]]);
/// let result = m.slice(0..2, 1..3);
/// assert_eq!(result, expected);
/// ```
fn slice(&self, rows: Range<usize>, cols: Range<usize>) -> Self;
/// Returns `true` if matrices are element-wise equal within a tolerance `error`.
fn approximate_eq(&self, other: &Self, error: T) -> bool;
/// Add matrices, element-wise, overriding original matrix with result.
fn add_mut(&mut self, other: &Self) -> &Self;
/// Subtract matrices, element-wise, overriding original matrix with result.
fn sub_mut(&mut self, other: &Self) -> &Self;
/// Multiply matrices, element-wise, overriding original matrix with result.
fn mul_mut(&mut self, other: &Self) -> &Self;
/// Divide matrices, element-wise, overriding original matrix with result.
fn div_mut(&mut self, other: &Self) -> &Self;
/// Divide single element of the matrix by `x`, write result to original matrix.
fn div_element_mut(&mut self, row: usize, col: usize, x: T);
/// Multiply single element of the matrix by `x`, write result to original matrix.
fn mul_element_mut(&mut self, row: usize, col: usize, x: T);
/// Add single element of the matrix to `x`, write result to original matrix.
fn add_element_mut(&mut self, row: usize, col: usize, x: T);
/// Subtract `x` from single element of the matrix, write result to original matrix.
fn sub_element_mut(&mut self, row: usize, col: usize, x: T);
/// Add matrices, element-wise
fn add(&self, other: &Self) -> Self {
let mut r = self.clone();
r.add_mut(other);
r
}
/// Subtract matrices, element-wise
fn sub(&self, other: &Self) -> Self {
let mut r = self.clone();
r.sub_mut(other);
r
}
/// Multiply matrices, element-wise
fn mul(&self, other: &Self) -> Self {
let mut r = self.clone();
r.mul_mut(other);
r
}
/// Divide matrices, element-wise
fn div(&self, other: &Self) -> Self {
let mut r = self.clone();
r.div_mut(other);
r
}
/// Add `scalar` to the matrix, override original matrix with result.
fn add_scalar_mut(&mut self, scalar: T) -> &Self;
/// Subtract `scalar` from the elements of matrix, override original matrix with result.
fn sub_scalar_mut(&mut self, scalar: T) -> &Self;
/// Multiply `scalar` by the elements of matrix, override original matrix with result.
fn mul_scalar_mut(&mut self, scalar: T) -> &Self;
/// Divide elements of the matrix by `scalar`, override original matrix with result.
fn div_scalar_mut(&mut self, scalar: T) -> &Self;
/// Add `scalar` to the matrix.
fn add_scalar(&self, scalar: T) -> Self {
let mut r = self.clone();
r.add_scalar_mut(scalar);
r
}
/// Subtract `scalar` from the elements of matrix.
fn sub_scalar(&self, scalar: T) -> Self {
let mut r = self.clone();
r.sub_scalar_mut(scalar);
r
}
/// Multiply `scalar` by the elements of matrix.
fn mul_scalar(&self, scalar: T) -> Self {
let mut r = self.clone();
r.mul_scalar_mut(scalar);
r
}
/// Divide elements of the matrix by `scalar`.
fn div_scalar(&self, scalar: T) -> Self {
let mut r = self.clone();
r.div_scalar_mut(scalar);
r
}
/// Reverse or permute the axes of the matrix, return new matrix.
fn transpose(&self) -> Self;
/// Create new `nrows` by `ncols` matrix and populate it with random samples from a uniform distribution over [0, 1).
fn rand(nrows: usize, ncols: usize) -> Self;
/// Returns [L2 norm](https://en.wikipedia.org/wiki/Matrix_norm).
fn norm2(&self) -> T;
/// Returns [matrix norm](https://en.wikipedia.org/wiki/Matrix_norm) of order `p`.
fn norm(&self, p: T) -> T;
/// Returns the average of the matrix columns.
fn column_mean(&self) -> Vec<T>;
/// Numerical negative, element-wise. Overrides original matrix.
fn negative_mut(&mut self);
/// Numerical negative, element-wise.
fn negative(&self) -> Self {
let mut result = self.clone();
result.negative_mut();
result
}
/// Returns new matrix of shape `nrows` by `ncols` with data copied from original matrix.
/// ```
/// use smartcore::linalg::naive::dense_matrix::*;
///
/// let a = DenseMatrix::from_array(1, 6, &[1., 2., 3., 4., 5., 6.]);
/// let expected = DenseMatrix::from_2d_array(&[
/// &[1., 2., 3.],
/// &[4., 5., 6.]
/// ]);
///
/// assert_eq!(a.reshape(2, 3), expected);
/// ```
fn reshape(&self, nrows: usize, ncols: usize) -> Self;
/// Copies content of `other` matrix.
fn copy_from(&mut self, other: &Self);
/// Calculate the absolute value element-wise. Overrides original matrix.
fn abs_mut(&mut self) -> &Self;
/// Calculate the absolute value element-wise.
fn abs(&self) -> Self {
let mut result = self.clone();
result.abs_mut();
result
}
/// Calculates sum of all elements of the matrix.
fn sum(&self) -> T;
fn max_diff(&self, other: &Self) -> T;
/// Calculates max of all elements of the matrix.
fn max(&self) -> T;
/// Calculates min of all elements of the matrix.
fn min(&self) -> T;
/// Calculates max(|a - b|) of two matrices
/// ```
/// use smartcore::linalg::naive::dense_matrix::*;
///
/// let a = DenseMatrix::from_array(2, 3, &[1., 2., 3., 4., -5., 6.]);
/// let b = DenseMatrix::from_array(2, 3, &[2., 3., 4., 1., 0., -12.]);
///
/// assert_eq!(a.max_diff(&b), 18.);
/// assert_eq!(b.max_diff(&b), 0.);
/// ```
fn max_diff(&self, other: &Self) -> T {
self.sub(other).abs().max()
}
/// Calculates [Softmax function](https://en.wikipedia.org/wiki/Softmax_function). Overrides the matrix with result.
fn softmax_mut(&mut self);
/// Raises elements of the matrix to the power of `p`
fn pow_mut(&mut self, p: T) -> &Self;
/// Returns new matrix with elements raised to the power of `p`
fn pow(&mut self, p: T) -> Self {
let mut result = self.clone();
result.pow_mut(p);
result
}
/// Returns the indices of the maximum values in each row.
/// ```
/// use smartcore::linalg::naive::dense_matrix::*;
/// let a = DenseMatrix::from_array(2, 3, &[1., 2., 3., -5., -6., -7.]);
///
/// assert_eq!(a.argmax(), vec![2, 0]);
/// ```
fn argmax(&self) -> Vec<usize>;
/// Returns vector with unique values from the matrix.
/// ```
/// use smartcore::linalg::naive::dense_matrix::*;
/// let a = DenseMatrix::from_array(3, 3, &[1., 2., 2., -2., -6., -7., 2., 3., 4.]);
///
/// assert_eq!(a.unique(), vec![-7., -6., -2., 1., 2., 3., 4.]);
/// ```
fn unique(&self) -> Vec<T>;
/// Calculates the covariance matrix
fn cov(&self) -> Self;
}
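Many of the element-wise and scalar methods above come in pairs: a `*_mut` variant that overwrites `self` and a cloning wrapper that leaves it intact. A short usage sketch, with the expected values taken from the `col_mean` and `norm` tests later in this diff:
```
use smartcore::linalg::naive::dense_matrix::*;

let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);

// cloning wrappers: `a` is untouched
let b = a.add_scalar(1.).mul_scalar(2.);
assert_eq!(b.get(0, 0), 4.);

// reductions
assert_eq!(a.column_mean(), vec![4., 5., 6.]);
assert_eq!(a.sum(), 45.);

// norms of a row vector
let v = DenseMatrix::row_vector_from_array(&[3., -2., 6.]);
assert_eq!(v.norm(1.), 11.);
assert_eq!(v.norm(2.), 7.);
```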
/// Generic matrix with additional mixins like various factorization methods.
pub trait Matrix<T: RealNumber>:
BaseMatrix<T>
+ SVDDecomposableMatrix<T>
@@ -201,7 +413,7 @@ pub trait Matrix<T: RealNumber>:
{
}
pub fn row_iter<F: RealNumber, M: BaseMatrix<F>>(m: &M) -> RowIter<F, M> {
pub(crate) fn row_iter<F: RealNumber, M: BaseMatrix<F>>(m: &M) -> RowIter<F, M> {
RowIter {
m: m,
pos: 0,
@@ -210,7 +422,7 @@ pub fn row_iter<F: RealNumber, M: BaseMatrix<F>>(m: &M) -> RowIter<F, M> {
}
}
pub struct RowIter<'a, T: RealNumber, M: BaseMatrix<T>> {
pub(crate) struct RowIter<'a, T: RealNumber, M: BaseMatrix<T>> {
m: &'a M,
pos: usize,
max_pos: usize,
+168 -78
@@ -34,6 +34,7 @@ impl<T: RealNumber> BaseVector<T> for Vec<T> {
}
}
/// Column-major, dense matrix. See [Simple Dense Matrix](../index.html).
#[derive(Debug, Clone)]
pub struct DenseMatrix<T: RealNumber> {
ncols: usize,
@@ -57,7 +58,9 @@ impl<T: RealNumber> fmt::Display for DenseMatrix<T> {
}
impl<T: RealNumber> DenseMatrix<T> {
fn new(nrows: usize, ncols: usize, values: Vec<T>) -> Self {
/// Create new instance of `DenseMatrix` without copying data.
/// `values` should be in column-major order.
pub fn new(nrows: usize, ncols: usize, values: Vec<T>) -> Self {
DenseMatrix {
ncols: ncols,
nrows: nrows,
@@ -65,11 +68,13 @@ impl<T: RealNumber> DenseMatrix<T> {
}
}
pub fn from_array(values: &[&[T]]) -> Self {
DenseMatrix::from_vec(&values.into_iter().map(|row| Vec::from(*row)).collect())
/// New instance of `DenseMatrix` from 2d array.
pub fn from_2d_array(values: &[&[T]]) -> Self {
DenseMatrix::from_2d_vec(&values.into_iter().map(|row| Vec::from(*row)).collect())
}
pub fn from_vec(values: &Vec<Vec<T>>) -> DenseMatrix<T> {
/// New instance of `DenseMatrix` from 2d vector.
pub fn from_2d_vec(values: &Vec<Vec<T>>) -> Self {
let nrows = values.len();
let ncols = values
.first()
@@ -88,11 +93,41 @@ impl<T: RealNumber> DenseMatrix<T> {
m
}
pub fn vector_from_array(values: &[T]) -> Self {
DenseMatrix::vector_from_vec(Vec::from(values))
/// Creates new matrix from an array.
/// * `nrows` - number of rows in new matrix.
/// * `ncols` - number of columns in new matrix.
/// * `values` - values to initialize the matrix.
pub fn from_array(nrows: usize, ncols: usize, values: &[T]) -> Self {
DenseMatrix::from_vec(nrows, ncols, &Vec::from(values))
}
pub fn vector_from_vec(values: Vec<T>) -> Self {
/// Creates new matrix from a vector.
/// * `nrows` - number of rows in new matrix.
/// * `ncols` - number of columns in new matrix.
/// * `values` - values to initialize the matrix.
pub fn from_vec(nrows: usize, ncols: usize, values: &Vec<T>) -> DenseMatrix<T> {
let mut m = DenseMatrix {
ncols: ncols,
nrows: nrows,
values: vec![T::zero(); ncols * nrows],
};
for row in 0..nrows {
for col in 0..ncols {
m.set(row, col, values[col + row * ncols]);
}
}
m
}
/// Creates new row vector (_1xN_ matrix) from an array.
/// * `values` - values to initialize the matrix.
pub fn row_vector_from_array(values: &[T]) -> Self {
DenseMatrix::row_vector_from_vec(Vec::from(values))
}
/// Creates new row vector (_1xN_ matrix) from a vector.
/// * `values` - values to initialize the matrix.
pub fn row_vector_from_vec(values: Vec<T>) -> Self {
DenseMatrix {
ncols: values.len(),
nrows: 1,
@@ -100,19 +135,21 @@ impl<T: RealNumber> DenseMatrix<T> {
}
}
pub fn div_mut(&mut self, b: Self) -> () {
if self.nrows != b.nrows || self.ncols != b.ncols {
panic!("Can't divide matrices of different sizes.");
/// Creates new column vector (_Nx1_ matrix) from an array.
/// * `values` - values to initialize the matrix.
pub fn column_vector_from_array(values: &[T]) -> Self {
DenseMatrix::column_vector_from_vec(Vec::from(values))
}
for i in 0..self.values.len() {
self.values[i] = self.values[i] / b.values[i];
/// Creates new column vector (_Nx1_ matrix) from a vector.
/// * `values` - values to initialize the matrix.
pub fn column_vector_from_vec(values: Vec<T>) -> Self {
DenseMatrix {
ncols: 1,
nrows: values.len(),
values: values,
}
}
pub fn get_raw_values(&self) -> &Vec<T> {
&self.values
}
}
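Note how the constructors differ in how they read their input: `new` adopts the backing vector verbatim in column-major order, while `from_array` and `from_vec` consume a flat slice row by row. A sketch mirroring the `from_array` test below:
```
use smartcore::linalg::naive::dense_matrix::*;

// row-major input: rows are [1, 2] and [3, 4]
let a = DenseMatrix::from_array(2, 2, &[1., 2., 3., 4.]);
// the same matrix from its column-major backing store: columns [1, 3] and [2, 4]
let b = DenseMatrix::new(2, 2, vec![1., 3., 2., 4.]);
assert_eq!(a, b);
assert_eq!(a.get_raw_values(), &vec![1., 3., 2., 4.]);
```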
impl<'de, T: RealNumber + fmt::Debug + Deserialize<'de>> Deserialize<'de> for DenseMatrix<T> {
@@ -261,7 +298,15 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
}
fn to_row_vector(self) -> Self::RowVector {
self.to_raw_vector()
let mut v = vec![T::zero(); self.nrows * self.ncols];
for r in 0..self.nrows {
for c in 0..self.ncols {
v[r * self.ncols + c] = self.get(r, c);
}
}
v
}
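The flattening loop moved into `to_row_vector` above emits elements in row-major order even though the backing store is column-major; a one-line sketch:
```
use smartcore::linalg::naive::dense_matrix::*;

let m = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
assert_eq!(m.to_row_vector(), vec![1., 2., 3., 4., 5., 6.]);
```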
fn get(&self, row: usize, col: usize) -> T {
@@ -312,23 +357,11 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
return matrix;
}
fn to_raw_vector(&self) -> Vec<T> {
let mut v = vec![T::zero(); self.nrows * self.ncols];
for r in 0..self.nrows {
for c in 0..self.ncols {
v[r * self.ncols + c] = self.get(r, c);
}
}
v
}
fn shape(&self) -> (usize, usize) {
(self.nrows, self.ncols)
}
fn h_stack(&self, other: &Self) -> Self {
fn v_stack(&self, other: &Self) -> Self {
if self.ncols != other.ncols {
panic!("Number of columns in both matrices should be equal");
}
@@ -345,7 +378,7 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
result
}
fn v_stack(&self, other: &Self) -> Self {
fn h_stack(&self, other: &Self) -> Self {
if self.nrows != other.nrows {
panic!("Number of rows in both matrices should be equal");
}
@@ -362,7 +395,7 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
result
}
fn dot(&self, other: &Self) -> Self {
fn matmul(&self, other: &Self) -> Self {
if self.ncols != other.nrows {
panic!("Number of rows of A should equal number of columns of B");
}
@@ -382,8 +415,8 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
result
}
fn vector_dot(&self, other: &Self) -> T {
if (self.nrows != 1 || self.nrows != 1) && (other.nrows != 1 || other.ncols != 1) {
fn dot(&self, other: &Self) -> T {
if self.nrows != 1 && other.nrows != 1 {
panic!("A and B should both be 1-dimentional vectors.");
}
if self.nrows * self.ncols != other.nrows * other.ncols {
@@ -666,6 +699,22 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
sum
}
fn max(&self) -> T {
let mut max = T::neg_infinity();
for i in 0..self.values.len() {
max = T::max(max, self.values[i]);
}
max
}
fn min(&self) -> T {
let mut min = T::infinity();
for i in 0..self.values.len() {
min = T::min(min, self.values[i]);
}
min
}
fn softmax_mut(&mut self) {
let max = self
.values
@@ -752,6 +801,32 @@ impl<T: RealNumber> BaseMatrix<T> for DenseMatrix<T> {
mod tests {
use super::*;
#[test]
fn from_array() {
let vec = [1., 2., 3., 4., 5., 6.];
assert_eq!(
DenseMatrix::from_array(3, 2, &vec),
DenseMatrix::new(3, 2, vec![1., 3., 5., 2., 4., 6.])
);
assert_eq!(
DenseMatrix::from_array(2, 3, &vec),
DenseMatrix::new(2, 3, vec![1., 4., 2., 5., 3., 6.])
);
}
#[test]
fn row_column_vec_from_array() {
let vec = vec![1., 2., 3., 4., 5., 6.];
assert_eq!(
DenseMatrix::row_vector_from_array(&vec),
DenseMatrix::new(1, 6, vec![1., 2., 3., 4., 5., 6.])
);
assert_eq!(
DenseMatrix::column_vector_from_array(&vec),
DenseMatrix::new(6, 1, vec![1., 2., 3., 4., 5., 6.])
);
}
#[test]
fn from_to_row_vec() {
let vec = vec![1., 2., 3.];
@@ -766,59 +841,66 @@ mod tests {
}
#[test]
fn h_stack() {
let a = DenseMatrix::from_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);
let b = DenseMatrix::from_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
let expected = DenseMatrix::from_array(&[
fn v_stack() {
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);
let b = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
let expected = DenseMatrix::from_2d_array(&[
&[1., 2., 3.],
&[4., 5., 6.],
&[7., 8., 9.],
&[1., 2., 3.],
&[4., 5., 6.],
]);
let result = a.h_stack(&b);
assert_eq!(result, expected);
}
#[test]
fn v_stack() {
let a = DenseMatrix::from_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);
let b = DenseMatrix::from_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]);
let expected = DenseMatrix::from_array(&[
&[1., 2., 3., 1., 2.],
&[4., 5., 6., 3., 4.],
&[7., 8., 9., 5., 6.],
]);
let result = a.v_stack(&b);
assert_eq!(result, expected);
}
#[test]
fn dot() {
let a = DenseMatrix::from_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
let b = DenseMatrix::from_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]);
let expected = DenseMatrix::from_array(&[&[22., 28.], &[49., 64.]]);
let result = a.dot(&b);
assert_eq!(result, expected);
}
#[test]
fn slice() {
let m = DenseMatrix::from_array(&[
fn h_stack() {
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);
let b = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]);
let expected = DenseMatrix::from_2d_array(&[
&[1., 2., 3., 1., 2.],
&[4., 5., 6., 3., 4.],
&[7., 8., 9., 5., 6.],
]);
let expected = DenseMatrix::from_array(&[&[2., 3.], &[5., 6.]]);
let result = a.h_stack(&b);
assert_eq!(result, expected);
}
#[test]
fn matmul() {
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
let b = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]);
let expected = DenseMatrix::from_2d_array(&[&[22., 28.], &[49., 64.]]);
let result = a.matmul(&b);
assert_eq!(result, expected);
}
#[test]
fn dot() {
let a = DenseMatrix::from_array(1, 3, &[1., 2., 3.]);
let b = DenseMatrix::from_array(1, 3, &[4., 5., 6.]);
assert_eq!(a.dot(&b), 32.);
}
#[test]
fn slice() {
let m = DenseMatrix::from_2d_array(&[
&[1., 2., 3., 1., 2.],
&[4., 5., 6., 3., 4.],
&[7., 8., 9., 5., 6.],
]);
let expected = DenseMatrix::from_2d_array(&[&[2., 3.], &[5., 6.]]);
let result = m.slice(0..2, 1..3);
assert_eq!(result, expected);
}
#[test]
fn approximate_eq() {
let m = DenseMatrix::from_array(&[&[2., 3.], &[5., 6.]]);
let m_eq = DenseMatrix::from_array(&[&[2.5, 3.0], &[5., 5.5]]);
let m_neq = DenseMatrix::from_array(&[&[3.0, 3.0], &[5., 6.5]]);
let m = DenseMatrix::from_2d_array(&[&[2., 3.], &[5., 6.]]);
let m_eq = DenseMatrix::from_2d_array(&[&[2.5, 3.0], &[5., 5.5]]);
let m_neq = DenseMatrix::from_2d_array(&[&[3.0, 3.0], &[5., 6.5]]);
assert!(m.approximate_eq(&m_eq, 0.5));
assert!(!m.approximate_eq(&m_neq, 0.5));
}
@@ -835,8 +917,8 @@ mod tests {
#[test]
fn transpose() {
let m = DenseMatrix::from_array(&[&[1.0, 3.0], &[2.0, 4.0]]);
let expected = DenseMatrix::from_array(&[&[1.0, 2.0], &[3.0, 4.0]]);
let m = DenseMatrix::from_2d_array(&[&[1.0, 3.0], &[2.0, 4.0]]);
let expected = DenseMatrix::from_2d_array(&[&[1.0, 2.0], &[3.0, 4.0]]);
let m_transposed = m.transpose();
for c in 0..2 {
for r in 0..2 {
@@ -847,7 +929,7 @@ mod tests {
#[test]
fn reshape() {
let m_orig = DenseMatrix::vector_from_array(&[1., 2., 3., 4., 5., 6.]);
let m_orig = DenseMatrix::row_vector_from_array(&[1., 2., 3., 4., 5., 6.]);
let m_2_by_3 = m_orig.reshape(2, 3);
let m_result = m_2_by_3.reshape(1, 6);
assert_eq!(m_2_by_3.shape(), (2, 3));
@@ -858,7 +940,7 @@ mod tests {
#[test]
fn norm() {
let v = DenseMatrix::vector_from_array(&[3., -2., 6.]);
let v = DenseMatrix::row_vector_from_array(&[3., -2., 6.]);
assert_eq!(v.norm(1.), 11.);
assert_eq!(v.norm(2.), 7.);
assert_eq!(v.norm(std::f64::INFINITY), 6.);
@@ -867,7 +949,7 @@ mod tests {
#[test]
fn softmax_mut() {
let mut prob: DenseMatrix<f64> = DenseMatrix::vector_from_array(&[1., 2., 3.]);
let mut prob: DenseMatrix<f64> = DenseMatrix::row_vector_from_array(&[1., 2., 3.]);
prob.softmax_mut();
assert!((prob.get(0, 0) - 0.09).abs() < 0.01);
assert!((prob.get(0, 1) - 0.24).abs() < 0.01);
@@ -876,21 +958,29 @@ mod tests {
#[test]
fn col_mean() {
let a = DenseMatrix::from_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);
let res = a.column_mean();
assert_eq!(res, vec![4., 5., 6.]);
}
#[test]
fn min_max_sum() {
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
assert_eq!(21., a.sum());
assert_eq!(1., a.min());
assert_eq!(6., a.max());
}
#[test]
fn eye() {
let a = DenseMatrix::from_array(&[&[1., 0., 0.], &[0., 1., 0.], &[0., 0., 1.]]);
let a = DenseMatrix::from_2d_array(&[&[1., 0., 0.], &[0., 1., 0.], &[0., 0., 1.]]);
let res = DenseMatrix::eye(3);
assert_eq!(res, a);
}
#[test]
fn to_from_json() {
let a = DenseMatrix::from_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let deserialized_a: DenseMatrix<f64> =
serde_json::from_str(&serde_json::to_string(&a).unwrap()).unwrap();
assert_eq!(a, deserialized_a);
@@ -898,7 +988,7 @@ mod tests {
#[test]
fn to_from_bincode() {
let a = DenseMatrix::from_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let deserialized_a: DenseMatrix<f64> =
bincode::deserialize(&bincode::serialize(&a).unwrap()).unwrap();
assert_eq!(a, deserialized_a);
@@ -906,7 +996,7 @@ mod tests {
#[test]
fn to_string() {
let a = DenseMatrix::from_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
assert_eq!(
format!("{}", a),
"[[0.9, 0.4, 0.7], [0.4, 0.5, 0.3], [0.7, 0.3, 0.8]]"
@@ -915,14 +1005,14 @@ mod tests {
#[test]
fn cov() {
let a = DenseMatrix::from_array(&[
let a = DenseMatrix::from_2d_array(&[
&[64.0, 580.0, 29.0],
&[66.0, 570.0, 33.0],
&[68.0, 590.0, 37.0],
&[69.0, 660.0, 46.0],
&[73.0, 600.0, 55.0],
]);
let expected = DenseMatrix::from_array(&[
let expected = DenseMatrix::from_2d_array(&[
&[11.5, 50.0, 34.75],
&[50.0, 1250.0, 205.0],
&[34.75, 205.0, 110.0],
+25
@@ -1 +1,26 @@
//! # Simple Dense Matrix
//!
//! Implements [`BaseMatrix`](../../trait.BaseMatrix.html) and [`BaseVector`](../../trait.BaseVector.html) for [Vec](https://doc.rust-lang.org/std/vec/struct.Vec.html).
//! Data is stored in dense format with [column-major order](https://en.wikipedia.org/wiki/Row-_and_column-major_order).
//!
//! Example:
//!
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//!
//! // 3x3 matrix
//! let A = DenseMatrix::from_2d_array(&[
//! &[0.9000, 0.4000, 0.7000],
//! &[0.4000, 0.5000, 0.3000],
//! &[0.7000, 0.3000, 0.8000],
//! ]);
//!
//! // row vector
//! let B = DenseMatrix::from_array(1, 3, &[0.9, 0.4, 0.7]);
//!
//! // column vector
//! let C = DenseMatrix::from_vec(3, 1, &vec!(0.9, 0.4, 0.7));
//! ```
/// Add this module to use Dense Matrix
pub mod dense_matrix;
+44 -37
@@ -69,17 +69,6 @@ impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Su
BaseMatrix::fill(nrows, ncols, T::one())
}
fn to_raw_vector(&self) -> Vec<T> {
let (nrows, ncols) = self.shape();
let mut result = vec![T::zero(); nrows * ncols];
for (i, row) in self.row_iter().enumerate() {
for (j, v) in row.iter().enumerate() {
result[i * ncols + j] = *v;
}
}
result
}
fn fill(nrows: usize, ncols: usize, value: T) -> Self {
let mut m = DMatrix::zeros(nrows, ncols);
m.fill(value);
@@ -90,7 +79,7 @@ impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Su
self.shape()
}
fn v_stack(&self, other: &Self) -> Self {
fn h_stack(&self, other: &Self) -> Self {
let mut columns = Vec::new();
for r in 0..self.ncols() {
columns.push(self.column(r));
@@ -101,7 +90,7 @@ impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Su
Matrix::from_columns(&columns)
}
fn h_stack(&self, other: &Self) -> Self {
fn v_stack(&self, other: &Self) -> Self {
let mut rows = Vec::new();
for r in 0..self.nrows() {
rows.push(self.row(r));
@@ -112,11 +101,11 @@ impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Su
Matrix::from_rows(&rows)
}
fn dot(&self, other: &Self) -> Self {
fn matmul(&self, other: &Self) -> Self {
self * other
}
fn vector_dot(&self, other: &Self) -> T {
fn dot(&self, other: &Self) -> T {
self.dot(other)
}
@@ -250,7 +239,14 @@ impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Su
}
fn reshape(&self, nrows: usize, ncols: usize) -> Self {
DMatrix::from_row_slice(nrows, ncols, &self.to_raw_vector())
let (c_nrows, c_ncols) = self.shape();
let mut raw_v = vec![T::zero(); c_nrows * c_ncols];
for (i, row) in self.row_iter().enumerate() {
for (j, v) in row.iter().enumerate() {
raw_v[i * c_ncols + j] = *v;
}
}
DMatrix::from_row_slice(nrows, ncols, &raw_v)
}
fn copy_from(&mut self, other: &Self) {
@@ -272,6 +268,22 @@ impl<T: RealNumber + Scalar + AddAssign + SubAssign + MulAssign + DivAssign + Su
sum
}
fn max(&self) -> T {
let mut m = T::neg_infinity();
for v in self.iter() {
m = m.max(*v);
}
m
}
fn min(&self) -> T {
let mut m = T::infinity();
for v in self.iter() {
m = m.min(*v);
}
m
}
fn max_diff(&self, other: &Self) -> T {
let mut max_diff = T::zero();
for r in 0..self.nrows() {
@@ -488,13 +500,6 @@ mod tests {
assert_eq!(m.get_col_as_vec(1), vec!(2., 5., 8.));
}
#[test]
fn to_raw_vector() {
let m = DMatrix::from_row_slice(2, 3, &[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
assert_eq!(m.to_raw_vector(), vec!(1., 2., 3., 4., 5., 6.));
}
#[test]
fn element_add_sub_mul_div() {
let mut m = DMatrix::from_row_slice(2, 2, &[1.0, 2.0, 3.0, 4.0]);
@@ -518,25 +523,25 @@ mod tests {
let expected =
DMatrix::from_row_slice(3, 4, &[1., 2., 3., 7., 4., 5., 6., 8., 9., 10., 11., 12.]);
let result = m1.v_stack(&m2).h_stack(&m3);
let result = m1.h_stack(&m2).v_stack(&m3);
assert_eq!(result, expected);
}
#[test]
fn matmul() {
let a = DMatrix::from_row_slice(2, 3, &[1., 2., 3., 4., 5., 6.]);
let b = DMatrix::from_row_slice(3, 2, &[1., 2., 3., 4., 5., 6.]);
let expected = DMatrix::from_row_slice(2, 2, &[22., 28., 49., 64.]);
let result = BaseMatrix::matmul(&a, &b);
assert_eq!(result, expected);
}
#[test]
fn dot() {
let a = DMatrix::from_row_slice(2, 3, &[1., 2., 3., 4., 5., 6.]);
let b = DMatrix::from_row_slice(3, 2, &[1., 2., 3., 4., 5., 6.]);
let expected = DMatrix::from_row_slice(2, 2, &[22., 28., 49., 64.]);
let result = BaseMatrix::dot(&a, &b);
assert_eq!(result, expected);
}
#[test]
fn vector_dot() {
let a = DMatrix::from_row_slice(1, 3, &[1., 2., 3.]);
let b = DMatrix::from_row_slice(1, 3, &[1., 2., 3.]);
assert_eq!(14., a.vector_dot(&b));
assert_eq!(14., a.dot(&b));
}
#[test]
@@ -632,9 +637,11 @@ mod tests {
}
#[test]
fn sum() {
let a = DMatrix::from_row_slice(1, 3, &[1., 2., 3.]);
assert_eq!(a.sum(), 6.);
fn min_max_sum() {
let a = DMatrix::from_row_slice(2, 3, &[1., 2., 3., 4., 5., 6.]);
assert_eq!(21., a.sum());
assert_eq!(1., a.min());
assert_eq!(6., a.max());
}
#[test]
+22 -24
@@ -76,10 +76,6 @@ impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssi
Array::ones((nrows, ncols))
}
fn to_raw_vector(&self) -> Vec<T> {
self.to_owned().iter().map(|v| *v).collect()
}
fn fill(nrows: usize, ncols: usize, value: T) -> Self {
Array::from_elem((nrows, ncols), value)
}
@@ -88,19 +84,19 @@ impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssi
(self.nrows(), self.ncols())
}
fn v_stack(&self, other: &Self) -> Self {
fn h_stack(&self, other: &Self) -> Self {
stack(Axis(1), &[self.view(), other.view()]).unwrap()
}
fn h_stack(&self, other: &Self) -> Self {
fn v_stack(&self, other: &Self) -> Self {
stack(Axis(0), &[self.view(), other.view()]).unwrap()
}
fn dot(&self, other: &Self) -> Self {
fn matmul(&self, other: &Self) -> Self {
self.dot(other)
}
fn vector_dot(&self, other: &Self) -> T {
fn dot(&self, other: &Self) -> T {
self.dot(&other.view().reversed_axes())[[0, 0]]
}
@@ -238,6 +234,14 @@ impl<T: RealNumber + ScalarOperand + AddAssign + SubAssign + MulAssign + DivAssi
self.sum()
}
fn max(&self) -> T {
self.iter().fold(T::neg_infinity(), |a, b| a.max(*b))
}
fn min(&self) -> T {
self.iter().fold(T::infinity(), |a, b| a.min(*b))
}
fn max_diff(&self, other: &Self) -> T {
let mut max_diff = T::zero();
for r in 0..self.nrows() {
@@ -454,15 +458,7 @@ mod tests {
let expected = arr2(&[[1., 2., 3., 7.], [4., 5., 6., 8.], [9., 10., 11., 12.]]);
let result = a1.v_stack(&a2).h_stack(&a3);
assert_eq!(result, expected);
}
#[test]
fn to_raw_vector() {
let result = arr2(&[[1., 2., 3.], [4., 5., 6.]]).to_raw_vector();
let expected = vec![1., 2., 3., 4., 5., 6.];
let result = a1.h_stack(&a2).v_stack(&a3);
assert_eq!(result, expected);
}
@@ -479,19 +475,19 @@ mod tests {
}
#[test]
fn dot() {
fn matmul() {
let a = arr2(&[[1., 2., 3.], [4., 5., 6.]]);
let b = arr2(&[[1., 2.], [3., 4.], [5., 6.]]);
let expected = arr2(&[[22., 28.], [49., 64.]]);
let result = BaseMatrix::dot(&a, &b);
let result = BaseMatrix::matmul(&a, &b);
assert_eq!(result, expected);
}
#[test]
fn vector_dot() {
fn dot() {
let a = arr2(&[[1., 2., 3.]]);
let b = arr2(&[[1., 2., 3.]]);
assert_eq!(14., a.vector_dot(&b));
assert_eq!(14., BaseMatrix::dot(&a, &b));
}
#[test]
@@ -559,9 +555,11 @@ mod tests {
}
#[test]
fn sum() {
let src = arr2(&[[1., 2., 3.]]);
assert_eq!(src.sum(), 6.);
fn min_max_sum() {
let a = arr2(&[[1., 2., 3.], [4., 5., 6.]]);
assert_eq!(21., a.sum());
assert_eq!(1., a.min());
assert_eq!(6., a.max());
}
#[test]
+6 -6
@@ -166,13 +166,13 @@ mod tests {
#[test]
fn decompose() {
let a = DenseMatrix::from_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let q = DenseMatrix::from_array(&[
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let q = DenseMatrix::from_2d_array(&[
&[-0.7448, 0.2436, 0.6212],
&[-0.331, -0.9432, -0.027],
&[-0.5793, 0.2257, -0.7832],
]);
let r = DenseMatrix::from_array(&[
let r = DenseMatrix::from_2d_array(&[
&[-1.2083, -0.6373, -1.0842],
&[0.0, -0.3064, 0.0682],
&[0.0, 0.0, -0.1999],
@@ -184,9 +184,9 @@ mod tests {
#[test]
fn qr_solve_mut() {
let a = DenseMatrix::from_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let b = DenseMatrix::from_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]);
let expected_w = DenseMatrix::from_array(&[
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let b = DenseMatrix::from_2d_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]);
let expected_w = DenseMatrix::from_2d_array(&[
&[-0.2027027, -1.2837838],
&[0.8783784, 2.2297297],
&[0.4729730, 0.6621622],
+10 -9
@@ -428,7 +428,7 @@ mod tests {
#[test]
fn decompose_symmetric() {
let A = DenseMatrix::from_array(&[
let A = DenseMatrix::from_2d_array(&[
&[0.9000, 0.4000, 0.7000],
&[0.4000, 0.5000, 0.3000],
&[0.7000, 0.3000, 0.8000],
@@ -436,13 +436,13 @@ mod tests {
let s: Vec<f64> = vec![1.7498382, 0.3165784, 0.1335834];
let U = DenseMatrix::from_array(&[
let U = DenseMatrix::from_2d_array(&[
&[0.6881997, -0.07121225, 0.7220180],
&[0.3700456, 0.89044952, -0.2648886],
&[0.6240573, -0.44947578, -0.639158],
]);
let V = DenseMatrix::from_array(&[
let V = DenseMatrix::from_2d_array(&[
&[0.6881997, -0.07121225, 0.7220180],
&[0.3700456, 0.89044952, -0.2648886],
&[0.6240573, -0.44947578, -0.6391588],
@@ -459,7 +459,7 @@ mod tests {
#[test]
fn decompose_asymmetric() {
let A = DenseMatrix::from_array(&[
let A = DenseMatrix::from_2d_array(&[
&[
1.19720880,
-1.8391378,
@@ -523,7 +523,7 @@ mod tests {
3.8589375, 3.4396766, 2.6487176, 2.2317399, 1.5165054, 0.8109055, 0.2706515,
];
let U = DenseMatrix::from_array(&[
let U = DenseMatrix::from_2d_array(&[
&[
-0.3082776,
0.77676231,
@@ -589,7 +589,7 @@ mod tests {
],
]);
let V = DenseMatrix::from_array(&[
let V = DenseMatrix::from_2d_array(&[
&[
-0.2122609,
-0.54650056,
@@ -660,9 +660,10 @@ mod tests {
#[test]
fn solve() {
let a = DenseMatrix::from_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let b = DenseMatrix::from_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]);
let expected_w = DenseMatrix::from_array(&[&[-0.20, -1.28], &[0.87, 2.22], &[0.47, 0.66]]);
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let b = DenseMatrix::from_2d_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]);
let expected_w =
DenseMatrix::from_2d_array(&[&[-0.20, -1.28], &[0.87, 2.22], &[0.47, 0.66]]);
let w = a.svd_solve_mut(b);
assert!(w.approximate_eq(&expected_w, 1e-2));
}
+5 -5
@@ -23,7 +23,7 @@
//! use smartcore::linear::linear_regression::*;
//!
//! // Longley dataset (https://www.statsmodels.org/stable/datasets/generated/longley.html)
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
//! &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
//! &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
@@ -125,7 +125,7 @@ impl<T: RealNumber, M: Matrix<T>> LinearRegression<T, M> {
panic!("Number of rows of X doesn't match number of rows of Y");
}
let a = x.v_stack(&M::ones(x_nrows, 1));
let a = x.h_stack(&M::ones(x_nrows, 1));
let w = match parameters.solver {
LinearRegressionSolverName::QR => a.qr_solve_mut(b),
@@ -145,7 +145,7 @@ impl<T: RealNumber, M: Matrix<T>> LinearRegression<T, M> {
/// * `x` - _KxM_ data where _K_ is number of observations and _M_ is number of features.
pub fn predict(&self, x: &M) -> M::RowVector {
let (nrows, _) = x.shape();
let mut y_hat = x.dot(&self.coefficients);
let mut y_hat = x.matmul(&self.coefficients);
y_hat.add_mut(&M::fill(nrows, 1, self.intercept));
y_hat.transpose().to_row_vector()
}
@@ -168,7 +168,7 @@ mod tests {
#[test]
fn ols_fit_predict() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
&[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
&[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
@@ -215,7 +215,7 @@ mod tests {
#[test]
fn serde() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
&[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
&[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
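The Longley fixtures above are truncated here; for a self-contained picture, a sketch on a hypothetical toy dataset (assuming `Default::default()` for the parameters, the same pattern the KNN tests in this diff use):
```
use smartcore::linalg::naive::dense_matrix::*;
use smartcore::linear::linear_regression::*;

// hypothetical data generated from y = 1 + x1 + 2 * x2
let x = DenseMatrix::from_2d_array(&[&[1., 1.], &[1., 2.], &[2., 2.], &[2., 3.]]);
let y = vec![4., 6., 7., 9.];

let lr = LinearRegression::fit(&x, &y, Default::default());
let y_hat = lr.predict(&x);
// an exact linear relationship should be recovered up to numerical error
assert!(y_hat.iter().zip(y.iter()).all(|(a, b)| (a - b).abs() < 1e-6));
```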
+18 -16
@@ -14,7 +14,7 @@
//! use smartcore::linear::logistic_regression::*;
//!
//! //Iris data
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[5.1, 3.5, 1.4, 0.2],
//! &[4.9, 3.0, 1.4, 0.2],
//! &[4.7, 3.2, 1.3, 0.2],
@@ -277,8 +277,10 @@ impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
let mut result = M::zeros(1, n);
if self.num_classes == 2 {
let (nrows, _) = x.shape();
let x_and_bias = x.v_stack(&M::ones(nrows, 1));
let y_hat: Vec<T> = x_and_bias.dot(&self.weights.transpose()).to_raw_vector();
let x_and_bias = x.h_stack(&M::ones(nrows, 1));
let y_hat: Vec<T> = x_and_bias
.matmul(&self.weights.transpose())
.get_col_as_vec(0);
for i in 0..n {
result.set(
0,
@@ -288,8 +290,8 @@ impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
}
} else {
let (nrows, _) = x.shape();
let x_and_bias = x.v_stack(&M::ones(nrows, 1));
let y_hat = x_and_bias.dot(&self.weights.transpose());
let x_and_bias = x.h_stack(&M::ones(nrows, 1));
let y_hat = x_and_bias.matmul(&self.weights.transpose());
let class_idxs = y_hat.argmax();
for i in 0..n {
result.set(0, i, self.classes[class_idxs[i]]);
@@ -332,7 +334,7 @@ mod tests {
#[test]
fn multiclass_objective_f() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[1., -5.],
&[2., 5.],
&[3., -2.],
@@ -363,16 +365,16 @@ mod tests {
objective.df(
&mut g,
&DenseMatrix::vector_from_array(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]),
&DenseMatrix::row_vector_from_array(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]),
);
objective.df(
&mut g,
&DenseMatrix::vector_from_array(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]),
&DenseMatrix::row_vector_from_array(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]),
);
assert!((g.get(0, 0) + 33.000068218163484).abs() < std::f64::EPSILON);
let f = objective.f(&DenseMatrix::vector_from_array(&[
let f = objective.f(&DenseMatrix::row_vector_from_array(&[
1., 2., 3., 4., 5., 6., 7., 8., 9.,
]));
@@ -381,7 +383,7 @@ mod tests {
#[test]
fn binary_objective_f() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[1., -5.],
&[2., 5.],
&[3., -2.],
@@ -409,21 +411,21 @@ mod tests {
let mut g: DenseMatrix<f64> = DenseMatrix::zeros(1, 3);
objective.df(&mut g, &DenseMatrix::vector_from_array(&[1., 2., 3.]));
objective.df(&mut g, &DenseMatrix::vector_from_array(&[1., 2., 3.]));
objective.df(&mut g, &DenseMatrix::row_vector_from_array(&[1., 2., 3.]));
objective.df(&mut g, &DenseMatrix::row_vector_from_array(&[1., 2., 3.]));
assert!((g.get(0, 0) - 26.051064349381285).abs() < std::f64::EPSILON);
assert!((g.get(0, 1) - 10.239000702928523).abs() < std::f64::EPSILON);
assert!((g.get(0, 2) - 3.869294270156324).abs() < std::f64::EPSILON);
let f = objective.f(&DenseMatrix::vector_from_array(&[1., 2., 3.]));
let f = objective.f(&DenseMatrix::row_vector_from_array(&[1., 2., 3.]));
assert!((f - 59.76994756647412).abs() < std::f64::EPSILON);
}
#[test]
fn lr_fit_predict() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[1., -5.],
&[2., 5.],
&[3., -2.],
@@ -460,7 +462,7 @@ mod tests {
#[test]
fn serde() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[1., -5.],
&[2., 5.],
&[3., -2.],
@@ -489,7 +491,7 @@ mod tests {
#[test]
fn lr_fit_predict_iris() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
+2 -2
@@ -18,7 +18,7 @@
//! use smartcore::math::distance::Distance;
//! use smartcore::math::distance::mahalanobis::Mahalanobis;
//!
//! let data = DenseMatrix::from_array(&[
//! let data = DenseMatrix::from_2d_array(&[
//! &[64., 580., 29.],
//! &[66., 570., 33.],
//! &[68., 590., 37.],
@@ -135,7 +135,7 @@ mod tests {
#[test]
fn mahalanobis_distance() {
let data = DenseMatrix::from_array(&[
let data = DenseMatrix::from_2d_array(&[
&[64., 580., 29.],
&[66., 570., 33.],
&[68., 590., 37.],
+1 -1
@@ -15,7 +15,7 @@
//! use smartcore::linear::logistic_regression::LogisticRegression;
//! use smartcore::metrics::*;
//!
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[5.1, 3.5, 1.4, 0.2],
//! &[4.9, 3.0, 1.4, 0.2],
//! &[4.7, 3.2, 1.3, 0.2],
+7 -5
@@ -17,7 +17,7 @@
//! use smartcore::math::distance::*;
//!
//! //your explanatory variables. Each row is a training sample with 2 numerical features
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[1., 2.],
//! &[3., 4.],
//! &[5., 6.],
@@ -188,7 +188,8 @@ mod tests {
#[test]
fn knn_fit_predict() {
let x = DenseMatrix::from_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let y = vec![2., 2., 2., 3., 3.];
let knn = KNNClassifier::fit(&x, &y, Distances::euclidian(), Default::default());
let y_hat = knn.predict(&x);
@@ -198,7 +199,7 @@ mod tests {
#[test]
fn knn_fit_predict_weighted() {
let x = DenseMatrix::from_array(&[&[1.], &[2.], &[3.], &[4.], &[5.]]);
let x = DenseMatrix::from_2d_array(&[&[1.], &[2.], &[3.], &[4.], &[5.]]);
let y = vec![2., 2., 2., 3., 3.];
let knn = KNNClassifier::fit(
&x,
@@ -210,13 +211,14 @@ mod tests {
weight: KNNWeightFunction::Distance,
},
);
let y_hat = knn.predict(&DenseMatrix::from_array(&[&[4.1]]));
let y_hat = knn.predict(&DenseMatrix::from_2d_array(&[&[4.1]]));
assert_eq!(vec![3.0], y_hat);
}
#[test]
fn serde() {
let x = DenseMatrix::from_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let y = vec![2., 2., 2., 3., 3.];
let knn = KNNClassifier::fit(&x, &y, Distances::euclidian(), Default::default());
+7 -4
@@ -19,7 +19,7 @@
//! use smartcore::math::distance::*;
//!
//! //your explanatory variables. Each row is a training sample with 2 numerical features
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[1., 1.],
//! &[2., 2.],
//! &[3., 3.],
@@ -166,7 +166,8 @@ mod tests {
#[test]
fn knn_fit_predict_weighted() {
let x = DenseMatrix::from_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let y: Vec<f64> = vec![1., 2., 3., 4., 5.];
let y_exp = vec![1., 2., 3., 4., 5.];
let knn = KNNRegressor::fit(
@@ -188,7 +189,8 @@ mod tests {
#[test]
fn knn_fit_predict_uniform() {
let x = DenseMatrix::from_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let y: Vec<f64> = vec![1., 2., 3., 4., 5.];
let y_exp = vec![2., 2., 3., 4., 4.];
let knn = KNNRegressor::fit(&x, &y, Distances::euclidian(), Default::default());
@@ -201,7 +203,8 @@ mod tests {
#[test]
fn serde() {
let x = DenseMatrix::from_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
let y = vec![1., 2., 3., 4., 5.];
let knn = KNNRegressor::fit(&x, &y, Distances::euclidian(), Default::default());
@@ -58,10 +58,10 @@ impl<T: RealNumber> FirstOrderOptimizer<T> for GradientDescent<T> {
let mut dg = gvec.clone();
dx.mul_scalar_mut(alpha);
df(&mut dg, &dx.add_mut(&x)); //df(x) = df(x .+ gvec .* alpha)
gvec.vector_dot(&dg)
gvec.dot(&dg)
};
let df0 = step.vector_dot(&gvec);
let df0 = step.dot(&gvec);
let ls_r = ls.search(&f_alpha, &df_alpha, alpha, fx, df0);
alpha = ls_r.alpha;
@@ -90,7 +90,7 @@ mod tests {
#[test]
fn gradient_descent() {
let x0 = DenseMatrix::vector_from_array(&[-1., 1.]);
let x0 = DenseMatrix::row_vector_from_array(&[-1., 1.]);
let f = |x: &DenseMatrix<f64>| {
(1.0 - x.get(0, 0)).powf(2.) + 100.0 * (x.get(0, 1) - x.get(0, 0).powf(2.)).powf(2.)
};
+7 -7
@@ -46,7 +46,7 @@ impl<T: RealNumber> LBFGS<T> {
let i = index.rem_euclid(self.m);
let dgi = &state.dg_history[i];
let dxi = &state.dx_history[i];
state.twoloop_alpha[i] = state.rho[i] * dxi.vector_dot(&state.twoloop_q);
state.twoloop_alpha[i] = state.rho[i] * dxi.dot(&state.twoloop_q);
state
.twoloop_q
.sub_mut(&dgi.mul_scalar(state.twoloop_alpha[i]));
@@ -56,7 +56,7 @@ impl<T: RealNumber> LBFGS<T> {
let i = (upper - 1).rem_euclid(self.m);
let dxi = &state.dx_history[i];
let dgi = &state.dg_history[i];
let scaling = dxi.vector_dot(dgi) / dgi.abs().pow_mut(T::two()).sum();
let scaling = dxi.dot(dgi) / dgi.abs().pow_mut(T::two()).sum();
state.s.copy_from(&state.twoloop_q.mul_scalar(scaling));
} else {
state.s.copy_from(&state.twoloop_q);
@@ -66,7 +66,7 @@ impl<T: RealNumber> LBFGS<T> {
let i = index.rem_euclid(self.m);
let dgi = &state.dg_history[i];
let dxi = &state.dx_history[i];
let beta = state.rho[i] * dgi.vector_dot(&state.s);
let beta = state.rho[i] * dgi.dot(&state.s);
state
.s
.add_mut(&dxi.mul_scalar(state.twoloop_alpha[i] - beta));
@@ -111,7 +111,7 @@ impl<T: RealNumber> LBFGS<T> {
state.x_f_prev = f(&state.x);
state.x_prev.copy_from(&state.x);
let df0 = state.x_df.vector_dot(&state.s);
let df0 = state.x_df.dot(&state.s);
let f_alpha = |alpha: T| -> T {
let mut dx = state.s.clone();
@@ -124,7 +124,7 @@ impl<T: RealNumber> LBFGS<T> {
let mut dg = state.x_df.clone();
dx.mul_scalar_mut(alpha);
df(&mut dg, &dx.add_mut(&state.x)); //df(x) = df(x .+ gvec .* alpha)
state.x_df.vector_dot(&dg)
state.x_df.dot(&dg)
};
let ls_r = ls.search(&f_alpha, &df_alpha, T::one(), state.x_f_prev, df0);
@@ -164,7 +164,7 @@ impl<T: RealNumber> LBFGS<T> {
fn update_hessian<'a, X: Matrix<T>>(&self, _: &'a DF<X>, state: &mut LBFGSState<T, X>) {
state.dg = state.x_df.sub(&state.x_df_prev);
let rho_iteration = T::one() / state.dx.vector_dot(&state.dg);
let rho_iteration = T::one() / state.dx.dot(&state.dg);
if !rho_iteration.is_infinite() {
let idx = state.iteration.rem_euclid(self.m);
state.dx_history[idx].copy_from(&state.dx);
@@ -240,7 +240,7 @@ mod tests {
#[test]
fn lbfgs() {
let x0 = DenseMatrix::vector_from_array(&[0., 0.]);
let x0 = DenseMatrix::row_vector_from_array(&[0., 0.]);
let f = |x: &DenseMatrix<f64>| {
(1.0 - x.get(0, 0)).powf(2.) + 100.0 * (x.get(0, 1) - x.get(0, 0).powf(2.)).powf(2.)
};
+4 -4
@@ -25,7 +25,7 @@
//! use smartcore::tree::decision_tree_classifier::*;
//!
//! // Iris dataset
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[5.1, 3.5, 1.4, 0.2],
//! &[4.9, 3.0, 1.4, 0.2],
//! &[4.7, 3.2, 1.3, 0.2],
@@ -604,7 +604,7 @@ mod tests {
#[test]
fn fit_predict_iris() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
@@ -653,7 +653,7 @@ mod tests {
#[test]
fn fit_predict_baloons() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[1., 1., 1., 0.],
&[1., 1., 1., 0.],
&[1., 1., 1., 1.],
@@ -687,7 +687,7 @@ mod tests {
#[test]
fn serde() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[1., 1., 1., 0.],
&[1., 1., 1., 0.],
&[1., 1., 1., 1.],
+3 -3
@@ -22,7 +22,7 @@
//! use smartcore::tree::decision_tree_regressor::*;
//!
//! // Longley dataset (https://www.statsmodels.org/stable/datasets/generated/longley.html)
//! let x = DenseMatrix::from_array(&[
//! let x = DenseMatrix::from_2d_array(&[
//! &[234.289, 235.6, 159., 107.608, 1947., 60.323],
//! &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
//! &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
@@ -470,7 +470,7 @@ mod tests {
#[test]
fn fit_longley() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[234.289, 235.6, 159., 107.608, 1947., 60.323],
&[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
&[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
@@ -540,7 +540,7 @@ mod tests {
#[test]
fn serde() {
let x = DenseMatrix::from_array(&[
let x = DenseMatrix::from_2d_array(&[
&[234.289, 235.6, 159., 107.608, 1947., 60.323],
&[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
&[258.054, 368.2, 161.6, 109.773, 1949., 60.171],