29 Commits

Author SHA1 Message Date
Lorenzo
cfc953b25c Merge branch 'development' into prdct-prb 2024-04-08 08:56:24 +01:00
Lorenzo
239c00428f Patch to version 0.4.0 (#257)
* uncomment test

* Add random test for logistic regression

* linting

* Bump version

* Add test for logistic regression

* linting

* initial commit

* final

* final-clean

* Bump to 0.4.0

* Fix linter

* cleanup

* Update CHANGELOG with breaking changes

* Update CHANGELOG date

* Add functional methods to DenseMatrix implementation

* linting

* add type declaration in test

* Fix Wasm tests failing

* linting

* fix tests

* linting

* Add type annotations on BBDTree constructor

* fix clippy

* fix clippy

* fix tests

* bump version

* run fmt. fix changelog

---------

Co-authored-by: Edmund Cape <edmund@Edmunds-MacBook-Pro.local>
2024-03-04 08:51:27 -05:00
morenol
80a93c1a0e chore: fix clippy (#276)
Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
2024-02-25 00:17:30 -05:00
Tushushu
4eadd16ce4 Implement the feature importance for Decision Tree Classifier (#275)
* store impurity in the node

* add number of features

* add a TODO

* draft feature importance

* feat

* n_samples of node

* compute_feature_importances

* unit tests

* always calculate impurity

* fix bug

* fix linter
2024-02-24 23:37:30 -05:00
Frédéric Meyer
886b5631b7 In Naive Bayes, avoid using Option::unwrap and so avoid panicking from NaN values (#274) 2024-01-10 14:59:10 -04:00
dependabot[bot]
9c07925d8a Update itertools requirement from 0.11.0 to 0.12.0 (#271)
Updates the requirements on [itertools](https://github.com/rust-itertools/itertools) to permit the latest version.
- [Changelog](https://github.com/rust-itertools/itertools/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-itertools/itertools/compare/v0.11.0...v0.12.0)

---
updated-dependencies:
- dependency-name: itertools
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-20 22:00:34 -04:00
morenol
6f22bbd150 chore: update clippy lints (#272)
* chore: fix clippy lints
---------

Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
2023-11-20 21:54:09 -04:00
dependabot[bot]
dbdc2b2a77 Update itertools requirement from 0.10.5 to 0.11.0 (#266)
Updates the requirements on [itertools](https://github.com/rust-itertools/itertools) to permit the latest version.
- [Changelog](https://github.com/rust-itertools/itertools/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-itertools/itertools/compare/v0.10.5...v0.11.0)

---
updated-dependencies:
- dependency-name: itertools
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-06-22 17:56:42 +01:00
Lorenzo
2d7c055154 Bump version 2023-05-01 13:20:17 +01:00
Ruben De Smet
545ed6ce2b Remove some allocations (#262)
* Remove some allocations

* Remove some more allocations
2023-04-26 21:46:26 +08:00
morenol
8939ed93b9 chore: fix clippy warnings from Rust release 1.69 (#263)
* chore: fix clippy warnings from Rust release 1.69

* chore: run `cargo fmt`

* refactor: remove unused type parameter

---------

Co-authored-by: Luis Moreno <morenol@users.noreply.github.com>
2023-04-26 01:35:58 +09:00
Lorenzo
9cd7348403 Update CONTRIBUTING.md 2023-04-10 15:13:27 +01:00
Hsiang-Cheng Yang
d52830a818 Update arrays.rs (#253)
fix a typo
2023-03-23 19:15:54 -04:00
Lorenzo
5bf7102fc2 Merge branch 'development' into prdct-prb 2023-03-21 14:03:04 +09:00
Lorenzo
d15ea43975 Remove failure in case of failed upload to codecov.io 2023-03-20 15:08:30 +00:00
Lorenzo
97604a2d83 Merge branch 'development' into prdct-prb 2023-01-27 10:42:48 +00:00
morenol
dae556776c Merge branch 'development' into prdct-prb 2023-01-26 20:14:05 -04:00
Lorenzo
24d80a0c9a Merge branch 'development' into prdct-prb 2022-11-09 16:31:03 +00:00
Lorenzo
c56370dfca Merge branch 'development' into prdct-prb 2022-11-03 11:59:46 +00:00
Lorenzo (Mec-iS)
78e53a28e7 apply fmt 2022-10-31 19:28:24 +00:00
Lorenzo (Mec-iS)
a9f89a2e15 Fix conflicts 2022-10-31 19:22:06 +00:00
Luis Moreno
e9ed9e85ae Merge remote-tracking branch 'sm/development' into predict-probability 2022-09-22 12:20:56 -04:00
Alan Race
28c81eb358 Test case now passing without transpose 2022-08-30 11:08:35 +02:00
Alan Race
7f7b2edca0 Fixed test by transposing matrix 2022-08-29 16:25:21 +02:00
Alan Race
d46b830bcd Merge branch 'development' into predict-probability 2022-08-29 16:24:39 +02:00
Alan Race
b6fb8191eb Merge pull request #1 from smartcorelib/alanrace-predict-probs
Add test to predict probabilities
2022-08-29 15:57:24 +02:00
Lorenzo (Mec-iS)
61db4ebd90 Add test 2022-08-24 12:34:56 +01:00
Lorenzo (Mec-iS)
2603a1f42b Add test 2022-08-24 11:44:30 +01:00
Alan Race
663db0334d Added per-class probability prediction for random forests 2022-07-11 16:08:03 +02:00
53 changed files with 1141 additions and 563 deletions
+2
@@ -37,6 +37,8 @@ $ rust-code-analysis-cli -p src/algorithm/neighbour/fastpair.rs --ls 22 --le 213
```
* find more information about what happens in your binary with [`twiggy`](https://rustwasm.github.io/twiggy/install.html). This needs a compiled binary, so create a brief `main {}` function using `smartcore` and then point `twiggy` to that file (a minimal example follows this list).
* Please take a look at the output of a profiler to spot the most evident performance problems; see [this guide about using a profiler](http://www.codeofview.com/fix-rs/2017/01/24/how-to-optimize-rust-programs-on-linux/).
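A minimal sketch of such a throwaway binary, assuming a scratch crate that depends on `smartcore` (the exact calls are only illustrative; any small workload that pulls the library into the binary will do):

```rust
use smartcore::linalg::basic::matrix::DenseMatrix;

fn main() {
    // Build and print a tiny matrix so that smartcore code paths end up in
    // the compiled binary that `twiggy` or a profiler will inspect.
    let x = DenseMatrix::from_2d_array(&[&[1.0_f64, 2.0, 3.0], &[4.0, 5.0, 6.0]]).unwrap();
    println!("{}", x);
}
```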
## Issue Report Process
1. Go to the project's issues.
+1 -1
@@ -41,4 +41,4 @@ jobs:
- name: Upload to codecov.io
uses: codecov/codecov-action@v2
with:
fail_ci_if_error: true
fail_ci_if_error: false
+6
@@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.4.0] - 2023-04-05
## Added
- WARNING: Breaking changes!
- `DenseMatrix` constructors now return `Result`, so a matrix can no longer be instantiated with an inconsistent rows/cols count. Return values need to be unwrapped with `unwrap()`; see the tests and the sketch below
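For orientation, a minimal sketch of what the 0.4.0 change looks like for callers; it assumes only the `from_2d_array`/`new` signatures and shape checks visible in the diffs further down, and is illustrative rather than part of the changelog:

```rust
use smartcore::linalg::basic::matrix::DenseMatrix;

fn main() {
    // 0.3.x: constructors returned the matrix directly.
    // 0.4.0: they return a Result, so unwrap (or propagate) the value.
    let x = DenseMatrix::from_2d_array(&[&[1.0, 2.0], &[3.0, 4.0]]).unwrap();
    println!("{}", x);

    // A shape that does not match the data length is now a recoverable error.
    assert!(DenseMatrix::new(2, 3, vec![1.0, 2.0, 3.0, 4.0], true).is_err());
}
```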
## [0.3.0] - 2022-11-09
## Added
+2 -2
@@ -2,7 +2,7 @@
name = "smartcore"
description = "Machine Learning in Rust."
homepage = "https://smartcorelib.org"
version = "0.3.1"
version = "0.4.0"
authors = ["smartcore Developers"]
edition = "2021"
license = "Apache-2.0"
@@ -48,7 +48,7 @@ getrandom = { version = "0.2.8", optional = true }
wasm-bindgen-test = "0.3"
[dev-dependencies]
itertools = "0.10.5"
itertools = "0.12.0"
serde_json = "1.0"
bincode = "1.3.1"
+4 -3
@@ -40,11 +40,11 @@ impl BBDTreeNode {
impl BBDTree {
pub fn new<T: Number, M: Array2<T>>(data: &M) -> BBDTree {
let nodes = Vec::new();
let nodes: Vec<BBDTreeNode> = Vec::new();
let (n, _) = data.shape();
let index = (0..n).collect::<Vec<_>>();
let index = (0..n).collect::<Vec<usize>>();
let mut tree = BBDTree {
nodes,
@@ -343,7 +343,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let tree = BBDTree::new(&data);
+11 -7
@@ -17,7 +17,7 @@
/// &[4.6, 3.1, 1.5, 0.2],
/// &[5.0, 3.6, 1.4, 0.2],
/// &[5.4, 3.9, 1.7, 0.4],
/// ]);
/// ]).unwrap();
/// let fastpair = FastPair::new(&x);
/// let closest_pair: PairwiseDistance<f64> = fastpair.unwrap().closest_pair();
/// ```
@@ -271,7 +271,7 @@ mod tests_fastpair {
fn dataset_has_at_least_three_points() {
// Create a dataset which consists of only two points:
// A(0.0, 0.0) and B(1.0, 1.0).
let dataset = DenseMatrix::<f64>::from_2d_array(&[&[0.0, 0.0], &[1.0, 1.0]]);
let dataset = DenseMatrix::<f64>::from_2d_array(&[&[0.0, 0.0], &[1.0, 1.0]]).unwrap();
// We expect an error when we run `FastPair` on this dataset,
// because `FastPair` currently only works on a minimum of 3
@@ -288,7 +288,7 @@ mod tests_fastpair {
#[test]
fn one_dimensional_dataset_minimal() {
let dataset = DenseMatrix::<f64>::from_2d_array(&[&[0.0], &[2.0], &[9.0]]);
let dataset = DenseMatrix::<f64>::from_2d_array(&[&[0.0], &[2.0], &[9.0]]).unwrap();
let result = FastPair::new(&dataset);
assert!(result.is_ok());
@@ -308,7 +308,8 @@ mod tests_fastpair {
#[test]
fn one_dimensional_dataset_2() {
let dataset = DenseMatrix::<f64>::from_2d_array(&[&[27.0], &[0.0], &[9.0], &[2.0]]);
let dataset =
DenseMatrix::<f64>::from_2d_array(&[&[27.0], &[0.0], &[9.0], &[2.0]]).unwrap();
let result = FastPair::new(&dataset);
assert!(result.is_ok());
@@ -343,7 +344,8 @@ mod tests_fastpair {
&[6.9, 3.1, 4.9, 1.5],
&[5.5, 2.3, 4.0, 1.3],
&[6.5, 2.8, 4.6, 1.5],
]);
])
.unwrap();
let fastpair = FastPair::new(&x);
assert!(fastpair.is_ok());
@@ -516,7 +518,8 @@ mod tests_fastpair {
&[6.9, 3.1, 4.9, 1.5],
&[5.5, 2.3, 4.0, 1.3],
&[6.5, 2.8, 4.6, 1.5],
]);
])
.unwrap();
// compute
let fastpair = FastPair::new(&x);
assert!(fastpair.is_ok());
@@ -564,7 +567,8 @@ mod tests_fastpair {
&[6.9, 3.1, 4.9, 1.5],
&[5.5, 2.3, 4.0, 1.3],
&[6.5, 2.8, 4.6, 1.5],
]);
])
.unwrap();
// compute
let fastpair = FastPair::new(&x);
assert!(fastpair.is_ok());
+5 -4
@@ -315,8 +315,7 @@ impl<TX: Number, TY: Number, X: Array2<TX>, Y: Array1<TY>, D: Distance<Vec<TX>>>
}
}
while !neighbors.is_empty() {
let neighbor = neighbors.pop().unwrap();
while let Some(neighbor) = neighbors.pop() {
let index = neighbor.0;
if y[index] == outlier {
@@ -443,7 +442,8 @@ mod tests {
&[2.2, 1.2],
&[1.8, 0.8],
&[3.0, 5.0],
]);
])
.unwrap();
let expected_labels = vec![1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 0];
@@ -488,7 +488,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let dbscan = DBSCAN::fit(&x, Default::default()).unwrap();
+7 -5
@@ -41,7 +41,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//!
//! let kmeans = KMeans::fit(&x, KMeansParameters::default().with_k(2)).unwrap(); // Fit to data, 2 clusters
//! let y_hat: Vec<u8> = kmeans.predict(&x).unwrap(); // use the same points for prediction
@@ -249,7 +249,7 @@ impl<TX: Number, TY: Number, X: Array2<TX>, Y: Array1<TY>> Predictor<X, Y>
impl<TX: Number, TY: Number, X: Array2<TX>, Y: Array1<TY>> KMeans<TX, TY, X, Y> {
/// Fit algorithm to _NxM_ matrix where _N_ is number of samples and _M_ is number of features.
/// * `data` - training instances to cluster
/// * `data` - training instances to cluster
/// * `parameters` - cluster parameters
pub fn fit(data: &X, parameters: KMeansParameters) -> Result<KMeans<TX, TY, X, Y>, Failed> {
let bbd = BBDTree::new(data);
@@ -424,7 +424,7 @@ mod tests {
)]
#[test]
fn invalid_k() {
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
assert!(KMeans::<i32, i32, DenseMatrix<i32>, Vec<i32>>::fit(
&x,
@@ -492,7 +492,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let kmeans = KMeans::fit(&x, Default::default()).unwrap();
@@ -531,7 +532,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let kmeans: KMeans<f32, f32, DenseMatrix<f32>, Vec<f32>> =
KMeans::fit(&x, Default::default()).unwrap();
+1 -1
@@ -40,7 +40,7 @@ pub fn load_dataset() -> Dataset<f32, u32> {
target: y,
num_samples,
num_features,
feature_names: vec![
feature_names: [
"Age", "Sex", "BMI", "BP", "S1", "S2", "S3", "S4", "S5", "S6",
]
.iter()
+3 -5
@@ -25,16 +25,14 @@ pub fn load_dataset() -> Dataset<f32, f32> {
target: y,
num_samples,
num_features,
feature_names: vec![
"sepal length (cm)",
feature_names: ["sepal length (cm)",
"sepal width (cm)",
"petal length (cm)",
"petal width (cm)",
]
"petal width (cm)"]
.iter()
.map(|s| s.to_string())
.collect(),
target_names: vec!["setosa", "versicolor", "virginica"]
target_names: ["setosa", "versicolor", "virginica"]
.iter()
.map(|s| s.to_string())
.collect(),
+2 -2
@@ -36,7 +36,7 @@ pub fn load_dataset() -> Dataset<f32, u32> {
target: y,
num_samples,
num_features,
feature_names: vec![
feature_names: [
"sepal length (cm)",
"sepal width (cm)",
"petal length (cm)",
@@ -45,7 +45,7 @@ pub fn load_dataset() -> Dataset<f32, u32> {
.iter()
.map(|s| s.to_string())
.collect(),
target_names: vec!["setosa", "versicolor", "virginica"]
target_names: ["setosa", "versicolor", "virginica"]
.iter()
.map(|s| s.to_string())
.collect(),
+13 -7
@@ -35,7 +35,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//!
//! let pca = PCA::fit(&iris, PCAParameters::default().with_n_components(2)).unwrap(); // Reduce number of features to 2
//!
@@ -443,6 +443,7 @@ mod tests {
&[2.6, 53.0, 66.0, 10.8],
&[6.8, 161.0, 60.0, 15.6],
])
.unwrap()
}
#[cfg_attr(
all(target_arch = "wasm32", not(target_os = "wasi")),
@@ -457,7 +458,8 @@ mod tests {
&[0.9952, 0.0588],
&[0.0463, 0.9769],
&[0.0752, 0.2007],
]);
])
.unwrap();
let pca = PCA::fit(&us_arrests, Default::default()).unwrap();
@@ -500,7 +502,8 @@ mod tests {
-0.974080592182491,
0.0723250196376097,
],
]);
])
.unwrap();
let expected_projection = DenseMatrix::from_2d_array(&[
&[-64.8022, -11.448, 2.4949, -2.4079],
@@ -553,7 +556,8 @@ mod tests {
&[91.5446, -22.9529, 0.402, -0.7369],
&[118.1763, 5.5076, 2.7113, -0.205],
&[10.4345, -5.9245, 3.7944, 0.5179],
]);
])
.unwrap();
let expected_eigenvalues: Vec<f64> = vec![
343544.6277001563,
@@ -616,7 +620,8 @@ mod tests {
-0.0881962972508558,
-0.0096011588898465,
],
]);
])
.unwrap();
let expected_projection = DenseMatrix::from_2d_array(&[
&[0.9856, -1.1334, 0.4443, -0.1563],
@@ -669,7 +674,8 @@ mod tests {
&[-2.1086, -1.4248, -0.1048, -0.1319],
&[-2.0797, 0.6113, 0.1389, -0.1841],
&[-0.6294, -0.321, 0.2407, 0.1667],
]);
])
.unwrap();
let expected_eigenvalues: Vec<f64> = vec![
2.480241579149493,
@@ -732,7 +738,7 @@ mod tests {
// &[4.9, 2.4, 3.3, 1.0],
// &[6.6, 2.9, 4.6, 1.3],
// &[5.2, 2.7, 3.9, 1.4],
// ]);
// ]).unwrap();
// let pca = PCA::fit(&iris, Default::default()).unwrap();
+6 -4
@@ -32,7 +32,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//!
//! let svd = SVD::fit(&iris, SVDParameters::default().
//! with_n_components(2)).unwrap(); // Reduce number of features to 2
@@ -292,7 +292,8 @@ mod tests {
&[5.7, 81.0, 39.0, 9.3],
&[2.6, 53.0, 66.0, 10.8],
&[6.8, 161.0, 60.0, 15.6],
]);
])
.unwrap();
let expected = DenseMatrix::from_2d_array(&[
&[243.54655757, -18.76673788],
@@ -300,7 +301,8 @@ mod tests {
&[305.93972467, -15.39087376],
&[197.28420365, -11.66808306],
&[293.43187394, 1.91163633],
]);
])
.unwrap();
let svd = SVD::fit(&x, Default::default()).unwrap();
let x_transformed = svd.transform(&x).unwrap();
@@ -341,7 +343,7 @@ mod tests {
// &[4.9, 2.4, 3.3, 1.0],
// &[6.6, 2.9, 4.6, 1.3],
// &[5.2, 2.7, 3.9, 1.4],
// ]);
// ]).unwrap();
// let svd = SVD::fit(&iris, Default::default()).unwrap();
+104 -4
@@ -33,7 +33,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//! let y = vec![
//! 0, 0, 0, 0, 0, 0, 0, 0,
//! 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -580,6 +580,37 @@ impl<TX: FloatNumber + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY
which_max(&result)
}
/// Predict the per-class probabilities for each observation.
/// The probability is calculated as the fraction of trees that predicted a given class
pub fn predict_proba<R: Array2<f64>>(&self, x: &X) -> Result<R, Failed> {
let mut result: R = R::zeros(x.shape().0, self.classes.as_ref().unwrap().len());
let (n, _) = x.shape();
for i in 0..n {
let row_probs = self.predict_proba_for_row(x, i);
for (j, item) in row_probs.iter().enumerate() {
result.set((i, j), *item);
}
}
Ok(result)
}
fn predict_proba_for_row(&self, x: &X, row: usize) -> Vec<f64> {
let mut result = vec![0; self.classes.as_ref().unwrap().len()];
for tree in self.trees.as_ref().unwrap().iter() {
result[tree.predict_for_row(x, row)] += 1;
}
result
.iter()
.map(|n| *n as f64 / self.trees.as_ref().unwrap().len() as f64)
.collect()
}
fn sample_with_replacement(y: &[usize], num_classes: usize, rng: &mut impl Rng) -> Vec<usize> {
let class_weight = vec![1.; num_classes];
let nrows = y.len();
@@ -607,6 +638,7 @@ impl<TX: FloatNumber + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY
#[cfg(test)]
mod tests {
use super::*;
use crate::linalg::basic::arrays::Array;
use crate::linalg::basic::matrix::DenseMatrix;
use crate::metrics::*;
@@ -660,7 +692,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y = vec![0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
let classifier = RandomForestClassifier::fit(
@@ -733,7 +766,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y = vec![0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
let classifier = RandomForestClassifier::fit(
@@ -786,7 +820,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y = vec![0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
let forest = RandomForestClassifier::fit(&x, &y, Default::default()).unwrap();
@@ -796,4 +831,69 @@ mod tests {
assert_eq!(forest, deserialized_forest);
}
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
#[test]
fn fit_predict_probabilities() {
let x = DenseMatrix::<f64>::from_2d_array(&[
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
&[4.6, 3.1, 1.5, 0.2],
&[5.0, 3.6, 1.4, 0.2],
&[5.4, 3.9, 1.7, 0.4],
&[4.6, 3.4, 1.4, 0.3],
&[5.0, 3.4, 1.5, 0.2],
&[4.4, 2.9, 1.4, 0.2],
&[4.9, 3.1, 1.5, 0.1],
&[7.0, 3.2, 4.7, 1.4],
&[6.4, 3.2, 4.5, 1.5],
&[6.9, 3.1, 4.9, 1.5],
&[5.5, 2.3, 4.0, 1.3],
&[6.5, 2.8, 4.6, 1.5],
&[5.7, 2.8, 4.5, 1.3],
&[6.3, 3.3, 4.7, 1.6],
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
let y = vec![0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
let classifier = RandomForestClassifier::fit(
&x,
&y,
RandomForestClassifierParameters {
criterion: SplitCriterion::Gini,
max_depth: None,
min_samples_leaf: 1,
min_samples_split: 2,
n_trees: 100, // this is n_estimators in sklearn
m: Option::None,
keep_samples: false,
seed: 0,
},
)
.unwrap();
println!("{:?}", classifier.classes);
let results: DenseMatrix<f64> = classifier.predict_proba(&x).unwrap();
println!("{:?}", x.shape());
println!("{:?}", results);
println!("{:?}", results.shape());
assert_eq!(
results,
DenseMatrix::<f64>::new(
20,
2,
vec![
1.0, 0.0, 0.78, 0.22, 0.95, 0.05, 0.82, 0.18, 1.0, 0.0, 0.92, 0.08, 0.99, 0.01,
0.96, 0.04, 0.36, 0.64, 0.33, 0.67, 0.02, 0.98, 0.02, 0.98, 0.0, 1.0, 0.0, 1.0,
0.0, 1.0, 0.0, 1.0, 0.03, 0.97, 0.05, 0.95, 0.0, 1.0, 0.02, 0.98
],
true
)
);
}
}
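The `predict_proba` addition above computes, for every row, the fraction of trees that voted for each class. A standalone sketch of just that counting rule (the `vote_fractions` helper is hypothetical, not part of the diff):

```rust
// Turn per-class vote counts from an ensemble into probabilities, mirroring
// what predict_proba_for_row does with its fitted trees.
fn vote_fractions(votes_per_class: &[usize], n_trees: usize) -> Vec<f64> {
    votes_per_class
        .iter()
        .map(|&v| v as f64 / n_trees as f64)
        .collect()
}

fn main() {
    // 100 trees: 78 vote for class 0 and 22 for class 1 -> [0.78, 0.22].
    println!("{:?}", vote_fractions(&[78, 22], 100));
}
```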
+7 -4
@@ -29,7 +29,7 @@
//! &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
//! &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
//! &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
//! ]);
//! ]).unwrap();
//! let y = vec![
//! 83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2,
//! 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9
@@ -574,7 +574,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
114.2, 115.7, 116.9,
@@ -648,7 +649,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
114.2, 115.7, 116.9,
@@ -702,7 +704,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
114.2, 115.7, 116.9,
+19
@@ -32,6 +32,8 @@ pub enum FailedError {
SolutionFailed,
/// Error in input parameters
ParametersError,
/// Invalid state error (should never happen)
InvalidStateError,
}
impl Failed {
@@ -64,6 +66,22 @@ impl Failed {
}
}
/// new instance of `FailedError::ParametersError`
pub fn input(msg: &str) -> Self {
Failed {
err: FailedError::ParametersError,
msg: msg.to_string(),
}
}
/// new instance of `FailedError::InvalidStateError`
pub fn invalid_state(msg: &str) -> Self {
Failed {
err: FailedError::InvalidStateError,
msg: msg.to_string(),
}
}
/// new instance of `err`
pub fn because(err: FailedError, msg: &str) -> Self {
Failed {
@@ -97,6 +115,7 @@ impl fmt::Display for FailedError {
FailedError::DecompositionFailed => "Decomposition failed",
FailedError::SolutionFailed => "Can't find solution",
FailedError::ParametersError => "Error in input, check parameters",
FailedError::InvalidStateError => "Invalid state, this should never happen", // useful in development phase of lib
};
write!(f, "{failed_err_str}")
}
+1 -1
@@ -64,7 +64,7 @@
//! &[3., 4.],
//! &[5., 6.],
//! &[7., 8.],
//! &[9., 10.]]);
//! &[9., 10.]]).unwrap();
//! // Our classes are defined as a vector
//! let y = vec![2, 2, 2, 3, 3];
//!
+125 -89
@@ -188,8 +188,7 @@ pub trait ArrayView1<T: Debug + Display + Copy + Sized>: Array<T, usize> {
_ => max,
}
};
self.iterator(0)
.fold(T::min_value(), |max, x| max_f(max, x))
self.iterator(0).fold(T::min_value(), max_f)
}
/// return min value from the view
fn min(&self) -> T
@@ -202,8 +201,7 @@ pub trait ArrayView1<T: Debug + Display + Copy + Sized>: Array<T, usize> {
_ => min,
}
};
self.iterator(0)
.fold(T::max_value(), |max, x| min_f(max, x))
self.iterator(0).fold(T::max_value(), min_f)
}
/// return the position of the max value of the view
fn argmax(&self) -> usize
@@ -1570,7 +1568,7 @@ pub trait Array2<T: Debug + Display + Copy + Sized>: MutArrayView2<T> + Sized +
mean
}
/// copy coumn as a vector
/// copy column as a vector
fn copy_col_as_vec(&self, col: usize, result: &mut Vec<T>) {
for (r, result_r) in result.iter_mut().enumerate().take(self.shape().0) {
*result_r = *self.get((r, col));
@@ -1777,7 +1775,7 @@ mod tests {
#[test]
fn test_xa() {
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
assert_eq!(vec![7, 8].xa(false, &a), vec![39, 54, 69]);
assert_eq!(vec![7, 8, 9].xa(true, &a), vec![50, 122]);
}
@@ -1785,19 +1783,27 @@ mod tests {
#[test]
fn test_min_max() {
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).max(0),
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]])
.unwrap()
.max(0),
vec!(4, 5, 6)
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).max(1),
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]])
.unwrap()
.max(1),
vec!(3, 6)
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]).min(0),
DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]])
.unwrap()
.min(0),
vec!(1., 2., 3.)
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]).min(1),
DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]])
.unwrap()
.min(1),
vec!(1., 4.)
);
}
@@ -1805,11 +1811,15 @@ mod tests {
#[test]
fn test_argmax() {
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 5, 3], &[4, 2, 6]]).argmax(0),
DenseMatrix::from_2d_array(&[&[1, 5, 3], &[4, 2, 6]])
.unwrap()
.argmax(0),
vec!(1, 0, 1)
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[4, 2, 3], &[1, 5, 6]]).argmax(1),
DenseMatrix::from_2d_array(&[&[4, 2, 3], &[1, 5, 6]])
.unwrap()
.argmax(1),
vec!(0, 2)
);
}
@@ -1817,168 +1827,181 @@ mod tests {
#[test]
fn test_sum() {
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).sum(0),
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]])
.unwrap()
.sum(0),
vec!(5, 7, 9)
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]).sum(1),
DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]])
.unwrap()
.sum(1),
vec!(6., 15.)
);
}
#[test]
fn test_abs() {
let mut x = DenseMatrix::from_2d_array(&[&[-1, 2, -3], &[4, -5, 6]]);
let mut x = DenseMatrix::from_2d_array(&[&[-1, 2, -3], &[4, -5, 6]]).unwrap();
x.abs_mut();
assert_eq!(x, DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]));
assert_eq!(
x,
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap()
);
}
#[test]
fn test_neg() {
let mut x = DenseMatrix::from_2d_array(&[&[-1, 2, -3], &[4, -5, 6]]);
let mut x = DenseMatrix::from_2d_array(&[&[-1, 2, -3], &[4, -5, 6]]).unwrap();
x.neg_mut();
assert_eq!(x, DenseMatrix::from_2d_array(&[&[1, -2, 3], &[-4, 5, -6]]));
assert_eq!(
x,
DenseMatrix::from_2d_array(&[&[1, -2, 3], &[-4, 5, -6]]).unwrap()
);
}
#[test]
fn test_copy_from() {
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
let mut y = DenseMatrix::<i32>::zeros(2, 3);
y.copy_from(&x);
assert_eq!(y, DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]));
assert_eq!(
y,
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap()
);
}
#[test]
fn test_init() {
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
assert_eq!(
DenseMatrix::<i32>::zeros(2, 2),
DenseMatrix::from_2d_array(&[&[0, 0], &[0, 0]])
DenseMatrix::from_2d_array(&[&[0, 0], &[0, 0]]).unwrap()
);
assert_eq!(
DenseMatrix::<i32>::ones(2, 2),
DenseMatrix::from_2d_array(&[&[1, 1], &[1, 1]])
DenseMatrix::from_2d_array(&[&[1, 1], &[1, 1]]).unwrap()
);
assert_eq!(
DenseMatrix::<i32>::eye(3),
DenseMatrix::from_2d_array(&[&[1, 0, 0], &[0, 1, 0], &[0, 0, 1]])
DenseMatrix::from_2d_array(&[&[1, 0, 0], &[0, 1, 0], &[0, 0, 1]]).unwrap()
);
assert_eq!(
DenseMatrix::from_slice(x.slice(0..2, 0..2).as_ref()),
DenseMatrix::from_2d_array(&[&[1, 2], &[4, 5]])
DenseMatrix::from_slice(x.slice(0..2, 0..2).as_ref()), // internal only?
DenseMatrix::from_2d_array(&[&[1, 2], &[4, 5]]).unwrap()
);
assert_eq!(
DenseMatrix::from_row(x.get_row(0).as_ref()),
DenseMatrix::from_2d_array(&[&[1, 2, 3]])
DenseMatrix::from_row(x.get_row(0).as_ref()), // internal only?
DenseMatrix::from_2d_array(&[&[1, 2, 3]]).unwrap()
);
assert_eq!(
DenseMatrix::from_column(x.get_col(0).as_ref()),
DenseMatrix::from_2d_array(&[&[1], &[4]])
DenseMatrix::from_column(x.get_col(0).as_ref()), // internal only?
DenseMatrix::from_2d_array(&[&[1], &[4]]).unwrap()
);
}
#[test]
fn test_transpose() {
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
assert_eq!(
x.transpose(),
DenseMatrix::from_2d_array(&[&[1, 4], &[2, 5], &[3, 6]])
DenseMatrix::from_2d_array(&[&[1, 4], &[2, 5], &[3, 6]]).unwrap()
);
}
#[test]
fn test_reshape() {
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
assert_eq!(
x.reshape(3, 2, 0),
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]])
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]).unwrap()
);
assert_eq!(
x.reshape(3, 2, 1),
DenseMatrix::from_2d_array(&[&[1, 4], &[2, 5], &[3, 6]])
DenseMatrix::from_2d_array(&[&[1, 4], &[2, 5], &[3, 6]]).unwrap()
);
}
#[test]
#[should_panic]
fn test_failed_reshape() {
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
assert_eq!(
x.reshape(4, 2, 0),
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]])
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]).unwrap()
);
}
#[test]
fn test_matmul() {
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let b = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
let b = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]).unwrap();
assert_eq!(
a.matmul(&(*b.slice(0..3, 0..2))),
DenseMatrix::from_2d_array(&[&[22, 28], &[49, 64]])
DenseMatrix::from_2d_array(&[&[22, 28], &[49, 64]]).unwrap()
);
assert_eq!(
a.matmul(&b),
DenseMatrix::from_2d_array(&[&[22, 28], &[49, 64]])
DenseMatrix::from_2d_array(&[&[22, 28], &[49, 64]]).unwrap()
);
}
#[test]
fn test_concat() {
let a = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]);
let b = DenseMatrix::from_2d_array(&[&[5, 6], &[7, 8]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]).unwrap();
let b = DenseMatrix::from_2d_array(&[&[5, 6], &[7, 8]]).unwrap();
assert_eq!(
DenseMatrix::concatenate_1d(&[&vec!(1, 2, 3), &vec!(4, 5, 6)], 0),
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]])
DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap()
);
assert_eq!(
DenseMatrix::concatenate_1d(&[&vec!(1, 2), &vec!(3, 4)], 1),
DenseMatrix::from_2d_array(&[&[1, 3], &[2, 4]])
DenseMatrix::from_2d_array(&[&[1, 3], &[2, 4]]).unwrap()
);
assert_eq!(
DenseMatrix::concatenate_2d(&[&a, &b], 0),
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6], &[7, 8]])
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6], &[7, 8]]).unwrap()
);
assert_eq!(
DenseMatrix::concatenate_2d(&[&a, &b], 1),
DenseMatrix::from_2d_array(&[&[1, 2, 5, 6], &[3, 4, 7, 8]])
DenseMatrix::from_2d_array(&[&[1, 2, 5, 6], &[3, 4, 7, 8]]).unwrap()
);
}
#[test]
fn test_take() {
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let b = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
let b = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]).unwrap();
assert_eq!(
a.take(&[0, 2], 1),
DenseMatrix::from_2d_array(&[&[1, 3], &[4, 6]])
DenseMatrix::from_2d_array(&[&[1, 3], &[4, 6]]).unwrap()
);
assert_eq!(
b.take(&[0, 2], 0),
DenseMatrix::from_2d_array(&[&[1, 2], &[5, 6]])
DenseMatrix::from_2d_array(&[&[1, 2], &[5, 6]]).unwrap()
);
}
#[test]
fn test_merge() {
let a = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]).unwrap();
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6], &[7, 8]]),
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6], &[7, 8]]).unwrap(),
a.merge_1d(&[&vec!(5, 6), &vec!(7, 8)], 0, true)
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[5, 6], &[7, 8], &[1, 2], &[3, 4]]),
DenseMatrix::from_2d_array(&[&[5, 6], &[7, 8], &[1, 2], &[3, 4]]).unwrap(),
a.merge_1d(&[&vec!(5, 6), &vec!(7, 8)], 0, false)
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 2, 5, 7], &[3, 4, 6, 8]]),
DenseMatrix::from_2d_array(&[&[1, 2, 5, 7], &[3, 4, 6, 8]]).unwrap(),
a.merge_1d(&[&vec!(5, 6), &vec!(7, 8)], 1, true)
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[5, 7, 1, 2], &[6, 8, 3, 4]]),
DenseMatrix::from_2d_array(&[&[5, 7, 1, 2], &[6, 8, 3, 4]]).unwrap(),
a.merge_1d(&[&vec!(5, 6), &vec!(7, 8)], 1, false)
);
}
@@ -1986,20 +2009,28 @@ mod tests {
#[test]
fn test_ops() {
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]).mul_scalar(2),
DenseMatrix::from_2d_array(&[&[2, 4], &[6, 8]])
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]])
.unwrap()
.mul_scalar(2),
DenseMatrix::from_2d_array(&[&[2, 4], &[6, 8]]).unwrap()
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]).add_scalar(2),
DenseMatrix::from_2d_array(&[&[3, 4], &[5, 6]])
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]])
.unwrap()
.add_scalar(2),
DenseMatrix::from_2d_array(&[&[3, 4], &[5, 6]]).unwrap()
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]).sub_scalar(1),
DenseMatrix::from_2d_array(&[&[0, 1], &[2, 3]])
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]])
.unwrap()
.sub_scalar(1),
DenseMatrix::from_2d_array(&[&[0, 1], &[2, 3]]).unwrap()
);
assert_eq!(
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]).div_scalar(2),
DenseMatrix::from_2d_array(&[&[0, 1], &[1, 2]])
DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]])
.unwrap()
.div_scalar(2),
DenseMatrix::from_2d_array(&[&[0, 1], &[1, 2]]).unwrap()
);
}
@@ -2013,42 +2044,45 @@ mod tests {
#[test]
fn test_vstack() {
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9]]);
let b = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9]]).unwrap();
let b = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
let expected = DenseMatrix::from_2d_array(&[
&[1, 2, 3],
&[4, 5, 6],
&[7, 8, 9],
&[1, 2, 3],
&[4, 5, 6],
]);
])
.unwrap();
let result = a.v_stack(&b);
assert_eq!(result, expected);
}
#[test]
fn test_hstack() {
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9]]);
let b = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9]]).unwrap();
let b = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]).unwrap();
let expected =
DenseMatrix::from_2d_array(&[&[1, 2, 3, 1, 2], &[4, 5, 6, 3, 4], &[7, 8, 9, 5, 6]]);
DenseMatrix::from_2d_array(&[&[1, 2, 3, 1, 2], &[4, 5, 6, 3, 4], &[7, 8, 9, 5, 6]])
.unwrap();
let result = a.h_stack(&b);
assert_eq!(result, expected);
}
#[test]
fn test_map() {
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let expected = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0], &[4.0, 5.0, 6.0]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
let expected = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0], &[4.0, 5.0, 6.0]]).unwrap();
let result: DenseMatrix<f64> = a.map(|&v| v as f64);
assert_eq!(result, expected);
}
#[test]
fn scale() {
let mut m = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
let expected_0 = DenseMatrix::from_2d_array(&[&[-1., -1., -1.], &[1., 1., 1.]]);
let expected_1 = DenseMatrix::from_2d_array(&[&[-1.22, 0.0, 1.22], &[-1.22, 0.0, 1.22]]);
let mut m = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]).unwrap();
let expected_0 = DenseMatrix::from_2d_array(&[&[-1., -1., -1.], &[1., 1., 1.]]).unwrap();
let expected_1 =
DenseMatrix::from_2d_array(&[&[-1.22, 0.0, 1.22], &[-1.22, 0.0, 1.22]]).unwrap();
{
let mut m = m.clone();
@@ -2062,52 +2096,52 @@ mod tests {
#[test]
fn test_pow_mut() {
let mut a = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0], &[4.0, 5.0, 6.0]]);
let mut a = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0], &[4.0, 5.0, 6.0]]).unwrap();
a.pow_mut(2.0);
assert_eq!(
a,
DenseMatrix::from_2d_array(&[&[1.0, 4.0, 9.0], &[16.0, 25.0, 36.0]])
DenseMatrix::from_2d_array(&[&[1.0, 4.0, 9.0], &[16.0, 25.0, 36.0]]).unwrap()
);
}
#[test]
fn test_ab() {
let a = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]);
let b = DenseMatrix::from_2d_array(&[&[5, 6], &[7, 8]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4]]).unwrap();
let b = DenseMatrix::from_2d_array(&[&[5, 6], &[7, 8]]).unwrap();
assert_eq!(
a.ab(false, &b, false),
DenseMatrix::from_2d_array(&[&[19, 22], &[43, 50]])
DenseMatrix::from_2d_array(&[&[19, 22], &[43, 50]]).unwrap()
);
assert_eq!(
a.ab(true, &b, false),
DenseMatrix::from_2d_array(&[&[26, 30], &[38, 44]])
DenseMatrix::from_2d_array(&[&[26, 30], &[38, 44]]).unwrap()
);
assert_eq!(
a.ab(false, &b, true),
DenseMatrix::from_2d_array(&[&[17, 23], &[39, 53]])
DenseMatrix::from_2d_array(&[&[17, 23], &[39, 53]]).unwrap()
);
assert_eq!(
a.ab(true, &b, true),
DenseMatrix::from_2d_array(&[&[23, 31], &[34, 46]])
DenseMatrix::from_2d_array(&[&[23, 31], &[34, 46]]).unwrap()
);
}
#[test]
fn test_ax() {
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
assert_eq!(
a.ax(false, &vec![7, 8, 9]).transpose(),
DenseMatrix::from_2d_array(&[&[50, 122]])
DenseMatrix::from_2d_array(&[&[50, 122]]).unwrap()
);
assert_eq!(
a.ax(true, &vec![7, 8]).transpose(),
DenseMatrix::from_2d_array(&[&[39, 54, 69]])
DenseMatrix::from_2d_array(&[&[39, 54, 69]]).unwrap()
);
}
#[test]
fn diag() {
let x = DenseMatrix::from_2d_array(&[&[0, 1, 2], &[3, 4, 5], &[6, 7, 8]]);
let x = DenseMatrix::from_2d_array(&[&[0, 1, 2], &[3, 4, 5], &[6, 7, 8]]).unwrap();
assert_eq!(x.diag(), vec![0, 4, 8]);
}
@@ -2119,13 +2153,15 @@ mod tests {
&[68, 590, 37],
&[69, 660, 46],
&[73, 600, 55],
]);
])
.unwrap();
let mut result = DenseMatrix::zeros(3, 3);
let expected = DenseMatrix::from_2d_array(&[
&[11.5, 50.0, 34.75],
&[50.0, 1250.0, 205.0],
&[34.75, 205.0, 110.0],
]);
])
.unwrap();
a.cov(&mut result);
+214 -85
@@ -19,6 +19,8 @@ use crate::linalg::traits::svd::SVDDecomposable;
use crate::numbers::basenum::Number;
use crate::numbers::realnum::RealNumber;
use crate::error::Failed;
/// Dense matrix
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
@@ -50,26 +52,26 @@ pub struct DenseMatrixMutView<'a, T: Debug + Display + Copy + Sized> {
}
impl<'a, T: Debug + Display + Copy + Sized> DenseMatrixView<'a, T> {
fn new(m: &'a DenseMatrix<T>, rows: Range<usize>, cols: Range<usize>) -> Self {
let (start, end, stride) = if m.column_major {
(
rows.start + cols.start * m.nrows,
rows.end + (cols.end - 1) * m.nrows,
m.nrows,
)
fn new(
m: &'a DenseMatrix<T>,
vrows: Range<usize>,
vcols: Range<usize>,
) -> Result<Self, Failed> {
if m.is_valid_view(m.shape().0, m.shape().1, &vrows, &vcols) {
Err(Failed::input(
"The specified view is outside of the matrix range",
))
} else {
(
rows.start * m.ncols + cols.start,
(rows.end - 1) * m.ncols + cols.end,
m.ncols,
)
};
DenseMatrixView {
values: &m.values[start..end],
stride,
nrows: rows.end - rows.start,
ncols: cols.end - cols.start,
column_major: m.column_major,
let (start, end, stride) =
m.stride_range(m.shape().0, m.shape().1, &vrows, &vcols, m.column_major);
Ok(DenseMatrixView {
values: &m.values[start..end],
stride,
nrows: vrows.end - vrows.start,
ncols: vcols.end - vcols.start,
column_major: m.column_major,
})
}
}
@@ -102,26 +104,26 @@ impl<'a, T: Debug + Display + Copy + Sized> fmt::Display for DenseMatrixView<'a,
}
impl<'a, T: Debug + Display + Copy + Sized> DenseMatrixMutView<'a, T> {
fn new(m: &'a mut DenseMatrix<T>, rows: Range<usize>, cols: Range<usize>) -> Self {
let (start, end, stride) = if m.column_major {
(
rows.start + cols.start * m.nrows,
rows.end + (cols.end - 1) * m.nrows,
m.nrows,
)
fn new(
m: &'a mut DenseMatrix<T>,
vrows: Range<usize>,
vcols: Range<usize>,
) -> Result<Self, Failed> {
if m.is_valid_view(m.shape().0, m.shape().1, &vrows, &vcols) {
Err(Failed::input(
"The specified view is outside of the matrix range",
))
} else {
(
rows.start * m.ncols + cols.start,
(rows.end - 1) * m.ncols + cols.end,
m.ncols,
)
};
DenseMatrixMutView {
values: &mut m.values[start..end],
stride,
nrows: rows.end - rows.start,
ncols: cols.end - cols.start,
column_major: m.column_major,
let (start, end, stride) =
m.stride_range(m.shape().0, m.shape().1, &vrows, &vcols, m.column_major);
Ok(DenseMatrixMutView {
values: &mut m.values[start..end],
stride,
nrows: vrows.end - vrows.start,
ncols: vcols.end - vcols.start,
column_major: m.column_major,
})
}
}
@@ -182,42 +184,102 @@ impl<'a, T: Debug + Display + Copy + Sized> fmt::Display for DenseMatrixMutView<
impl<T: Debug + Display + Copy + Sized> DenseMatrix<T> {
/// Create new instance of `DenseMatrix` without copying data.
/// `values` should be in column-major order.
pub fn new(nrows: usize, ncols: usize, values: Vec<T>, column_major: bool) -> Self {
DenseMatrix {
ncols,
nrows,
values,
column_major,
pub fn new(
nrows: usize,
ncols: usize,
values: Vec<T>,
column_major: bool,
) -> Result<Self, Failed> {
let data_len = values.len();
if nrows * ncols != values.len() {
Err(Failed::input(&format!(
"The specified shape: (cols: {ncols}, rows: {nrows}) does not align with data len: {data_len}"
)))
} else {
Ok(DenseMatrix {
ncols,
nrows,
values,
column_major,
})
}
}
/// New instance of `DenseMatrix` from 2d array.
pub fn from_2d_array(values: &[&[T]]) -> Self {
pub fn from_2d_array(values: &[&[T]]) -> Result<Self, Failed> {
DenseMatrix::from_2d_vec(&values.iter().map(|row| Vec::from(*row)).collect())
}
/// New instance of `DenseMatrix` from 2d vector.
pub fn from_2d_vec(values: &Vec<Vec<T>>) -> Self {
let nrows = values.len();
let ncols = values
.first()
.unwrap_or_else(|| panic!("Cannot create 2d matrix from an empty vector"))
.len();
let mut m_values = Vec::with_capacity(nrows * ncols);
#[allow(clippy::ptr_arg)]
pub fn from_2d_vec(values: &Vec<Vec<T>>) -> Result<Self, Failed> {
if values.is_empty() || values[0].is_empty() {
Err(Failed::input(
"The 2d vec provided is empty; cannot instantiate the matrix",
))
} else {
let nrows = values.len();
let ncols = values
.first()
.unwrap_or_else(|| {
panic!("Invalid state: Cannot create 2d matrix from an empty vector")
})
.len();
let mut m_values = Vec::with_capacity(nrows * ncols);
for c in 0..ncols {
for r in values.iter().take(nrows) {
m_values.push(r[c])
for c in 0..ncols {
for r in values.iter().take(nrows) {
m_values.push(r[c])
}
}
}
DenseMatrix::new(nrows, ncols, m_values, true)
DenseMatrix::new(nrows, ncols, m_values, true)
}
}
/// Iterate over values of matrix
pub fn iter(&self) -> Iter<'_, T> {
self.values.iter()
}
/// Check if the size of the requested view is bounded to matrix rows/cols count
fn is_valid_view(
&self,
n_rows: usize,
n_cols: usize,
vrows: &Range<usize>,
vcols: &Range<usize>,
) -> bool {
!(vrows.end <= n_rows
&& vcols.end <= n_cols
&& vrows.start <= n_rows
&& vcols.start <= n_cols)
}
/// Compute the range of the requested view: start, end, size of the slice
fn stride_range(
&self,
n_rows: usize,
n_cols: usize,
vrows: &Range<usize>,
vcols: &Range<usize>,
column_major: bool,
) -> (usize, usize, usize) {
let (start, end, stride) = if column_major {
(
vrows.start + vcols.start * n_rows,
vrows.end + (vcols.end - 1) * n_rows,
n_rows,
)
} else {
(
vrows.start * n_cols + vcols.start,
(vrows.end - 1) * n_cols + vcols.end,
n_cols,
)
};
(start, end, stride)
}
}
impl<T: Debug + Display + Copy + Sized> fmt::Display for DenseMatrix<T> {
@@ -304,6 +366,7 @@ where
impl<T: Debug + Display + Copy + Sized> Array<T, (usize, usize)> for DenseMatrix<T> {
fn get(&self, pos: (usize, usize)) -> &T {
let (row, col) = pos;
if row >= self.nrows || col >= self.ncols {
panic!(
"Invalid index ({},{}) for {}x{} matrix",
@@ -383,15 +446,15 @@ impl<T: Debug + Display + Copy + Sized> MutArrayView2<T> for DenseMatrix<T> {}
impl<T: Debug + Display + Copy + Sized> Array2<T> for DenseMatrix<T> {
fn get_row<'a>(&'a self, row: usize) -> Box<dyn ArrayView1<T> + 'a> {
Box::new(DenseMatrixView::new(self, row..row + 1, 0..self.ncols))
Box::new(DenseMatrixView::new(self, row..row + 1, 0..self.ncols).unwrap())
}
fn get_col<'a>(&'a self, col: usize) -> Box<dyn ArrayView1<T> + 'a> {
Box::new(DenseMatrixView::new(self, 0..self.nrows, col..col + 1))
Box::new(DenseMatrixView::new(self, 0..self.nrows, col..col + 1).unwrap())
}
fn slice<'a>(&'a self, rows: Range<usize>, cols: Range<usize>) -> Box<dyn ArrayView2<T> + 'a> {
Box::new(DenseMatrixView::new(self, rows, cols))
Box::new(DenseMatrixView::new(self, rows, cols).unwrap())
}
fn slice_mut<'a>(
@@ -402,15 +465,17 @@ impl<T: Debug + Display + Copy + Sized> Array2<T> for DenseMatrix<T> {
where
Self: Sized,
{
Box::new(DenseMatrixMutView::new(self, rows, cols))
Box::new(DenseMatrixMutView::new(self, rows, cols).unwrap())
}
// private function so for now assume infallible
fn fill(nrows: usize, ncols: usize, value: T) -> Self {
DenseMatrix::new(nrows, ncols, vec![value; nrows * ncols], true)
DenseMatrix::new(nrows, ncols, vec![value; nrows * ncols], true).unwrap()
}
// private function so for now assume infallible
fn from_iterator<I: Iterator<Item = T>>(iter: I, nrows: usize, ncols: usize, axis: u8) -> Self {
DenseMatrix::new(nrows, ncols, iter.collect(), axis != 0)
DenseMatrix::new(nrows, ncols, iter.collect(), axis != 0).unwrap()
}
fn transpose(&self) -> Self {
@@ -431,9 +496,9 @@ impl<T: Number + RealNumber> SVDDecomposable<T> for DenseMatrix<T> {}
impl<'a, T: Debug + Display + Copy + Sized> Array<T, (usize, usize)> for DenseMatrixView<'a, T> {
fn get(&self, pos: (usize, usize)) -> &T {
if self.column_major {
&self.values[(pos.0 + pos.1 * self.stride)]
&self.values[pos.0 + pos.1 * self.stride]
} else {
&self.values[(pos.0 * self.stride + pos.1)]
&self.values[pos.0 * self.stride + pos.1]
}
}
@@ -495,9 +560,9 @@ impl<'a, T: Debug + Display + Copy + Sized> ArrayView1<T> for DenseMatrixView<'a
impl<'a, T: Debug + Display + Copy + Sized> Array<T, (usize, usize)> for DenseMatrixMutView<'a, T> {
fn get(&self, pos: (usize, usize)) -> &T {
if self.column_major {
&self.values[(pos.0 + pos.1 * self.stride)]
&self.values[pos.0 + pos.1 * self.stride]
} else {
&self.values[(pos.0 * self.stride + pos.1)]
&self.values[pos.0 * self.stride + pos.1]
}
}
@@ -519,9 +584,9 @@ impl<'a, T: Debug + Display + Copy + Sized> MutArray<T, (usize, usize)>
{
fn set(&mut self, pos: (usize, usize), x: T) {
if self.column_major {
self.values[(pos.0 + pos.1 * self.stride)] = x;
self.values[pos.0 + pos.1 * self.stride] = x;
} else {
self.values[(pos.0 * self.stride + pos.1)] = x;
self.values[pos.0 * self.stride + pos.1] = x;
}
}
@@ -544,15 +609,74 @@ mod tests {
use approx::relative_eq;
#[test]
fn test_display() {
fn test_instantiate_from_2d() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);
assert!(x.is_ok());
}
#[test]
fn test_instantiate_from_2d_empty() {
let input: &[&[f64]] = &[&[]];
let x = DenseMatrix::from_2d_array(input);
assert!(x.is_err());
}
#[test]
fn test_instantiate_from_2d_empty2() {
let input: &[&[f64]] = &[&[], &[]];
let x = DenseMatrix::from_2d_array(input);
assert!(x.is_err());
}
#[test]
fn test_instantiate_ok_view1() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
let v = DenseMatrixView::new(&x, 0..2, 0..2);
assert!(v.is_ok());
}
#[test]
fn test_instantiate_ok_view2() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
let v = DenseMatrixView::new(&x, 0..3, 0..3);
assert!(v.is_ok());
}
#[test]
fn test_instantiate_ok_view3() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
let v = DenseMatrixView::new(&x, 2..3, 0..3);
assert!(v.is_ok());
}
#[test]
fn test_instantiate_ok_view4() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
let v = DenseMatrixView::new(&x, 3..3, 0..3);
assert!(v.is_ok());
}
#[test]
fn test_instantiate_err_view1() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
let v = DenseMatrixView::new(&x, 3..4, 0..3);
assert!(v.is_err());
}
#[test]
fn test_instantiate_err_view2() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
let v = DenseMatrixView::new(&x, 0..3, 3..4);
assert!(v.is_err());
}
#[test]
fn test_instantiate_err_view3() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
let v = DenseMatrixView::new(&x, 0..3, 4..3);
assert!(v.is_err());
}
#[test]
fn test_display() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
println!("{}", &x);
}
#[test]
fn test_get_row_col() {
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);
let x = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
assert_eq!(15.0, x.get_col(1).sum());
assert_eq!(15.0, x.get_row(1).sum());
@@ -561,7 +685,7 @@ mod tests {
#[test]
fn test_row_major() {
let mut x = DenseMatrix::new(2, 3, vec![1, 2, 3, 4, 5, 6], false);
let mut x = DenseMatrix::new(2, 3, vec![1, 2, 3, 4, 5, 6], false).unwrap();
assert_eq!(5, *x.get_col(1).get(1));
assert_eq!(7, x.get_col(1).sum());
@@ -575,7 +699,8 @@ mod tests {
#[test]
fn test_get_slice() {
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9], &[10, 11, 12]]);
let x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9], &[10, 11, 12]])
.unwrap();
assert_eq!(
vec![4, 5, 6],
@@ -589,7 +714,7 @@ mod tests {
#[test]
fn test_iter_mut() {
let mut x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9]]);
let mut x = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9]]).unwrap();
assert_eq!(vec![1, 4, 7, 2, 5, 8, 3, 6, 9], x.values);
// add +2 to some elements
@@ -625,7 +750,8 @@ mod tests {
#[test]
fn test_str_array() {
let mut x =
DenseMatrix::from_2d_array(&[&["1", "2", "3"], &["4", "5", "6"], &["7", "8", "9"]]);
DenseMatrix::from_2d_array(&[&["1", "2", "3"], &["4", "5", "6"], &["7", "8", "9"]])
.unwrap();
assert_eq!(vec!["1", "4", "7", "2", "5", "8", "3", "6", "9"], x.values);
x.iterator_mut(0).for_each(|v| *v = "str");
@@ -637,7 +763,7 @@ mod tests {
#[test]
fn test_transpose() {
let x = DenseMatrix::<&str>::from_2d_array(&[&["1", "2", "3"], &["4", "5", "6"]]);
let x = DenseMatrix::<&str>::from_2d_array(&[&["1", "2", "3"], &["4", "5", "6"]]).unwrap();
assert_eq!(vec!["1", "4", "2", "5", "3", "6"], x.values);
assert!(x.column_major);
@@ -650,7 +776,7 @@ mod tests {
#[test]
fn test_from_iterator() {
let data = vec![1, 2, 3, 4, 5, 6];
let data = [1, 2, 3, 4, 5, 6];
let m = DenseMatrix::from_iterator(data.iter(), 2, 3, 0);
@@ -664,8 +790,8 @@ mod tests {
#[test]
fn test_take() {
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]);
let b = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6]]).unwrap();
let b = DenseMatrix::from_2d_array(&[&[1, 2], &[3, 4], &[5, 6]]).unwrap();
println!("{a}");
// take column 0 and 2
@@ -677,7 +803,7 @@ mod tests {
#[test]
fn test_mut() {
let a = DenseMatrix::from_2d_array(&[&[1.3, -2.1, 3.4], &[-4., -5.3, 6.1]]);
let a = DenseMatrix::from_2d_array(&[&[1.3, -2.1, 3.4], &[-4., -5.3, 6.1]]).unwrap();
let a = a.abs();
assert_eq!(vec![1.3, 4.0, 2.1, 5.3, 3.4, 6.1], a.values);
@@ -688,7 +814,8 @@ mod tests {
#[test]
fn test_reshape() {
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9], &[10, 11, 12]]);
let a = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9], &[10, 11, 12]])
.unwrap();
let a = a.reshape(2, 6, 0);
assert_eq!(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], a.values);
@@ -701,13 +828,15 @@ mod tests {
#[test]
fn test_eq() {
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]);
let b = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]);
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.]]).unwrap();
let b = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[4., 5., 6.], &[7., 8., 9.]]).unwrap();
let c = DenseMatrix::from_2d_array(&[
&[1. + f32::EPSILON, 2., 3.],
&[4., 5., 6. + f32::EPSILON],
]);
let d = DenseMatrix::from_2d_array(&[&[1. + 0.5, 2., 3.], &[4., 5., 6. + f32::EPSILON]]);
])
.unwrap();
let d = DenseMatrix::from_2d_array(&[&[1. + 0.5, 2., 3.], &[4., 5., 6. + f32::EPSILON]])
.unwrap();
assert!(!relative_eq!(a, b));
assert!(!relative_eq!(a, d));
+22 -1
@@ -15,6 +15,25 @@ pub struct VecView<'a, T: Debug + Display + Copy + Sized> {
ptr: &'a [T],
}
impl<T: Debug + Display + Copy + Sized> Array<T, usize> for &[T] {
fn get(&self, i: usize) -> &T {
&self[i]
}
fn shape(&self) -> usize {
self.len()
}
fn is_empty(&self) -> bool {
self.len() > 0
}
fn iterator<'b>(&'b self, axis: u8) -> Box<dyn Iterator<Item = &'b T> + 'b> {
assert!(axis == 0, "For one dimensional array `axis` should == 0");
Box::new(self.iter())
}
}
impl<T: Debug + Display + Copy + Sized> Array<T, usize> for Vec<T> {
fn get(&self, i: usize) -> &T {
&self[i]
@@ -36,6 +55,7 @@ impl<T: Debug + Display + Copy + Sized> Array<T, usize> for Vec<T> {
impl<T: Debug + Display + Copy + Sized> MutArray<T, usize> for Vec<T> {
fn set(&mut self, i: usize, x: T) {
// NOTE: this panics in case of out of bounds index
self[i] = x
}
@@ -46,6 +66,7 @@ impl<T: Debug + Display + Copy + Sized> MutArray<T, usize> for Vec<T> {
}
impl<T: Debug + Display + Copy + Sized> ArrayView1<T> for Vec<T> {}
impl<T: Debug + Display + Copy + Sized> ArrayView1<T> for &[T] {}
impl<T: Debug + Display + Copy + Sized> MutArrayView1<T> for Vec<T> {}
@@ -191,7 +212,7 @@ mod tests {
#[test]
fn test_len() {
let x = vec![1, 2, 3];
let x = [1, 2, 3];
assert_eq!(3, x.len());
}
+11 -7
@@ -15,7 +15,7 @@
//! &[25., 15., -5.],
//! &[15., 18., 0.],
//! &[-5., 0., 11.]
//! ]);
//! ]).unwrap();
//!
//! let cholesky = A.cholesky().unwrap();
//! let lower_triangular: DenseMatrix<f64> = cholesky.L();
@@ -175,11 +175,14 @@ mod tests {
)]
#[test]
fn cholesky_decompose() {
let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]]);
let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]])
.unwrap();
let l =
DenseMatrix::from_2d_array(&[&[5.0, 0.0, 0.0], &[3.0, 3.0, 0.0], &[-1.0, 1.0, 3.0]]);
DenseMatrix::from_2d_array(&[&[5.0, 0.0, 0.0], &[3.0, 3.0, 0.0], &[-1.0, 1.0, 3.0]])
.unwrap();
let u =
DenseMatrix::from_2d_array(&[&[5.0, 3.0, -1.0], &[0.0, 3.0, 1.0], &[0.0, 0.0, 3.0]]);
DenseMatrix::from_2d_array(&[&[5.0, 3.0, -1.0], &[0.0, 3.0, 1.0], &[0.0, 0.0, 3.0]])
.unwrap();
let cholesky = a.cholesky().unwrap();
assert!(relative_eq!(cholesky.L().abs(), l.abs(), epsilon = 1e-4));
@@ -197,9 +200,10 @@ mod tests {
)]
#[test]
fn cholesky_solve_mut() {
let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]]);
let b = DenseMatrix::from_2d_array(&[&[40., 51., 28.]]);
let expected = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0]]);
let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]])
.unwrap();
let b = DenseMatrix::from_2d_array(&[&[40., 51., 28.]]).unwrap();
let expected = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0]]).unwrap();
let cholesky = a.cholesky().unwrap();
+13 -7
@@ -19,7 +19,7 @@
//! &[0.9000, 0.4000, 0.7000],
//! &[0.4000, 0.5000, 0.3000],
//! &[0.7000, 0.3000, 0.8000],
//! ]);
//! ]).unwrap();
//!
//! let evd = A.evd(true).unwrap();
//! let eigenvectors: DenseMatrix<f64> = evd.V;
@@ -820,7 +820,8 @@ mod tests {
&[0.9000, 0.4000, 0.7000],
&[0.4000, 0.5000, 0.3000],
&[0.7000, 0.3000, 0.8000],
]);
])
.unwrap();
let eigen_values: Vec<f64> = vec![1.7498382, 0.3165784, 0.1335834];
@@ -828,7 +829,8 @@ mod tests {
&[0.6881997, -0.07121225, 0.7220180],
&[0.3700456, 0.89044952, -0.2648886],
&[0.6240573, -0.44947578, -0.6391588],
]);
])
.unwrap();
let evd = A.evd(true).unwrap();
@@ -852,7 +854,8 @@ mod tests {
&[0.9000, 0.4000, 0.7000],
&[0.4000, 0.5000, 0.3000],
&[0.8000, 0.3000, 0.8000],
]);
])
.unwrap();
let eigen_values: Vec<f64> = vec![1.79171122, 0.31908143, 0.08920735];
@@ -860,7 +863,8 @@ mod tests {
&[0.7178958, 0.05322098, 0.6812010],
&[0.3837711, -0.84702111, -0.1494582],
&[0.6952105, 0.43984484, -0.7036135],
]);
])
.unwrap();
let evd = A.evd(false).unwrap();
@@ -885,7 +889,8 @@ mod tests {
&[4.0, -1.0, 1.0, 1.0],
&[1.0, 1.0, 3.0, -2.0],
&[1.0, 1.0, 4.0, -1.0],
]);
])
.unwrap();
let eigen_values_d: Vec<f64> = vec![0.0, 2.0, 2.0, 0.0];
let eigen_values_e: Vec<f64> = vec![2.2361, 0.9999, -0.9999, -2.2361];
@@ -895,7 +900,8 @@ mod tests {
&[-0.6707, 0.1059, 0.901, 0.6289],
&[0.9159, -0.1378, 0.3816, 0.0806],
&[0.6707, 0.1059, 0.901, -0.6289],
]);
])
.unwrap();
let evd = A.evd(false).unwrap();
+3 -3
@@ -12,9 +12,9 @@ pub trait HighOrderOperations<T: Number>: Array2<T> {
/// use smartcore::linalg::traits::high_order::HighOrderOperations;
/// use smartcore::linalg::basic::arrays::Array2;
///
/// let a = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]);
/// let b = DenseMatrix::from_2d_array(&[&[5., 6.], &[7., 8.], &[9., 10.]]);
/// let expected = DenseMatrix::from_2d_array(&[&[71., 80.], &[92., 104.]]);
/// let a = DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.]]).unwrap();
/// let b = DenseMatrix::from_2d_array(&[&[5., 6.], &[7., 8.], &[9., 10.]]).unwrap();
/// let expected = DenseMatrix::from_2d_array(&[&[71., 80.], &[92., 104.]]).unwrap();
///
/// assert_eq!(a.ab(true, &b, false), expected);
/// ```
+8 -7
View File
@@ -18,7 +18,7 @@
//! &[1., 2., 3.],
//! &[0., 1., 5.],
//! &[5., 6., 0.]
//! ]);
//! ]).unwrap();
//!
//! let lu = A.lu().unwrap();
//! let lower: DenseMatrix<f64> = lu.L();
@@ -263,13 +263,13 @@ mod tests {
)]
#[test]
fn decompose() {
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]).unwrap();
let expected_L =
DenseMatrix::from_2d_array(&[&[1., 0., 0.], &[0., 1., 0.], &[0.2, 0.8, 1.]]);
DenseMatrix::from_2d_array(&[&[1., 0., 0.], &[0., 1., 0.], &[0.2, 0.8, 1.]]).unwrap();
let expected_U =
DenseMatrix::from_2d_array(&[&[5., 6., 0.], &[0., 1., 5.], &[0., 0., -1.]]);
DenseMatrix::from_2d_array(&[&[5., 6., 0.], &[0., 1., 5.], &[0., 0., -1.]]).unwrap();
let expected_pivot =
DenseMatrix::from_2d_array(&[&[0., 0., 1.], &[0., 1., 0.], &[1., 0., 0.]]);
DenseMatrix::from_2d_array(&[&[0., 0., 1.], &[0., 1., 0.], &[1., 0., 0.]]).unwrap();
let lu = a.lu().unwrap();
assert!(relative_eq!(lu.L(), expected_L, epsilon = 1e-4));
assert!(relative_eq!(lu.U(), expected_U, epsilon = 1e-4));
@@ -281,9 +281,10 @@ mod tests {
)]
#[test]
fn inverse() {
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]);
let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]).unwrap();
let expected =
DenseMatrix::from_2d_array(&[&[-6.0, 3.6, 1.4], &[5.0, -3.0, -1.0], &[-1.0, 0.8, 0.2]]);
DenseMatrix::from_2d_array(&[&[-6.0, 3.6, 1.4], &[5.0, -3.0, -1.0], &[-1.0, 0.8, 0.2]])
.unwrap();
let a_inv = a.lu().and_then(|lu| lu.inverse()).unwrap();
assert!(relative_eq!(a_inv, expected, epsilon = 1e-4));
}
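Since both lu() and inverse() are fallible, the inverse test above chains them with and_then rather than unwrapping twice. A small sketch of that shape; the trait path smartcore::linalg::traits::lu::LUDecomposable is an assumption, the calls themselves mirror the test:

use smartcore::linalg::basic::matrix::DenseMatrix;
use smartcore::linalg::traits::lu::LUDecomposable; // assumed trait path

fn main() {
    let a = DenseMatrix::from_2d_array(&[&[1., 2., 3.], &[0., 1., 5.], &[5., 6., 0.]]).unwrap();
    // lu() and inverse() both return Result, so they compose with and_then.
    let a_inv = a.lu().and_then(|lu| lu.inverse()).unwrap();
    println!("{a_inv}");
}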
+12 -7
@@ -13,7 +13,7 @@
//! &[0.9, 0.4, 0.7],
//! &[0.4, 0.5, 0.3],
//! &[0.7, 0.3, 0.8]
//! ]);
//! ]).unwrap();
//!
//! let qr = A.qr().unwrap();
//! let orthogonal: DenseMatrix<f64> = qr.Q();
@@ -201,17 +201,20 @@ mod tests {
)]
#[test]
fn decompose() {
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]])
.unwrap();
let q = DenseMatrix::from_2d_array(&[
&[-0.7448, 0.2436, 0.6212],
&[-0.331, -0.9432, -0.027],
&[-0.5793, 0.2257, -0.7832],
]);
])
.unwrap();
let r = DenseMatrix::from_2d_array(&[
&[-1.2083, -0.6373, -1.0842],
&[0.0, -0.3064, 0.0682],
&[0.0, 0.0, -0.1999],
]);
])
.unwrap();
let qr = a.qr().unwrap();
assert!(relative_eq!(qr.Q().abs(), q.abs(), epsilon = 1e-4));
assert!(relative_eq!(qr.R().abs(), r.abs(), epsilon = 1e-4));
@@ -223,13 +226,15 @@ mod tests {
)]
#[test]
fn qr_solve_mut() {
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let b = DenseMatrix::from_2d_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]);
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]])
.unwrap();
let b = DenseMatrix::from_2d_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]).unwrap();
let expected_w = DenseMatrix::from_2d_array(&[
&[-0.2027027, -1.2837838],
&[0.8783784, 2.2297297],
&[0.4729730, 0.6621622],
]);
])
.unwrap();
let w = a.qr_solve_mut(b).unwrap();
assert!(relative_eq!(w, expected_w, epsilon = 1e-2));
}
+15 -11
@@ -136,8 +136,8 @@ pub trait MatrixPreprocessing<T: RealNumber>: MutArrayView2<T> + Clone {
/// ```rust
/// use smartcore::linalg::basic::matrix::DenseMatrix;
/// use smartcore::linalg::traits::stats::MatrixPreprocessing;
/// let mut a = DenseMatrix::from_2d_array(&[&[0., 2., 3.], &[-5., -6., -7.]]);
/// let expected = DenseMatrix::from_2d_array(&[&[0., 1., 1.],&[0., 0., 0.]]);
/// let mut a = DenseMatrix::from_2d_array(&[&[0., 2., 3.], &[-5., -6., -7.]]).unwrap();
/// let expected = DenseMatrix::from_2d_array(&[&[0., 1., 1.],&[0., 0., 0.]]).unwrap();
/// a.binarize_mut(0.);
///
/// assert_eq!(a, expected);
@@ -159,8 +159,8 @@ pub trait MatrixPreprocessing<T: RealNumber>: MutArrayView2<T> + Clone {
/// ```rust
/// use smartcore::linalg::basic::matrix::DenseMatrix;
/// use smartcore::linalg::traits::stats::MatrixPreprocessing;
/// let a = DenseMatrix::from_2d_array(&[&[0., 2., 3.], &[-5., -6., -7.]]);
/// let expected = DenseMatrix::from_2d_array(&[&[0., 1., 1.],&[0., 0., 0.]]);
/// let a = DenseMatrix::from_2d_array(&[&[0., 2., 3.], &[-5., -6., -7.]]).unwrap();
/// let expected = DenseMatrix::from_2d_array(&[&[0., 1., 1.],&[0., 0., 0.]]).unwrap();
///
/// assert_eq!(a.binarize(0.), expected);
/// ```
@@ -186,7 +186,8 @@ mod tests {
&[1., 2., 3., 1., 2.],
&[4., 5., 6., 3., 4.],
&[7., 8., 9., 5., 6.],
]);
])
.unwrap();
let expected_0 = vec![4., 5., 6., 3., 4.];
let expected_1 = vec![1.8, 4.4, 7.];
@@ -196,7 +197,7 @@ mod tests {
#[test]
fn test_var() {
let m = DenseMatrix::from_2d_array(&[&[1., 2., 3., 4.], &[5., 6., 7., 8.]]);
let m = DenseMatrix::from_2d_array(&[&[1., 2., 3., 4.], &[5., 6., 7., 8.]]).unwrap();
let expected_0 = vec![4., 4., 4., 4.];
let expected_1 = vec![1.25, 1.25];
@@ -211,7 +212,8 @@ mod tests {
let m = DenseMatrix::from_2d_array(&[
&[0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25],
&[0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25],
]);
])
.unwrap();
let expected_0 = vec![0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
let expected_1 = vec![1.25, 1.25];
@@ -230,7 +232,8 @@ mod tests {
&[1., 2., 3., 1., 2.],
&[4., 5., 6., 3., 4.],
&[7., 8., 9., 5., 6.],
]);
])
.unwrap();
let expected_0 = vec![
2.449489742783178,
2.449489742783178,
@@ -251,10 +254,10 @@ mod tests {
#[test]
fn test_scale() {
let m: DenseMatrix<f64> =
DenseMatrix::from_2d_array(&[&[1., 2., 3., 4.], &[5., 6., 7., 8.]]);
DenseMatrix::from_2d_array(&[&[1., 2., 3., 4.], &[5., 6., 7., 8.]]).unwrap();
let expected_0: DenseMatrix<f64> =
DenseMatrix::from_2d_array(&[&[-1., -1., -1., -1.], &[1., 1., 1., 1.]]);
DenseMatrix::from_2d_array(&[&[-1., -1., -1., -1.], &[1., 1., 1., 1.]]).unwrap();
let expected_1: DenseMatrix<f64> = DenseMatrix::from_2d_array(&[
&[
-1.3416407864998738,
@@ -268,7 +271,8 @@ mod tests {
0.4472135954999579,
1.3416407864998738,
],
]);
])
.unwrap();
assert_eq!(m.mean(0), vec![3.0, 4.0, 5.0, 6.0]);
assert_eq!(m.mean(1), vec![2.5, 6.5]);
+19 -11
@@ -17,7 +17,7 @@
//! &[0.9, 0.4, 0.7],
//! &[0.4, 0.5, 0.3],
//! &[0.7, 0.3, 0.8]
//! ]);
//! ]).unwrap();
//!
//! let svd = A.svd().unwrap();
//! let u: DenseMatrix<f64> = svd.U;
@@ -489,7 +489,8 @@ mod tests {
&[0.9000, 0.4000, 0.7000],
&[0.4000, 0.5000, 0.3000],
&[0.7000, 0.3000, 0.8000],
]);
])
.unwrap();
let s: Vec<f64> = vec![1.7498382, 0.3165784, 0.1335834];
@@ -497,13 +498,15 @@ mod tests {
&[0.6881997, -0.07121225, 0.7220180],
&[0.3700456, 0.89044952, -0.2648886],
&[0.6240573, -0.44947578, -0.639158],
]);
])
.unwrap();
let V = DenseMatrix::from_2d_array(&[
&[0.6881997, -0.07121225, 0.7220180],
&[0.3700456, 0.89044952, -0.2648886],
&[0.6240573, -0.44947578, -0.6391588],
]);
])
.unwrap();
let svd = A.svd().unwrap();
@@ -577,7 +580,8 @@ mod tests {
-0.2158704,
-0.27529472,
],
]);
])
.unwrap();
let s: Vec<f64> = vec![
3.8589375, 3.4396766, 2.6487176, 2.2317399, 1.5165054, 0.8109055, 0.2706515,
@@ -647,7 +651,8 @@ mod tests {
0.73034065,
-0.43965505,
],
]);
])
.unwrap();
let V = DenseMatrix::from_2d_array(&[
&[
@@ -707,7 +712,8 @@ mod tests {
0.1654796,
-0.32346758,
],
]);
])
.unwrap();
let svd = A.svd().unwrap();
@@ -723,10 +729,11 @@ mod tests {
)]
#[test]
fn solve() {
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]]);
let b = DenseMatrix::from_2d_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]);
let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]])
.unwrap();
let b = DenseMatrix::from_2d_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]).unwrap();
let expected_w =
DenseMatrix::from_2d_array(&[&[-0.20, -1.28], &[0.87, 2.22], &[0.47, 0.66]]);
DenseMatrix::from_2d_array(&[&[-0.20, -1.28], &[0.87, 2.22], &[0.47, 0.66]]).unwrap();
let w = a.svd_solve_mut(b).unwrap();
assert!(relative_eq!(w, expected_w, epsilon = 1e-2));
}
@@ -737,7 +744,8 @@ mod tests {
)]
#[test]
fn decompose_restore() {
let a = DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0, 4.0], &[5.0, 6.0, 7.0, 8.0]]);
let a =
DenseMatrix::from_2d_array(&[&[1.0, 2.0, 3.0, 4.0], &[5.0, 6.0, 7.0, 8.0]]).unwrap();
let svd = a.svd().unwrap();
let u: &DenseMatrix<f32> = &svd.U; //U
let v: &DenseMatrix<f32> = &svd.V; // V
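The solve test above uses the decomposition to solve a linear system in one call. A compact sketch of that call shape; the trait path smartcore::linalg::traits::svd::SVDDecomposable is an assumption, the data and the svd_solve_mut call are taken from the test:

use smartcore::linalg::basic::matrix::DenseMatrix;
use smartcore::linalg::traits::svd::SVDDecomposable; // assumed trait path

fn main() {
    let a = DenseMatrix::from_2d_array(&[&[0.9, 0.4, 0.7], &[0.4, 0.5, 0.3], &[0.7, 0.3, 0.8]])
        .unwrap();
    let b = DenseMatrix::from_2d_array(&[&[0.5, 0.2], &[0.5, 0.8], &[0.5, 0.3]]).unwrap();
    // Solves a * w = b for w through the singular value decomposition of a.
    let w = a.svd_solve_mut(b).unwrap();
    println!("{w}");
}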
+5 -3
@@ -12,7 +12,8 @@
//! pub struct BGSolver {}
//! impl<'a, T: FloatNumber, X: Array2<T>> BiconjugateGradientSolver<'a, T, X> for BGSolver {}
//!
//! let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]]);
//! let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0.,
//! 11.]]).unwrap();
//! let b = vec![40., 51., 28.];
//! let expected = vec![1.0, 2.0, 3.0];
//! let mut x = Vec::zeros(3);
@@ -158,9 +159,10 @@ mod tests {
#[test]
fn bg_solver() {
let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]]);
let a = DenseMatrix::from_2d_array(&[&[25., 15., -5.], &[15., 18., 0.], &[-5., 0., 11.]])
.unwrap();
let b = vec![40., 51., 28.];
let expected = vec![1.0, 2.0, 3.0];
let expected = [1.0, 2.0, 3.0];
let mut x = Vec::zeros(3);
+6 -4
@@ -38,7 +38,7 @@
//! &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
//! &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
//! &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
//! ]);
//! ]).unwrap();
//!
//! let y: Vec<f64> = vec![83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0,
//! 100.0, 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9];
@@ -511,7 +511,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y: Vec<f64> = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
@@ -562,7 +563,8 @@ mod tests {
&[17.0, 1918.0, 1.4054969025700674],
&[18.0, 1929.0, 1.3271699396384906],
&[19.0, 1915.0, 1.1373332337674806],
]);
])
.unwrap();
let y: Vec<f64> = vec![
1.48, 2.72, 4.52, 5.72, 5.25, 4.07, 3.75, 4.75, 6.77, 4.72, 6.78, 6.79, 8.3, 7.42,
@@ -627,7 +629,7 @@ mod tests {
// &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
// &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
// &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
// ]);
// ]).unwrap();
// let y = vec![
// 83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+2 -1
@@ -418,7 +418,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y: Vec<f64> = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+4 -3
@@ -40,7 +40,7 @@
//! &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
//! &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
//! &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
//! ]);
//! ]).unwrap();
//!
//! let y: Vec<f64> = vec![83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0,
//! 100.0, 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9];
@@ -341,7 +341,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y: Vec<f64> = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8,
@@ -393,7 +394,7 @@ mod tests {
// &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
// &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
// &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
// ]);
// ]).unwrap();
// let y = vec![
// 83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+85 -39
@@ -35,7 +35,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//! let y: Vec<i32> = vec![
//! 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
//! ];
@@ -416,7 +416,7 @@ impl<TX: Number + FloatNumber + RealNumber, TY: Number + Ord, X: Array2<TX>, Y:
/// Fits Logistic Regression to your data.
/// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
/// * `y` - target class values
/// * `parameters` - other parameters, use `Default::default()` to set parameters to default values.
/// * `parameters` - other parameters, use `Default::default()` to set parameters to default values.
pub fn fit(
x: &X,
y: &Y,
@@ -611,7 +611,8 @@ mod tests {
&[10., -2.],
&[8., 2.],
&[9., 0.],
]);
])
.unwrap();
let y = vec![0, 0, 1, 1, 2, 1, 1, 0, 0, 2, 1, 1, 0, 0, 1];
@@ -671,7 +672,8 @@ mod tests {
&[10., -2.],
&[8., 2.],
&[9., 0.],
]);
])
.unwrap();
let y = vec![0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1];
@@ -733,7 +735,8 @@ mod tests {
&[10., -2.],
&[8., 2.],
&[9., 0.],
]);
])
.unwrap();
let y: Vec<i32> = vec![0, 0, 1, 1, 2, 1, 1, 0, 0, 2, 1, 1, 0, 0, 1];
let lr = LogisticRegression::fit(&x, &y, Default::default()).unwrap();
@@ -818,37 +821,41 @@ mod tests {
assert!(reg_coeff_sum < coeff);
}
// TODO: serialization for the new DenseMatrix needs to be implemented
// #[cfg_attr(all(target_arch = "wasm32", not(target_os = "wasi")), wasm_bindgen_test::wasm_bindgen_test)]
// #[test]
// #[cfg(feature = "serde")]
// fn serde() {
// let x = DenseMatrix::from_2d_array(&[
// &[1., -5.],
// &[2., 5.],
// &[3., -2.],
// &[1., 2.],
// &[2., 0.],
// &[6., -5.],
// &[7., 5.],
// &[6., -2.],
// &[7., 2.],
// &[6., 0.],
// &[8., -5.],
// &[9., 5.],
// &[10., -2.],
// &[8., 2.],
// &[9., 0.],
// ]);
// let y: Vec<i32> = vec![0, 0, 1, 1, 2, 1, 1, 0, 0, 2, 1, 1, 0, 0, 1];
//TODO: serialization for the new DenseMatrix needs to be implemented
#[cfg_attr(
all(target_arch = "wasm32", not(target_os = "wasi")),
wasm_bindgen_test::wasm_bindgen_test
)]
#[test]
#[cfg(feature = "serde")]
fn serde() {
let x: DenseMatrix<f64> = DenseMatrix::from_2d_array(&[
&[1., -5.],
&[2., 5.],
&[3., -2.],
&[1., 2.],
&[2., 0.],
&[6., -5.],
&[7., 5.],
&[6., -2.],
&[7., 2.],
&[6., 0.],
&[8., -5.],
&[9., 5.],
&[10., -2.],
&[8., 2.],
&[9., 0.],
])
.unwrap();
let y: Vec<i32> = vec![0, 0, 1, 1, 2, 1, 1, 0, 0, 2, 1, 1, 0, 0, 1];
// let lr = LogisticRegression::fit(&x, &y, Default::default()).unwrap();
let lr = LogisticRegression::fit(&x, &y, Default::default()).unwrap();
// let deserialized_lr: LogisticRegression<f64, i32, DenseMatrix<f64>, Vec<i32>> =
// serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
let deserialized_lr: LogisticRegression<f64, i32, DenseMatrix<f64>, Vec<i32>> =
serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
// assert_eq!(lr, deserialized_lr);
// }
assert_eq!(lr, deserialized_lr);
}
#[cfg_attr(
all(target_arch = "wasm32", not(target_os = "wasi")),
@@ -877,7 +884,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y: Vec<i32> = vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
let lr = LogisticRegression::fit(&x, &y, Default::default()).unwrap();
@@ -890,11 +898,7 @@ mod tests {
let y_hat = lr.predict(&x).unwrap();
let error: i32 = y
.into_iter()
.zip(y_hat.into_iter())
.map(|(a, b)| (a - b).abs())
.sum();
let error: i32 = y.into_iter().zip(y_hat).map(|(a, b)| (a - b).abs()).sum();
assert!(error <= 1);
@@ -903,4 +907,46 @@ mod tests {
assert!(reg_coeff_sum < coeff);
}
#[cfg_attr(
all(target_arch = "wasm32", not(target_os = "wasi")),
wasm_bindgen_test::wasm_bindgen_test
)]
#[test]
fn lr_fit_predict_random() {
let x: DenseMatrix<f32> = DenseMatrix::rand(52181, 94);
let y1: Vec<i32> = vec![1; 2181];
let y2: Vec<i32> = vec![0; 50000];
let y: Vec<i32> = y1.into_iter().chain(y2.into_iter()).collect();
let lr = LogisticRegression::fit(&x, &y, Default::default()).unwrap();
let lr_reg = LogisticRegression::fit(
&x,
&y,
LogisticRegressionParameters::default().with_alpha(1.0),
)
.unwrap();
let y_hat = lr.predict(&x).unwrap();
let y_hat_reg = lr_reg.predict(&x).unwrap();
assert_eq!(y.len(), y_hat.len());
assert_eq!(y.len(), y_hat_reg.len());
}
#[test]
fn test_logit() {
let x: &DenseMatrix<f64> = &DenseMatrix::rand(52181, 94);
let y1: Vec<u32> = vec![1; 2181];
let y2: Vec<u32> = vec![0; 50000];
let y: &Vec<u32> = &(y1.into_iter().chain(y2.into_iter()).collect());
println!("y vec height: {:?}", y.len());
println!("x matrix shape: {:?}", x.shape());
let lr = LogisticRegression::fit(x, y, Default::default()).unwrap();
let y_hat = lr.predict(&x).unwrap();
println!("y_hat shape: {:?}", y_hat.shape());
assert_eq!(y_hat.shape(), 52181);
}
}
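Taken together, the re-enabled serde test and the new random-data tests exercise the usual fit/predict cycle. A condensed sketch of that cycle; the module path smartcore::linear::logistic_regression is an assumption, while fit, predict, and with_alpha are used exactly as in the tests above:

use smartcore::linalg::basic::matrix::DenseMatrix;
use smartcore::linear::logistic_regression::{LogisticRegression, LogisticRegressionParameters};

fn main() {
    let x = DenseMatrix::from_2d_array(&[
        &[1., -5.], &[2., 5.], &[3., -2.], &[1., 2.], &[2., 0.],
        &[6., -5.], &[7., 5.], &[6., -2.], &[7., 2.], &[6., 0.],
    ])
    .unwrap();
    let y: Vec<i32> = vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1];
    // Plain fit, then a fit with a non-zero regularization strength (alpha).
    let lr = LogisticRegression::fit(&x, &y, Default::default()).unwrap();
    let lr_reg = LogisticRegression::fit(
        &x,
        &y,
        LogisticRegressionParameters::default().with_alpha(1.0),
    )
    .unwrap();
    let y_hat = lr.predict(&x).unwrap();
    assert_eq!(y_hat.len(), y.len());
    assert_eq!(lr_reg.predict(&x).unwrap().len(), y.len());
}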
+4 -3
@@ -40,7 +40,7 @@
//! &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
//! &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
//! &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
//! ]);
//! ]).unwrap();
//!
//! let y: Vec<f64> = vec![83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0,
//! 100.0, 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9];
@@ -455,7 +455,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y: Vec<f64> = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
@@ -513,7 +514,7 @@ mod tests {
// &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
// &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
// &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
// ]);
// ]).unwrap();
// let y = vec![
// 83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+3 -2
@@ -25,7 +25,7 @@
//! &[68., 590., 37.],
//! &[69., 660., 46.],
//! &[73., 600., 55.],
//! ]);
//! ]).unwrap();
//!
//! let a = data.mean_by(0);
//! let b = vec![66., 640., 44.];
@@ -151,7 +151,8 @@ mod tests {
&[68., 590., 37.],
&[69., 660., 46.],
&[73., 600., 55.],
]);
])
.unwrap();
let a = data.mean_by(0);
let b = vec![66., 640., 44.];
+1 -1
@@ -37,7 +37,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//! let y: Vec<i8> = vec![
//! 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
//! ];
@@ -3,9 +3,9 @@
use crate::{
api::{Predictor, SupervisedEstimator},
error::{Failed, FailedError},
linalg::basic::arrays::{Array2, Array1},
numbers::realnum::RealNumber,
linalg::basic::arrays::{Array1, Array2},
numbers::basenum::Number,
numbers::realnum::RealNumber,
};
use crate::model_selection::{cross_validate, BaseKFold, CrossValidationResult};
+2 -6
@@ -283,9 +283,7 @@ mod tests {
(vec![0, 1, 2, 3, 7, 8, 9], vec![4, 5, 6]),
(vec![0, 1, 2, 3, 4, 5, 6], vec![7, 8, 9]),
];
for ((train, test), (expected_train, expected_test)) in
k.split(&x).into_iter().zip(expected)
{
for ((train, test), (expected_train, expected_test)) in k.split(&x).zip(expected) {
assert_eq!(test, expected_test);
assert_eq!(train, expected_train);
}
@@ -307,9 +305,7 @@ mod tests {
(vec![0, 1, 2, 3, 7, 8, 9], vec![4, 5, 6]),
(vec![0, 1, 2, 3, 4, 5, 6], vec![7, 8, 9]),
];
for ((train, test), (expected_train, expected_test)) in
k.split(&x).into_iter().zip(expected)
{
for ((train, test), (expected_train, expected_test)) in k.split(&x).zip(expected) {
assert_eq!(test.len(), expected_test.len());
assert_eq!(train.len(), expected_train.len());
}
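The two loops above lose their .into_iter() because split already returns an iterator over (train_indices, test_indices) pairs. A short sketch of iterating the folds; the import paths are assumptions, the KFold construction and split call mirror the tests in this file:

use smartcore::linalg::basic::matrix::DenseMatrix;
use smartcore::model_selection::{BaseKFold, KFold};

fn main() {
    let x: DenseMatrix<f64> = DenseMatrix::rand(10, 4);
    let k = KFold::default().with_n_splits(3);
    // split already yields (train_indices, test_indices) pairs; no extra into_iter needed.
    for (train, test) in k.split(&x) {
        println!("train: {:?} test: {:?}", train, test);
    }
}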
+10 -6
@@ -36,7 +36,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//! let y: Vec<f64> = vec![
//! 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
//! ];
@@ -84,7 +84,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//! let y: Vec<i32> = vec![
//! 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
//! ];
@@ -396,7 +396,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y: Vec<u32> = vec![0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
let cv = KFold {
@@ -441,7 +442,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
114.2, 115.7, 116.9,
@@ -489,7 +491,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y: Vec<f64> = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
114.2, 115.7, 116.9,
@@ -539,7 +542,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y: Vec<i32> = vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
let cv = KFold::default().with_n_splits(3);
+9 -6
@@ -19,14 +19,14 @@
//! &[0, 1, 0, 0, 1, 0],
//! &[0, 1, 0, 1, 0, 0],
//! &[0, 1, 1, 0, 0, 1],
//! ]);
//! ]).unwrap();
//! let y: Vec<u32> = vec![0, 0, 0, 1];
//!
//! let nb = BernoulliNB::fit(&x, &y, Default::default()).unwrap();
//!
//! // Testing data point is:
//! // Chinese Chinese Chinese Tokyo Japan
//! let x_test = DenseMatrix::from_2d_array(&[&[0, 1, 1, 0, 0, 1]]);
//! let x_test = DenseMatrix::from_2d_array(&[&[0, 1, 1, 0, 0, 1]]).unwrap();
//! let y_hat = nb.predict(&x_test).unwrap();
//! ```
//!
@@ -527,7 +527,8 @@ mod tests {
&[0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
&[0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
&[0.0, 1.0, 1.0, 0.0, 0.0, 1.0],
]);
])
.unwrap();
let y: Vec<u32> = vec![0, 0, 0, 1];
let bnb = BernoulliNB::fit(&x, &y, Default::default()).unwrap();
@@ -558,7 +559,7 @@ mod tests {
// Testing data point is:
// Chinese Chinese Chinese Tokyo Japan
let x_test = DenseMatrix::from_2d_array(&[&[0.0, 1.0, 1.0, 0.0, 0.0, 1.0]]);
let x_test = DenseMatrix::from_2d_array(&[&[0.0, 1.0, 1.0, 0.0, 0.0, 1.0]]).unwrap();
let y_hat = bnb.predict(&x_test).unwrap();
assert_eq!(y_hat, &[1]);
@@ -586,7 +587,8 @@ mod tests {
&[2, 0, 3, 3, 1, 2, 0, 2, 4, 1],
&[2, 4, 0, 4, 2, 4, 1, 3, 1, 4],
&[0, 2, 2, 3, 4, 0, 4, 4, 4, 4],
]);
])
.unwrap();
let y: Vec<u32> = vec![2, 2, 0, 0, 0, 2, 1, 1, 0, 1, 0, 0, 2, 0, 2];
let bnb = BernoulliNB::fit(&x, &y, Default::default()).unwrap();
@@ -643,7 +645,8 @@ mod tests {
&[0, 1, 0, 0, 1, 0],
&[0, 1, 0, 1, 0, 0],
&[0, 1, 1, 0, 0, 1],
]);
])
.unwrap();
let y: Vec<u32> = vec![0, 0, 0, 1];
let bnb = BernoulliNB::fit(&x, &y, Default::default()).unwrap();
+8 -5
@@ -24,7 +24,7 @@
//! &[3, 4, 2, 4],
//! &[0, 3, 1, 2],
//! &[0, 4, 1, 2],
//! ]);
//! ]).unwrap();
//! let y: Vec<u32> = vec![0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0];
//!
//! let nb = CategoricalNB::fit(&x, &y, Default::default()).unwrap();
@@ -455,7 +455,8 @@ mod tests {
&[1, 1, 1, 1],
&[1, 2, 0, 0],
&[2, 1, 1, 1],
]);
])
.unwrap();
let y: Vec<u32> = vec![0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0];
let cnb = CategoricalNB::fit(&x, &y, Default::default()).unwrap();
@@ -513,7 +514,7 @@ mod tests {
]
);
let x_test = DenseMatrix::from_2d_array(&[&[0, 2, 1, 0], &[2, 2, 0, 0]]);
let x_test = DenseMatrix::from_2d_array(&[&[0, 2, 1, 0], &[2, 2, 0, 0]]).unwrap();
let y_hat = cnb.predict(&x_test).unwrap();
assert_eq!(y_hat, vec![0, 1]);
}
@@ -539,7 +540,8 @@ mod tests {
&[3, 4, 2, 4],
&[0, 3, 1, 2],
&[0, 4, 1, 2],
]);
])
.unwrap();
let y: Vec<u32> = vec![0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0];
let cnb = CategoricalNB::fit(&x, &y, Default::default()).unwrap();
@@ -571,7 +573,8 @@ mod tests {
&[3, 4, 2, 4],
&[0, 3, 1, 2],
&[0, 4, 1, 2],
]);
])
.unwrap();
let y: Vec<u32> = vec![0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0];
let cnb = CategoricalNB::fit(&x, &y, Default::default()).unwrap();
+7 -4
@@ -16,7 +16,7 @@
//! &[ 1., 1.],
//! &[ 2., 1.],
//! &[ 3., 2.],
//! ]);
//! ]).unwrap();
//! let y: Vec<u32> = vec![1, 1, 1, 2, 2, 2];
//!
//! let nb = GaussianNB::fit(&x, &y, Default::default()).unwrap();
@@ -395,7 +395,8 @@ mod tests {
&[1., 1.],
&[2., 1.],
&[3., 2.],
]);
])
.unwrap();
let y: Vec<u32> = vec![1, 1, 1, 2, 2, 2];
let gnb = GaussianNB::fit(&x, &y, Default::default()).unwrap();
@@ -435,7 +436,8 @@ mod tests {
&[1., 1.],
&[2., 1.],
&[3., 2.],
]);
])
.unwrap();
let y: Vec<u32> = vec![1, 1, 1, 2, 2, 2];
let priors = vec![0.3, 0.7];
@@ -462,7 +464,8 @@ mod tests {
&[1., 1.],
&[2., 1.],
&[3., 2.],
]);
])
.unwrap();
let y: Vec<u32> = vec![1, 1, 1, 2, 2, 2];
let gnb = GaussianNB::fit(&x, &y, Default::default()).unwrap();
+84 -10
@@ -40,7 +40,7 @@ use crate::linalg::basic::arrays::{Array1, Array2, ArrayView1};
use crate::numbers::basenum::Number;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::marker::PhantomData;
use std::{cmp::Ordering, marker::PhantomData};
/// Distribution used in the Naive Bayes classifier.
pub(crate) trait NBDistribution<X: Number, Y: Number>: Clone {
@@ -92,11 +92,10 @@ impl<TX: Number, TY: Number, X: Array2<TX>, Y: Array1<TY>, D: NBDistribution<TX,
/// Returns a vector of size N with class estimates.
pub fn predict(&self, x: &X) -> Result<Y, Failed> {
let y_classes = self.distribution.classes();
let (rows, _) = x.shape();
let predictions = (0..rows)
.map(|row_index| {
let row = x.get_row(row_index);
let (prediction, _probability) = y_classes
let predictions = x
.row_iter()
.map(|row| {
y_classes
.iter()
.enumerate()
.map(|(class_index, class)| {
@@ -106,11 +105,26 @@ impl<TX: Number, TY: Number, X: Array2<TX>, Y: Array1<TY>, D: NBDistribution<TX,
+ self.distribution.prior(class_index).ln(),
)
})
.max_by(|(_, p1), (_, p2)| p1.partial_cmp(p2).unwrap())
.unwrap();
*prediction
// max_by panics here if the comparator unwraps a partial_cmp that returned None,
// which happens whenever a log-likelihood is NaN. Treat NaN as the smallest value,
// so NaN entries are never chosen as the maximum and no Option::unwrap is needed.
.max_by(|(_, p1), (_, p2)| match p1.partial_cmp(p2) {
Some(ordering) => ordering,
None => {
if p1.is_nan() {
Ordering::Less
} else if p2.is_nan() {
Ordering::Greater
} else {
Ordering::Equal
}
}
})
.map(|(prediction, _probability)| *prediction)
.ok_or_else(|| Failed::predict("Failed to predict, there is no result"))
})
.collect::<Vec<TY>>();
.collect::<Result<Vec<TY>, Failed>>()?;
let y_hat = Y::from_vec_slice(&predictions);
Ok(y_hat)
}
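The comparator above is the heart of this fix. The same idea in standalone form, using only std and plain f64 values rather than the smartcore types:

use std::cmp::Ordering;

fn main() {
    let scores = [0.3_f64, f64::NAN, 0.7, f64::NAN];

    // partial_cmp yields None when either side is NaN; map that case so NaN always
    // loses, mirroring the comparator added to BaseNaiveBayes::predict.
    let best = scores
        .iter()
        .enumerate()
        .max_by(|(_, a), (_, b)| match a.partial_cmp(b) {
            Some(ordering) => ordering,
            None if a.is_nan() => Ordering::Less,
            None => Ordering::Greater,
        })
        .map(|(index, _)| index);

    assert_eq!(best, Some(2)); // index of 0.7, the largest non-NaN score
}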
@@ -119,3 +133,63 @@ pub mod bernoulli;
pub mod categorical;
pub mod gaussian;
pub mod multinomial;
#[cfg(test)]
mod tests {
use super::*;
use crate::linalg::basic::arrays::Array;
use crate::linalg::basic::matrix::DenseMatrix;
use num_traits::float::Float;
type Model<'d> = BaseNaiveBayes<i32, i32, DenseMatrix<i32>, Vec<i32>, TestDistribution<'d>>;
#[derive(Debug, PartialEq, Clone)]
struct TestDistribution<'d>(&'d Vec<i32>);
impl<'d> NBDistribution<i32, i32> for TestDistribution<'d> {
fn prior(&self, _class_index: usize) -> f64 {
1.
}
fn log_likelihood<'a>(
&'a self,
class_index: usize,
_j: &'a Box<dyn ArrayView1<i32> + 'a>,
) -> f64 {
match self.0.get(class_index) {
&v @ 2 | &v @ 10 | &v @ 20 => v as f64,
_ => f64::nan(),
}
}
fn classes(&self) -> &Vec<i32> {
&self.0
}
}
#[test]
fn test_predict() {
let matrix = DenseMatrix::from_2d_array(&[&[1, 2, 3], &[4, 5, 6], &[7, 8, 9]]).unwrap();
let val = vec![];
match Model::fit(TestDistribution(&val)).unwrap().predict(&matrix) {
Ok(_) => panic!("Should return error in case of empty classes"),
Err(err) => assert_eq!(
err.to_string(),
"Predict failed: Failed to predict, there is no result"
),
}
let val = vec![1, 2, 3];
match Model::fit(TestDistribution(&val)).unwrap().predict(&matrix) {
Ok(r) => assert_eq!(r, vec![2, 2, 2]),
Err(_) => panic!("Should succeed in normal case with NaNs"),
}
let val = vec![20, 2, 10];
match Model::fit(TestDistribution(&val)).unwrap().predict(&matrix) {
Ok(r) => assert_eq!(r, vec![20, 20, 20]),
Err(_) => panic!("Should succeed in normal case without NaNs"),
}
}
}
+9 -6
@@ -20,13 +20,13 @@
//! &[0, 2, 0, 0, 1, 0],
//! &[0, 1, 0, 1, 0, 0],
//! &[0, 1, 1, 0, 0, 1],
//! ]);
//! ]).unwrap();
//! let y: Vec<u32> = vec![0, 0, 0, 1];
//! let nb = MultinomialNB::fit(&x, &y, Default::default()).unwrap();
//!
//! // Testing data point is:
//! // Chinese Chinese Chinese Tokyo Japan
//! let x_test = DenseMatrix::from_2d_array(&[&[0, 3, 1, 0, 0, 1]]);
//! let x_test = DenseMatrix::from_2d_array(&[&[0, 3, 1, 0, 0, 1]]).unwrap();
//! let y_hat = nb.predict(&x_test).unwrap();
//! ```
//!
@@ -433,7 +433,8 @@ mod tests {
&[0, 2, 0, 0, 1, 0],
&[0, 1, 0, 1, 0, 0],
&[0, 1, 1, 0, 0, 1],
]);
])
.unwrap();
let y: Vec<u32> = vec![0, 0, 0, 1];
let mnb = MultinomialNB::fit(&x, &y, Default::default()).unwrap();
@@ -467,7 +468,7 @@ mod tests {
// Testing data point is:
// Chinese Chinese Chinese Tokyo Japan
let x_test = DenseMatrix::<u32>::from_2d_array(&[&[0, 3, 1, 0, 0, 1]]);
let x_test = DenseMatrix::<u32>::from_2d_array(&[&[0, 3, 1, 0, 0, 1]]).unwrap();
let y_hat = mnb.predict(&x_test).unwrap();
assert_eq!(y_hat, &[0]);
@@ -495,7 +496,8 @@ mod tests {
&[2, 0, 3, 3, 1, 2, 0, 2, 4, 1],
&[2, 4, 0, 4, 2, 4, 1, 3, 1, 4],
&[0, 2, 2, 3, 4, 0, 4, 4, 4, 4],
]);
])
.unwrap();
let y: Vec<u32> = vec![2, 2, 0, 0, 0, 2, 1, 1, 0, 1, 0, 0, 2, 0, 2];
let nb = MultinomialNB::fit(&x, &y, Default::default()).unwrap();
@@ -554,7 +556,8 @@ mod tests {
&[0, 1, 0, 0, 1, 0],
&[0, 1, 0, 1, 0, 0],
&[0, 1, 1, 0, 0, 1],
]);
])
.unwrap();
let y = vec![0, 0, 0, 1];
let mnb = MultinomialNB::fit(&x, &y, Default::default()).unwrap();
+10 -6
@@ -22,7 +22,7 @@
//! &[3., 4.],
//! &[5., 6.],
//! &[7., 8.],
//! &[9., 10.]]);
//! &[9., 10.]]).unwrap();
//! let y = vec![2, 2, 2, 3, 3]; //your class labels
//!
//! let knn = KNNClassifier::fit(&x, &y, Default::default()).unwrap();
@@ -211,7 +211,7 @@ impl<TX: Number, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>, D: Distance<Vec
{
/// Fits KNN classifier to a NxM matrix where N is number of samples and M is number of features.
/// * `x` - training data
/// * `y` - vector with target values (classes) of length N
/// * `y` - vector with target values (classes) of length N
/// * `parameters` - additional parameters like search algorithm and k
pub fn fit(
x: &X,
@@ -311,7 +311,8 @@ mod tests {
#[test]
fn knn_fit_predict() {
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]])
.unwrap();
let y = vec![2, 2, 2, 3, 3];
let knn = KNNClassifier::fit(&x, &y, Default::default()).unwrap();
let y_hat = knn.predict(&x).unwrap();
@@ -325,7 +326,7 @@ mod tests {
)]
#[test]
fn knn_fit_predict_weighted() {
let x = DenseMatrix::from_2d_array(&[&[1.], &[2.], &[3.], &[4.], &[5.]]);
let x = DenseMatrix::from_2d_array(&[&[1.], &[2.], &[3.], &[4.], &[5.]]).unwrap();
let y = vec![2, 2, 2, 3, 3];
let knn = KNNClassifier::fit(
&x,
@@ -336,7 +337,9 @@ mod tests {
.with_weight(KNNWeightFunction::Distance),
)
.unwrap();
let y_hat = knn.predict(&DenseMatrix::from_2d_array(&[&[4.1]])).unwrap();
let y_hat = knn
.predict(&DenseMatrix::from_2d_array(&[&[4.1]]).unwrap())
.unwrap();
assert_eq!(vec![3], y_hat);
}
@@ -348,7 +351,8 @@ mod tests {
#[cfg(feature = "serde")]
fn serde() {
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]])
.unwrap();
let y = vec![2, 2, 2, 3, 3];
let knn = KNNClassifier::fit(&x, &y, Default::default()).unwrap();
+10 -7
@@ -24,7 +24,7 @@
//! &[2., 2.],
//! &[3., 3.],
//! &[4., 4.],
//! &[5., 5.]]);
//! &[5., 5.]]).unwrap();
//! let y = vec![1., 2., 3., 4., 5.]; //your target values
//!
//! let knn = KNNRegressor::fit(&x, &y, Default::default()).unwrap();
@@ -207,7 +207,7 @@ impl<TX: Number, TY: Number, X: Array2<TX>, Y: Array1<TY>, D: Distance<Vec<TX>>>
{
/// Fits KNN regressor to a NxM matrix where N is number of samples and M is number of features.
/// * `x` - training data
/// * `y` - vector with real values
/// * `y` - vector with real values
/// * `parameters` - additional parameters like search algorithm and k
pub fn fit(
x: &X,
@@ -295,9 +295,10 @@ mod tests {
#[test]
fn knn_fit_predict_weighted() {
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]])
.unwrap();
let y: Vec<f64> = vec![1., 2., 3., 4., 5.];
let y_exp = vec![1., 2., 3., 4., 5.];
let y_exp = [1., 2., 3., 4., 5.];
let knn = KNNRegressor::fit(
&x,
&y,
@@ -322,9 +323,10 @@ mod tests {
#[test]
fn knn_fit_predict_uniform() {
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]])
.unwrap();
let y: Vec<f64> = vec![1., 2., 3., 4., 5.];
let y_exp = vec![2., 2., 3., 4., 4.];
let y_exp = [2., 2., 3., 4., 4.];
let knn = KNNRegressor::fit(&x, &y, Default::default()).unwrap();
let y_hat = knn.predict(&x).unwrap();
assert_eq!(5, Vec::len(&y_hat));
@@ -341,7 +343,8 @@ mod tests {
#[cfg(feature = "serde")]
fn serde() {
let x =
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]]);
DenseMatrix::from_2d_array(&[&[1., 2.], &[3., 4.], &[5., 6.], &[7., 8.], &[9., 10.]])
.unwrap();
let y = vec![1., 2., 3., 4., 5.];
let knn = KNNRegressor::fit(&x, &y, Default::default()).unwrap();
+12 -7
@@ -12,7 +12,7 @@
//! &[1.5, 2.0, 1.5, 4.0],
//! &[1.5, 1.0, 1.5, 5.0],
//! &[1.5, 2.0, 1.5, 6.0],
//! ]);
//! ]).unwrap();
//! let encoder_params = OneHotEncoderParams::from_cat_idx(&[1, 3]);
//! // Infer number of categories from data and return a reusable encoder
//! let encoder = OneHotEncoder::fit(&data, encoder_params).unwrap();
@@ -240,14 +240,16 @@ mod tests {
&[2.0, 1.5, 4.0],
&[1.0, 1.5, 5.0],
&[2.0, 1.5, 6.0],
]);
])
.unwrap();
let oh_enc = DenseMatrix::from_2d_array(&[
&[1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0],
&[0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0],
&[1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0],
&[0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0],
]);
])
.unwrap();
(orig, oh_enc)
}
@@ -259,14 +261,16 @@ mod tests {
&[1.5, 2.0, 1.5, 4.0],
&[1.5, 1.0, 1.5, 5.0],
&[1.5, 2.0, 1.5, 6.0],
]);
])
.unwrap();
let oh_enc = DenseMatrix::from_2d_array(&[
&[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0],
&[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0],
&[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0],
&[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0],
]);
])
.unwrap();
(orig, oh_enc)
}
@@ -277,7 +281,7 @@ mod tests {
)]
#[test]
fn hash_encode_f64_series() {
let series = vec![3.0, 1.0, 2.0, 1.0];
let series = [3.0, 1.0, 2.0, 1.0];
let hashable_series: Vec<CategoricalFloat> =
series.iter().map(|v| v.to_category()).collect();
let enc = CategoryMapper::from_positional_category_vec(hashable_series);
@@ -334,7 +338,8 @@ mod tests {
&[2.0, 1.5, 4.0],
&[1.0, 1.5, 5.0],
&[2.0, 1.5, 6.0],
]);
])
.unwrap();
let params = OneHotEncoderParams::from_cat_idx(&[1]);
let result = OneHotEncoder::fit(&m, params);
+47 -38
@@ -11,7 +11,7 @@
//! vec![0.0, 0.0],
//! vec![1.0, 1.0],
//! vec![1.0, 1.0],
//! ]);
//! ]).unwrap();
//!
//! let standard_scaler =
//! numerical::StandardScaler::fit(&data, numerical::StandardScalerParameters::default())
@@ -24,7 +24,7 @@
//! vec![-1.0, -1.0],
//! vec![1.0, 1.0],
//! vec![1.0, 1.0],
//! ])
//! ]).unwrap()
//! );
//! ```
use std::marker::PhantomData;
@@ -197,15 +197,18 @@ mod tests {
fn combine_three_columns() {
assert_eq!(
build_matrix_from_columns(vec![
DenseMatrix::from_2d_vec(&vec![vec![1.0], vec![1.0], vec![1.0],]),
DenseMatrix::from_2d_vec(&vec![vec![2.0], vec![2.0], vec![2.0],]),
DenseMatrix::from_2d_vec(&vec![vec![3.0], vec![3.0], vec![3.0],])
DenseMatrix::from_2d_vec(&vec![vec![1.0], vec![1.0], vec![1.0],]).unwrap(),
DenseMatrix::from_2d_vec(&vec![vec![2.0], vec![2.0], vec![2.0],]).unwrap(),
DenseMatrix::from_2d_vec(&vec![vec![3.0], vec![3.0], vec![3.0],]).unwrap()
]),
Some(DenseMatrix::from_2d_vec(&vec![
vec![1.0, 2.0, 3.0],
vec![1.0, 2.0, 3.0],
vec![1.0, 2.0, 3.0]
]))
Some(
DenseMatrix::from_2d_vec(&vec![
vec![1.0, 2.0, 3.0],
vec![1.0, 2.0, 3.0],
vec![1.0, 2.0, 3.0]
])
.unwrap()
)
)
}
@@ -287,13 +290,15 @@ mod tests {
/// sklearn.
#[test]
fn fit_transform_random_values() {
let transformed_values =
fit_transform_with_default_standard_scaler(&DenseMatrix::from_2d_array(&[
let transformed_values = fit_transform_with_default_standard_scaler(
&DenseMatrix::from_2d_array(&[
&[0.1004222429, 0.2194113576, 0.9310663354, 0.3313593793],
&[0.2045493861, 0.1683865411, 0.5071506765, 0.7257355264],
&[0.5708488802, 0.1846414616, 0.9590802982, 0.5591871046],
&[0.8387612750, 0.5754861361, 0.5537109852, 0.1077646442],
]));
])
.unwrap(),
);
println!("{transformed_values}");
assert!(transformed_values.approximate_eq(
&DenseMatrix::from_2d_array(&[
@@ -301,7 +306,8 @@ mod tests {
&[-0.7615464283, -0.7076698384, -1.1075452562, 1.2632979631],
&[0.4832504303, -0.6106747444, 1.0630075435, 0.5494084257],
&[1.3936980634, 1.7215431158, -0.8839228078, -1.3855590021],
]),
])
.unwrap(),
1.0
))
}
@@ -310,13 +316,10 @@ mod tests {
#[test]
fn fit_transform_with_zero_variance() {
assert_eq!(
fit_transform_with_default_standard_scaler(&DenseMatrix::from_2d_array(&[
&[1.0],
&[1.0],
&[1.0],
&[1.0]
])),
DenseMatrix::from_2d_array(&[&[0.0], &[0.0], &[0.0], &[0.0]]),
fit_transform_with_default_standard_scaler(
&DenseMatrix::from_2d_array(&[&[1.0], &[1.0], &[1.0], &[1.0]]).unwrap()
),
DenseMatrix::from_2d_array(&[&[0.0], &[0.0], &[0.0], &[0.0]]).unwrap(),
"When scaling values with zero variance, zero is expected as return value"
)
}
@@ -331,7 +334,8 @@ mod tests {
&[1.0, 2.0, 5.0],
&[1.0, 1.0, 1.0],
&[1.0, 2.0, 5.0]
]),
])
.unwrap(),
StandardScalerParameters::default(),
),
Ok(StandardScaler {
@@ -354,7 +358,8 @@ mod tests {
&[0.2045493861, 0.1683865411, 0.5071506765, 0.7257355264],
&[0.5708488802, 0.1846414616, 0.9590802982, 0.5591871046],
&[0.8387612750, 0.5754861361, 0.5537109852, 0.1077646442],
]),
])
.unwrap(),
StandardScalerParameters::default(),
)
.unwrap();
@@ -364,17 +369,18 @@ mod tests {
vec![0.42864544605, 0.2869813741, 0.737752073825, 0.431011663625],
);
assert!(
&DenseMatrix::<f64>::from_2d_vec(&vec![fitted_scaler.stds]).approximate_eq(
assert!(&DenseMatrix::<f64>::from_2d_vec(&vec![fitted_scaler.stds])
.unwrap()
.approximate_eq(
&DenseMatrix::from_2d_array(&[&[
0.29426447500954,
0.16758497615485,
0.20820945786863,
0.23329718831165
],]),
],])
.unwrap(),
0.00000000000001
)
)
))
}
/// If `with_std` is set to `false` the values should not be
@@ -392,8 +398,9 @@ mod tests {
};
assert_eq!(
standard_scaler.transform(&DenseMatrix::from_2d_array(&[&[0.0, 2.0], &[2.0, 4.0]])),
Ok(DenseMatrix::from_2d_array(&[&[-1.0, -1.0], &[1.0, 1.0]]))
standard_scaler
.transform(&DenseMatrix::from_2d_array(&[&[0.0, 2.0], &[2.0, 4.0]]).unwrap()),
Ok(DenseMatrix::from_2d_array(&[&[-1.0, -1.0], &[1.0, 1.0]]).unwrap())
)
}
@@ -413,8 +420,8 @@ mod tests {
assert_eq!(
standard_scaler
.transform(&DenseMatrix::from_2d_array(&[&[0.0, 9.0], &[4.0, 12.0]])),
Ok(DenseMatrix::from_2d_array(&[&[0.0, 3.0], &[2.0, 4.0]]))
.transform(&DenseMatrix::from_2d_array(&[&[0.0, 9.0], &[4.0, 12.0]]).unwrap()),
Ok(DenseMatrix::from_2d_array(&[&[0.0, 3.0], &[2.0, 4.0]]).unwrap())
)
}
@@ -433,7 +440,8 @@ mod tests {
&[0.2045493861, 0.1683865411, 0.5071506765, 0.7257355264],
&[0.5708488802, 0.1846414616, 0.9590802982, 0.5591871046],
&[0.8387612750, 0.5754861361, 0.5537109852, 0.1077646442],
]),
])
.unwrap(),
StandardScalerParameters::default(),
)
.unwrap();
@@ -446,17 +454,18 @@ mod tests {
vec![0.42864544605, 0.2869813741, 0.737752073825, 0.431011663625],
);
assert!(
&DenseMatrix::from_2d_vec(&vec![deserialized_scaler.stds]).approximate_eq(
assert!(&DenseMatrix::from_2d_vec(&vec![deserialized_scaler.stds])
.unwrap()
.approximate_eq(
&DenseMatrix::from_2d_array(&[&[
0.29426447500954,
0.16758497615485,
0.20820945786863,
0.23329718831165
],]),
],])
.unwrap(),
0.00000000000001
)
)
))
}
}
}
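As a sanity check on the module example at the top of this file: a column with values 0, 0, 1, 1 has mean 0.5, and the biased (divide-by-n) standard deviation is also 0.5, so the scaled values are -1 and 1, matching the expected matrix. Which estimator the scaler uses is not spelled out in these hunks; the tiny self-contained check below assumes the biased form, which is the one consistent with the example:

fn main() {
    let column = [0.0_f64, 0.0, 1.0, 1.0];
    let mean = column.iter().sum::<f64>() / column.len() as f64; // 0.5
    // Biased standard deviation: sqrt of the mean squared deviation.
    let std_dev =
        (column.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / column.len() as f64).sqrt(); // 0.5
    let scaled: Vec<f64> = column.iter().map(|v| (v - mean) / std_dev).collect();
    assert_eq!(scaled, vec![-1.0, -1.0, 1.0, 1.0]);
}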
+6 -11
@@ -83,7 +83,7 @@ where
Matrix: Array2<T>,
{
let csv_text = read_string_from_source(source)?;
let rows: Vec<Vec<T>> = extract_row_vectors_from_csv_text::<T, RowVector, Matrix>(
let rows: Vec<Vec<T>> = extract_row_vectors_from_csv_text(
&csv_text,
&definition,
detect_row_format(&csv_text, &definition)?,
@@ -103,12 +103,7 @@ where
/// Given a string containing the contents of a csv file, extract its value
/// into row-vectors.
fn extract_row_vectors_from_csv_text<
'a,
T: Number + RealNumber + std::str::FromStr,
RowVector: Array1<T>,
Matrix: Array2<T>,
>(
fn extract_row_vectors_from_csv_text<'a, T: Number + RealNumber + std::str::FromStr>(
csv_text: &'a str,
definition: &'a CSVDefinition<'_>,
row_format: CSVRowFormat<'_>,
@@ -243,7 +238,8 @@ mod tests {
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
]))
])
.unwrap())
)
}
#[test]
@@ -266,7 +262,7 @@ mod tests {
&[5.1, 3.5, 1.4, 0.2],
&[4.9, 3.0, 1.4, 0.2],
&[4.7, 3.2, 1.3, 0.2],
]))
]).unwrap())
)
}
#[test]
@@ -305,12 +301,11 @@ mod tests {
}
mod extract_row_vectors_from_csv_text {
use super::super::{extract_row_vectors_from_csv_text, CSVDefinition, CSVRowFormat};
use crate::linalg::basic::matrix::DenseMatrix;
#[test]
fn read_default_csv() {
assert_eq!(
extract_row_vectors_from_csv_text::<f64, Vec<_>, DenseMatrix<_>>(
extract_row_vectors_from_csv_text::<f64>(
"column 1, column 2, column3\n1.0,2.0,3.0\n4.0,5.0,6.0",
&CSVDefinition::default(),
CSVRowFormat {
+1 -1
@@ -56,7 +56,7 @@ pub struct Kernels;
impl Kernels {
/// Return a default linear
pub fn linear() -> LinearKernel {
LinearKernel::default()
LinearKernel
}
/// Return a default RBF
pub fn rbf() -> RBFKernel {
+66 -70
@@ -53,7 +53,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//! let y = vec![ -1, -1, -1, -1, -1, -1, -1, -1,
//! 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
//!
@@ -322,19 +322,26 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX> + 'a, Y: Array
let (n, _) = x.shape();
let mut y_hat: Vec<TX> = Array1::zeros(n);
let mut row = Vec::with_capacity(n);
for i in 0..n {
let row_pred: TX =
self.predict_for_row(Vec::from_iterator(x.get_row(i).iterator(0).copied(), n));
row.clear();
row.extend(x.get_row(i).iterator(0).copied());
let row_pred: TX = self.predict_for_row(&row);
y_hat.set(i, row_pred);
}
Ok(y_hat)
}
fn predict_for_row(&self, x: Vec<TX>) -> TX {
fn predict_for_row(&self, x: &[TX]) -> TX {
let mut f = self.b.unwrap();
let xi: Vec<_> = x.iter().map(|e| e.to_f64().unwrap()).collect();
for i in 0..self.instances.as_ref().unwrap().len() {
let xj: Vec<_> = self.instances.as_ref().unwrap()[i]
.iter()
.map(|e| e.to_f64().unwrap())
.collect();
f += self.w.as_ref().unwrap()[i]
* TX::from(
self.parameters
@@ -343,13 +350,7 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX> + 'a, Y: Array
.kernel
.as_ref()
.unwrap()
.apply(
&x.iter().map(|e| e.to_f64().unwrap()).collect(),
&self.instances.as_ref().unwrap()[i]
.iter()
.map(|e| e.to_f64().unwrap())
.collect(),
)
.apply(&xi, &xj)
.unwrap(),
)
.unwrap();
@@ -472,14 +473,12 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
let tol = self.parameters.tol;
let good_enough = TX::from_i32(1000).unwrap();
let mut x = Vec::with_capacity(n);
for _ in 0..self.parameters.epoch {
for i in self.permutate(n) {
self.process(
i,
Vec::from_iterator(self.x.get_row(i).iterator(0).copied(), n),
*self.y.get(i),
&mut cache,
);
x.clear();
x.extend(self.x.get_row(i).iterator(0).take(n).copied());
self.process(i, &x, *self.y.get(i), &mut cache);
loop {
self.reprocess(tol, &mut cache);
self.find_min_max_gradient();
@@ -511,24 +510,17 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
let mut cp = 0;
let mut cn = 0;
let mut x = Vec::with_capacity(n);
for i in self.permutate(n) {
x.clear();
x.extend(self.x.get_row(i).iterator(0).take(n).copied());
if *self.y.get(i) == TY::one() && cp < few {
if self.process(
i,
Vec::from_iterator(self.x.get_row(i).iterator(0).copied(), n),
*self.y.get(i),
cache,
) {
if self.process(i, &x, *self.y.get(i), cache) {
cp += 1;
}
} else if *self.y.get(i) == TY::from(-1).unwrap()
&& cn < few
&& self.process(
i,
Vec::from_iterator(self.x.get_row(i).iterator(0).copied(), n),
*self.y.get(i),
cache,
)
&& self.process(i, &x, *self.y.get(i), cache)
{
cn += 1;
}
@@ -539,7 +531,7 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
}
}
fn process(&mut self, i: usize, x: Vec<TX>, y: TY, cache: &mut Cache<TX, TY, X, Y>) -> bool {
fn process(&mut self, i: usize, x: &[TX], y: TY, cache: &mut Cache<TX, TY, X, Y>) -> bool {
for j in 0..self.sv.len() {
if self.sv[j].index == i {
return true;
@@ -551,15 +543,14 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
let mut cache_values: Vec<((usize, usize), TX)> = Vec::new();
for v in self.sv.iter() {
let xi: Vec<_> = v.x.iter().map(|e| e.to_f64().unwrap()).collect();
let xj: Vec<_> = x.iter().map(|e| e.to_f64().unwrap()).collect();
let k = self
.parameters
.kernel
.as_ref()
.unwrap()
.apply(
&v.x.iter().map(|e| e.to_f64().unwrap()).collect(),
&x.iter().map(|e| e.to_f64().unwrap()).collect(),
)
.apply(&xi, &xj)
.unwrap();
cache_values.push(((i, v.index), TX::from(k).unwrap()));
g -= v.alpha * k;
@@ -578,7 +569,7 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
cache.insert(v.0, v.1.to_f64().unwrap());
}
let x_f64 = x.iter().map(|e| e.to_f64().unwrap()).collect();
let x_f64: Vec<_> = x.iter().map(|e| e.to_f64().unwrap()).collect();
let k_v = self
.parameters
.kernel
@@ -701,8 +692,10 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
let km = sv1.k;
let gm = sv1.grad;
let mut best = 0f64;
let xi: Vec<_> = sv1.x.iter().map(|e| e.to_f64().unwrap()).collect();
for i in 0..self.sv.len() {
let v = &self.sv[i];
let xj: Vec<_> = v.x.iter().map(|e| e.to_f64().unwrap()).collect();
let z = v.grad - gm;
let k = cache.get(
sv1,
@@ -711,10 +704,7 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
.kernel
.as_ref()
.unwrap()
.apply(
&sv1.x.iter().map(|e| e.to_f64().unwrap()).collect(),
&v.x.iter().map(|e| e.to_f64().unwrap()).collect(),
)
.apply(&xi, &xj)
.unwrap(),
);
let mut curv = km + v.k - 2f64 * k;
@@ -732,6 +722,12 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
}
}
let xi: Vec<_> = self.sv[idx_1]
.x
.iter()
.map(|e| e.to_f64().unwrap())
.collect::<Vec<_>>();
idx_2.map(|idx_2| {
(
idx_1,
@@ -742,16 +738,12 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
.as_ref()
.unwrap()
.apply(
&self.sv[idx_1]
.x
.iter()
.map(|e| e.to_f64().unwrap())
.collect(),
&xi,
&self.sv[idx_2]
.x
.iter()
.map(|e| e.to_f64().unwrap())
.collect(),
.collect::<Vec<_>>(),
)
.unwrap()
}),
@@ -765,8 +757,11 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
let km = sv2.k;
let gm = sv2.grad;
let mut best = 0f64;
let xi: Vec<_> = sv2.x.iter().map(|e| e.to_f64().unwrap()).collect();
for i in 0..self.sv.len() {
let v = &self.sv[i];
let xj: Vec<_> = v.x.iter().map(|e| e.to_f64().unwrap()).collect();
let z = gm - v.grad;
let k = cache.get(
sv2,
@@ -775,10 +770,7 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
.kernel
.as_ref()
.unwrap()
.apply(
&sv2.x.iter().map(|e| e.to_f64().unwrap()).collect(),
&v.x.iter().map(|e| e.to_f64().unwrap()).collect(),
)
.apply(&xi, &xj)
.unwrap(),
);
let mut curv = km + v.k - 2f64 * k;
@@ -797,6 +789,12 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
}
}
let xj: Vec<_> = self.sv[idx_2]
.x
.iter()
.map(|e| e.to_f64().unwrap())
.collect();
idx_1.map(|idx_1| {
(
idx_1,
@@ -811,12 +809,8 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
.x
.iter()
.map(|e| e.to_f64().unwrap())
.collect(),
&self.sv[idx_2]
.x
.iter()
.map(|e| e.to_f64().unwrap())
.collect(),
.collect::<Vec<_>>(),
&xj,
)
.unwrap()
}),
@@ -835,12 +829,12 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
.x
.iter()
.map(|e| e.to_f64().unwrap())
.collect(),
.collect::<Vec<_>>(),
&self.sv[idx_2]
.x
.iter()
.map(|e| e.to_f64().unwrap())
.collect(),
.collect::<Vec<_>>(),
)
.unwrap(),
)),
@@ -895,7 +889,10 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
self.sv[v1].alpha -= step.to_f64().unwrap();
self.sv[v2].alpha += step.to_f64().unwrap();
let xi_v1: Vec<_> = self.sv[v1].x.iter().map(|e| e.to_f64().unwrap()).collect();
let xi_v2: Vec<_> = self.sv[v2].x.iter().map(|e| e.to_f64().unwrap()).collect();
for i in 0..self.sv.len() {
let xj: Vec<_> = self.sv[i].x.iter().map(|e| e.to_f64().unwrap()).collect();
let k2 = cache.get(
&self.sv[v2],
&self.sv[i],
@@ -903,10 +900,7 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
.kernel
.as_ref()
.unwrap()
.apply(
&self.sv[v2].x.iter().map(|e| e.to_f64().unwrap()).collect(),
&self.sv[i].x.iter().map(|e| e.to_f64().unwrap()).collect(),
)
.apply(&xi_v2, &xj)
.unwrap(),
);
let k1 = cache.get(
@@ -916,10 +910,7 @@ impl<'a, TX: Number + RealNumber, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>
.kernel
.as_ref()
.unwrap()
.apply(
&self.sv[v1].x.iter().map(|e| e.to_f64().unwrap()).collect(),
&self.sv[i].x.iter().map(|e| e.to_f64().unwrap()).collect(),
)
.apply(&xi_v1, &xj)
.unwrap(),
);
self.sv[i].grad -= step.to_f64().unwrap() * (k2 - k1);
@@ -966,7 +957,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y: Vec<i32> = vec![
-1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -992,7 +984,8 @@ mod tests {
)]
#[test]
fn svc_fit_decision_function() {
let x = DenseMatrix::from_2d_array(&[&[4.0, 0.0], &[0.0, 4.0], &[8.0, 0.0], &[0.0, 8.0]]);
let x = DenseMatrix::from_2d_array(&[&[4.0, 0.0], &[0.0, 4.0], &[8.0, 0.0], &[0.0, 8.0]])
.unwrap();
let x2 = DenseMatrix::from_2d_array(&[
&[3.0, 3.0],
@@ -1001,7 +994,8 @@ mod tests {
&[10.0, 10.0],
&[1.0, 1.0],
&[0.0, 0.0],
]);
])
.unwrap();
let y: Vec<i32> = vec![-1, -1, 1, 1];
@@ -1054,7 +1048,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y: Vec<i32> = vec![
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -1103,7 +1098,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y: Vec<i32> = vec![
-1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
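Most of the SVC changes above follow one pattern: the per-element to_f64 conversions used to be rebuilt inside every kernel apply call and are now collected once (xi, xj, xi_v1, xi_v2) and reused. A generic sketch of that hoisting, not the SVC code itself, with a plain dot product standing in for the kernel:

// Convert once and reuse across the loop instead of rebuilding the same Vec<f64>
// for every kernel evaluation.
fn dot(a: &[f64], b: &[f64]) -> f64 {
    a.iter().zip(b).map(|(x, y)| x * y).sum()
}

fn row_scores(row: &[f32], support_vectors: &[Vec<f32>]) -> Vec<f64> {
    // Hoisted: one conversion of row, not one per support vector.
    let xi: Vec<f64> = row.iter().map(|&e| e as f64).collect();
    support_vectors
        .iter()
        .map(|sv| {
            let xj: Vec<f64> = sv.iter().map(|&e| e as f64).collect();
            dot(&xi, &xj)
        })
        .collect()
}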
+12 -12
@@ -44,7 +44,7 @@
//! &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
//! &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
//! &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
//! ]);
//! ]).unwrap();
//!
//! let y: Vec<f64> = vec![83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0,
//! 100.0, 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9];
@@ -248,19 +248,20 @@ impl<'a, T: Number + FloatNumber + PartialOrd, X: Array2<T>, Y: Array1<T>> SVR<'
let mut y_hat: Vec<T> = Vec::<T>::zeros(n);
let mut x_i = Vec::with_capacity(n);
for i in 0..n {
y_hat.set(
i,
self.predict_for_row(Vec::from_iterator(x.get_row(i).iterator(0).copied(), n)),
);
x_i.clear();
x_i.extend(x.get_row(i).iterator(0).copied());
y_hat.set(i, self.predict_for_row(&x_i));
}
Ok(y_hat)
}
pub(crate) fn predict_for_row(&self, x: Vec<T>) -> T {
pub(crate) fn predict_for_row(&self, x: &[T]) -> T {
let mut f = self.b;
let xi: Vec<_> = x.iter().map(|e| e.to_f64().unwrap()).collect();
for i in 0..self.instances.as_ref().unwrap().len() {
f += self.w.as_ref().unwrap()[i]
* T::from(
@@ -270,10 +271,7 @@ impl<'a, T: Number + FloatNumber + PartialOrd, X: Array2<T>, Y: Array1<T>> SVR<'
.kernel
.as_ref()
.unwrap()
.apply(
&x.iter().map(|e| e.to_f64().unwrap()).collect(),
&self.instances.as_ref().unwrap()[i],
)
.apply(&xi, &self.instances.as_ref().unwrap()[i])
.unwrap(),
)
.unwrap()
@@ -642,7 +640,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y: Vec<f64> = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
@@ -690,7 +689,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y: Vec<f64> = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+93 -27
@@ -48,7 +48,7 @@
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! ]).unwrap();
//! let y = vec![ 0, 0, 0, 0, 0, 0, 0, 0,
//! 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
//!
@@ -116,6 +116,7 @@ pub struct DecisionTreeClassifier<
num_classes: usize,
classes: Vec<TY>,
depth: u16,
num_features: usize,
_phantom_tx: PhantomData<TX>,
_phantom_x: PhantomData<X>,
_phantom_y: PhantomData<Y>,
@@ -159,11 +160,13 @@ pub enum SplitCriterion {
#[derive(Debug, Clone)]
struct Node {
output: usize,
n_node_samples: usize,
split_feature: usize,
split_value: Option<f64>,
split_score: Option<f64>,
true_child: Option<usize>,
false_child: Option<usize>,
impurity: Option<f64>,
}
impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>> PartialEq
@@ -400,14 +403,16 @@ impl Default for DecisionTreeClassifierSearchParameters {
}
impl Node {
fn new(output: usize) -> Self {
fn new(output: usize, n_node_samples: usize) -> Self {
Node {
output,
n_node_samples,
split_feature: 0,
split_value: Option::None,
split_score: Option::None,
true_child: Option::None,
false_child: Option::None,
impurity: Option::None,
}
}
}
@@ -507,6 +512,7 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
num_classes: 0usize,
classes: vec![],
depth: 0u16,
num_features: 0usize,
_phantom_tx: PhantomData,
_phantom_x: PhantomData,
_phantom_y: PhantomData,
@@ -578,7 +584,7 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
count[yi[i]] += samples[i];
}
let root = Node::new(which_max(&count));
let root = Node::new(which_max(&count), y_ncols);
change_nodes.push(root);
let mut order: Vec<Vec<usize>> = Vec::new();
@@ -593,6 +599,7 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
num_classes: k,
classes,
depth: 0u16,
num_features: num_attributes,
_phantom_tx: PhantomData,
_phantom_x: PhantomData,
_phantom_y: PhantomData,
@@ -678,16 +685,7 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
}
}
if is_pure {
return false;
}
let n = visitor.samples.iter().sum();
if n <= self.parameters().min_samples_split {
return false;
}
let mut count = vec![0; self.num_classes];
let mut false_count = vec![0; self.num_classes];
for i in 0..n_rows {
@@ -696,7 +694,15 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
}
}
let parent_impurity = impurity(&self.parameters().criterion, &count, n);
self.nodes[visitor.node].impurity = Some(impurity(&self.parameters().criterion, &count, n));
if is_pure {
return false;
}
if n <= self.parameters().min_samples_split {
return false;
}
let mut variables = (0..n_attr).collect::<Vec<_>>();
@@ -705,14 +711,7 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
}
for variable in variables.iter().take(mtry) {
self.find_best_split(
visitor,
n,
&count,
&mut false_count,
parent_impurity,
*variable,
);
self.find_best_split(visitor, n, &count, &mut false_count, *variable);
}
self.nodes()[visitor.node].split_score.is_some()
@@ -724,7 +723,6 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
n: usize,
count: &[usize],
false_count: &mut [usize],
parent_impurity: f64,
j: usize,
) {
let mut true_count = vec![0; self.num_classes];
@@ -760,6 +758,7 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
let true_label = which_max(&true_count);
let false_label = which_max(false_count);
let parent_impurity = self.nodes()[visitor.node].impurity.unwrap();
let gain = parent_impurity
- tc as f64 / n as f64
* impurity(&self.parameters().criterion, &true_count, tc)
@@ -827,9 +826,9 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
let true_child_idx = self.nodes().len();
self.nodes.push(Node::new(visitor.true_child_output));
self.nodes.push(Node::new(visitor.true_child_output, tc));
let false_child_idx = self.nodes().len();
self.nodes.push(Node::new(visitor.false_child_output));
self.nodes.push(Node::new(visitor.false_child_output, fc));
self.nodes[visitor.node].true_child = Some(true_child_idx);
self.nodes[visitor.node].false_child = Some(false_child_idx);
@@ -863,6 +862,33 @@ impl<TX: Number + PartialOrd, TY: Number + Ord, X: Array2<TX>, Y: Array1<TY>>
true
}
/// Compute feature importances for the fitted tree.
pub fn compute_feature_importances(&self, normalize: bool) -> Vec<f64> {
let mut importances = vec![0f64; self.num_features];
for node in self.nodes().iter() {
if node.true_child.is_none() && node.false_child.is_none() {
continue;
}
let left = &self.nodes()[node.true_child.unwrap()];
let right = &self.nodes()[node.false_child.unwrap()];
importances[node.split_feature] += node.n_node_samples as f64 * node.impurity.unwrap()
- left.n_node_samples as f64 * left.impurity.unwrap()
- right.n_node_samples as f64 * right.impurity.unwrap();
}
for item in importances.iter_mut() {
*item /= self.nodes()[0].n_node_samples as f64;
}
if normalize {
let sum = importances.iter().sum::<f64>();
for importance in importances.iter_mut() {
*importance /= sum;
}
}
importances
}
}
#[cfg(test)]
@@ -938,7 +964,8 @@ mod tests {
&[4.9, 2.4, 3.3, 1.0],
&[6.6, 2.9, 4.6, 1.3],
&[5.2, 2.7, 3.9, 1.4],
]);
])
.unwrap();
let y: Vec<u32> = vec![0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1];
assert_eq!(
@@ -1005,7 +1032,8 @@ mod tests {
&[0., 0., 1., 1.],
&[0., 0., 0., 0.],
&[0., 0., 0., 1.],
]);
])
.unwrap();
let y: Vec<u32> = vec![1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0];
assert_eq!(
@@ -1016,6 +1044,43 @@ mod tests {
);
}
#[test]
fn test_compute_feature_importances() {
let x: DenseMatrix<f64> = DenseMatrix::from_2d_array(&[
&[1., 1., 1., 0.],
&[1., 1., 1., 0.],
&[1., 1., 1., 1.],
&[1., 1., 0., 0.],
&[1., 1., 0., 1.],
&[1., 0., 1., 0.],
&[1., 0., 1., 0.],
&[1., 0., 1., 1.],
&[1., 0., 0., 0.],
&[1., 0., 0., 1.],
&[0., 1., 1., 0.],
&[0., 1., 1., 0.],
&[0., 1., 1., 1.],
&[0., 1., 0., 0.],
&[0., 1., 0., 1.],
&[0., 0., 1., 0.],
&[0., 0., 1., 0.],
&[0., 0., 1., 1.],
&[0., 0., 0., 0.],
&[0., 0., 0., 1.],
])
.unwrap();
let y: Vec<u32> = vec![1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0];
let tree = DecisionTreeClassifier::fit(&x, &y, Default::default()).unwrap();
assert_eq!(
tree.compute_feature_importances(false),
vec![0., 0., 0.21333333333333332, 0.26666666666666666]
);
assert_eq!(
tree.compute_feature_importances(true),
vec![0., 0., 0.4444444444444444, 0.5555555555555556]
);
}
#[cfg_attr(
all(target_arch = "wasm32", not(target_os = "wasi")),
wasm_bindgen_test::wasm_bindgen_test
@@ -1044,7 +1109,8 @@ mod tests {
&[0., 0., 1., 1.],
&[0., 0., 0., 0.],
&[0., 0., 0., 1.],
]);
])
.unwrap();
let y = vec![1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0];
let tree = DecisionTreeClassifier::fit(&x, &y, Default::default()).unwrap();
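compute_feature_importances accumulates, for each internal node, n_node_samples * impurity minus the children's weighted impurities into the slot of the node's split feature, divides by the root's sample count, and, when normalize is true, rescales so the importances sum to one, which is what the new test checks. A minimal usage sketch on toy data; the module path smartcore::tree::decision_tree_classifier and the suitability of such a small dataset are assumptions:

use smartcore::linalg::basic::matrix::DenseMatrix;
use smartcore::tree::decision_tree_classifier::DecisionTreeClassifier; // assumed module path

fn main() {
    let x = DenseMatrix::from_2d_array(&[
        &[1., 1., 1., 0.],
        &[1., 1., 0., 1.],
        &[0., 0., 1., 0.],
        &[0., 0., 0., 1.],
    ])
    .unwrap();
    let y: Vec<u32> = vec![1, 0, 1, 0]; // label follows the third feature
    let tree = DecisionTreeClassifier::fit(&x, &y, Default::default()).unwrap();
    // true normalizes the importances so they sum to 1 across all features.
    let importances = tree.compute_feature_importances(true);
    assert_eq!(importances.len(), 4);
}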
+7 -5
@@ -39,7 +39,7 @@
//! &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
//! &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
//! &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
//! ]);
//! ]).unwrap();
//! let y: Vec<f64> = vec![
//! 83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0,
//! 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9,
@@ -753,7 +753,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y: Vec<f64> = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
114.2, 115.7, 116.9,
@@ -767,7 +768,7 @@ mod tests {
assert!((y_hat[i] - y[i]).abs() < 0.1);
}
let expected_y = vec![
let expected_y = [
87.3, 87.3, 87.3, 87.3, 98.9, 98.9, 98.9, 98.9, 98.9, 107.9, 107.9, 107.9, 114.85,
114.85, 114.85, 114.85,
];
@@ -788,7 +789,7 @@ mod tests {
assert!((y_hat[i] - expected_y[i]).abs() < 0.1);
}
let expected_y = vec![
let expected_y = [
83.0, 88.35, 88.35, 89.5, 97.15, 97.15, 99.5, 99.5, 101.2, 104.6, 109.6, 109.6, 113.4,
113.4, 116.30, 116.30,
];
@@ -834,7 +835,8 @@ mod tests {
&[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
&[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
&[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
]);
])
.unwrap();
let y: Vec<f64> = vec![
83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
114.2, 115.7, 116.9,