fix: cargo fmt

Volodymyr Orlov
2020-06-05 17:52:03 -07:00
parent 685be04488
commit a2784d6345
52 changed files with 3342 additions and 2829 deletions
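Every hunk below is mechanical `rustfmt` output; no behavior changes. As a rough guide to the rewrites it applies here, a minimal before/after sketch on a made-up snippet (names are illustrative, not from this commit):

```rust
// Patterns rustfmt normalizes in the hunks below, shown on toy code:
// trailing commas on multi-line lists, binary operators moved to the
// start of continuation lines, and long call chains split one call per line.
#[derive(Debug, PartialEq)]
enum Solver {
    Qr,
    Svd, // rustfmt adds the trailing comma on the last variant
}

fn main() {
    let y = [83.0_f64, 88.5, 88.2, 89.5];
    let y_hat = [83.2_f64, 88.4, 88.1, 89.9];
    // Once a chain like this exceeds max_width, rustfmt breaks it
    // one `.call()` per line, as in the reformatted asserts below.
    let close = y
        .iter()
        .zip(y_hat.iter())
        .all(|(&a, &b)| (a - b).abs() <= 5.0);
    assert!(close);
    assert_eq!(Solver::Svd, Solver::Svd);
}
```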
+98 -82
@@ -1,34 +1,32 @@
 use std::fmt::Debug;
-use serde::{Serialize, Deserialize};
+use serde::{Deserialize, Serialize};
-use crate::math::num::FloatExt;
 use crate::linalg::Matrix;
+use crate::math::num::FloatExt;

 #[derive(Serialize, Deserialize, Debug)]
 pub enum LinearRegressionSolver {
     QR,
-    SVD
+    SVD,
 }

 #[derive(Serialize, Deserialize, Debug)]
 pub struct LinearRegression<T: FloatExt, M: Matrix<T>> {
     coefficients: M,
     intercept: T,
-    solver: LinearRegressionSolver
+    solver: LinearRegressionSolver,
 }

 impl<T: FloatExt, M: Matrix<T>> PartialEq for LinearRegression<T, M> {
     fn eq(&self, other: &Self) -> bool {
-        self.coefficients == other.coefficients &&
-            (self.intercept - other.intercept).abs() <= T::epsilon()
+        self.coefficients == other.coefficients
+            && (self.intercept - other.intercept).abs() <= T::epsilon()
     }
 }

 impl<T: FloatExt, M: Matrix<T>> LinearRegression<T, M> {
-    pub fn fit(x: &M, y: &M::RowVector, solver: LinearRegressionSolver) -> LinearRegression<T, M>{
+    pub fn fit(x: &M, y: &M::RowVector, solver: LinearRegressionSolver) -> LinearRegression<T, M> {
         let y_m = M::from_row_vector(y.clone());
         let b = y_m.transpose();
         let (x_nrows, num_attributes) = x.shape();
@@ -37,20 +35,20 @@ impl<T: FloatExt, M: Matrix<T>> LinearRegression<T, M> {
         if x_nrows != y_nrows {
             panic!("Number of rows of X doesn't match number of rows of Y");
         }
         let a = x.v_stack(&M::ones(x_nrows, 1));
         let w = match solver {
             LinearRegressionSolver::QR => a.qr_solve_mut(b),
-            LinearRegressionSolver::SVD => a.svd_solve_mut(b)
+            LinearRegressionSolver::SVD => a.svd_solve_mut(b),
         };
         let wights = w.slice(0..num_attributes, 0..1);
         LinearRegression {
             intercept: w.get(num_attributes, 0),
             coefficients: wights,
-            solver: solver
+            solver: solver,
         }
     }
@@ -60,81 +58,54 @@ impl<T: FloatExt, M: Matrix<T>> LinearRegression<T, M> {
         y_hat.add_mut(&M::fill(nrows, 1, self.intercept));
         y_hat.transpose().to_row_vector()
     }
 }

 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::linalg::naive::dense_matrix::*;
     use nalgebra::{DMatrix, RowDVector};
-    use crate::linalg::naive::dense_matrix::*;

     #[test]
     fn ols_fit_predict() {
-        let x = DMatrix::from_row_slice(16, 6, &[
-            234.289, 235.6, 159.0, 107.608, 1947., 60.323,
-            259.426, 232.5, 145.6, 108.632, 1948., 61.122,
-            258.054, 368.2, 161.6, 109.773, 1949., 60.171,
-            284.599, 335.1, 165.0, 110.929, 1950., 61.187,
-            328.975, 209.9, 309.9, 112.075, 1951., 63.221,
-            346.999, 193.2, 359.4, 113.270, 1952., 63.639,
-            365.385, 187.0, 354.7, 115.094, 1953., 64.989,
-            363.112, 357.8, 335.0, 116.219, 1954., 63.761,
-            397.469, 290.4, 304.8, 117.388, 1955., 66.019,
-            419.180, 282.2, 285.7, 118.734, 1956., 67.857,
-            442.769, 293.6, 279.8, 120.445, 1957., 68.169,
-            444.546, 468.1, 263.7, 121.950, 1958., 66.513,
-            482.704, 381.3, 255.2, 123.366, 1959., 68.655,
-            502.601, 393.1, 251.4, 125.368, 1960., 69.564,
-            518.173, 480.6, 257.2, 127.852, 1961., 69.331,
-            554.894, 400.7, 282.7, 130.081, 1962., 70.551]);
-        let y: RowDVector<f64> = RowDVector::from_vec(vec!(83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9));
+        let x = DMatrix::from_row_slice(
+            16,
+            6,
+            &[
+                234.289, 235.6, 159.0, 107.608, 1947., 60.323, 259.426, 232.5, 145.6, 108.632,
+                1948., 61.122, 258.054, 368.2, 161.6, 109.773, 1949., 60.171, 284.599, 335.1,
+                165.0, 110.929, 1950., 61.187, 328.975, 209.9, 309.9, 112.075, 1951., 63.221,
+                346.999, 193.2, 359.4, 113.270, 1952., 63.639, 365.385, 187.0, 354.7, 115.094,
+                1953., 64.989, 363.112, 357.8, 335.0, 116.219, 1954., 63.761, 397.469, 290.4,
+                304.8, 117.388, 1955., 66.019, 419.180, 282.2, 285.7, 118.734, 1956., 67.857,
+                442.769, 293.6, 279.8, 120.445, 1957., 68.169, 444.546, 468.1, 263.7, 121.950,
+                1958., 66.513, 482.704, 381.3, 255.2, 123.366, 1959., 68.655, 502.601, 393.1,
+                251.4, 125.368, 1960., 69.564, 518.173, 480.6, 257.2, 127.852, 1961., 69.331,
+                554.894, 400.7, 282.7, 130.081, 1962., 70.551,
+            ],
+        );
+        let y: RowDVector<f64> = RowDVector::from_vec(vec![
+            83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+            114.2, 115.7, 116.9,
+        ]);
         let y_hat_qr = LinearRegression::fit(&x, &y, LinearRegressionSolver::QR).predict(&x);
         let y_hat_svd = LinearRegression::fit(&x, &y, LinearRegressionSolver::SVD).predict(&x);
-        assert!(y.iter().zip(y_hat_qr.iter()).all(|(&a, &b)| (a - b).abs() <= 5.0));
-        assert!(y.iter().zip(y_hat_svd.iter()).all(|(&a, &b)| (a - b).abs() <= 5.0));
+        assert!(y
+            .iter()
+            .zip(y_hat_qr.iter())
+            .all(|(&a, &b)| (a - b).abs() <= 5.0));
+        assert!(y
+            .iter()
+            .zip(y_hat_svd.iter())
+            .all(|(&a, &b)| (a - b).abs() <= 5.0));
     }
     #[test]
-    fn ols_fit_predict_nalgebra() {
-        let x = DenseMatrix::from_array(&[
-            &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
-            &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
-            &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
-            &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
-            &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
-            &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
-            &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
-            &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
-            &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
-            &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
-            &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
-            &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
-            &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
-            &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
-            &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
-            &[554.894, 400.7, 282.7, 130.081, 1962., 70.551]]);
-        let y: Vec<f64> = vec!(83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9);
-        let y_hat_qr = LinearRegression::fit(&x, &y, LinearRegressionSolver::QR).predict(&x);
-        let y_hat_svd = LinearRegression::fit(&x, &y, LinearRegressionSolver::SVD).predict(&x);
-        assert!(y.iter().zip(y_hat_qr.iter()).all(|(&a, &b)| (a - b).abs() <= 5.0));
-        assert!(y.iter().zip(y_hat_svd.iter()).all(|(&a, &b)| (a - b).abs() <= 5.0));
-    }
-
-    #[test]
-    fn serde(){
+    fn ols_fit_predict_nalgebra() {
+        let x = DenseMatrix::from_array(&[
+            &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
+            &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
@@ -151,14 +122,59 @@ mod tests {
+            &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
+            &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
+            &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
+            &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
+        ]);
+        let y: Vec<f64> = vec![
+            83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+            114.2, 115.7, 116.9,
+        ];
+        let y_hat_qr = LinearRegression::fit(&x, &y, LinearRegressionSolver::QR).predict(&x);
+        let y_hat_svd = LinearRegression::fit(&x, &y, LinearRegressionSolver::SVD).predict(&x);
+        assert!(y
+            .iter()
+            .zip(y_hat_qr.iter())
+            .all(|(&a, &b)| (a - b).abs() <= 5.0));
+        assert!(y
+            .iter()
+            .zip(y_hat_svd.iter())
+            .all(|(&a, &b)| (a - b).abs() <= 5.0));
+    }
+
+    #[test]
+    fn serde() {
+        let x = DenseMatrix::from_array(&[
+            &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
+            &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
+            &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
+            &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
+            &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
+            &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
+            &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
+            &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
+            &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
+            &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
+            &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
+            &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
+            &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
+            &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
+            &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
+            &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
+        ]);
+        let y = vec![
+            83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0, 100.0, 101.2, 104.6, 108.4, 110.8, 112.6,
+            114.2, 115.7, 116.9,
+        ];
         let lr = LinearRegression::fit(&x, &y, LinearRegressionSolver::QR);
-        let deserialized_lr: LinearRegression<f64, DenseMatrix<f64>> = serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
+        let deserialized_lr: LinearRegression<f64, DenseMatrix<f64>> =
+            serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
         assert_eq!(lr, deserialized_lr);
     }
 }
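One detail worth pulling out of the hunk above: `LinearRegression` implements `PartialEq` by comparing `intercept` within `T::epsilon()` rather than exactly, which is what lets the `serde` test `assert_eq!` a model against its JSON round trip. A standalone sketch of that pattern, using plain `f64` and a made-up `Model` type in place of the crate's generic `T: FloatExt`:

```rust
#[derive(Debug)]
struct Model {
    intercept: f64,
}

impl PartialEq for Model {
    fn eq(&self, other: &Self) -> bool {
        // Tolerance-based comparison: values that survived a
        // serialize/deserialize round trip still compare equal even
        // when they are not bit-for-bit identical.
        (self.intercept - other.intercept).abs() <= f64::EPSILON
    }
}

fn main() {
    let fitted = Model { intercept: 0.125 };
    let restored = Model { intercept: 0.125 };
    assert_eq!(fitted, restored);
}
```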
+234 -225
@@ -1,21 +1,21 @@
 use std::fmt::Debug;
 use std::marker::PhantomData;
-use serde::{Serialize, Deserialize};
+use serde::{Deserialize, Serialize};
-use crate::math::num::FloatExt;
 use crate::linalg::Matrix;
-use crate::optimization::FunctionOrder;
+use crate::math::num::FloatExt;
+use crate::optimization::first_order::lbfgs::LBFGS;
 use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult};
 use crate::optimization::line_search::Backtracking;
-use crate::optimization::first_order::lbfgs::LBFGS;
+use crate::optimization::FunctionOrder;

 #[derive(Serialize, Deserialize, Debug)]
 pub struct LogisticRegression<T: FloatExt, M: Matrix<T>> {
     weights: M,
     classes: Vec<T>,
     num_attributes: usize,
-    num_classes: usize
+    num_classes: usize,
 }

 trait ObjectiveFunction<T: FloatExt, M: Matrix<T>> {
@@ -24,11 +24,11 @@ trait ObjectiveFunction<T: FloatExt, M: Matrix<T>> {
     fn partial_dot(w: &M, x: &M, v_col: usize, m_row: usize) -> T {
         let mut sum = T::zero();
         let p = x.shape().1;
         for i in 0..p {
             sum = sum + x.get(m_row, i) * w.get(0, i + v_col);
         }
         sum + w.get(0, p + v_col)
     }
 }
@@ -36,121 +36,119 @@ trait ObjectiveFunction<T: FloatExt, M: Matrix<T>> {
 struct BinaryObjectiveFunction<'a, T: FloatExt, M: Matrix<T>> {
     x: &'a M,
     y: Vec<usize>,
-    phantom: PhantomData<&'a T>
+    phantom: PhantomData<&'a T>,
 }

 impl<T: FloatExt, M: Matrix<T>> PartialEq for LogisticRegression<T, M> {
     fn eq(&self, other: &Self) -> bool {
-        if self.num_classes != other.num_classes ||
-            self.num_attributes != other.num_attributes ||
-            self.classes.len() != other.classes.len() {
-                return false
+        if self.num_classes != other.num_classes
+            || self.num_attributes != other.num_attributes
+            || self.classes.len() != other.classes.len()
+        {
+            return false;
         } else {
             for i in 0..self.classes.len() {
-                if (self.classes[i] - other.classes[i]).abs() > T::epsilon(){
-                    return false
+                if (self.classes[i] - other.classes[i]).abs() > T::epsilon() {
+                    return false;
                 }
             }
-            return self.weights == other.weights
+            return self.weights == other.weights;
         }
     }
 }
 impl<'a, T: FloatExt, M: Matrix<T>> ObjectiveFunction<T, M> for BinaryObjectiveFunction<'a, T, M> {
     fn f(&self, w_bias: &M) -> T {
         let mut f = T::zero();
         let (n, _) = self.x.shape();
         for i in 0..n {
             let wx = BinaryObjectiveFunction::partial_dot(w_bias, self.x, 0, i);
             f = f + (wx.ln_1pe() - (T::from(self.y[i]).unwrap()) * wx);
         }
         f
     }

     fn df(&self, g: &mut M, w_bias: &M) {
         g.copy_from(&M::zeros(1, g.shape().1));
         let (n, p) = self.x.shape();
         for i in 0..n {
             let wx = BinaryObjectiveFunction::partial_dot(w_bias, self.x, 0, i);
             let dyi = (T::from(self.y[i]).unwrap()) - wx.sigmoid();
             for j in 0..p {
                 g.set(0, j, g.get(0, j) - dyi * self.x.get(i, j));
             }
             g.set(0, p, g.get(0, p) - dyi);
         }
     }
 }
 struct MultiClassObjectiveFunction<'a, T: FloatExt, M: Matrix<T>> {
     x: &'a M,
     y: Vec<usize>,
     k: usize,
-    phantom: PhantomData<&'a T>
+    phantom: PhantomData<&'a T>,
 }

-impl<'a, T: FloatExt, M: Matrix<T>> ObjectiveFunction<T, M> for MultiClassObjectiveFunction<'a, T, M> {
+impl<'a, T: FloatExt, M: Matrix<T>> ObjectiveFunction<T, M>
+    for MultiClassObjectiveFunction<'a, T, M>
+{
     fn f(&self, w_bias: &M) -> T {
         let mut f = T::zero();
         let mut prob = M::zeros(1, self.k);
         let (n, p) = self.x.shape();
         for i in 0..n {
             for j in 0..self.k {
-                prob.set(0, j, MultiClassObjectiveFunction::partial_dot(w_bias, self.x, j * (p + 1), i));
+                prob.set(
+                    0,
+                    j,
+                    MultiClassObjectiveFunction::partial_dot(w_bias, self.x, j * (p + 1), i),
+                );
             }
             prob.softmax_mut();
             f = f - prob.get(0, self.y[i]).ln();
         }
         f
     }

     fn df(&self, g: &mut M, w: &M) {
         g.copy_from(&M::zeros(1, g.shape().1));
         let mut prob = M::zeros(1, self.k);
         let (n, p) = self.x.shape();
         for i in 0..n {
             for j in 0..self.k {
-                prob.set(0, j, MultiClassObjectiveFunction::partial_dot(w, self.x, j * (p + 1), i));
+                prob.set(
+                    0,
+                    j,
+                    MultiClassObjectiveFunction::partial_dot(w, self.x, j * (p + 1), i),
+                );
             }
             prob.softmax_mut();
             for j in 0..self.k {
-                let yi =(if self.y[i] == j { T::one() } else { T::zero() }) - prob.get(0, j);
+                let yi = (if self.y[i] == j { T::one() } else { T::zero() }) - prob.get(0, j);
                 for l in 0..p {
                     let pos = j * (p + 1);
                     g.set(0, pos + l, g.get(0, pos + l) - yi * self.x.get(i, l));
                 }
                 g.set(0, j * (p + 1) + p, g.get(0, j * (p + 1) + p) - yi);
             }
         }
     }
 }
 impl<T: FloatExt, M: Matrix<T>> LogisticRegression<T, M> {
-    pub fn fit(x: &M, y: &M::RowVector) -> LogisticRegression<T, M>{
+    pub fn fit(x: &M, y: &M::RowVector) -> LogisticRegression<T, M> {
         let y_m = M::from_row_vector(y.clone());
         let (x_nrows, num_attributes) = x.shape();
         let (_, y_nrows) = y_m.shape();
@@ -158,271 +156,277 @@ impl<T: FloatExt, M: Matrix<T>> LogisticRegression<T, M> {
         if x_nrows != y_nrows {
             panic!("Number of rows of X doesn't match number of rows of Y");
         }
         let classes = y_m.unique();
         let k = classes.len();
         let mut yi: Vec<usize> = vec![0; y_nrows];
         for i in 0..y_nrows {
             let yc = y_m.get(0, i);
             yi[i] = classes.iter().position(|c| yc == *c).unwrap();
         }
         if k < 2 {
             panic!("Incorrect number of classes: {}", k);
         } else if k == 2 {
             let x0 = M::zeros(1, num_attributes + 1);
-            let objective = BinaryObjectiveFunction{
+            let objective = BinaryObjectiveFunction {
                 x: x,
                 y: yi,
-                phantom: PhantomData
+                phantom: PhantomData,
             };
             let result = LogisticRegression::minimize(x0, objective);
             LogisticRegression {
                 weights: result.x,
                 classes: classes,
                 num_attributes: num_attributes,
                 num_classes: k,
             }
         } else {
             let x0 = M::zeros(1, (num_attributes + 1) * k);
-            let objective = MultiClassObjectiveFunction{
+            let objective = MultiClassObjectiveFunction {
                 x: x,
                 y: yi,
                 k: k,
-                phantom: PhantomData
+                phantom: PhantomData,
             };
             let result = LogisticRegression::minimize(x0, objective);
             let weights = result.x.reshape(k, num_attributes + 1);
             LogisticRegression {
                 weights: weights,
                 classes: classes,
                 num_attributes: num_attributes,
-                num_classes: k
+                num_classes: k,
             }
         }
     }
     pub fn predict(&self, x: &M) -> M::RowVector {
         let n = x.shape().0;
         let mut result = M::zeros(1, n);
         if self.num_classes == 2 {
             let (nrows, _) = x.shape();
             let x_and_bias = x.v_stack(&M::ones(nrows, 1));
             let y_hat: Vec<T> = x_and_bias.dot(&self.weights.transpose()).to_raw_vector();
             for i in 0..n {
-                result.set(0, i, self.classes[if y_hat[i].sigmoid() > T::half() { 1 } else { 0 }]);
+                result.set(
+                    0,
+                    i,
+                    self.classes[if y_hat[i].sigmoid() > T::half() { 1 } else { 0 }],
+                );
             }
         } else {
             let (nrows, _) = x.shape();
             let x_and_bias = x.v_stack(&M::ones(nrows, 1));
             let y_hat = x_and_bias.dot(&self.weights.transpose());
             let class_idxs = y_hat.argmax();
             for i in 0..n {
                 result.set(0, i, self.classes[class_idxs[i]]);
             }
         }
         result.to_row_vector()
     }
     pub fn coefficients(&self) -> M {
-        self.weights.slice(0..self.num_classes, 0..self.num_attributes)
+        self.weights
+            .slice(0..self.num_classes, 0..self.num_attributes)
     }

     pub fn intercept(&self) -> M {
-        self.weights.slice(0..self.num_classes, self.num_attributes..self.num_attributes+1)
+        self.weights.slice(
+            0..self.num_classes,
+            self.num_attributes..self.num_attributes + 1,
+        )
     }

     fn minimize(x0: M, objective: impl ObjectiveFunction<T, M>) -> OptimizerResult<T, M> {
-        let f = |w: &M| -> T {
-            objective.f(w)
-        };
+        let f = |w: &M| -> T { objective.f(w) };
-        let df = |g: &mut M, w: &M| {
-            objective.df(g, w)
-        };
+        let df = |g: &mut M, w: &M| objective.df(g, w);
         let mut ls: Backtracking<T> = Default::default();
         ls.order = FunctionOrder::THIRD;
         let optimizer: LBFGS<T> = Default::default();
         optimizer.optimize(&f, &df, &x0, &ls)
     }
 }
 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::linalg::naive::dense_matrix::*;
+    use crate::metrics::*;
     use ndarray::{arr1, arr2, Array1};
     #[test]
     fn multiclass_objective_f() {
         let x = DenseMatrix::from_array(&[
             &[1., -5.],
-            &[ 2., 5.],
-            &[ 3., -2.],
-            &[ 1., 2.],
-            &[ 2., 0.],
-            &[ 6., -5.],
-            &[ 7., 5.],
-            &[ 6., -2.],
-            &[ 7., 2.],
-            &[ 6., 0.],
-            &[ 8., -5.],
-            &[ 9., 5.],
+            &[2., 5.],
+            &[3., -2.],
+            &[1., 2.],
+            &[2., 0.],
+            &[6., -5.],
+            &[7., 5.],
+            &[6., -2.],
+            &[7., 2.],
+            &[6., 0.],
+            &[8., -5.],
+            &[9., 5.],
             &[10., -2.],
-            &[ 8., 2.],
-            &[ 9., 0.]]);
+            &[8., 2.],
+            &[9., 0.],
+        ]);
         let y = vec![0, 0, 1, 1, 2, 1, 1, 0, 0, 2, 1, 1, 0, 0, 1];
-        let objective = MultiClassObjectiveFunction{
+        let objective = MultiClassObjectiveFunction {
             x: &x,
             y: y,
             k: 3,
-            phantom: PhantomData
+            phantom: PhantomData,
         };
         let mut g: DenseMatrix<f64> = DenseMatrix::zeros(1, 9);
-        objective.df(&mut g, &DenseMatrix::vector_from_array(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]));
-        objective.df(&mut g, &DenseMatrix::vector_from_array(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]));
+        objective.df(
+            &mut g,
+            &DenseMatrix::vector_from_array(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]),
+        );
+        objective.df(
+            &mut g,
+            &DenseMatrix::vector_from_array(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]),
+        );
         assert!((g.get(0, 0) + 33.000068218163484).abs() < std::f64::EPSILON);
-        let f = objective.f(&DenseMatrix::vector_from_array(&[1., 2., 3., 4., 5., 6., 7., 8., 9.]));
+        let f = objective.f(&DenseMatrix::vector_from_array(&[
+            1., 2., 3., 4., 5., 6., 7., 8., 9.,
+        ]));
         assert!((f - 408.0052230582765).abs() < std::f64::EPSILON);
     }
     #[test]
     fn binary_objective_f() {
         let x = DenseMatrix::from_array(&[
             &[1., -5.],
-            &[ 2., 5.],
-            &[ 3., -2.],
-            &[ 1., 2.],
-            &[ 2., 0.],
-            &[ 6., -5.],
-            &[ 7., 5.],
-            &[ 6., -2.],
-            &[ 7., 2.],
-            &[ 6., 0.],
-            &[ 8., -5.],
-            &[ 9., 5.],
+            &[2., 5.],
+            &[3., -2.],
+            &[1., 2.],
+            &[2., 0.],
+            &[6., -5.],
+            &[7., 5.],
+            &[6., -2.],
+            &[7., 2.],
+            &[6., 0.],
+            &[8., -5.],
+            &[9., 5.],
             &[10., -2.],
-            &[ 8., 2.],
-            &[ 9., 0.]]);
+            &[8., 2.],
+            &[9., 0.],
+        ]);
         let y = vec![0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1];
-        let objective = BinaryObjectiveFunction{
+        let objective = BinaryObjectiveFunction {
             x: &x,
             y: y,
-            phantom: PhantomData
+            phantom: PhantomData,
         };
         let mut g: DenseMatrix<f64> = DenseMatrix::zeros(1, 3);
         objective.df(&mut g, &DenseMatrix::vector_from_array(&[1., 2., 3.]));
         objective.df(&mut g, &DenseMatrix::vector_from_array(&[1., 2., 3.]));
         assert!((g.get(0, 0) - 26.051064349381285).abs() < std::f64::EPSILON);
         assert!((g.get(0, 1) - 10.239000702928523).abs() < std::f64::EPSILON);
         assert!((g.get(0, 2) - 3.869294270156324).abs() < std::f64::EPSILON);
         let f = objective.f(&DenseMatrix::vector_from_array(&[1., 2., 3.]));
         assert!((f - 59.76994756647412).abs() < std::f64::EPSILON);
     }
     #[test]
     fn lr_fit_predict() {
         let x = DenseMatrix::from_array(&[
             &[1., -5.],
-            &[ 2., 5.],
-            &[ 3., -2.],
-            &[ 1., 2.],
-            &[ 2., 0.],
-            &[ 6., -5.],
-            &[ 7., 5.],
-            &[ 6., -2.],
-            &[ 7., 2.],
-            &[ 6., 0.],
-            &[ 8., -5.],
-            &[ 9., 5.],
+            &[2., 5.],
+            &[3., -2.],
+            &[1., 2.],
+            &[2., 0.],
+            &[6., -5.],
+            &[7., 5.],
+            &[6., -2.],
+            &[7., 2.],
+            &[6., 0.],
+            &[8., -5.],
+            &[9., 5.],
             &[10., -2.],
-            &[ 8., 2.],
-            &[ 9., 0.]]);
+            &[8., 2.],
+            &[9., 0.],
+        ]);
         let y: Vec<f64> = vec![0., 0., 1., 1., 2., 1., 1., 0., 0., 2., 1., 1., 0., 0., 1.];
         let lr = LogisticRegression::fit(&x, &y);
         assert_eq!(lr.coefficients().shape(), (3, 2));
         assert_eq!(lr.intercept().shape(), (3, 1));
         assert!((lr.coefficients().get(0, 0) - 0.0435).abs() < 1e-4);
         assert!((lr.intercept().get(0, 0) - 0.1250).abs() < 1e-4);
         let y_hat = lr.predict(&x);
-        assert_eq!(y_hat, vec![0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]);
-    }
-
-    #[test]
-    fn serde(){
-        let x = DenseMatrix::from_array(&[
-            &[1., -5.],
-            &[ 2., 5.],
-            &[ 3., -2.],
-            &[ 1., 2.],
-            &[ 2., 0.],
-            &[ 6., -5.],
-            &[ 7., 5.],
-            &[ 6., -2.],
-            &[ 7., 2.],
-            &[ 6., 0.],
-            &[ 8., -5.],
-            &[ 9., 5.],
-            &[10., -2.],
-            &[ 8., 2.],
-            &[ 9., 0.]]);
-        let y: Vec<f64> = vec![0., 0., 1., 1., 2., 1., 1., 0., 0., 2., 1., 1., 0., 0., 1.];
-        let lr = LogisticRegression::fit(&x, &y);
-        let deserialized_lr: LogisticRegression<f64, DenseMatrix<f64>> = serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
-        assert_eq!(lr, deserialized_lr);
+        assert_eq!(
+            y_hat,
+            vec![0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+        );
     }

     #[test]
-    fn lr_fit_predict_iris() {
+    fn serde() {
+        let x = DenseMatrix::from_array(&[
+            &[1., -5.],
+            &[2., 5.],
+            &[3., -2.],
+            &[1., 2.],
+            &[2., 0.],
+            &[6., -5.],
+            &[7., 5.],
+            &[6., -2.],
+            &[7., 2.],
+            &[6., 0.],
+            &[8., -5.],
+            &[9., 5.],
+            &[10., -2.],
+            &[8., 2.],
+            &[9., 0.],
+        ]);
+        let y: Vec<f64> = vec![0., 0., 1., 1., 2., 1., 1., 0., 0., 2., 1., 1., 0., 0., 1.];
+        let lr = LogisticRegression::fit(&x, &y);
+        let deserialized_lr: LogisticRegression<f64, DenseMatrix<f64>> =
+            serde_json::from_str(&serde_json::to_string(&lr).unwrap()).unwrap();
+        assert_eq!(lr, deserialized_lr);
+    }
+
+    #[test]
+    fn lr_fit_predict_iris() {
         let x = arr2(&[
             [5.1, 3.5, 1.4, 0.2],
             [4.9, 3.0, 1.4, 0.2],
@@ -443,17 +447,22 @@ mod tests {
             [6.3, 3.3, 4.7, 1.6],
             [4.9, 2.4, 3.3, 1.0],
             [6.6, 2.9, 4.6, 1.3],
-            [5.2, 2.7, 3.9, 1.4]]);
-        let y: Array1<f64> = arr1(&[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]);
+            [5.2, 2.7, 3.9, 1.4],
+        ]);
+        let y: Array1<f64> = arr1(&[
+            0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
+        ]);
         let lr = LogisticRegression::fit(&x, &y);
         let y_hat = lr.predict(&x);
-        let error: f64 = y.into_iter().zip(y_hat.into_iter()).map(|(&a, &b)| (a - b).abs()).sum();
+        let error: f64 = y
+            .into_iter()
+            .zip(y_hat.into_iter())
+            .map(|(&a, &b)| (a - b).abs())
+            .sum();
         assert!(error <= 1.0);
     }
 }
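For orientation, the two objective functions reformatted above are the standard logistic-regression negative log-likelihoods, minimized in `minimize` by the crate's L-BFGS optimizer with a `Backtracking` line search set to `FunctionOrder::THIRD`. In notation of my own choosing (weight rows $w_j$, biases $b_j$, samples $x_i$, labels $y_i$; none of these symbols appear in the source), `BinaryObjectiveFunction::f` accumulates `wx.ln_1pe() - y[i] * wx`, i.e.

$$f(w) = \sum_{i=1}^{n} \left[ \ln\!\left(1 + e^{w \cdot x_i + b}\right) - y_i \left(w \cdot x_i + b\right) \right],$$

and `MultiClassObjectiveFunction::f` accumulates the softmax cross-entropy

$$f(W) = -\sum_{i=1}^{n} \ln \frac{e^{w_{y_i} \cdot x_i + b_{y_i}}}{\sum_{j=1}^{k} e^{w_j \cdot x_i + b_j}}.$$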
+1 -1
@@ -1,2 +1,2 @@
 pub mod linear_regression;
-pub mod logistic_regression;
+pub mod logistic_regression;