feat: documents linear models

This commit is contained in:
Volodymyr Orlov
2020-09-02 15:35:16 -07:00
parent 6d313821fa
commit 32081852ad
4 changed files with 163 additions and 1 deletion
+1 -1
@@ -9,7 +9,7 @@
//! Dimension reduction is also used for the purposes of data visualization.
//!
//! ## References
//! * ["An Introduction to Statistical Learning", James G., Witten D., Hastie T., Tibshirani R., 10.3.1 K-Means Clustering, 6.3 Dimension Reduction Methods](http://faculty.marshall.usc.edu/gareth-james/ISL/)
//! * ["An Introduction to Statistical Learning", James G., Witten D., Hastie T., Tibshirani R., 6.3 Dimension Reduction Methods](http://faculty.marshall.usc.edu/gareth-james/ISL/)
/// PCA is a popular approach for deriving a low-dimensional set of features from a large set of variables.
pub mod pca;
+82
@@ -1,3 +1,63 @@
//! # Linear Regression
//!
//! Linear regression is a very straightforward approach for predicting a quantitative response \\(y\\) on the basis of a linear combination of explanatory variables \\(X\\).
//! Linear regression assumes that there is approximately a linear relationship between \\(X\\) and \\(y\\). Formally, we can write this linear relationship as
//!
//! \\[y = \beta_0 + \sum_{i=1}^n \beta_iX_i + \epsilon\\]
//!
//! where \\(\epsilon\\) is a mean-zero random error term and the regression coefficients \\(\beta_0, \beta_1, ..., \beta_n\\) are unknown and must be estimated.
//!
//! While regression coefficients can be estimated directly by solving
//!
//! \\[\hat{\beta} = (X^TX)^{-1}X^Ty \\]
//!
//! the \\((X^TX)^{-1}\\) term is both computationally expensive and numerically unstable. An alternative approach is to use a matrix decomposition to avoid this operation.
//! SmartCore uses [SVD](../../linalg/svd/index.html) and [QR](../../linalg/qr/index.html) matrix decompositions to find estimates of \\(\hat{\beta}\\).
//! The QR-based solver is more computationally efficient and more numerically stable than calculating the normal equation directly,
//! but it does not work for rank-deficient data matrices. The SVD-based solver is slower, but every matrix has an SVD, so it works for any data matrix.
//!
//! Example:
//!
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//! use smartcore::linear::linear_regression::*;
//!
//! // Longley dataset (https://www.statsmodels.org/stable/datasets/generated/longley.html)
//! let x = DenseMatrix::from_array(&[
//! &[234.289, 235.6, 159.0, 107.608, 1947., 60.323],
//! &[259.426, 232.5, 145.6, 108.632, 1948., 61.122],
//! &[258.054, 368.2, 161.6, 109.773, 1949., 60.171],
//! &[284.599, 335.1, 165.0, 110.929, 1950., 61.187],
//! &[328.975, 209.9, 309.9, 112.075, 1951., 63.221],
//! &[346.999, 193.2, 359.4, 113.270, 1952., 63.639],
//! &[365.385, 187.0, 354.7, 115.094, 1953., 64.989],
//! &[363.112, 357.8, 335.0, 116.219, 1954., 63.761],
//! &[397.469, 290.4, 304.8, 117.388, 1955., 66.019],
//! &[419.180, 282.2, 285.7, 118.734, 1956., 67.857],
//! &[442.769, 293.6, 279.8, 120.445, 1957., 68.169],
//! &[444.546, 468.1, 263.7, 121.950, 1958., 66.513],
//! &[482.704, 381.3, 255.2, 123.366, 1959., 68.655],
//! &[502.601, 393.1, 251.4, 125.368, 1960., 69.564],
//! &[518.173, 480.6, 257.2, 127.852, 1961., 69.331],
//! &[554.894, 400.7, 282.7, 130.081, 1962., 70.551],
//! ]);
//!
//! let y: Vec<f64> = vec![83.0, 88.5, 88.2, 89.5, 96.2, 98.1, 99.0,
//! 100.0, 101.2, 104.6, 108.4, 110.8, 112.6, 114.2, 115.7, 116.9];
//!
//! let lr = LinearRegression::fit(&x, &y, LinearRegressionParameters {
//! solver: LinearRegressionSolverName::QR, // or SVD
//! });
//!
//! let y_hat = lr.predict(&x);
//! ```
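//!
//! Since the QR-based solver does not handle rank-deficient data matrices, the solver can be
//! switched to SVD explicitly. The sketch below is a minimal, self-contained illustration on
//! made-up toy data; it also shows how to read the estimated coefficients and intercept back
//! from the fitted model:
//!
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//! use smartcore::linear::linear_regression::*;
//!
//! // Toy data, chosen only for illustration: y = 1 + 2*x1 + 0.5*x2.
//! let x = DenseMatrix::from_array(&[
//!     &[1., 2.],
//!     &[2., 1.],
//!     &[3., 4.],
//!     &[4., 3.],
//!     &[5., 6.],
//!     &[6., 5.],
//! ]);
//! let y: Vec<f64> = vec![4.0, 5.5, 9.0, 10.5, 14.0, 15.5];
//!
//! // Fit with the SVD-based solver instead of QR.
//! let lr = LinearRegression::fit(&x, &y, LinearRegressionParameters {
//!     solver: LinearRegressionSolverName::SVD,
//! });
//!
//! // Estimated coefficients (one per feature) and the intercept term.
//! let coefficients = lr.coefficients();
//! let intercept = lr.intercept();
//! ```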
//!
//! ## References:
//!
//! * ["An Introduction to Statistical Learning", James G., Witten D., Hastie T., Tibshirani R., 3. Linear Regression](http://faculty.marshall.usc.edu/gareth-james/ISL/)
//! * ["Numerical Recipes: The Art of Scientific Computing", Press W.H., Teukolsky S.A., Vetterling W.T, Flannery B.P, 3rd ed., Section 15.4 General Linear Least Squares](http://numerical.recipes/)
//!
//! <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS_CHTML"></script>
use std::fmt::Debug;
use serde::{Deserialize, Serialize};
@@ -6,16 +66,22 @@ use crate::linalg::Matrix;
use crate::math::num::RealNumber;
#[derive(Serialize, Deserialize, Debug)]
/// Approach to use for estimation of regression coefficients. QR is more efficient but SVD is more stable.
pub enum LinearRegressionSolverName {
/// QR decomposition, see [QR](../../linalg/qr/index.html)
QR,
/// SVD decomposition, see [SVD](../../linalg/svd/index.html)
SVD,
}
/// Linear Regression parameters
#[derive(Serialize, Deserialize, Debug)]
pub struct LinearRegressionParameters {
/// Solver to use for estimation of regression coefficients.
pub solver: LinearRegressionSolverName,
}
/// Linear Regression
#[derive(Serialize, Deserialize, Debug)]
pub struct LinearRegression<T: RealNumber, M: Matrix<T>> {
coefficients: M,
@@ -39,6 +105,10 @@ impl<T: RealNumber, M: Matrix<T>> PartialEq for LinearRegression<T, M> {
}
impl<T: RealNumber, M: Matrix<T>> LinearRegression<T, M> {
/// Fits Linear Regression to your data.
/// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
/// * `y` - target values
/// * `parameters` - other parameters, use `Default::default()` to set parameters to default values.
pub fn fit(
x: &M,
y: &M::RowVector,
@@ -69,12 +139,24 @@ impl<T: RealNumber, M: Matrix<T>> LinearRegression<T, M> {
}
}
/// Predict target values from `x`
/// * `x` - _KxM_ data where _K_ is number of observations and _M_ is number of features.
pub fn predict(&self, x: &M) -> M::RowVector {
let (nrows, _) = x.shape();
let mut y_hat = x.dot(&self.coefficients);
y_hat.add_mut(&M::fill(nrows, 1, self.intercept));
y_hat.transpose().to_row_vector()
}
/// Get the estimated regression coefficients
pub fn coefficients(&self) -> M {
self.coefficients.clone()
}
/// Get the estimated intercept
pub fn intercept(&self) -> T {
self.intercept
}
}
#[cfg(test)]
+59
@@ -1,3 +1,54 @@
//! # Logistic Regression
//!
//! Like [Linear Regression](../linear_regression/index.html), logistic regression uses a linear combination of explanatory variables \\(X\\) to explain the outcome, but rather than modeling the response directly,
//! it models the probability that \\(y\\) belongs to a particular category, \\(Pr(y = 1|X)\\), as:
//!
//! \\[ Pr(y = 1|X) = \frac{e^{\beta_0 + \sum_{i=1}^n \beta_iX_i}}{1 + e^{\beta_0 + \sum_{i=1}^n \beta_iX_i}} \\]
//!
//! SmartCore uses the [limited-memory BFGS](https://en.wikipedia.org/wiki/Limited-memory_BFGS) method to find estimates of the regression coefficients, \\(\beta\\).
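//!
//! Equivalently, the log-odds (logit) of the positive class is a linear function of the predictors:
//!
//! \\[ \log \frac{Pr(y = 1|X)}{1 - Pr(y = 1|X)} = \beta_0 + \sum_{i=1}^n \beta_iX_i \\]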
//!
//! Example:
//!
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//! use smartcore::linear::logistic_regression::*;
//!
//! //Iris data
//! let x = DenseMatrix::from_array(&[
//! &[5.1, 3.5, 1.4, 0.2],
//! &[4.9, 3.0, 1.4, 0.2],
//! &[4.7, 3.2, 1.3, 0.2],
//! &[4.6, 3.1, 1.5, 0.2],
//! &[5.0, 3.6, 1.4, 0.2],
//! &[5.4, 3.9, 1.7, 0.4],
//! &[4.6, 3.4, 1.4, 0.3],
//! &[5.0, 3.4, 1.5, 0.2],
//! &[4.4, 2.9, 1.4, 0.2],
//! &[4.9, 3.1, 1.5, 0.1],
//! &[7.0, 3.2, 4.7, 1.4],
//! &[6.4, 3.2, 4.5, 1.5],
//! &[6.9, 3.1, 4.9, 1.5],
//! &[5.5, 2.3, 4.0, 1.3],
//! &[6.5, 2.8, 4.6, 1.5],
//! &[5.7, 2.8, 4.5, 1.3],
//! &[6.3, 3.3, 4.7, 1.6],
//! &[4.9, 2.4, 3.3, 1.0],
//! &[6.6, 2.9, 4.6, 1.3],
//! &[5.2, 2.7, 3.9, 1.4],
//! ]);
//! let y: Vec<f64> = vec![
//! 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
//! ];
//!
//! let lr = LogisticRegression::fit(&x, &y);
//!
//! let y_hat = lr.predict(&x);
//! ```
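//!
//! The estimated coefficients and intercept of the fitted model can be inspected as well.
//! Below is a minimal, self-contained sketch on made-up toy data (two well-separated groups),
//! shown only for illustration:
//!
//! ```
//! use smartcore::linalg::naive::dense_matrix::*;
//! use smartcore::linear::logistic_regression::*;
//!
//! // Toy data: class 1 has noticeably larger feature values than class 0.
//! let x = DenseMatrix::from_array(&[
//!     &[1.0, 2.0],
//!     &[1.5, 1.0],
//!     &[2.0, 2.5],
//!     &[2.5, 1.5],
//!     &[7.0, 8.0],
//!     &[7.5, 7.0],
//!     &[8.0, 8.5],
//!     &[8.5, 7.5],
//! ]);
//! let y: Vec<f64> = vec![0., 0., 0., 0., 1., 1., 1., 1.];
//!
//! let lr = LogisticRegression::fit(&x, &y);
//!
//! // Estimated coefficients and intercept of the fitted model.
//! let coefficients = lr.coefficients();
//! let intercept = lr.intercept();
//! ```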
//!
//! ## References:
//! * ["An Introduction to Statistical Learning", James G., Witten D., Hastie T., Tibshirani R., 4.3 Logistic Regression](http://faculty.marshall.usc.edu/gareth-james/ISL/)
//! * ["On the Limited Memory Method for Large Scale Optimization", Nocedal et al., Mathematical Programming, 1989](http://users.iems.northwestern.edu/~nocedal/PDFfiles/limited.pdf)
//! <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS_CHTML"></script>
use std::fmt::Debug;
use std::marker::PhantomData;
@@ -10,6 +61,7 @@ use crate::optimization::first_order::{FirstOrderOptimizer, OptimizerResult};
use crate::optimization::line_search::Backtracking;
use crate::optimization::FunctionOrder;
/// Logistic Regression
#[derive(Serialize, Deserialize, Debug)]
pub struct LogisticRegression<T: RealNumber, M: Matrix<T>> {
weights: M,
@@ -150,6 +202,9 @@ impl<'a, T: RealNumber, M: Matrix<T>> ObjectiveFunction<T, M>
}
impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
/// Fits Logistic Regression to your data.
/// * `x` - _NxM_ matrix with _N_ observations and _M_ features in each observation.
/// * `y` - target class values
pub fn fit(x: &M, y: &M::RowVector) -> LogisticRegression<T, M> {
let y_m = M::from_row_vector(y.clone());
let (x_nrows, num_attributes) = x.shape();
@@ -212,6 +267,8 @@ impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
}
}
/// Predict class labels for samples in `x`.
/// * `x` - _KxM_ data where _K_ is number of observations and _M_ is number of features.
pub fn predict(&self, x: &M) -> M::RowVector {
let n = x.shape().0;
let mut result = M::zeros(1, n);
@@ -238,11 +295,13 @@ impl<T: RealNumber, M: Matrix<T>> LogisticRegression<T, M> {
result.to_row_vector()
}
/// Get the estimated regression coefficients
pub fn coefficients(&self) -> M {
self.weights
.slice(0..self.num_classes, 0..self.num_attributes)
}
/// Get the estimated intercept
pub fn intercept(&self) -> M {
self.weights.slice(
0..self.num_classes,
+21
@@ -1,2 +1,23 @@
//! # Linear Models
//! Linear models describe a continuous response variable as a function of one or more predictor variables.
//! The model relates a dependent variable \\(y\\) (also called the response) to one or more independent, or explanatory, variables \\(X_i\\). The general equation for a linear model is:
//! \\[y = \beta_0 + \sum_{i=1}^n \beta_iX_i + \epsilon\\]
//!
//! where \\(\beta_0\\) is the intercept term (the expected value of \\(y\\) when all \\(X_i = 0\\)), \\(\epsilon\\) is an error term that is independent of \\(X\\), and \\(\beta_i\\)
//! is the average increase in \\(y\\) associated with a one-unit increase in \\(X_i\\).
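//! For example, in the fitted model \\(\hat{y} = 2 + 3X_1\\), the predicted response increases by 3, on average, for every one-unit increase in \\(X_1\\).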
//!
//! Model assumptions:
//! * _Linearity_. The relationship between X and the mean of y is linear.
//! * _Constant variance_. The variance of the residuals is the same for any value of X.
//! * _Normality_. For any fixed value of X, y is normally distributed.
//! * _Independence_. Observations are independent of each other.
//!
//! ## References:
//!
//! * ["An Introduction to Statistical Learning", James G., Witten D., Hastie T., Tibshirani R., 3. Linear Regression](http://faculty.marshall.usc.edu/gareth-james/ISL/)
//! * ["The Statistical Sleuth, A Course in Methods of Data Analysis", Ramsey F.L., Schafer D.W., Ch 7, 8, 3rd edition, 2013](http://www.statisticalsleuth.com/)
//!
//! <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS_CHTML"></script>
pub mod linear_regression;
pub mod logistic_regression;