Use log likelihood to make calculations more stable (#28)

* Use log likelihood to make calculations more stable

* Fix a problem with the class count in CategoricalNB

* Use an approach similar to the one used in scikit-learn to determine the possible categories of each feature.
morenol
2020-11-16 23:56:50 -04:00
committed by GitHub
parent aeddbc8a21
commit 72e9f8293f
2 changed files with 93 additions and 51 deletions
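
A note on the motivation: naive Bayes scores a sample by multiplying one conditional probability per feature, and a long product of values below 1 underflows 64-bit floats to exactly 0.0, at which point all classes tie and the argmax is meaningless. Summing logarithms yields the same ranking without that risk. A minimal standalone sketch of the failure mode (illustrative only, not taken from this diff):

    fn main() {
        // 1000 features with conditional probability 0.01 each: the raw
        // product is 1e-2000, far below f64's smallest positive value
        // (~1e-308), so it underflows to exactly 0.0.
        let prob: f64 = std::iter::repeat(0.01f64).take(1000).product();
        assert_eq!(prob, 0.0);

        // The equivalent sum of logs stays comfortably in range: 1000 * ln(0.01).
        let log_likelihood: f64 = std::iter::repeat(0.01f64).take(1000).map(f64::ln).sum();
        assert!((log_likelihood - 1000.0 * 0.01f64.ln()).abs() < 1e-6);
    }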
+87 -47
@@ -6,11 +6,11 @@ use crate::naive_bayes::{BaseNaiveBayes, NBDistribution};
 use serde::{Deserialize, Serialize};
 
 /// Naive Bayes classifier for categorical features
+#[derive(Debug)]
 struct CategoricalNBDistribution<T: RealNumber> {
     class_labels: Vec<T>,
-    class_probabilities: Vec<T>,
-    coef: Vec<Vec<Vec<T>>>,
-    feature_categories: Vec<Vec<T>>,
+    class_priors: Vec<T>,
+    coefficients: Vec<Vec<Vec<T>>>,
 }
 
 impl<T: RealNumber, M: Matrix<T>> NBDistribution<T, M> for CategoricalNBDistribution<T> {
@@ -18,24 +18,22 @@ impl<T: RealNumber, M: Matrix<T>> NBDistribution<T, M> for CategoricalNBDistribu
         if class_index >= self.class_labels.len() {
             T::zero()
         } else {
-            self.class_probabilities[class_index]
+            self.class_priors[class_index]
         }
     }
 
-    fn conditional_probability(&self, class_index: usize, j: &M::RowVector) -> T {
+    fn log_likelihood(&self, class_index: usize, j: &M::RowVector) -> T {
         if class_index < self.class_labels.len() {
-            let mut prob = T::one();
+            let mut likelihood = T::zero();
             for feature in 0..j.len() {
-                let value = j.get(feature);
-                match self.feature_categories[feature]
-                    .iter()
-                    .position(|&t| t == value)
-                {
-                    Some(_i) => prob *= self.coef[class_index][feature][_i],
-                    None => return T::zero(),
+                let value = j.get(feature).floor().to_usize().unwrap();
+                if self.coefficients[class_index][feature].len() > value {
+                    likelihood += self.coefficients[class_index][feature][value];
+                } else {
+                    return T::zero();
                 }
             }
-            prob
+            likelihood
         } else {
             T::zero()
         }
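
With the coefficients stored as logarithms and categories encoded as indices, the per-feature lookup becomes a direct slice index rather than the previous linear position() search, and under the naive conditional-independence assumption the likelihood becomes a sum. What log_likelihood now returns for a sample x = (x_1, ..., x_n) and class c is

    \log P(x \mid c) = \sum_{j=1}^{n} \log P(x_j \mid c)

with each term read from coefficients[class_index][feature][value].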
@@ -74,31 +72,45 @@ impl<T: RealNumber> CategoricalNBDistribution<T> {
                 n_samples
             )));
         }
 
-        let mut y_sorted = y.to_vec();
-        y_sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
-        let mut class_labels = Vec::with_capacity(y.len());
-        class_labels.push(y_sorted[0]);
-        let mut classes_count = Vec::with_capacity(y.len());
-        let mut current_count = T::one();
-        for idx in 1..y_samples {
-            if y_sorted[idx] == y_sorted[idx - 1] {
-                current_count += T::one();
-            } else {
-                classes_count.push(current_count);
-                class_labels.push(y_sorted[idx]);
-                current_count = T::one()
-            }
-            classes_count.push(current_count);
-        }
+        let y: Vec<usize> = y
+            .to_vec()
+            .iter()
+            .map(|y_i| y_i.floor().to_usize().unwrap())
+            .collect();
+
+        let y_max = y
+            .iter()
+            .max()
+            .ok_or_else(|| Failed::fit(&"Failed to get the labels of y.".to_string()))?;
+
+        let class_labels: Vec<T> = (0..*y_max + 1)
+            .map(|label| T::from(label).unwrap())
+            .collect();
+        let mut classes_count: Vec<T> = vec![T::zero(); class_labels.len()];
+        for elem in y.iter() {
+            classes_count[*elem] += T::one();
+        }
 
         let mut feature_categories: Vec<Vec<T>> = Vec::with_capacity(n_features);
         for feature in 0..n_features {
-            let feature_types = x.get_col_as_vec(feature).unique();
+            let feature_max = x
+                .get_col_as_vec(feature)
+                .iter()
+                .map(|f_i| f_i.floor().to_usize().unwrap())
+                .max()
+                .ok_or_else(|| {
+                    Failed::fit(&format!(
+                        "Failed to get the categories for feature = {}",
+                        feature
+                    ))
+                })?;
+            let feature_types = (0..feature_max + 1)
+                .map(|feat| T::from(feat).unwrap())
+                .collect();
             feature_categories.push(feature_types);
         }
 
-        let mut coef: Vec<Vec<Vec<T>>> = Vec::with_capacity(class_labels.len());
+        let mut coefficients: Vec<Vec<Vec<T>>> = Vec::with_capacity(class_labels.len());
         for (label, label_count) in class_labels.iter().zip(classes_count.iter()) {
             let mut coef_i: Vec<Vec<T>> = Vec::with_capacity(n_features);
             for (feature_index, feature_options) in
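
The rewritten label handling assumes targets are integer-coded as 0, 1, ..., y_max, matching scikit-learn's CategoricalNB convention that classes and categories are consecutive integers starting at zero; counting then reduces to a bincount over that range. A small self-contained sketch of the same idea (the bincount name is illustrative, not from this codebase):

    /// Count occurrences of each integer label in 0..=max (a "bincount").
    fn bincount(y: &[usize]) -> Vec<usize> {
        let y_max = *y.iter().max().expect("y must be non-empty");
        let mut counts = vec![0usize; y_max + 1];
        for &label in y {
            counts[label] += 1;
        }
        counts
    }

    fn main() {
        // Class 1 never occurs but still gets a slot, exactly like the
        // (0..*y_max + 1) range used for class_labels in the hunk above.
        assert_eq!(bincount(&[0, 2, 2, 0]), vec![2, 0, 2]);
    }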
@@ -108,37 +120,36 @@ impl<T: RealNumber> CategoricalNBDistribution<T> {
                     .get_col_as_vec(feature_index)
                     .iter()
                     .enumerate()
-                    .filter(|(i, _j)| y.get(*i) == *label)
+                    .filter(|(i, _j)| T::from(y[*i]).unwrap() == *label)
                     .map(|(_, j)| *j)
                     .collect::<Vec<T>>();
-                let mut feat_count: Vec<usize> = Vec::with_capacity(feature_options.len());
-                for k in feature_options.iter() {
-                    let feat_k_count = col.iter().filter(|&v| v == k).count();
-                    feat_count.push(feat_k_count);
+                let mut feat_count: Vec<T> = vec![T::zero(); feature_options.len()];
+                for row in col.iter() {
+                    let index = row.floor().to_usize().unwrap();
+                    feat_count[index] += T::one();
                 }
                 let coef_i_j = feat_count
                     .iter()
                     .map(|c| {
-                        (T::from(*c).unwrap() + alpha)
-                            / (T::from(*label_count).unwrap()
-                                + T::from(feature_options.len()).unwrap() * alpha)
+                        ((*c + alpha)
+                            / (*label_count + T::from(feature_options.len()).unwrap() * alpha))
+                            .ln()
                     })
                     .collect::<Vec<T>>();
                 coef_i.push(coef_i_j);
             }
-            coef.push(coef_i);
+            coefficients.push(coef_i);
         }
-        let class_probabilities = classes_count
+        let class_priors = classes_count
             .into_iter()
             .map(|count| count / T::from(n_samples).unwrap())
             .collect::<Vec<T>>();
 
         Ok(Self {
             class_labels,
-            class_probabilities,
-            coef,
-            feature_categories,
+            class_priors,
+            coefficients,
         })
     }
 }
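
The coef_i_j computation above is Lidstone/Laplace smoothing applied in log space. For class c, feature j with n_j possible categories, and category t,

    \log P(x_j = t \mid y = c) = \ln \frac{N_{tjc} + \alpha}{N_c + \alpha n_j}

where N_{tjc} counts the training samples of class c whose feature j equals t, N_c is label_count, n_j is feature_options.len(), and \alpha is the alpha smoothing parameter. With \alpha > 0 no category has zero probability, so the logarithm is always finite; this is the same smoothed estimate scikit-learn's CategoricalNB uses.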
@@ -170,6 +181,7 @@ impl<T: RealNumber> Default for CategoricalNBParameters<T> {
 }
 
 /// CategoricalNB implements the categorical naive Bayes algorithm for categorically distributed data.
+#[derive(Debug)]
 pub struct CategoricalNB<T: RealNumber, M: Matrix<T>> {
     inner: BaseNaiveBayes<T, M, CategoricalNBDistribution<T>>,
 }
@@ -205,7 +217,7 @@ mod tests {
     use crate::linalg::naive::dense_matrix::DenseMatrix;
 
     #[test]
-    fn run_base_naive_bayes() {
+    fn run_categorical_naive_bayes() {
         let x = DenseMatrix::from_2d_array(&[
             &[0., 2., 1., 0.],
             &[0., 2., 1., 1.],
@@ -229,4 +241,32 @@ mod tests {
         let y_hat = cnb.predict(&x_test).unwrap();
         assert_eq!(y_hat, vec![0., 1.]);
     }
+
+    #[test]
+    fn run_categorical_naive_bayes2() {
+        let x = DenseMatrix::from_2d_array(&[
+            &[3., 4., 0., 1.],
+            &[3., 0., 0., 1.],
+            &[4., 4., 1., 2.],
+            &[4., 2., 4., 3.],
+            &[4., 2., 4., 2.],
+            &[4., 1., 1., 0.],
+            &[1., 1., 1., 1.],
+            &[0., 4., 1., 0.],
+            &[0., 3., 2., 1.],
+            &[0., 3., 1., 1.],
+            &[3., 4., 0., 1.],
+            &[3., 4., 2., 4.],
+            &[0., 3., 1., 2.],
+            &[0., 4., 1., 2.],
+        ]);
+        let y = vec![0., 0., 1., 1., 1., 0., 1., 0., 1., 1., 1., 1., 1., 0.];
+        let cnb = CategoricalNB::fit(&x, &y, Default::default()).unwrap();
+        let y_hat = cnb.predict(&x).unwrap();
+        assert_eq!(
+            y_hat,
+            vec![0., 0., 1., 1., 1., 0., 1., 0., 1., 1., 0., 1., 1., 1.]
+        );
+    }
 }
+6 -4
@@ -2,6 +2,7 @@ use crate::error::Failed;
 use crate::linalg::BaseVector;
 use crate::linalg::Matrix;
 use crate::math::num::RealNumber;
+use serde::{Deserialize, Serialize};
 use std::marker::PhantomData;
 
 /// Distribution used in the Naive Bayes classifier.
@@ -9,14 +10,15 @@ pub(crate) trait NBDistribution<T: RealNumber, M: Matrix<T>> {
     /// Prior of class at the given index.
     fn prior(&self, class_index: usize) -> T;
 
-    /// Conditional probability of sample j given class in the specified index.
-    fn conditional_probability(&self, class_index: usize, j: &M::RowVector) -> T;
+    /// Logarithm of conditional probability of sample j given class in the specified index.
+    fn log_likelihood(&self, class_index: usize, j: &M::RowVector) -> T;
 
     /// Possible classes of the distribution.
     fn classes(&self) -> &Vec<T>;
 }
 
 /// Base struct for the Naive Bayes classifier.
+#[derive(Serialize, Deserialize, Debug, PartialEq)]
 pub(crate) struct BaseNaiveBayes<T: RealNumber, M: Matrix<T>, D: NBDistribution<T, M>> {
     distribution: D,
     _phantom_t: PhantomData<T>,
@@ -49,8 +51,8 @@ impl<T: RealNumber, M: Matrix<T>, D: NBDistribution<T, M>> BaseNaiveBayes<T, M,
             .map(|(class_index, class)| {
                 (
                     class,
-                    self.distribution.conditional_probability(class_index, &row)
-                        * self.distribution.prior(class_index),
+                    self.distribution.log_likelihood(class_index, &row)
+                        + self.distribution.prior(class_index).ln(),
                 )
             })
             .max_by(|(_, p1), (_, p2)| p1.partial_cmp(p2).unwrap())
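
Taken together, predict now implements the maximum a posteriori decision rule in log space,

    \hat{y} = \arg\max_{c} \Big[ \ln P(c) + \sum_{j} \ln P(x_j \mid c) \Big]

which selects the same class as \arg\max_c P(c) \prod_j P(x_j \mid c) because the logarithm is strictly increasing, but the sum of logs cannot underflow the way the raw product can.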