//! # Accuracy score
//!
//! Calculates accuracy of predictions \\(\hat{y}\\) when compared to true labels \\(y\\)
//!
//! \\[ accuracy(y, \hat{y}) = \frac{1}{n_{samples}} \sum_{i=1}^{n_{samples}} 1(y_i = \hat{y}_i) \\]
//!
//! Example:
//!
//! ```
//! use smartcore::metrics::accuracy::Accuracy;
//! let y_true: Vec<f64> = vec![0., 1., 2., 3.];
//! let y_pred: Vec<f64> = vec![0., 2., 1., 3.];
//!
//! let score: f64 = Accuracy {}.get_score(&y_true, &y_pred);
//! ```
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

use crate::linalg::BaseVector;
use crate::math::num::RealNumber;

/// Accuracy metric.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug)]
pub struct Accuracy {}

impl Accuracy {
    /// Function that calculates the accuracy score.
    /// * `y_true` - ground truth (correct) labels.
    /// * `y_pred` - predicted labels, as returned by a classifier.
    pub fn get_score<T: RealNumber, V: BaseVector<T>>(&self, y_true: &V, y_pred: &V) -> T {
        if y_true.len() != y_pred.len() {
            panic!(
                "The vector sizes don't match: {} != {}",
                y_true.len(),
                y_pred.len()
            );
        }

        let n = y_true.len();

        // Count predictions that match the true labels.
        let mut positive = 0;
        for i in 0..n {
            if y_true.get(i) == y_pred.get(i) {
                positive += 1;
            }
        }

        T::from_i64(positive).unwrap() / T::from_usize(n).unwrap()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn accuracy() {
        let y_pred: Vec<f64> = vec![0., 2., 1., 3.];
        let y_true: Vec<f64> = vec![0., 1., 2., 3.];

        let score1: f64 = Accuracy {}.get_score(&y_true, &y_pred);
        let score2: f64 = Accuracy {}.get_score(&y_true, &y_true);

        assert!((score1 - 0.5).abs() < 1e-8);
        assert!((score2 - 1.0).abs() < 1e-8);
    }
}
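
// A minimal sketch walking the doc example through the formula above by hand:
// positions 0 and 3 agree while positions 1 and 2 are swapped, so
// accuracy = 2/4 = 0.5. The `hand_check` module and test names here are
// hypothetical illustrations; the sketch assumes only the `BaseVector` impl
// for `Vec<f64>` already exercised in the `tests` module above.
#[cfg(test)]
mod hand_check {
    use super::*;

    #[test]
    fn doc_example_by_hand() {
        let y_true: Vec<f64> = vec![0., 1., 2., 3.];
        let y_pred: Vec<f64> = vec![0., 2., 1., 3.];

        // 1(y_0 = y^_0) + 1(y_3 = y^_3) = 2 matches out of n = 4 samples.
        let expected = 2.0 / 4.0;

        let score: f64 = Accuracy {}.get_score(&y_true, &y_pred);
        assert!((score - expected).abs() < 1e-8);
    }
}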