pub fn f1_score<T>(
pred: &[T],
actual: &[T],
average: Average,
) -> Result<f32, LengthError>
The F1 score of a dataset.
Returns an F1 score where 1 is perfect and 0 is atrocious.
Supports macro and weighted averages.
#[macro_use] extern crate approx; // for the assert_ulps_eq! approximate-equality check
use parsnip::{Average, f1_score, LengthError};

// Wrapping the doc example in a main that returns Result lets the `?` operator compile standalone.
fn main() -> Result<(), LengthError> {
    let actual = vec![0, 1, 2, 0, 1, 2];
    let pred = vec![0, 2, 1, 0, 0, 1];

    assert_ulps_eq!(f1_score(&pred, &actual, Average::Macro)?, 0.26666666);
    assert_ulps_eq!(f1_score(&pred, &actual, Average::Weighted)?, 0.26666666);
    Ok(())
}
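For context (this arithmetic is standard F1 averaging, not something taken from parsnip's documentation): the macro average is the unweighted mean of the per-class F1 scores, while the weighted average weights each class's F1 by its support, i.e. how often that class appears in actual. In the example above every class appears exactly twice, so the two averages coincide at 0.8 / 3 ≈ 0.2667. The sketch below reproduces that arithmetic by hand; per_class_f1 is a hypothetical helper written for illustration only, not a parsnip function.

use std::collections::HashMap;

// Hypothetical helper (not part of parsnip): one-vs-rest F1 for a single class.
fn per_class_f1(pred: &[i32], actual: &[i32], class: i32) -> f32 {
    let tp = pred.iter().zip(actual).filter(|(p, a)| **p == class && **a == class).count() as f32;
    let fp = pred.iter().zip(actual).filter(|(p, a)| **p == class && **a != class).count() as f32;
    let fn_ = pred.iter().zip(actual).filter(|(p, a)| **p != class && **a == class).count() as f32;
    if tp == 0.0 {
        return 0.0; // convention: F1 is 0 when the class has no true positives
    }
    let precision = tp / (tp + fp);
    let recall = tp / (tp + fn_);
    2.0 * precision * recall / (precision + recall)
}

fn main() {
    let actual = vec![0, 1, 2, 0, 1, 2];
    let pred = vec![0, 2, 1, 0, 0, 1];

    // Support: how many times each class occurs in `actual`.
    let mut support: HashMap<i32, usize> = HashMap::new();
    for a in &actual {
        *support.entry(*a).or_insert(0) += 1;
    }
    let classes: Vec<i32> = support.keys().copied().collect();
    let n = actual.len() as f32;

    // Macro average: plain mean of the per-class F1 scores.
    let macro_f1 = classes
        .iter()
        .map(|c| per_class_f1(&pred, &actual, *c))
        .sum::<f32>()
        / classes.len() as f32;

    // Weighted average: per-class F1 weighted by that class's share of `actual`.
    let weighted_f1: f32 = classes
        .iter()
        .map(|c| per_class_f1(&pred, &actual, *c) * support[c] as f32 / n)
        .sum();

    // Both print ≈ 0.2667 here because every class has the same support.
    println!("macro = {macro_f1}, weighted = {weighted_f1}");
}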