use ferrolearn_core::error::FerroError;
use ferrolearn_core::introspection::HasClasses;
use ferrolearn_core::pipeline::{FittedPipelineEstimator, PipelineEstimator};
use ferrolearn_core::traits::{Fit, Predict};
use ndarray::{Array1, Array2};
use num_traits::{Float, FromPrimitive, ToPrimitive};
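/// Multinomial Naive Bayes classifier for discrete (count-valued) features.
///
/// Class-conditional feature probabilities are estimated from additively
/// smoothed feature counts (`alpha`). Class priors are learned from class
/// frequencies (`fit_prior`), supplied explicitly (`class_prior`, in
/// ascending order of class label), or held uniform.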
#[derive(Debug, Clone)]
pub struct MultinomialNB<F> {
pub alpha: F,
pub class_prior: Option<Vec<F>>,
pub fit_prior: bool,
pub force_alpha: bool,
}
impl<F: Float> MultinomialNB<F> {
#[must_use]
pub fn new() -> Self {
Self {
alpha: F::one(),
class_prior: None,
fit_prior: true,
force_alpha: true,
}
}
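    /// Sets the additive smoothing parameter applied to feature counts.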
#[must_use]
pub fn with_alpha(mut self, alpha: F) -> Self {
self.alpha = alpha;
self
}
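    /// Supplies fixed class priors (in ascending order of class label),
    /// overriding priors learned from the data.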
#[must_use]
pub fn with_class_prior(mut self, priors: Vec<F>) -> Self {
self.class_prior = Some(priors);
self
}
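    /// Controls whether class priors are estimated from class frequencies;
    /// when `false`, a uniform prior is used instead.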
#[must_use]
pub fn with_fit_prior(mut self, fit_prior: bool) -> Self {
self.fit_prior = fit_prior;
self
}
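    /// Forwarded to `clamp_alpha` during fitting; when `true`, `alpha` is
    /// used as given rather than clamped to a safe minimum.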
#[must_use]
pub fn with_force_alpha(mut self, force_alpha: bool) -> Self {
self.force_alpha = force_alpha;
self
}
}
impl<F: Float> Default for MultinomialNB<F> {
fn default() -> Self {
Self::new()
}
}
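/// A fitted multinomial Naive Bayes model.
///
/// Stores the learned log priors and log feature probabilities used for
/// prediction, plus the raw per-class feature and sample counts needed by
/// `partial_fit` for incremental updates.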
#[derive(Debug, Clone)]
pub struct FittedMultinomialNB<F> {
classes: Vec<usize>,
log_prior: Array1<F>,
log_theta: Array2<F>,
feature_counts: Array2<F>,
class_counts: Vec<usize>,
alpha: F,
class_prior: Option<Vec<F>>,
fit_prior: bool,
}
impl<F: Float + Send + Sync + 'static> Fit<Array2<F>, Array1<usize>> for MultinomialNB<F> {
type Fitted = FittedMultinomialNB<F>;
type Error = FerroError;
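    /// Fits the model: validates the inputs (non-empty, matching shapes,
    /// non-negative features), accumulates per-class feature counts, and
    /// computes smoothed log probabilities and log priors.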
fn fit(&self, x: &Array2<F>, y: &Array1<usize>) -> Result<FittedMultinomialNB<F>, FerroError> {
let (n_samples, n_features) = x.dim();
if n_samples == 0 {
return Err(FerroError::InsufficientSamples {
required: 1,
actual: 0,
context: "MultinomialNB requires at least one sample".into(),
});
}
if n_samples != y.len() {
return Err(FerroError::ShapeMismatch {
expected: vec![n_samples],
actual: vec![y.len()],
context: "y length must match number of samples in X".into(),
});
}
if x.iter().any(|&v| v < F::zero()) {
return Err(FerroError::InvalidParameter {
name: "X".into(),
reason: "MultinomialNB requires non-negative feature values".into(),
});
}
let mut classes: Vec<usize> = y.to_vec();
classes.sort_unstable();
classes.dedup();
let n_classes = classes.len();
let n_f = F::from(n_samples).unwrap();
let n_feat_f = F::from(n_features).unwrap();
let alpha = crate::clamp_alpha(self.alpha, self.force_alpha);
let mut log_prior = Array1::<F>::zeros(n_classes);
let mut log_theta = Array2::<F>::zeros((n_classes, n_features));
let mut all_feature_counts = Array2::<F>::zeros((n_classes, n_features));
let mut class_counts_vec = vec![0usize; n_classes];
for (ci, &class_label) in classes.iter().enumerate() {
let class_indices: Vec<usize> = y
.iter()
.enumerate()
.filter_map(|(i, &label)| if label == class_label { Some(i) } else { None })
.collect();
let n_c = class_indices.len();
let n_c_f = F::from(n_c).unwrap();
log_prior[ci] = (n_c_f / n_f).ln();
class_counts_vec[ci] = n_c;
for &i in &class_indices {
for j in 0..n_features {
all_feature_counts[[ci, j]] = all_feature_counts[[ci, j]] + x[[i, j]];
}
}
let total_count = all_feature_counts.row(ci).sum();
let denom = total_count + alpha * n_feat_f;
for j in 0..n_features {
log_theta[[ci, j]] = ((all_feature_counts[[ci, j]] + alpha) / denom).ln();
}
}
if let Some(ref priors) = self.class_prior {
if priors.len() != n_classes {
return Err(FerroError::InvalidParameter {
name: "class_prior".into(),
reason: format!(
"length {} does not match number of classes {}",
priors.len(),
n_classes
),
});
}
for (ci, &p) in priors.iter().enumerate() {
log_prior[ci] = p.ln();
}
} else if !self.fit_prior {
let uniform = (F::one() / F::from(n_classes).unwrap()).ln();
for ci in 0..n_classes {
log_prior[ci] = uniform;
}
}
Ok(FittedMultinomialNB {
classes,
log_prior,
log_theta,
feature_counts: all_feature_counts,
class_counts: class_counts_vec,
alpha,
class_prior: self.class_prior.clone(),
fit_prior: self.fit_prior,
})
}
}
impl<F: Float + Send + Sync + 'static> FittedMultinomialNB<F> {
pub fn partial_fit(&mut self, x: &Array2<F>, y: &Array1<usize>) -> Result<(), FerroError> {
let (n_samples, n_features) = x.dim();
if n_samples == 0 {
return Ok(());
}
if n_samples != y.len() {
return Err(FerroError::ShapeMismatch {
expected: vec![n_samples],
actual: vec![y.len()],
context: "y length must match number of samples in X".into(),
});
}
if n_features != self.log_theta.ncols() {
return Err(FerroError::ShapeMismatch {
expected: vec![self.log_theta.ncols()],
actual: vec![n_features],
context: "number of features must match fitted MultinomialNB".into(),
});
}
        if x.iter().any(|&v| v < F::zero()) {
            return Err(FerroError::InvalidParameter {
                name: "X".into(),
                reason: "MultinomialNB requires non-negative feature values".into(),
            });
        }
        // Reject labels unseen during the initial fit instead of silently
        // dropping those samples.
        if let Some(&unknown) = y.iter().find(|label| !self.classes.contains(*label)) {
            return Err(FerroError::InvalidParameter {
                name: "y".into(),
                reason: format!("class label {unknown} was not present during fit"),
            });
        }
let n_feat_f = F::from(n_features).unwrap();
for (ci, &class_label) in self.classes.clone().iter().enumerate() {
let new_indices: Vec<usize> = y
.iter()
.enumerate()
.filter_map(|(i, &label)| if label == class_label { Some(i) } else { None })
.collect();
if new_indices.is_empty() {
continue;
}
self.class_counts[ci] += new_indices.len();
for &i in &new_indices {
for j in 0..n_features {
self.feature_counts[[ci, j]] = self.feature_counts[[ci, j]] + x[[i, j]];
}
}
}
let n_classes = self.classes.len();
for ci in 0..n_classes {
let total_count = self.feature_counts.row(ci).sum();
let denom = total_count + self.alpha * n_feat_f;
for j in 0..n_features {
self.log_theta[[ci, j]] =
((self.feature_counts[[ci, j]] + self.alpha) / denom).ln();
}
}
if self.class_prior.is_none() {
if self.fit_prior {
let total: usize = self.class_counts.iter().sum();
let total_f = F::from(total).unwrap();
for (ci, &count) in self.class_counts.iter().enumerate() {
self.log_prior[ci] = (F::from(count).unwrap() / total_f).ln();
}
} else {
let uniform = (F::one() / F::from(n_classes).unwrap()).ln();
for ci in 0..n_classes {
self.log_prior[ci] = uniform;
}
}
}
Ok(())
}
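    /// Computes the unnormalized joint log likelihood
    /// `log P(c) + sum_j x[i, j] * log_theta[c, j]` for every sample/class pair.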
fn joint_log_likelihood(&self, x: &Array2<F>) -> Array2<F> {
let n_samples = x.nrows();
let n_classes = self.classes.len();
let n_features = x.ncols();
let mut scores = Array2::<F>::zeros((n_samples, n_classes));
for i in 0..n_samples {
for ci in 0..n_classes {
let mut score = self.log_prior[ci];
for j in 0..n_features {
score = score + x[[i, j]] * self.log_theta[[ci, j]];
}
scores[[i, ci]] = score;
}
}
scores
}
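    /// Returns normalized class membership probabilities.
    ///
    /// Each row's maximum joint log likelihood is subtracted before
    /// exponentiating (the usual overflow guard), then the row is
    /// normalized to sum to one.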
pub fn predict_proba(&self, x: &Array2<F>) -> Result<Array2<F>, FerroError> {
let n_features_fitted = self.log_theta.ncols();
if x.ncols() != n_features_fitted {
return Err(FerroError::ShapeMismatch {
expected: vec![n_features_fitted],
actual: vec![x.ncols()],
context: "number of features must match fitted MultinomialNB".into(),
});
}
let log_scores = self.joint_log_likelihood(x);
let n_samples = x.nrows();
let n_classes = self.classes.len();
let mut proba = Array2::<F>::zeros((n_samples, n_classes));
for i in 0..n_samples {
let max_score = log_scores
.row(i)
.iter()
.fold(F::neg_infinity(), |a, &b| a.max(b));
let mut row_sum = F::zero();
for ci in 0..n_classes {
let p = (log_scores[[i, ci]] - max_score).exp();
proba[[i, ci]] = p;
row_sum = row_sum + p;
}
for ci in 0..n_classes {
proba[[i, ci]] = proba[[i, ci]] / row_sum;
}
}
Ok(proba)
}
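    /// Returns the unnormalized joint log likelihood for each sample and class.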
pub fn predict_joint_log_proba(&self, x: &Array2<F>) -> Result<Array2<F>, FerroError> {
let n_features_fitted = self.log_theta.ncols();
if x.ncols() != n_features_fitted {
return Err(FerroError::ShapeMismatch {
expected: vec![n_features_fitted],
actual: vec![x.ncols()],
context: "number of features must match fitted MultinomialNB".into(),
});
}
Ok(self.joint_log_likelihood(x))
}
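    /// Returns class log probabilities: the joint log likelihood normalized
    /// with a row-wise log-softmax.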
pub fn predict_log_proba(&self, x: &Array2<F>) -> Result<Array2<F>, FerroError> {
let jll = self.predict_joint_log_proba(x)?;
Ok(crate::log_softmax_rows(&jll))
}
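    /// Returns the mean accuracy of `predict(x)` against the labels `y`
    /// (zero for empty input).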
pub fn score(&self, x: &Array2<F>, y: &Array1<usize>) -> Result<F, FerroError> {
if x.nrows() != y.len() {
return Err(FerroError::ShapeMismatch {
expected: vec![x.nrows()],
actual: vec![y.len()],
context: "y length must match number of samples in X".into(),
});
}
let preds = self.predict(x)?;
let n = y.len();
if n == 0 {
return Ok(F::zero());
}
let correct = preds.iter().zip(y.iter()).filter(|(p, t)| p == t).count();
Ok(F::from(correct).unwrap() / F::from(n).unwrap())
}
}
impl<F: Float + Send + Sync + 'static> Predict<Array2<F>> for FittedMultinomialNB<F> {
type Output = Array1<usize>;
type Error = FerroError;
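    /// Predicts, for each sample, the class label with the highest joint
    /// log likelihood.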
fn predict(&self, x: &Array2<F>) -> Result<Array1<usize>, FerroError> {
let n_features_fitted = self.log_theta.ncols();
if x.ncols() != n_features_fitted {
return Err(FerroError::ShapeMismatch {
expected: vec![n_features_fitted],
actual: vec![x.ncols()],
context: "number of features must match fitted MultinomialNB".into(),
});
}
let scores = self.joint_log_likelihood(x);
let n_samples = x.nrows();
let n_classes = self.classes.len();
let mut predictions = Array1::<usize>::zeros(n_samples);
for i in 0..n_samples {
let mut best_class = 0;
let mut best_score = scores[[i, 0]];
for ci in 1..n_classes {
if scores[[i, ci]] > best_score {
best_score = scores[[i, ci]];
best_class = ci;
}
}
predictions[i] = self.classes[best_class];
}
Ok(predictions)
}
}
impl<F: Float + Send + Sync + 'static> HasClasses for FittedMultinomialNB<F> {
fn classes(&self) -> &[usize] {
&self.classes
}
fn n_classes(&self) -> usize {
self.classes.len()
}
}
impl<F: Float + ToPrimitive + FromPrimitive + Send + Sync + 'static> PipelineEstimator<F>
for MultinomialNB<F>
{
fn fit_pipeline(
&self,
x: &Array2<F>,
y: &Array1<F>,
) -> Result<Box<dyn FittedPipelineEstimator<F>>, FerroError> {
        // Convert float labels to class indices; `to_usize` truncates and
        // returns `None` for negative, NaN, or out-of-range values, which we
        // surface as an error instead of silently mapping them to class 0.
        let mut y_usize = Array1::<usize>::zeros(y.len());
        for (i, v) in y.iter().enumerate() {
            y_usize[i] = v.to_usize().ok_or_else(|| FerroError::InvalidParameter {
                name: "y".into(),
                reason: format!("label at index {i} is not a non-negative integer"),
            })?;
        }
let fitted = self.fit(x, &y_usize)?;
Ok(Box::new(FittedMultinomialNBPipeline(fitted)))
}
}
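/// Newtype adapter exposing a `FittedMultinomialNB` through the type-erased
/// pipeline interface, converting predicted labels back to `F`.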
struct FittedMultinomialNBPipeline<F: Float + Send + Sync + 'static>(FittedMultinomialNB<F>);
// `FittedMultinomialNB<F>` is `Send + Sync` whenever `F` is, so the newtype
// wrapper inherits those auto traits and no unsafe impls are needed.
impl<F: Float + ToPrimitive + FromPrimitive + Send + Sync + 'static> FittedPipelineEstimator<F>
for FittedMultinomialNBPipeline<F>
{
fn predict_pipeline(&self, x: &Array2<F>) -> Result<Array1<F>, FerroError> {
let preds = self.0.predict(x)?;
Ok(preds.mapv(|v| F::from_usize(v).unwrap_or_else(F::nan)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
use ndarray::array;
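    // Six count vectors: the first three rows put most of their mass on
    // feature 0 (class 0), the last three on feature 2 (class 1).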
fn make_count_data() -> (Array2<f64>, Array1<usize>) {
        let x = Array2::from_shape_vec(
            (6, 3),
            vec![
                5.0, 1.0, 0.0, //
                4.0, 2.0, 0.0, //
                6.0, 0.0, 1.0, //
                0.0, 1.0, 5.0, //
                1.0, 0.0, 4.0, //
                0.0, 2.0, 6.0, //
            ],
        )
        .unwrap();
let y = array![0usize, 0, 0, 1, 1, 1];
(x, y)
}
#[test]
fn test_multinomial_nb_fit_predict() {
let (x, y) = make_count_data();
let model = MultinomialNB::<f64>::new();
let fitted = model.fit(&x, &y).unwrap();
let preds = fitted.predict(&x).unwrap();
let correct = preds.iter().zip(y.iter()).filter(|(p, a)| p == a).count();
assert_eq!(correct, 6);
}
#[test]
fn test_multinomial_nb_predict_proba_sums_to_one() {
let (x, y) = make_count_data();
let model = MultinomialNB::<f64>::new();
let fitted = model.fit(&x, &y).unwrap();
let proba = fitted.predict_proba(&x).unwrap();
for i in 0..proba.nrows() {
assert_relative_eq!(proba.row(i).sum(), 1.0, epsilon = 1e-10);
}
}
#[test]
fn test_multinomial_nb_has_classes() {
let (x, y) = make_count_data();
let model = MultinomialNB::<f64>::new();
let fitted = model.fit(&x, &y).unwrap();
assert_eq!(fitted.classes(), &[0, 1]);
assert_eq!(fitted.n_classes(), 2);
}
#[test]
fn test_multinomial_nb_alpha_smoothing_effect() {
let (x, y) = make_count_data();
let model_sharp = MultinomialNB::<f64>::new().with_alpha(0.0);
let fitted_sharp = model_sharp.fit(&x, &y).unwrap();
let proba_sharp = fitted_sharp.predict_proba(&x).unwrap();
let model_smooth = MultinomialNB::<f64>::new().with_alpha(100.0);
let fitted_smooth = model_smooth.fit(&x, &y).unwrap();
let proba_smooth = fitted_smooth.predict_proba(&x).unwrap();
assert!(proba_smooth[[0, 0]] < proba_sharp[[0, 0]]);
}
#[test]
fn test_multinomial_nb_negative_features_error() {
let x =
Array2::from_shape_vec((4, 2), vec![1.0, 2.0, -1.0, 3.0, 2.0, 1.0, 0.0, 4.0]).unwrap();
let y = array![0usize, 0, 1, 1];
let model = MultinomialNB::<f64>::new();
let result = model.fit(&x, &y);
assert!(result.is_err());
match result.unwrap_err() {
FerroError::InvalidParameter { name, .. } => assert_eq!(name, "X"),
e => panic!("expected InvalidParameter, got {e:?}"),
}
}
#[test]
fn test_multinomial_nb_shape_mismatch_fit() {
let x = Array2::from_shape_vec((4, 3), vec![1.0; 12]).unwrap();
        let y = array![0usize, 1];
        let model = MultinomialNB::<f64>::new();
assert!(model.fit(&x, &y).is_err());
}
#[test]
fn test_multinomial_nb_shape_mismatch_predict() {
let (x, y) = make_count_data();
let model = MultinomialNB::<f64>::new();
let fitted = model.fit(&x, &y).unwrap();
let x_bad = Array2::from_shape_vec((3, 5), vec![1.0; 15]).unwrap();
assert!(fitted.predict(&x_bad).is_err());
assert!(fitted.predict_proba(&x_bad).is_err());
}
#[test]
fn test_multinomial_nb_single_class() {
let x = Array2::from_shape_vec((3, 3), vec![1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0])
.unwrap();
let y = array![2usize, 2, 2];
let model = MultinomialNB::<f64>::new();
let fitted = model.fit(&x, &y).unwrap();
assert_eq!(fitted.classes(), &[2]);
let preds = fitted.predict(&x).unwrap();
assert!(preds.iter().all(|&p| p == 2));
}
#[test]
fn test_multinomial_nb_empty_data() {
let x = Array2::<f64>::zeros((0, 3));
let y = Array1::<usize>::zeros(0);
let model = MultinomialNB::<f64>::new();
assert!(model.fit(&x, &y).is_err());
}
#[test]
fn test_multinomial_nb_default() {
let model = MultinomialNB::<f64>::default();
assert_relative_eq!(model.alpha, 1.0, epsilon = 1e-15);
}
#[test]
fn test_multinomial_nb_partial_fit() {
let x1 = Array2::from_shape_vec(
(4, 3),
vec![5.0, 1.0, 0.0, 4.0, 2.0, 0.0, 0.0, 1.0, 5.0, 1.0, 0.0, 4.0],
)
.unwrap();
let y1 = array![0usize, 0, 1, 1];
let model = MultinomialNB::<f64>::new();
let mut fitted = model.fit(&x1, &y1).unwrap();
let x2 = Array2::from_shape_vec((2, 3), vec![6.0, 0.0, 1.0, 0.0, 2.0, 6.0]).unwrap();
let y2 = array![0usize, 1];
fitted.partial_fit(&x2, &y2).unwrap();
let preds = fitted.predict(&x1).unwrap();
let correct = preds.iter().zip(y1.iter()).filter(|(p, a)| p == a).count();
assert!(correct >= 3);
}
#[test]
fn test_multinomial_nb_partial_fit_shape_mismatch() {
let (x, y) = make_count_data();
let model = MultinomialNB::<f64>::new();
let mut fitted = model.fit(&x, &y).unwrap();
let x_bad = Array2::from_shape_vec((2, 5), vec![1.0; 10]).unwrap();
let y_bad = array![0usize, 1];
assert!(fitted.partial_fit(&x_bad, &y_bad).is_err());
}
#[test]
fn test_multinomial_nb_class_prior() {
let (x, y) = make_count_data();
let model = MultinomialNB::<f64>::new().with_class_prior(vec![0.9, 0.1]);
let fitted = model.fit(&x, &y).unwrap();
let preds = fitted.predict(&x).unwrap();
assert_eq!(preds.len(), 6);
}
#[test]
fn test_multinomial_nb_class_prior_wrong_length() {
let (x, y) = make_count_data();
let model = MultinomialNB::<f64>::new().with_class_prior(vec![0.5]);
assert!(model.fit(&x, &y).is_err());
}
}