#![allow(clippy::cast_possible_truncation)]
#![allow(clippy::cast_sign_loss)]
use crate::{
basis::Basis, display::PolynomialDisplay, score::ModelScoreProvider, statistics, value::Value,
};
/// Marker type that scores model fits with the Akaike information criterion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Aic;
impl<B: Basis<T> + PolynomialDisplay<T>, T: Value> ModelScoreProvider<B, T> for Aic {
    /// Two AIC units is the conventional threshold for a meaningful
    /// difference between models.
    fn minimum_significant_distance(&self) -> Option<usize> {
        Some(2)
    }

    /// Akaike information criterion, up to an additive constant:
    /// `n * ln(MSE) + 2k`. When the sample is small relative to the
    /// parameter count (`n / k < 4`), the AICc correction term
    /// `2k(k + 1) / (n - k - 1)` is added. Lower scores are better.
    fn score(
        &self,
        _: &crate::CurveFit<B, T>,
        y: impl Iterator<Item = T>,
        y_fit: impl Iterator<Item = T>,
        k: T,
    ) -> T {
        // `robust_mse_with_n` yields the (robust) mean squared error of the
        // residuals and the observation count; the original misleadingly
        // called this value `log_likelihood`.
        let (mse, n) = statistics::robust_mse_with_n(y, y_fit);
        // With no observations the criterion is undefined.
        if n == T::zero() {
            return T::nan();
        }
        // Clamp away from zero so `ln()` of a perfect fit cannot yield -inf.
        let mse = nalgebra::RealField::max(mse, T::epsilon());
        let mut aic = n * mse.ln() + T::two() * k;
        // Small-sample correction (AICc); the second condition keeps the
        // denominator `n - k - 1` strictly positive.
        if n / k < T::two() + T::two() && n > k + T::one() {
            aic += T::two() * k * (k + T::one()) / (n - k - T::one());
        }
        aic
    }
}
/// Marker type that scores model fits with the Bayesian information criterion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Bic;
impl<B: Basis<T> + PolynomialDisplay<T>, T: Value> ModelScoreProvider<B, T> for Bic {
    /// Two BIC units is the conventional threshold for a meaningful
    /// difference between models.
    fn minimum_significant_distance(&self) -> Option<usize> {
        Some(2)
    }

    /// Bayesian information criterion, up to an additive constant:
    /// `n * ln(MSE) + k * ln(n)`. Lower scores are better.
    fn score(
        &self,
        _: &crate::CurveFit<B, T>,
        y: impl Iterator<Item = T>,
        y_fit: impl Iterator<Item = T>,
        k: T,
    ) -> T {
        // `robust_mse_with_n` yields the (robust) mean squared error of the
        // residuals and the observation count; the original misleadingly
        // called this value `log_likelihood`.
        let (mse, n) = statistics::robust_mse_with_n(y, y_fit);
        // With no observations the criterion is undefined.
        if n == T::zero() {
            return T::nan();
        }
        // Clamp away from zero so `ln()` of a perfect fit cannot yield -inf.
        let mse = nalgebra::RealField::max(mse, T::epsilon());
        n * mse.ln() + k * n.ln()
    }
}
pub struct RootMeanSquaredError;
impl<B: Basis<T> + PolynomialDisplay<T>, T: Value> ModelScoreProvider<B, T>
    for RootMeanSquaredError
{
    /// RMSE differences carry no conventional significance threshold.
    fn minimum_significant_distance(&self) -> Option<usize> {
        None
    }

    /// Scores the fit by the root mean squared error of the residuals.
    /// Neither the fitted model nor the parameter count is consulted.
    fn score(
        &self,
        _model: &crate::CurveFit<B, T>,
        y: impl Iterator<Item = T>,
        y_fit: impl Iterator<Item = T>,
        _k: T,
    ) -> T {
        statistics::root_mean_squared_error(y, y_fit)
    }
}
pub struct MeanAbsoluteError;
impl<B: Basis<T> + PolynomialDisplay<T>, T: Value> ModelScoreProvider<B, T> for MeanAbsoluteError {
    /// MAE differences carry no conventional significance threshold.
    fn minimum_significant_distance(&self) -> Option<usize> {
        None
    }

    /// Scores the fit by the mean absolute error of the residuals.
    /// Neither the fitted model nor the parameter count is consulted.
    fn score(
        &self,
        _model: &crate::CurveFit<B, T>,
        y: impl Iterator<Item = T>,
        y_fit: impl Iterator<Item = T>,
        _k: T,
    ) -> T {
        statistics::mean_absolute_error(y, y_fit)
    }
}
/// Runtime-selectable scoring method; dispatches to the corresponding
/// marker-type implementation.
// `Eq` added for consistency with `Aic`/`Bic`: the enum is fieldless, so
// equality is trivially total.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ScoringMethod {
    Aic,
    Bic,
    RootMeanSquaredError,
    MeanAbsoluteError,
}
impl<B: Basis<T> + PolynomialDisplay<T>, T: Value> ModelScoreProvider<B, T> for ScoringMethod {
fn minimum_significant_distance(&self) -> Option<usize> {
match self {
ScoringMethod::Aic => {
<Aic as ModelScoreProvider<B, T>>::minimum_significant_distance(&Aic)
}
ScoringMethod::Bic => {
<Bic as ModelScoreProvider<B, T>>::minimum_significant_distance(&Bic)
}
ScoringMethod::RootMeanSquaredError => {
<RootMeanSquaredError as ModelScoreProvider<B, T>>::minimum_significant_distance(
&RootMeanSquaredError,
)
}
ScoringMethod::MeanAbsoluteError => {
<MeanAbsoluteError as ModelScoreProvider<B, T>>::minimum_significant_distance(
&MeanAbsoluteError,
)
}
}
}
fn score(
&self,
model: &crate::CurveFit<B, T>,
y: impl Iterator<Item = T>,
y_fit: impl Iterator<Item = T>,
k: T,
) -> T {
match self {
ScoringMethod::Aic => Aic.score(model, y, y_fit, k),
ScoringMethod::Bic => Bic.score(model, y, y_fit, k),
ScoringMethod::RootMeanSquaredError => RootMeanSquaredError.score(model, y, y_fit, k),
ScoringMethod::MeanAbsoluteError => MeanAbsoluteError.score(model, y, y_fit, k),
}
}
}
impl From<MeanAbsoluteError> for ScoringMethod {
fn from(_: MeanAbsoluteError) -> Self {
ScoringMethod::MeanAbsoluteError
}
}
impl From<RootMeanSquaredError> for ScoringMethod {
fn from(_: RootMeanSquaredError) -> Self {
ScoringMethod::RootMeanSquaredError
}
}
impl From<Aic> for ScoringMethod {
fn from(_: Aic) -> Self {
ScoringMethod::Aic
}
}
impl From<Bic> for ScoringMethod {
fn from(_: Bic) -> Self {
ScoringMethod::Bic
}
}