// irithyll 10.0.0
//
// Streaming ML in Rust -- gradient boosted trees, neural architectures
// (TTT/KAN/MoE/Mamba/SNN), AutoML, kernel methods, and composable pipelines.
//! Quantile (pinball) loss for conditional quantile regression.
//!
//! L(y, f) = tau * (y - f)     if y >= f   (under-prediction)
//!         = (1-tau) * (f - y)  if y < f    (over-prediction)
//!
//! The pinball loss has zero Hessian everywhere (it is piecewise linear),
//! which breaks the standard Newton leaf formula w = -G/(H+lambda).
//! We use the pseudo-Huber trick: set hessian = 1.0, so leaf weights
//! become w = -G_sum / (count + lambda), effectively gradient descent.
//! This converges to the empirical conditional quantile as more samples
//! arrive at each leaf -- the same approach used by LightGBM and XGBoost.

pub use super::{Loss, LossType};
pub use irithyll_core::loss::quantile::*;

#[cfg(test)]
mod tests {
    use super::*;

    /// Absolute tolerance for comparisons that should be exact up to
    /// floating-point rounding.
    const TOL: f64 = 1e-12;

    /// True when `a` and `b` agree to within `TOL`.
    fn close(a: f64, b: f64) -> bool {
        (a - b).abs() < TOL
    }

    #[test]
    fn test_n_outputs() {
        // Quantile regression predicts a single scalar per sample.
        let loss = QuantileLoss::new(0.5);
        assert_eq!(loss.n_outputs(), 1);
    }

    #[test]
    fn test_gradient_over_predict() {
        // Over-prediction (pred > target): gradient = 1 - tau = 0.1.
        let loss = QuantileLoss::new(0.9);
        for &(target, pred) in &[(1.0, 3.0), (0.0, 100.0)] {
            assert!(close(loss.gradient(target, pred), 0.1));
        }
    }

    #[test]
    fn test_gradient_under_predict() {
        // Under-prediction (pred < target): gradient = -tau = -0.9.
        let loss = QuantileLoss::new(0.9);
        for &(target, pred) in &[(3.0, 1.0), (100.0, 0.0)] {
            assert!(close(loss.gradient(target, pred), -0.9));
        }
    }

    #[test]
    fn test_gradient_at_exact() {
        // At the kink (pred == target) the tested convention returns the
        // over-prediction subgradient, 1 - tau = 0.5.
        let loss = QuantileLoss::new(0.5);
        assert!(close(loss.gradient(5.0, 5.0), 0.5));
    }

    #[test]
    fn test_hessian_is_one() {
        // The pinball loss is piecewise linear (true Hessian is zero), so the
        // implementation substitutes a constant 1.0 everywhere.
        let loss = QuantileLoss::new(0.9);
        for &(target, pred) in &[(0.0, 0.0), (100.0, -50.0), (-7.0, 42.0)] {
            assert!(close(loss.hessian(target, pred), 1.0));
        }
    }

    #[test]
    fn test_loss_pinball() {
        let loss = QuantileLoss::new(0.9);
        // Under-prediction (target > pred): tau * (target - pred).
        assert!(close(loss.loss(5.0, 3.0), 0.9 * 2.0));
        // Over-prediction (target < pred): (1 - tau) * (pred - target).
        assert!(close(loss.loss(3.0, 5.0), 0.1 * 2.0));
        // An exact hit costs nothing.
        assert!(close(loss.loss(4.0, 4.0), 0.0));
    }

    #[test]
    fn test_median_loss_is_half_mae() {
        // tau = 0.5 makes the pinball loss symmetric: half the absolute error.
        let loss = QuantileLoss::new(0.5);
        assert!(close(loss.loss(5.0, 3.0), 1.0)); // 0.5 * |5 - 3|
        assert!(close(loss.loss(3.0, 5.0), 1.0)); // 0.5 * |3 - 5|
    }

    #[test]
    fn test_predict_transform_is_identity() {
        // Quantile predictions live in the target's own units; no link function.
        let loss = QuantileLoss::new(0.5);
        assert!(close(loss.predict_transform(42.0), 42.0));
    }

    #[test]
    fn test_initial_prediction_is_quantile() {
        let targets = [1.0, 2.0, 3.0, 4.0, 5.0];

        // Median: sorted index floor(0.5 * 5) = 2 => 3.0.
        assert!(close(QuantileLoss::new(0.5).initial_prediction(&targets), 3.0));

        // 90th percentile: sorted index floor(0.9 * 5) = 4 => 5.0.
        assert!(close(QuantileLoss::new(0.9).initial_prediction(&targets), 5.0));
    }

    #[test]
    fn test_initial_prediction_empty() {
        // With no targets the base score defaults to 0.0.
        let loss = QuantileLoss::new(0.5);
        assert!(close(loss.initial_prediction(&[]), 0.0));
    }

    #[test]
    fn test_loss_type_returns_some() {
        // The loss advertises its identity (with tau) via `loss_type`.
        let loss = QuantileLoss::new(0.75);
        match loss.loss_type() {
            Some(LossType::Quantile { tau }) => assert!(close(tau, 0.75)),
            other => panic!("expected Quantile, got {other:?}"),
        }
    }

    #[test]
    fn test_gradient_is_subderivative_of_loss() {
        // Central finite differences should match the analytic gradient
        // anywhere away from the kink at pred == target.
        let loss = QuantileLoss::new(0.75);
        let target = 2.5;
        let step = 1e-6;

        let check = |pred: f64, label: &str| {
            let numeric =
                (loss.loss(target, pred + step) - loss.loss(target, pred - step)) / (2.0 * step);
            let analytic = loss.gradient(target, pred);
            assert!(
                (numeric - analytic).abs() < 1e-4,
                "{}: numerical={}, analytical={}",
                label,
                numeric,
                analytic
            );
        };

        check(4.0, "over"); // over-prediction side
        check(1.0, "under"); // under-prediction side
    }

    #[test]
    #[should_panic(expected = "tau must be in (0, 1)")]
    fn test_invalid_tau_zero() {
        // The open-interval boundary tau = 0.0 is rejected at construction.
        QuantileLoss::new(0.0);
    }

    #[test]
    #[should_panic(expected = "tau must be in (0, 1)")]
    fn test_invalid_tau_one() {
        // The open-interval boundary tau = 1.0 is rejected at construction.
        QuantileLoss::new(1.0);
    }
}