use ferrolearn_core::error::FerroError;
use ferrolearn_core::introspection::HasCoefficients;
use ferrolearn_core::pipeline::{FittedPipelineEstimator, PipelineEstimator};
use ferrolearn_core::traits::{Fit, Predict};
use ndarray::{Array1, Array2, Axis, ScalarOperand};
use num_traits::{Float, FromPrimitive};
/// Huber regressor: a linear model robust to outliers.
///
/// Fitted via iteratively reweighted least squares (IRLS), where samples
/// with large residuals are down-weighted instead of dominating the fit.
#[derive(Debug, Clone)]
pub struct HuberRegressor<F> {
    /// Residual threshold separating the quadratic and linear loss regions;
    /// must be strictly greater than 1.0 (validated in `fit`).
    pub epsilon: F,
    /// L2 (ridge) regularization strength; must be non-negative
    /// (validated in `fit`).
    pub alpha: F,
    /// Maximum number of IRLS iterations.
    pub max_iter: usize,
    /// Convergence tolerance on the maximum absolute coefficient change
    /// between consecutive iterations.
    pub tol: F,
    /// When true, X and y are centered before solving and an intercept is
    /// recovered from the means afterwards.
    pub fit_intercept: bool,
}
impl<F: Float + FromPrimitive> HuberRegressor<F> {
    /// Creates a regressor with the default hyperparameters:
    /// `epsilon = 1.35`, `alpha = 1e-4`, `max_iter = 100`, `tol = 1e-5`,
    /// and intercept fitting enabled.
    #[must_use]
    pub fn new() -> Self {
        Self {
            epsilon: F::from(1.35).unwrap(),
            alpha: F::from(1e-4).unwrap(),
            max_iter: 100,
            tol: F::from(1e-5).unwrap(),
            fit_intercept: true,
        }
    }

    /// Returns `self` with the Huber threshold replaced.
    #[must_use]
    pub fn with_epsilon(self, epsilon: F) -> Self {
        Self { epsilon, ..self }
    }

    /// Returns `self` with the L2 regularization strength replaced.
    #[must_use]
    pub fn with_alpha(self, alpha: F) -> Self {
        Self { alpha, ..self }
    }

    /// Returns `self` with the iteration cap replaced.
    #[must_use]
    pub fn with_max_iter(self, max_iter: usize) -> Self {
        Self { max_iter, ..self }
    }

    /// Returns `self` with the convergence tolerance replaced.
    #[must_use]
    pub fn with_tol(self, tol: F) -> Self {
        Self { tol, ..self }
    }

    /// Returns `self` with intercept fitting toggled.
    #[must_use]
    pub fn with_fit_intercept(self, fit_intercept: bool) -> Self {
        Self {
            fit_intercept,
            ..self
        }
    }
}
impl<F: Float + FromPrimitive> Default for HuberRegressor<F> {
    /// Equivalent to [`HuberRegressor::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// A trained Huber regression model produced by fitting a [`HuberRegressor`].
#[derive(Debug, Clone)]
pub struct FittedHuberRegressor<F> {
    /// Learned per-feature coefficients.
    coefficients: Array1<F>,
    /// Learned bias term; zero when fitted without an intercept.
    intercept: F,
}
/// Solves the weighted ridge normal equations `(XᵀWX + αI) β = XᵀWy`.
///
/// Accumulates the weighted Gram matrix and right-hand side explicitly,
/// adds the ridge term to the diagonal, then attempts a Cholesky solve and
/// falls back to Gaussian elimination with partial pivoting if the matrix
/// is not positive definite.
///
/// # Errors
/// Returns `FerroError::NumericalInstability` when both solvers fail.
fn weighted_ridge_solve<F: Float + FromPrimitive>(
    x: &Array2<F>,
    y: &Array1<F>,
    weights: &Array1<F>,
    alpha: F,
) -> Result<Array1<F>, FerroError> {
    // Fix: the sample count was previously bound as `_n_samples`
    // (underscore-prefixed, signalling "unused") while actually driving
    // the accumulation loop below.
    let (n_samples, n_features) = x.dim();
    let mut xtwx = Array2::<F>::zeros((n_features, n_features));
    let mut xtwy = Array1::<F>::zeros(n_features);
    for i in 0..n_samples {
        let wi = weights[i];
        let xi = x.row(i);
        for r in 0..n_features {
            xtwy[r] = xtwy[r] + wi * xi[r] * y[i];
            for c in 0..n_features {
                xtwx[[r, c]] = xtwx[[r, c]] + wi * xi[r] * xi[c];
            }
        }
    }
    // Ridge (L2) term on the diagonal; also nudges the system toward
    // positive definiteness for the Cholesky path.
    for d in 0..n_features {
        xtwx[[d, d]] = xtwx[[d, d]] + alpha;
    }
    cholesky_solve(&xtwx, &xtwy).or_else(|_| gaussian_solve(n_features, &xtwx, &xtwy))
}
/// Solves `A x = b` for a symmetric positive-definite `A` via Cholesky
/// factorization `A = L·Lᵀ` followed by forward and back substitution.
///
/// Only the lower triangle of `A` is read.
///
/// # Errors
/// Returns `FerroError::NumericalInstability` if a non-positive diagonal
/// value is encountered during factorization (i.e. `A` is not positive
/// definite).
fn cholesky_solve<F: Float>(a: &Array2<F>, b: &Array1<F>) -> Result<Array1<F>, FerroError> {
    let n = a.nrows();
    // Factorization: build the lower-triangular L with A = L * L^T.
    let mut l = Array2::<F>::zeros((n, n));
    for i in 0..n {
        for j in 0..=i {
            let mut s = a[[i, j]];
            for k in 0..j {
                s = s - l[[i, k]] * l[[j, k]];
            }
            if i == j {
                // Diagonal entry must be strictly positive to take sqrt.
                if s <= F::zero() {
                    return Err(FerroError::NumericalInstability {
                        message: "Cholesky: matrix not positive definite".into(),
                    });
                }
                l[[i, j]] = s.sqrt();
            } else {
                l[[i, j]] = s / l[[j, j]];
            }
        }
    }
    // Forward substitution: solve L z = b.
    let mut z = Array1::<F>::zeros(n);
    for i in 0..n {
        let mut s = b[i];
        for j in 0..i {
            s = s - l[[i, j]] * z[j];
        }
        z[i] = s / l[[i, i]];
    }
    // Back substitution: solve L^T x = z (note the transposed index l[[j, i]]).
    let mut x_sol = Array1::<F>::zeros(n);
    for i in (0..n).rev() {
        let mut s = z[i];
        for j in (i + 1)..n {
            s = s - l[[j, i]] * x_sol[j];
        }
        x_sol[i] = s / l[[i, i]];
    }
    Ok(x_sol)
}
/// Solves `A x = b` (with `A` being `n × n`) by Gaussian elimination with
/// partial pivoting on an augmented matrix `[A | b]`.
///
/// # Errors
/// Returns `FerroError::NumericalInstability` when a pivot (during
/// elimination or back substitution) falls below the singularity threshold.
fn gaussian_solve<F: Float>(
    n: usize,
    a: &Array2<F>,
    b: &Array1<F>,
) -> Result<Array1<F>, FerroError> {
    // Threshold below which a pivot is treated as zero; falls back to
    // machine epsilon if 1e-12 is not representable in F.
    let tiny = F::from(1e-12).unwrap_or_else(F::epsilon);
    // Build the augmented system [A | b].
    let mut aug = Array2::<F>::zeros((n, n + 1));
    for r in 0..n {
        for c in 0..n {
            aug[[r, c]] = a[[r, c]];
        }
        aug[[r, n]] = b[r];
    }
    // Forward elimination with partial pivoting.
    for k in 0..n {
        // Locate the row with the largest magnitude in column k.
        let mut pivot_row = k;
        for r in (k + 1)..n {
            if aug[[r, k]].abs() > aug[[pivot_row, k]].abs() {
                pivot_row = r;
            }
        }
        if aug[[pivot_row, k]].abs() < tiny {
            return Err(FerroError::NumericalInstability {
                message: "singular matrix in Gaussian elimination".into(),
            });
        }
        // Bring the pivot row into position.
        if pivot_row != k {
            for c in 0..=n {
                aug.swap([k, c], [pivot_row, c]);
            }
        }
        // Eliminate column k from every row below the pivot.
        let pivot = aug[[k, k]];
        for r in (k + 1)..n {
            let factor = aug[[r, k]] / pivot;
            for c in k..=n {
                let pivot_entry = aug[[k, c]];
                aug[[r, c]] = aug[[r, c]] - factor * pivot_entry;
            }
        }
    }
    // Back substitution on the upper-triangular system.
    let mut solution = Array1::<F>::zeros(n);
    for r in (0..n).rev() {
        let mut acc = aug[[r, n]];
        for c in (r + 1)..n {
            acc = acc - aug[[r, c]] * solution[c];
        }
        if aug[[r, r]].abs() < tiny {
            return Err(FerroError::NumericalInstability {
                message: "near-zero pivot in back substitution".into(),
            });
        }
        solution[r] = acc / aug[[r, r]];
    }
    Ok(solution)
}
impl<F: Float + Send + Sync + ScalarOperand + FromPrimitive + 'static> Fit<Array2<F>, Array1<F>>
    for HuberRegressor<F>
{
    type Fitted = FittedHuberRegressor<F>;
    type Error = FerroError;

    /// Fits the Huber regressor via iteratively reweighted least squares.
    ///
    /// Each iteration solves a weighted ridge system, then down-weights
    /// samples whose absolute residual exceeds `epsilon` (weight
    /// `epsilon / |r|`, floored at a small positive value). Iteration stops
    /// when the maximum absolute coefficient change drops below `tol`, or
    /// after `max_iter` iterations.
    ///
    /// # Errors
    /// - `ShapeMismatch` if `y.len()` differs from the row count of `x`.
    /// - `InvalidParameter` if `epsilon <= 1` or `alpha < 0`.
    /// - `InsufficientSamples` if `x` has no rows.
    /// - `NumericalInstability` if the inner linear solve fails.
    fn fit(&self, x: &Array2<F>, y: &Array1<F>) -> Result<FittedHuberRegressor<F>, FerroError> {
        let (n_samples, n_features) = x.dim();
        if n_samples != y.len() {
            return Err(FerroError::ShapeMismatch {
                expected: vec![n_samples],
                actual: vec![y.len()],
                context: "y length must match number of samples in X".into(),
            });
        }
        if self.epsilon <= F::one() {
            return Err(FerroError::InvalidParameter {
                name: "epsilon".into(),
                reason: "must be strictly greater than 1.0".into(),
            });
        }
        if self.alpha < F::zero() {
            return Err(FerroError::InvalidParameter {
                name: "alpha".into(),
                reason: "must be non-negative".into(),
            });
        }
        if n_samples == 0 {
            return Err(FerroError::InsufficientSamples {
                required: 1,
                actual: 0,
                context: "HuberRegressor requires at least one sample".into(),
            });
        }
        // Optionally center X and y so the intercept can be recovered in
        // closed form after the coefficients are estimated.
        let (x_work, y_work, x_mean, y_mean) = if self.fit_intercept {
            let x_mean = x
                .mean_axis(Axis(0))
                .ok_or_else(|| FerroError::NumericalInstability {
                    message: "failed to compute column means".into(),
                })?;
            let y_mean = y.mean().ok_or_else(|| FerroError::NumericalInstability {
                message: "failed to compute target mean".into(),
            })?;
            let x_c = x - &x_mean;
            let y_c = y - y_mean;
            (x_c, y_c, Some(x_mean), Some(y_mean))
        } else {
            (x.clone(), y.clone(), None, None)
        };
        let mut w = Array1::<F>::zeros(n_features);
        let mut weights = Array1::<F>::ones(n_samples);
        let one = F::one();
        // Floor on sample weights, so a huge residual cannot drive a weight
        // to (effectively) zero. Fix: was a hard `unwrap()` that could panic
        // for float types where 1e-10 is not representable; now falls back
        // to machine epsilon, matching the solvers' threshold style.
        let min_weight = F::from(1e-10).unwrap_or_else(F::epsilon);
        for _iter in 0..self.max_iter {
            let w_old = w.clone();
            w = weighted_ridge_solve(&x_work, &y_work, &weights, self.alpha)?;
            let residuals = &y_work - x_work.dot(&w);
            // Huber IRLS weights: 1 inside the quadratic zone (|r| <= epsilon),
            // epsilon/|r| in the linear zone outside it.
            for i in 0..n_samples {
                let abs_r = residuals[i].abs();
                weights[i] = if abs_r <= self.epsilon {
                    one
                } else {
                    (self.epsilon / abs_r).max(min_weight)
                };
            }
            // Converged once no coefficient moved by `tol` or more.
            let max_change = w
                .iter()
                .zip(w_old.iter())
                .map(|(&wn, &wo)| (wn - wo).abs())
                .fold(F::zero(), F::max);
            if max_change < self.tol {
                break;
            }
        }
        // Recover the intercept from the stored means when centering was used;
        // otherwise the model passes through the origin.
        let intercept = if let (Some(xm), Some(ym)) = (&x_mean, &y_mean) {
            *ym - xm.dot(&w)
        } else {
            F::zero()
        };
        Ok(FittedHuberRegressor {
            coefficients: w,
            intercept,
        })
    }
}
impl<F: Float + Send + Sync + ScalarOperand + 'static> Predict<Array2<F>>
    for FittedHuberRegressor<F>
{
    type Output = Array1<F>;
    type Error = FerroError;

    /// Predicts targets as `X · coefficients + intercept`.
    ///
    /// # Errors
    /// Returns `FerroError::ShapeMismatch` when the column count of `x`
    /// differs from the number of fitted coefficients.
    fn predict(&self, x: &Array2<F>) -> Result<Array1<F>, FerroError> {
        let expected = self.coefficients.len();
        let actual = x.ncols();
        if actual != expected {
            return Err(FerroError::ShapeMismatch {
                expected: vec![expected],
                actual: vec![actual],
                context: "number of features must match fitted model".into(),
            });
        }
        Ok(x.dot(&self.coefficients) + self.intercept)
    }
}
impl<F: Float + Send + Sync + ScalarOperand + 'static> HasCoefficients<F>
    for FittedHuberRegressor<F>
{
    /// Returns the learned per-feature coefficients.
    fn coefficients(&self) -> &Array1<F> {
        &self.coefficients
    }
    /// Returns the learned intercept (zero when fitted without one).
    fn intercept(&self) -> F {
        self.intercept
    }
}
impl<F> PipelineEstimator<F> for HuberRegressor<F>
where
    F: Float + FromPrimitive + ScalarOperand + Send + Sync + 'static,
{
    /// Fits the regressor and boxes the fitted model for dynamic
    /// (trait-object) pipeline use.
    fn fit_pipeline(
        &self,
        x: &Array2<F>,
        y: &Array1<F>,
    ) -> Result<Box<dyn FittedPipelineEstimator<F>>, FerroError> {
        Ok(Box::new(self.fit(x, y)?))
    }
}
impl<F> FittedPipelineEstimator<F> for FittedHuberRegressor<F>
where
F: Float + ScalarOperand + Send + Sync + 'static,
{
fn predict_pipeline(&self, x: &Array2<F>) -> Result<Array1<F>, FerroError> {
self.predict(x)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use ndarray::array;

    // Defaults match the values set in `HuberRegressor::new`.
    #[test]
    fn test_default_constructor() {
        let m = HuberRegressor::<f64>::new();
        assert_relative_eq!(m.epsilon, 1.35);
        assert_relative_eq!(m.alpha, 1e-4);
        assert_eq!(m.max_iter, 100);
        assert!(m.fit_intercept);
    }

    // Each builder method replaces exactly one field.
    #[test]
    fn test_builder_setters() {
        let m = HuberRegressor::<f64>::new()
            .with_epsilon(2.0)
            .with_alpha(0.1)
            .with_max_iter(50)
            .with_tol(1e-6)
            .with_fit_intercept(false);
        assert_relative_eq!(m.epsilon, 2.0);
        assert_relative_eq!(m.alpha, 0.1);
        assert_eq!(m.max_iter, 50);
        assert!(!m.fit_intercept);
    }

    // epsilon < 1 is rejected by fit-time validation.
    #[test]
    fn test_epsilon_too_small_error() {
        let x = Array2::from_shape_vec((3, 1), vec![1.0, 2.0, 3.0]).unwrap();
        let y = array![1.0, 2.0, 3.0];
        let result = HuberRegressor::<f64>::new().with_epsilon(0.5).fit(&x, &y);
        assert!(result.is_err());
    }

    // The bound is strict: epsilon == 1.0 is also rejected.
    #[test]
    fn test_epsilon_exactly_one_error() {
        let x = Array2::from_shape_vec((3, 1), vec![1.0, 2.0, 3.0]).unwrap();
        let y = array![1.0, 2.0, 3.0];
        let result = HuberRegressor::<f64>::new().with_epsilon(1.0).fit(&x, &y);
        assert!(result.is_err());
    }

    // Negative regularization strength is rejected.
    #[test]
    fn test_negative_alpha_error() {
        let x = Array2::from_shape_vec((3, 1), vec![1.0, 2.0, 3.0]).unwrap();
        let y = array![1.0, 2.0, 3.0];
        let result = HuberRegressor::<f64>::new().with_alpha(-1.0).fit(&x, &y);
        assert!(result.is_err());
    }

    // y shorter than X's row count triggers a shape error.
    #[test]
    fn test_shape_mismatch_error() {
        let x = Array2::from_shape_vec((3, 1), vec![1.0, 2.0, 3.0]).unwrap();
        let y = array![1.0, 2.0];
        let result = HuberRegressor::<f64>::new().fit(&x, &y);
        assert!(result.is_err());
    }

    // Exact linear data y = 2x + 1 should be recovered (no outliers).
    #[test]
    fn test_fits_clean_linear_data() {
        let x = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
        let y = array![3.0, 5.0, 7.0, 9.0, 11.0];
        let fitted = HuberRegressor::<f64>::new()
            .with_alpha(0.0)
            .fit(&x, &y)
            .unwrap();
        assert_relative_eq!(fitted.coefficients()[0], 2.0, epsilon = 0.1);
        assert_relative_eq!(fitted.intercept(), 1.0, epsilon = 0.5);
    }

    // A single gross outlier should pull the Huber slope far less than
    // it pulls an ordinary-least-squares slope.
    #[test]
    fn test_robust_to_outliers() {
        let x = Array2::from_shape_vec(
            (10, 1),
            vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
        )
        .unwrap();
        let y_clean = array![2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0];
        let y_outlier = array![2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 200.0];
        let fitted_clean = HuberRegressor::<f64>::new()
            .with_alpha(0.0)
            .with_max_iter(200)
            .fit(&x, &y_clean)
            .unwrap();
        let fitted_huber = HuberRegressor::<f64>::new()
            .with_alpha(0.0)
            .with_max_iter(200)
            .fit(&x, &y_outlier)
            .unwrap();
        // Hand-computed simple-regression (OLS) slope on the outlier data:
        // slope = (Σxy − n·x̄·ȳ) / (Σx² − n·x̄²).
        let ols_coef = {
            let n = 10.0_f64;
            let xv: Vec<f64> = (1..=10).map(f64::from).collect();
            let yv = vec![2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 200.0];
            let xmean = xv.iter().sum::<f64>() / n;
            let ymean = yv.iter().sum::<f64>() / n;
            let num: f64 = xv
                .iter()
                .zip(yv.iter())
                .map(|(xi, yi)| xi * yi)
                .sum::<f64>()
                - n * xmean * ymean;
            let den: f64 = xv.iter().map(|xi| xi * xi).sum::<f64>() - n * xmean * xmean;
            num / den
        };
        let huber_coef = fitted_huber.coefficients()[0];
        let clean_coef = fitted_clean.coefficients()[0];
        let huber_err = (huber_coef - clean_coef).abs();
        let ols_err = (ols_coef - clean_coef).abs();
        assert!(
            huber_err < ols_err,
            "Huber error {huber_err:.4} should be less than OLS error {ols_err:.4} \
             (huber coef={huber_coef:.4}, ols coef={ols_coef:.4}, clean coef={clean_coef:.4})"
        );
    }

    // With fit_intercept disabled, the reported intercept is exactly zero.
    #[test]
    fn test_no_intercept() {
        let x = Array2::from_shape_vec((4, 1), vec![1.0, 2.0, 3.0, 4.0]).unwrap();
        let y = array![2.0, 4.0, 6.0, 8.0];
        let fitted = HuberRegressor::<f64>::new()
            .with_alpha(0.0)
            .with_fit_intercept(false)
            .fit(&x, &y)
            .unwrap();
        assert_relative_eq!(fitted.intercept(), 0.0, epsilon = 1e-10);
    }

    // predict returns one value per input row.
    #[test]
    fn test_predict_length() {
        let x = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
        let y = array![2.0, 4.0, 6.0, 8.0, 10.0];
        let fitted = HuberRegressor::<f64>::new().fit(&x, &y).unwrap();
        let preds = fitted.predict(&x).unwrap();
        assert_eq!(preds.len(), 5);
    }

    // Predicting with the wrong number of columns is an error.
    #[test]
    fn test_predict_feature_mismatch() {
        let x = Array2::from_shape_vec((3, 2), vec![1.0, 0.0, 2.0, 0.0, 3.0, 0.0]).unwrap();
        let y = array![1.0, 2.0, 3.0];
        let fitted = HuberRegressor::<f64>::new().fit(&x, &y).unwrap();
        let x_bad = Array2::from_shape_vec((3, 1), vec![1.0, 2.0, 3.0]).unwrap();
        assert!(fitted.predict(&x_bad).is_err());
    }

    // One coefficient per feature column.
    #[test]
    fn test_has_coefficients_length() {
        let x = Array2::from_shape_vec(
            (4, 3),
            vec![1.0, 0.5, 0.2, 2.0, 1.0, 0.4, 3.0, 1.5, 0.6, 4.0, 2.0, 0.8],
        )
        .unwrap();
        let y = array![1.0, 2.0, 3.0, 4.0];
        let fitted = HuberRegressor::<f64>::new().fit(&x, &y).unwrap();
        assert_eq!(fitted.coefficients().len(), 3);
    }

    // A huge epsilon keeps every sample in the quadratic zone, so the fit
    // should coincide with ordinary least squares.
    #[test]
    fn test_large_epsilon_approaches_ols() {
        let x = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
        let y = array![3.0, 5.0, 7.0, 9.0, 11.0];
        let fitted = HuberRegressor::<f64>::new()
            .with_epsilon(1000.0)
            .with_alpha(0.0)
            .fit(&x, &y)
            .unwrap();
        assert_relative_eq!(fitted.coefficients()[0], 2.0, epsilon = 0.1);
        assert_relative_eq!(fitted.intercept(), 1.0, epsilon = 0.5);
    }

    // Stronger L2 regularization should not grow the coefficient magnitude.
    #[test]
    fn test_l2_regularization_shrinks_coefficients() {
        let x = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
        let y = array![3.0, 5.0, 7.0, 9.0, 11.0];
        let low = HuberRegressor::<f64>::new()
            .with_alpha(0.0001)
            .fit(&x, &y)
            .unwrap();
        let high = HuberRegressor::<f64>::new()
            .with_alpha(100.0)
            .fit(&x, &y)
            .unwrap();
        assert!(
            high.coefficients()[0].abs() <= low.coefficients()[0].abs(),
            "higher alpha should shrink more"
        );
    }

    // The boxed pipeline path (fit_pipeline/predict_pipeline) round-trips.
    #[test]
    fn test_pipeline_integration() {
        let x = Array2::from_shape_vec((4, 1), vec![1.0, 2.0, 3.0, 4.0]).unwrap();
        let y = array![3.0, 5.0, 7.0, 9.0];
        let model = HuberRegressor::<f64>::new();
        let fitted_pipe = model.fit_pipeline(&x, &y).unwrap();
        let preds = fitted_pipe.predict_pipeline(&x).unwrap();
        assert_eq!(preds.len(), 4);
    }

    // Smoke test with more than one feature.
    #[test]
    fn test_multivariate() {
        let x =
            Array2::from_shape_vec((4, 2), vec![1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 0.5]).unwrap();
        let y = array![1.0, 2.0, 3.0, 3.0];
        let fitted = HuberRegressor::<f64>::new()
            .with_alpha(0.0)
            .fit(&x, &y)
            .unwrap();
        let preds = fitted.predict(&x).unwrap();
        assert_eq!(preds.len(), 4);
    }
}