use ferrolearn_core::error::FerroError;
use ferrolearn_core::introspection::HasCoefficients;
use ferrolearn_core::pipeline::{FittedPipelineEstimator, PipelineEstimator};
use ferrolearn_core::traits::{Fit, Predict};
use ndarray::{Array1, Array2, Axis, ScalarOperand};
use num_traits::{Float, FromPrimitive};
/// Automatic Relevance Determination (ARD) Bayesian linear regression.
///
/// Learns one precision `lambda[i]` per feature; coefficients whose
/// precision exceeds `threshold_lambda` after fitting are pruned to
/// exactly zero, which yields sparse solutions.
#[derive(Debug, Clone)]
pub struct ARDRegression<F> {
    /// Maximum number of evidence-maximization iterations.
    pub max_iter: usize,
    /// Relative-change convergence tolerance on `alpha` and `lambda`.
    pub tol: F,
    /// Hyperprior parameter added (x2) to the numerator of the `alpha` update.
    pub alpha_1: F,
    /// Hyperprior parameter added (x2) to the denominator of the `alpha` update.
    pub alpha_2: F,
    /// Hyperprior parameter added (x2) to the numerator of each `lambda` update.
    pub lambda_1: F,
    /// Hyperprior parameter added (x2) to the denominator of each `lambda` update.
    pub lambda_2: F,
    /// Precision above which a fitted coefficient is zeroed out.
    pub threshold_lambda: F,
    /// Whether to center the data and fit an intercept term.
    pub fit_intercept: bool,
}
impl<F: Float + FromPrimitive> ARDRegression<F> {
    /// Creates a model with the conventional ARD defaults:
    /// 300 iterations, tolerance `1e-3`, all four Gamma hyperprior
    /// parameters `1e-6`, pruning threshold `1e4`, intercept enabled.
    #[must_use]
    pub fn new() -> Self {
        // All four hyperprior parameters share the same default.
        let tiny = F::from(1e-6).unwrap();
        Self {
            max_iter: 300,
            tol: F::from(1e-3).unwrap(),
            alpha_1: tiny,
            alpha_2: tiny,
            lambda_1: tiny,
            lambda_2: tiny,
            threshold_lambda: F::from(1e4).unwrap(),
            fit_intercept: true,
        }
    }

    /// Sets the maximum number of iterations.
    #[must_use]
    pub fn with_max_iter(self, max_iter: usize) -> Self {
        Self { max_iter, ..self }
    }

    /// Sets the convergence tolerance.
    #[must_use]
    pub fn with_tol(self, tol: F) -> Self {
        Self { tol, ..self }
    }

    /// Sets the `alpha_1` hyperprior parameter.
    #[must_use]
    pub fn with_alpha_1(self, alpha_1: F) -> Self {
        Self { alpha_1, ..self }
    }

    /// Sets the `alpha_2` hyperprior parameter.
    #[must_use]
    pub fn with_alpha_2(self, alpha_2: F) -> Self {
        Self { alpha_2, ..self }
    }

    /// Sets the `lambda_1` hyperprior parameter.
    #[must_use]
    pub fn with_lambda_1(self, lambda_1: F) -> Self {
        Self { lambda_1, ..self }
    }

    /// Sets the `lambda_2` hyperprior parameter.
    #[must_use]
    pub fn with_lambda_2(self, lambda_2: F) -> Self {
        Self { lambda_2, ..self }
    }

    /// Sets the precision threshold above which coefficients are pruned.
    #[must_use]
    pub fn with_threshold_lambda(self, threshold_lambda: F) -> Self {
        Self { threshold_lambda, ..self }
    }

    /// Enables or disables intercept fitting.
    #[must_use]
    pub fn with_fit_intercept(self, fit_intercept: bool) -> Self {
        Self { fit_intercept, ..self }
    }
}
impl<F: Float + FromPrimitive> Default for ARDRegression<F> {
fn default() -> Self {
Self::new()
}
}
/// The result of fitting an `ARDRegression` model.
#[derive(Debug, Clone)]
pub struct FittedARDRegression<F> {
    // Learned weights; entries pruned by `threshold_lambda` are exactly zero.
    coefficients: Array1<F>,
    // Offset term (zero when fitted with `fit_intercept = false`).
    intercept: F,
    // Estimated noise precision.
    alpha: F,
    // Per-feature weight precisions.
    lambda: Array1<F>,
    // Diagonal of the posterior covariance of the weights.
    sigma: Array1<F>,
}
impl<F: Float> FittedARDRegression<F> {
    /// Estimated noise precision `alpha`.
    #[must_use]
    pub fn alpha(&self) -> F {
        self.alpha
    }
    /// Per-feature weight precisions `lambda`.
    #[must_use]
    pub fn lambda(&self) -> &Array1<F> {
        &self.lambda
    }
    /// Diagonal of the posterior covariance of the weights.
    #[must_use]
    pub fn sigma(&self) -> &Array1<F> {
        &self.sigma
    }
}
/// Solves the regularized normal equations
/// `(alpha * X^T X + diag(lambda)) w = alpha * X^T y`
/// via an unpivoted Cholesky factorization, and also returns the diagonal
/// of the posterior covariance `Sigma = A^{-1}`.
///
/// Returns `(w, diag(Sigma))`.
///
/// # Errors
/// `NumericalInstability` when the system matrix is not positive definite.
fn ard_solve<F: Float + FromPrimitive + 'static>(
    x: &Array2<F>,
    y: &Array1<F>,
    alpha: F,
    lambda: &Array1<F>,
) -> Result<(Array1<F>, Array1<F>), FerroError> {
    let n = x.ncols();
    // Build A = alpha * X^T X + diag(lambda).
    let xt = x.t();
    let mut a = xt.dot(x);
    for i in 0..n {
        for j in 0..n {
            a[[i, j]] = a[[i, j]] * alpha;
        }
        a[[i, i]] = a[[i, i]] + lambda[i];
    }
    // Right-hand side b = alpha * X^T y.
    let b: Array1<F> = xt.dot(y).mapv(|v| v * alpha);
    // Cholesky factorization A = L * L^T (L lower triangular).
    let mut l = Array2::<F>::zeros((n, n));
    for i in 0..n {
        for j in 0..=i {
            let mut s = a[[i, j]];
            for k in 0..j {
                s = s - l[[i, k]] * l[[j, k]];
            }
            if i == j {
                if s <= F::zero() {
                    return Err(FerroError::NumericalInstability {
                        message: "ARD: matrix not positive definite".into(),
                    });
                }
                l[[i, j]] = s.sqrt();
            } else {
                l[[i, j]] = s / l[[j, j]];
            }
        }
    }
    // Forward solve L z = b.
    let mut z = Array1::<F>::zeros(n);
    for i in 0..n {
        let mut s = b[i];
        for j in 0..i {
            s = s - l[[i, j]] * z[j];
        }
        z[i] = s / l[[i, i]];
    }
    // Backward solve L^T w = z.
    let mut w = Array1::<F>::zeros(n);
    for i in (0..n).rev() {
        let mut s = z[i];
        for j in (i + 1)..n {
            s = s - l[[j, i]] * w[j];
        }
        w[i] = s / l[[i, i]];
    }
    // diag(Sigma): since Sigma = A^{-1} = L^{-T} L^{-1},
    //   Sigma[col][col] = sum_k (L^{-1})[k][col]^2 = ||column `col` of L^{-1}||^2.
    // Each column of L^{-1} is obtained by forward-solving L z = e_col.
    // BUGFIX: the previous code accumulated z_inv[i]^2 into sigma_diag[i]
    // across columns, which sums squared ROWS of L^{-1} — that is the
    // diagonal of (L^T L)^{-1}, not of A^{-1}. We must instead assign the
    // squared norm of each solved column to sigma_diag[col].
    let mut sigma_diag = Array1::<F>::zeros(n);
    for col in 0..n {
        let mut z_inv = Array1::<F>::zeros(n);
        z_inv[col] = F::one() / l[[col, col]];
        for i in (col + 1)..n {
            let mut s = F::zero();
            for k in col..i {
                s = s + l[[i, k]] * z_inv[k];
            }
            z_inv[i] = -s / l[[i, i]];
        }
        // L^{-1} is lower triangular, so rows above `col` are zero.
        let mut col_norm_sq = F::zero();
        for i in col..n {
            col_norm_sq = col_norm_sq + z_inv[i] * z_inv[i];
        }
        sigma_diag[col] = col_norm_sq;
    }
    Ok((w, sigma_diag))
}
impl<F: Float + Send + Sync + ScalarOperand + FromPrimitive + 'static> Fit<Array2<F>, Array1<F>>
    for ARDRegression<F>
{
    type Fitted = FittedARDRegression<F>;
    type Error = FerroError;

    /// Fits the ARD model by evidence maximization: alternates between
    /// solving for the posterior weights/covariance (given the current
    /// precisions) and re-estimating `alpha`/`lambda`, until both relative
    /// changes drop below `tol` or `max_iter` is reached. Coefficients whose
    /// precision exceeds `threshold_lambda` are then pruned to zero.
    ///
    /// # Errors
    /// - `ShapeMismatch` if `y.len() != x.nrows()`.
    /// - `InsufficientSamples` if there are fewer than 2 samples.
    /// - `NumericalInstability` from mean computation or the inner solver.
    fn fit(
        &self,
        x: &Array2<F>,
        y: &Array1<F>,
    ) -> Result<FittedARDRegression<F>, FerroError> {
        let (n_samples, n_features) = x.dim();
        if n_samples != y.len() {
            return Err(FerroError::ShapeMismatch {
                expected: vec![n_samples],
                actual: vec![y.len()],
                context: "y length must match number of samples in X".into(),
            });
        }
        if n_samples < 2 {
            return Err(FerroError::InsufficientSamples {
                required: 2,
                actual: n_samples,
                context: "ARDRegression requires at least 2 samples".into(),
            });
        }
        let n_f = F::from(n_samples).unwrap();
        // Center X and y when fitting an intercept; the intercept is
        // recovered from the stored means after convergence.
        let (x_work, y_work, x_mean, y_mean) = if self.fit_intercept {
            let x_mean = x
                .mean_axis(Axis(0))
                .ok_or_else(|| FerroError::NumericalInstability {
                    message: "failed to compute column means".into(),
                })?;
            let y_mean = y.mean().ok_or_else(|| FerroError::NumericalInstability {
                message: "failed to compute target mean".into(),
            })?;
            let x_c = x - &x_mean;
            let y_c = y - y_mean;
            (x_c, y_c, Some(x_mean), Some(y_mean))
        } else {
            (x.clone(), y.clone(), None, None)
        };
        // Hyperparameter state: noise precision alpha, per-feature weight
        // precisions lambda; both are clamped into [1e-10, 1e10].
        let mut alpha = F::one();
        let mut lambda = Array1::<F>::from_elem(n_features, F::one());
        let clamp_max = F::from(1e10).unwrap();
        let clamp_min = F::from(1e-10).unwrap();
        // Loop-invariant constants, hoisted out of the iteration loop so the
        // F::from conversions are not repeated every pass.
        let two = F::from(2.0).unwrap();
        let denom_floor = F::from(1e-300).unwrap();
        let rel_eps = F::from(1e-10).unwrap();
        let mut w = Array1::<F>::zeros(n_features);
        let mut sigma_diag = Array1::<F>::ones(n_features);
        for _iter in 0..self.max_iter {
            let alpha_old = alpha;
            let lambda_old = lambda.clone();
            // Posterior mean and covariance diagonal for current precisions.
            let (w_new, sd_new) = ard_solve(&x_work, &y_work, alpha, &lambda)?;
            // gamma[i] = 1 - lambda[i] * Sigma[i][i]: the degree to which
            // weight i is determined by the data rather than the prior.
            let gamma: Array1<F> = Array1::from_shape_fn(n_features, |i| {
                F::one() - lambda[i] * sd_new[i]
            });
            let gamma_sum: F = gamma.iter().fold(F::zero(), |a, &b| a + b);
            let residual = &y_work - x_work.dot(&w_new);
            let sse = residual.dot(&residual);
            // Evidence-maximization updates; denominators floored to avoid
            // division by zero on perfect fits / zero weights.
            let new_alpha = (n_f - gamma_sum + two * self.alpha_1)
                / (sse + two * self.alpha_2).max(denom_floor);
            let mut new_lambda = Array1::<F>::zeros(n_features);
            for i in 0..n_features {
                let wi_sq = w_new[i] * w_new[i];
                new_lambda[i] = (gamma[i] + two * self.lambda_1)
                    / (wi_sq + two * self.lambda_2).max(denom_floor);
            }
            alpha = new_alpha.min(clamp_max).max(clamp_min);
            for i in 0..n_features {
                new_lambda[i] = new_lambda[i].min(clamp_max).max(clamp_min);
            }
            lambda = new_lambda;
            w = w_new;
            sigma_diag = sd_new;
            // Convergence check on the relative change of alpha and of the
            // largest-changing lambda component.
            let delta_alpha = (alpha - alpha_old).abs() / (alpha_old.abs() + rel_eps);
            let mut max_delta_lambda = F::zero();
            for i in 0..n_features {
                let delta =
                    (lambda[i] - lambda_old[i]).abs() / (lambda_old[i].abs() + rel_eps);
                if delta > max_delta_lambda {
                    max_delta_lambda = delta;
                }
            }
            if delta_alpha < self.tol && max_delta_lambda < self.tol {
                break;
            }
        }
        // Prune weights whose precision exceeded the relevance threshold.
        for i in 0..n_features {
            if lambda[i] > self.threshold_lambda {
                w[i] = F::zero();
            }
        }
        // Intercept recovered from the (pruned) weights and stored means.
        let intercept = if let (Some(xm), Some(ym)) = (&x_mean, &y_mean) {
            *ym - xm.dot(&w)
        } else {
            F::zero()
        };
        Ok(FittedARDRegression {
            coefficients: w,
            intercept,
            alpha,
            lambda,
            sigma: sigma_diag,
        })
    }
}
impl<F: Float + Send + Sync + ScalarOperand + 'static> Predict<Array2<F>>
    for FittedARDRegression<F>
{
    type Output = Array1<F>;
    type Error = FerroError;

    /// Predicts targets as `X . w + intercept`.
    ///
    /// # Errors
    /// `ShapeMismatch` if `x` has a different number of columns than the
    /// model was fitted with.
    fn predict(&self, x: &Array2<F>) -> Result<Array1<F>, FerroError> {
        let expected = self.coefficients.len();
        let actual = x.ncols();
        if actual != expected {
            return Err(FerroError::ShapeMismatch {
                expected: vec![expected],
                actual: vec![actual],
                context: "number of features must match fitted model".into(),
            });
        }
        Ok(x.dot(&self.coefficients) + self.intercept)
    }
}
impl<F: Float + Send + Sync + ScalarOperand + 'static> HasCoefficients<F>
    for FittedARDRegression<F>
{
    // Exposes the fitted weights for introspection.
    fn coefficients(&self) -> &Array1<F> {
        &self.coefficients
    }
    // Exposes the fitted intercept (zero when fit_intercept was false).
    fn intercept(&self) -> F {
        self.intercept
    }
}
impl<F> PipelineEstimator<F> for ARDRegression<F>
where
    F: Float + FromPrimitive + ScalarOperand + Send + Sync + 'static,
{
    /// Type-erased fit for pipeline composition; delegates to [`Fit::fit`]
    /// and boxes the fitted model.
    fn fit_pipeline(
        &self,
        x: &Array2<F>,
        y: &Array1<F>,
    ) -> Result<Box<dyn FittedPipelineEstimator<F>>, FerroError> {
        self.fit(x, y)
            .map(|fitted| Box::new(fitted) as Box<dyn FittedPipelineEstimator<F>>)
    }
}
impl<F> FittedPipelineEstimator<F> for FittedARDRegression<F>
where
    F: Float + ScalarOperand + Send + Sync + 'static,
{
    // Thin delegation to the `Predict` implementation.
    fn predict_pipeline(&self, x: &Array2<F>) -> Result<Array1<F>, FerroError> {
        self.predict(x)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use ndarray::array;

    // Defaults from `new()` match the documented values.
    #[test]
    fn test_default_constructor() {
        let m = ARDRegression::<f64>::new();
        assert_eq!(m.max_iter, 300);
        assert!(m.fit_intercept);
        assert_relative_eq!(m.alpha_1, 1e-6);
    }

    // Every builder setter stores its value.
    #[test]
    fn test_builder_setters() {
        let m = ARDRegression::<f64>::new()
            .with_max_iter(50)
            .with_tol(1e-6)
            .with_alpha_1(1e-3)
            .with_alpha_2(1e-3)
            .with_lambda_1(1e-3)
            .with_lambda_2(1e-3)
            .with_threshold_lambda(1e5)
            .with_fit_intercept(false);
        assert_eq!(m.max_iter, 50);
        assert!(!m.fit_intercept);
        assert_relative_eq!(m.threshold_lambda, 1e5);
    }

    // y length != number of samples is rejected.
    #[test]
    fn test_shape_mismatch() {
        let x = Array2::from_shape_vec((3, 1), vec![1.0, 2.0, 3.0]).unwrap();
        let y = array![1.0, 2.0];
        let result = ARDRegression::<f64>::new().fit(&x, &y);
        assert!(result.is_err());
    }

    // Fewer than 2 samples is rejected.
    #[test]
    fn test_insufficient_samples() {
        let x = Array2::from_shape_vec((1, 1), vec![1.0]).unwrap();
        let y = array![1.0];
        let result = ARDRegression::<f64>::new().fit(&x, &y);
        assert!(result.is_err());
    }

    // Recovers slope/intercept of y = 2x + 1 within loose tolerances.
    #[test]
    fn test_fits_linear_data() {
        let x = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
        let y = array![3.0, 5.0, 7.0, 9.0, 11.0];
        let fitted = ARDRegression::<f64>::new().fit(&x, &y).unwrap();
        assert_relative_eq!(fitted.coefficients()[0], 2.0, epsilon = 0.5);
        assert_relative_eq!(fitted.intercept(), 1.0, epsilon = 1.5);
    }

    // Estimated noise precision stays positive.
    #[test]
    fn test_alpha_positive() {
        let x = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
        let y = array![2.0, 4.0, 6.0, 8.0, 10.0];
        let fitted = ARDRegression::<f64>::new().fit(&x, &y).unwrap();
        assert!(fitted.alpha() > 0.0);
    }

    // All per-feature precisions stay positive.
    #[test]
    fn test_lambda_positive() {
        let x = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
        let y = array![2.0, 4.0, 6.0, 8.0, 10.0];
        let fitted = ARDRegression::<f64>::new().fit(&x, &y).unwrap();
        for &v in fitted.lambda().iter() {
            assert!(v > 0.0, "lambda must be positive, got {v}");
        }
    }

    // Posterior covariance diagonal is strictly positive.
    #[test]
    fn test_sigma_positive() {
        let x = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
        let y = array![2.0, 4.0, 6.0, 8.0, 10.0];
        let fitted = ARDRegression::<f64>::new().fit(&x, &y).unwrap();
        for &v in fitted.sigma().iter() {
            assert!(v > 0.0, "sigma diagonal must be positive, got {v}");
        }
    }

    // predict returns one value per input row.
    #[test]
    fn test_predict_length() {
        let x = Array2::from_shape_vec((5, 1), vec![1.0, 2.0, 3.0, 4.0, 5.0]).unwrap();
        let y = array![2.0, 4.0, 6.0, 8.0, 10.0];
        let fitted = ARDRegression::<f64>::new().fit(&x, &y).unwrap();
        let preds = fitted.predict(&x).unwrap();
        assert_eq!(preds.len(), 5);
    }

    // predict rejects inputs with the wrong column count.
    #[test]
    fn test_predict_feature_mismatch() {
        let x = Array2::from_shape_vec((3, 2), vec![1.0, 0.0, 2.0, 0.0, 3.0, 0.0]).unwrap();
        let y = array![1.0, 2.0, 3.0];
        let fitted = ARDRegression::<f64>::new().fit(&x, &y).unwrap();
        let x_bad = Array2::from_shape_vec((3, 1), vec![1.0, 2.0, 3.0]).unwrap();
        assert!(fitted.predict(&x_bad).is_err());
    }

    // With fit_intercept disabled the intercept is exactly zero.
    #[test]
    fn test_no_intercept() {
        let x = Array2::from_shape_vec((4, 1), vec![1.0, 2.0, 3.0, 4.0]).unwrap();
        let y = array![2.0, 4.0, 6.0, 8.0];
        let fitted = ARDRegression::<f64>::new()
            .with_fit_intercept(false)
            .fit(&x, &y)
            .unwrap();
        assert_relative_eq!(fitted.intercept(), 0.0, epsilon = 1e-10);
    }

    // Smoke test on collinear features; only checks prediction length.
    #[test]
    fn test_sparsity_on_irrelevant_features() {
        let x = Array2::from_shape_vec(
            (6, 2),
            vec![1.0, 100.0, 2.0, 200.0, 3.0, 300.0, 4.0, 400.0, 5.0, 500.0, 6.0, 600.0],
        )
        .unwrap();
        let y = array![2.0, 4.0, 6.0, 8.0, 10.0, 12.0];
        let fitted = ARDRegression::<f64>::new()
            .with_max_iter(1000)
            .fit(&x, &y)
            .unwrap();
        let preds = fitted.predict(&x).unwrap();
        assert_eq!(preds.len(), 6);
    }

    // HasCoefficients exposes one coefficient per feature.
    #[test]
    fn test_has_coefficients_length() {
        let x = Array2::from_shape_vec(
            (4, 3),
            vec![1.0, 0.0, 0.5, 2.0, 1.0, 1.0, 3.0, 0.0, 1.5, 4.0, 1.0, 2.0],
        )
        .unwrap();
        let y = array![1.0, 2.0, 3.0, 4.0];
        let fitted = ARDRegression::<f64>::new().fit(&x, &y).unwrap();
        assert_eq!(fitted.coefficients().len(), 3);
    }

    // Type-erased pipeline fit/predict round-trip works.
    #[test]
    fn test_pipeline_integration() {
        let x = Array2::from_shape_vec((4, 1), vec![1.0, 2.0, 3.0, 4.0]).unwrap();
        let y = array![3.0, 5.0, 7.0, 9.0];
        let model = ARDRegression::<f64>::new();
        let fitted_pipe = model.fit_pipeline(&x, &y).unwrap();
        let preds = fitted_pipe.predict_pipeline(&x).unwrap();
        assert_eq!(preds.len(), 4);
    }
}