linreg_core/regularized/ridge.rs

//! Ridge regression (L2-regularized linear regression).
//!
//! This module provides a ridge regression implementation using the augmented QR
//! approach, which is numerically stable and avoids forming X^T X explicitly.
//!
//! # Ridge Regression Objective
//!
//! Ridge regression solves:
//!
//! ```text
//! minimize over (β₀, β):
//!
//!     (1/(2n)) * Σᵢ (yᵢ - β₀ - xᵢᵀβ)² + (λ/2) * ||β||₂²
//! ```
//!
//! The intercept `β₀` is **not penalized**.
//!
//! # Solution Method
//!
//! We use the augmented least-squares approach:
//!
//! ```text
//! minimize || [y; 0] - [X; √λ*I] * β ||²
//! ```
//!
//! This transforms the ridge problem into a standard least squares problem
//! that can be solved with QR decomposition.
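//!
//! Writing out the normal equations of the augmented problem shows why it is
//! equivalent to ridge regression (sketch, ignoring the unpenalized intercept):
//!
//! ```text
//! [X; √λ*I]ᵀ [X; √λ*I] β = [X; √λ*I]ᵀ [y; 0]
//!        (XᵀX + λI) β = Xᵀy
//! ```
//!
//! which is exactly the ridge normal-equations system, solved here via QR
//! rather than by forming XᵀX.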

use crate::error::{Error, Result};
use crate::linalg::Matrix;
use crate::regularized::preprocess::{
    predict, standardize_xy, unstandardize_coefficients, StandardizeOptions,
};

#[cfg(feature = "wasm")]
use serde::Serialize;

/// Options for ridge regression fitting.
///
/// # Fields
///
/// * `lambda` - Regularization strength; must be >= 0 (default: 1.0)
/// * `intercept` - Whether to include an intercept term (default: true)
/// * `standardize` - Whether to standardize predictors (default: true)
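///
/// # Example
///
/// A minimal construction sketch using the `Default` implementation and
/// struct-update syntax to override only `lambda`:
///
/// ```rust,no_run
/// use linreg_core::regularized::ridge::RidgeFitOptions;
///
/// // Keep the defaults (intercept = true, standardize = true), change lambda.
/// let options = RidgeFitOptions {
///     lambda: 0.5,
///     ..Default::default()
/// };
/// assert!(options.intercept && options.standardize);
/// ```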
#[derive(Clone, Debug)]
pub struct RidgeFitOptions {
    /// Regularization strength (must be >= 0)
    pub lambda: f64,
    /// Whether to include an intercept
    pub intercept: bool,
    /// Whether to standardize predictors
    pub standardize: bool,
}

impl Default for RidgeFitOptions {
    fn default() -> Self {
        RidgeFitOptions {
            lambda: 1.0,
            intercept: true,
            standardize: true,
        }
    }
}

/// Result of a ridge regression fit.
///
/// # Fields
///
/// * `lambda` - The lambda value used for fitting
/// * `intercept` - Intercept coefficient (on original scale)
/// * `coefficients` - Slope coefficients (on original scale)
/// * `fitted_values` - In-sample predictions
/// * `residuals` - Residuals (y - fitted_values)
/// * `df` - Effective degrees of freedom (trace of H = X(X'X + λI)^(-1)X')
/// * `r_squared` - R² (coefficient of determination)
/// * `adj_r_squared` - Adjusted R² (using effective df)
/// * `mse` - Mean squared error
/// * `rmse` - Root mean squared error
/// * `mae` - Mean absolute error
#[derive(Clone, Debug)]
#[cfg_attr(feature = "wasm", derive(Serialize))]
pub struct RidgeFit {
    /// Lambda value used for fitting
    pub lambda: f64,
    /// Intercept on original scale
    pub intercept: f64,
    /// Slope coefficients on original scale
    pub coefficients: Vec<f64>,
    /// Fitted values
    pub fitted_values: Vec<f64>,
    /// Residuals
    pub residuals: Vec<f64>,
    /// Effective degrees of freedom
    pub df: f64,
    /// R² (coefficient of determination)
    pub r_squared: f64,
    /// Adjusted R² (penalized for effective df)
    pub adj_r_squared: f64,
    /// Mean squared error
    pub mse: f64,
    /// Root mean squared error
    pub rmse: f64,
    /// Mean absolute error
    pub mae: f64,
}

/// Fits ridge regression for a single lambda value.
///
/// # Arguments
///
/// * `x` - Design matrix (n × p). Should include intercept column if `intercept=true`.
/// * `y` - Response vector (n elements)
/// * `options` - Ridge fitting options
///
/// # Returns
///
/// A [`RidgeFit`] containing the fit results.
///
/// # Errors
///
/// Returns an error if:
/// - `lambda < 0`
/// - Dimensions don't match
/// - Matrix is numerically singular
///
/// # Algorithm
///
/// Uses the augmented QR approach:
/// 1. Standardize X and center y (if requested)
/// 2. Build augmented system:
///    ```text
///    X_aug = [X_std; sqrt(lambda) * I_p]
///    y_aug = [y_centered; 0_p]
///    ```
/// 3. Solve using QR decomposition
/// 4. Unstandardize coefficients
///
/// # Example
///
/// ```rust,no_run
/// use linreg_core::linalg::Matrix;
/// use linreg_core::regularized::ridge::{ridge_fit, RidgeFitOptions};
///
/// let x = Matrix::new(3, 2, vec![
///     1.0, 2.0,
///     1.0, 3.0,
///     1.0, 4.0,
/// ]);
/// let y = vec![3.0, 5.0, 7.0];
///
/// let options = RidgeFitOptions {
///     lambda: 1.0,
///     intercept: true,
///     standardize: true,
/// };
///
/// let fit = ridge_fit(&x, &y, &options).unwrap();
/// println!("Intercept: {}", fit.intercept);
/// println!("Coefficients: {:?}", fit.coefficients);
/// ```
pub fn ridge_fit(x: &Matrix, y: &[f64], options: &RidgeFitOptions) -> Result<RidgeFit> {
    if options.lambda < 0.0 {
        return Err(Error::InvalidInput(
            "Lambda must be non-negative for ridge regression".to_string(),
        ));
    }

    let n = x.rows;
    let p = x.cols;

    if y.len() != n {
        return Err(Error::DimensionMismatch(
            format!("Length of y ({}) must match number of rows in X ({})", y.len(), n)
        ));
    }

    // Handle zero lambda: just do OLS
    if options.lambda == 0.0 {
        return ridge_ols_fit(x, y, options);
    }

    // Standardize X and center y
    let std_options = StandardizeOptions {
        intercept: options.intercept,
        standardize_x: options.standardize,
        standardize_y: false, // Don't standardize y for ridge
    };

    let (x_std, y_centered, std_info) = standardize_xy(x, y, &std_options);

    // Build augmented system: [X; sqrt(lambda)*I] * beta = [y; 0]
    // For the intercept column (if present), we don't add a penalty
    let sqrt_lambda = options.lambda.sqrt();
    let intercept_col = if options.intercept { 1 } else { 0 };

    // Number of penalized coefficients (excluding intercept)
    let p_pen = p - intercept_col;

    // Augmented matrix dimensions
    let aug_n = n + p_pen;
    let aug_p = p;

    // Build augmented matrix
    let mut x_aug_data = vec![0.0; aug_n * aug_p];

    // Copy X_std to top portion
    for i in 0..n {
        for j in 0..p {
            x_aug_data[i * aug_p + j] = x_std.get(i, j);
        }
    }

    // Add sqrt(lambda) * I for penalized coefficients
    for i in 0..p_pen {
        let row = n + i;
        let col = intercept_col + i;
        x_aug_data[row * aug_p + col] = sqrt_lambda;
    }

    let x_aug = Matrix::new(aug_n, aug_p, x_aug_data);

    // Build augmented y vector
    let mut y_aug = vec![0.0; aug_n];
    for i in 0..n {
        y_aug[i] = y_centered[i];
    }
    // Remaining entries are already 0

    // Solve using QR decomposition
    let (q, r) = x_aug.qr();
    let beta_std = solve_upper_triangular_with_augmented_y(&r, &q, &y_aug, aug_n)?;

    // Unstandardize coefficients
    let (intercept, beta_orig) = unstandardize_coefficients(&beta_std, &std_info);

    // Compute fitted values and residuals on original scale
    let fitted = predict(x, intercept, &beta_orig);
    let residuals: Vec<f64> = y.iter().zip(fitted.iter()).map(|(yi, yh)| yi - yh).collect();

    // Compute effective degrees of freedom.
    // For ridge: df = trace(X(X'X + lambda*I)^(-1)X'), which equals the sum of
    // d_j^2 / (d_j^2 + lambda) over the eigenvalues d_j^2 of X'X.
    // compute_ridge_df returns an inexpensive approximation of this quantity.
    let df = compute_ridge_df(&x_std, options.lambda, intercept_col);

    // Compute model fit statistics
    let y_mean: f64 = y.iter().sum::<f64>() / n as f64;
    let ss_tot: f64 = y.iter().map(|yi| (yi - y_mean).powi(2)).sum();
    let ss_res: f64 = residuals.iter().map(|r| r.powi(2)).sum();
    let r_squared = if ss_tot > 1e-10 {
        1.0 - ss_res / ss_tot
    } else {
        1.0
    };

    // Adjusted R² using effective degrees of freedom
    let adj_r_squared = if ss_tot > 1e-10 && (n as f64) > df {
        1.0 - (1.0 - r_squared) * ((n - 1) as f64 / (n as f64 - df))
    } else {
        r_squared
    };

    let mse = ss_res / (n as f64 - 1.0); // Use n-1 for consistency
    let rmse = mse.sqrt();
    let mae: f64 = residuals.iter().map(|r| r.abs()).sum::<f64>() / n as f64;

    Ok(RidgeFit {
        lambda: options.lambda,
        intercept,
        coefficients: beta_orig,
        fitted_values: fitted,
        residuals,
        df,
        r_squared,
        adj_r_squared,
        mse,
        rmse,
        mae,
    })
}

/// Computes the effective degrees of freedom for ridge regression.
///
/// The exact quantity is df = trace(H) where H = X(X'X + λI)^(-1)X', which
/// equals the sum of dⱼ² / (dⱼ² + λ) over the singular values dⱼ of X.
/// This function does not compute that trace; it returns an inexpensive
/// approximation in which each unpenalized column contributes 1 and each
/// penalized column contributes 1 / (1 + λ).
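///
/// In formula form (first line: exact value when every column is penalized;
/// second line: the approximation actually returned here):
///
/// ```text
/// df(λ) = Σⱼ dⱼ² / (dⱼ² + λ)          (exact; dⱼ = singular values of X)
/// df(λ) ≈ p - p_pen * λ / (1 + λ)     (returned approximation)
/// ```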
fn compute_ridge_df(x_std: &Matrix, lambda: f64, intercept_col: usize) -> f64 {
    let p = x_std.cols;

    // Number of penalized columns (the intercept column, if any, is unpenalized)
    let p_pen = p - intercept_col;

    if lambda <= 0.0 {
        // No penalty: every column contributes a full degree of freedom
        return p as f64;
    }

    // Approximate reduction in df due to the penalty: each penalized column
    // loses a fraction lambda / (1 + lambda) of a degree of freedom. This
    // interpolates between p (lambda = 0) and the number of unpenalized
    // columns (lambda -> infinity).
    let df_penalty = (p_pen as f64) * lambda / (1.0 + lambda);

    (p as f64) - df_penalty
}

/// OLS fit for lambda = 0 (special case of ridge).
fn ridge_ols_fit(x: &Matrix, y: &[f64], options: &RidgeFitOptions) -> Result<RidgeFit> {
    let n = x.rows;
    let p = x.cols;

    // Standardize (center) for consistency
    let std_options = StandardizeOptions {
        intercept: options.intercept,
        standardize_x: false,
        standardize_y: false,
    };

    let (_, _y_centered, std_info) = standardize_xy(x, y, &std_options);

    // Use QR decomposition for OLS on the original design matrix
    let (q, r) = x.qr();
    let beta_std = solve_upper_triangular_with_augmented_y(&r, &q, y, n)?;

    // Unstandardize
    let (intercept, beta_orig) = unstandardize_coefficients(&beta_std, &std_info);

    // Compute fitted values and residuals
    let fitted = predict(x, intercept, &beta_orig);
    let residuals: Vec<f64> = y.iter().zip(fitted.iter()).map(|(yi, yh)| yi - yh).collect();

    // For OLS (lambda = 0), the effective degrees of freedom equal the number of columns
    let df = p as f64;

    // Compute model fit statistics
    let y_mean: f64 = y.iter().sum::<f64>() / n as f64;
    let ss_tot: f64 = y.iter().map(|yi| (yi - y_mean).powi(2)).sum();
    let ss_res: f64 = residuals.iter().map(|r| r.powi(2)).sum();
    let r_squared = if ss_tot > 1e-10 {
        1.0 - ss_res / ss_tot
    } else {
        1.0
    };

    // Adjusted R² using effective degrees of freedom
    let adj_r_squared = if ss_tot > 1e-10 && n > p {
        1.0 - (1.0 - r_squared) * ((n - 1) as f64 / (n - p) as f64)
    } else {
        r_squared
    };

    let mse = ss_res / (n as f64 - p as f64);
    let rmse = mse.sqrt();
    let mae: f64 = residuals.iter().map(|r| r.abs()).sum::<f64>() / n as f64;

    Ok(RidgeFit {
        lambda: 0.0,
        intercept,
        coefficients: beta_orig,
        fitted_values: fitted,
        residuals,
        df,
        r_squared,
        adj_r_squared,
        mse,
        rmse,
        mae,
    })
}

/// Solves R * beta = Q^T * y_aug for beta.
///
/// This is a helper for the augmented QR approach.
fn solve_upper_triangular_with_augmented_y(
    r: &Matrix,
    q: &Matrix,
    y_aug: &[f64],
    aug_n: usize,
) -> Result<Vec<f64>> {
    let p = r.cols;

    // Compute the first p entries of Q^T * y_aug. Only these are needed,
    // because R is zero below row p, so the remaining entries of Q^T * y_aug
    // do not affect the solution.
    let mut qty = vec![0.0; p];

    for i in 0..p {
        let mut sum = 0.0;
        for k in 0..aug_n {
            sum += q.get(k, i) * y_aug[k];
        }
        qty[i] = sum;
    }

    // Back substitution: solve R * beta = qty
    let mut beta = vec![0.0; p];

    for i in (0..p).rev() {
        let mut sum = qty[i];
        for j in (i + 1)..p {
            sum -= r.get(i, j) * beta[j];
        }

        let diag = r.get(i, i);
        if diag.abs() < 1e-14 {
            return Err(Error::ComputationFailed(
                "Matrix is singular to working precision".to_string(),
            ));
        }

        beta[i] = sum / diag;
    }

    Ok(beta)
}

/// Makes predictions using a ridge regression fit.
///
/// # Arguments
///
/// * `fit` - The ridge regression fit result
/// * `x_new` - New data matrix (n_new × p)
///
/// # Returns
///
/// Predictions for each row in x_new.
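///
/// # Example
///
/// A short usage sketch (fit on training data, then predict on a new row):
///
/// ```rust,no_run
/// use linreg_core::linalg::Matrix;
/// use linreg_core::regularized::ridge::{predict_ridge, ridge_fit, RidgeFitOptions};
///
/// let x = Matrix::new(3, 2, vec![
///     1.0, 2.0,
///     1.0, 3.0,
///     1.0, 4.0,
/// ]);
/// let y = vec![3.0, 5.0, 7.0];
/// let fit = ridge_fit(&x, &y, &RidgeFitOptions::default()).unwrap();
///
/// // New data must have the same column layout as the training matrix.
/// let x_new = Matrix::new(1, 2, vec![1.0, 5.0]);
/// let preds = predict_ridge(&fit, &x_new);
/// assert_eq!(preds.len(), 1);
/// ```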
pub fn predict_ridge(fit: &RidgeFit, x_new: &Matrix) -> Vec<f64> {
    predict(x_new, fit.intercept, &fit.coefficients)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_ridge_fit_simple() {
        // Simple test: perfect linear relationship
        let x_data = vec![
            1.0, 1.0,
            1.0, 2.0,
            1.0, 3.0,
            1.0, 4.0,
        ];
        let x = Matrix::new(4, 2, x_data);
        let y = vec![2.0, 4.0, 6.0, 8.0]; // y = 2 * x (with intercept 0)

        let options = RidgeFitOptions {
            lambda: 0.1,
            intercept: true,
            standardize: false,
        };

        let fit = ridge_fit(&x, &y, &options).unwrap();

        // With small lambda, should be close to OLS solution
        // OLS solution: intercept ≈ 0, slope ≈ 2
        assert!((fit.coefficients[1] - 2.0).abs() < 0.1);
        assert!(fit.intercept.abs() < 0.1);
    }

    #[test]
    fn test_ridge_fit_with_standardization() {
        let x_data = vec![
            1.0, 100.0,
            1.0, 200.0,
            1.0, 300.0,
            1.0, 400.0,
        ];
        let x = Matrix::new(4, 2, x_data);
        let y = vec![2.0, 4.0, 6.0, 8.0];

        let options = RidgeFitOptions {
            lambda: 1.0,
            intercept: true,
            standardize: true,
        };

        let fit = ridge_fit(&x, &y, &options).unwrap();

        // Predictions should be reasonable
        for i in 0..4 {
            assert!((fit.fitted_values[i] - y[i]).abs() < 2.0);
        }
    }

    #[test]
    fn test_ridge_zero_lambda_is_ols() {
        let x_data = vec![
            1.0, 1.0,
            1.0, 2.0,
            1.0, 3.0,
        ];
        let x = Matrix::new(3, 2, x_data);
        let y = vec![2.0, 4.0, 6.0];

        let options = RidgeFitOptions {
            lambda: 0.0,
            intercept: true,
            standardize: false,
        };

        let fit = ridge_fit(&x, &y, &options).unwrap();

        // Should be close to perfect fit for this data
        assert!((fit.fitted_values[0] - 2.0).abs() < 1e-6);
        assert!((fit.fitted_values[1] - 4.0).abs() < 1e-6);
        assert!((fit.fitted_values[2] - 6.0).abs() < 1e-6);
    }

    #[test]
    fn test_ridge_negative_lambda_error() {
        let x_data = vec![1.0, 1.0, 1.0, 2.0, 1.0, 3.0];
        let x = Matrix::new(3, 2, x_data);
        let y = vec![2.0, 4.0, 6.0];

        let options = RidgeFitOptions {
            lambda: -1.0,
            ..Default::default()
        };

        let result = ridge_fit(&x, &y, &options);
        assert!(result.is_err());
    }

    #[test]
    fn test_predict_ridge() {
        let x_data = vec![
            1.0, 1.0,
            1.0, 2.0,
            1.0, 3.0,
        ];
        let x = Matrix::new(3, 2, x_data);
        let y = vec![2.0, 4.0, 6.0];

        let options = RidgeFitOptions {
            lambda: 0.1,
            intercept: true,
            standardize: false,
        };

        let fit = ridge_fit(&x, &y, &options).unwrap();
        let preds = predict_ridge(&fit, &x);

        // Predictions on training data should equal fitted values
        for i in 0..3 {
            assert!((preds[i] - fit.fitted_values[i]).abs() < 1e-10);
        }
    }
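
    // Sketch of an additional property check (uses only the public API above):
    // increasing lambda should not increase the magnitude of the penalized
    // slope coefficient.
    #[test]
    fn test_ridge_larger_lambda_shrinks_slope() {
        let x_data = vec![
            1.0, 1.0,
            1.0, 2.0,
            1.0, 3.0,
            1.0, 4.0,
        ];
        let x = Matrix::new(4, 2, x_data);
        let y = vec![2.1, 3.9, 6.2, 7.8];

        let small = ridge_fit(&x, &y, &RidgeFitOptions { lambda: 0.01, ..Default::default() }).unwrap();
        let large = ridge_fit(&x, &y, &RidgeFitOptions { lambda: 100.0, ..Default::default() }).unwrap();

        // The heavily penalized slope should be no larger in magnitude.
        assert!(large.coefficients[1].abs() <= small.coefficients[1].abs() + 1e-8);
    }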
}