linreg_core/core.rs

//! Core OLS regression implementation.
//!
//! This module provides the main Ordinary Least Squares regression functionality
//! that can be used directly in Rust code. Functions accept native Rust slices
//! and return Result types for proper error handling.
//!
//! # Example
//!
//! ```
//! # use linreg_core::core::ols_regression;
//! # use linreg_core::Error;
//! let y = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
//! let x1 = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
//! let x2 = vec![2.0, 3.0, 3.5, 4.0, 4.5, 5.0];
//! let names = vec![
//!     "Intercept".to_string(),
//!     "X1".to_string(),
//!     "X2".to_string(),
//! ];
//!
//! let result = ols_regression(&y, &[x1, x2], &names)?;
//! # Ok::<(), Error>(())
//! ```

use crate::distributions::{fisher_snedecor_cdf, student_t_cdf, student_t_inverse_cdf};
use crate::error::{Error, Result};
use crate::linalg::{vec_dot, vec_mean, vec_sub, Matrix};
use crate::serialization::types::ModelType;
// Import the macro from crate root (where #[macro_export] places it)
use crate::impl_serialization;
use serde::{Deserialize, Serialize};

// ============================================================================
// Numerical Constants
// ============================================================================

/// Minimum threshold for the standardized residual denominator to avoid division by zero.
/// When (1 - leverage) is very small, the observation has extremely high leverage
/// and standardized residuals may be unreliable.
const MIN_LEVERAGE_DENOM: f64 = 1e-10;

// ============================================================================
// Result Types
// ============================================================================
//
// Structs containing the output of regression computations.

/// Result of VIF (Variance Inflation Factor) calculation.
///
/// VIF measures how much the variance of an estimated regression coefficient
/// increases due to multicollinearity among the predictors.
///
/// # Fields
///
/// * `variable` - Name of the predictor variable
/// * `vif` - Variance Inflation Factor (VIF > 10 indicates high multicollinearity)
/// * `rsquared` - R-squared from regressing this predictor on all others
/// * `interpretation` - Human-readable interpretation of the VIF value
///
/// # Example
///
/// ```
/// # use linreg_core::core::VifResult;
/// let vif = VifResult {
///     variable: "X1".to_string(),
///     vif: 2.5,
///     rsquared: 0.6,
///     interpretation: "Low multicollinearity".to_string(),
/// };
/// assert_eq!(vif.variable, "X1");
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VifResult {
    /// Name of the predictor variable
    pub variable: String,
    /// Variance Inflation Factor (VIF > 10 indicates high multicollinearity)
    pub vif: f64,
    /// R-squared from regressing this predictor on all others
    pub rsquared: f64,
    /// Human-readable interpretation of the VIF value
    pub interpretation: String,
}

/// Complete output from OLS regression.
///
/// Contains all coefficients, statistics, diagnostics, and residuals from
/// an Ordinary Least Squares regression.
///
/// # Example
///
/// ```
/// # use linreg_core::core::ols_regression;
/// # use linreg_core::serialization::{ModelSave, ModelLoad};
/// # use std::fs;
/// let y = vec![1.0, 2.0, 3.0, 4.0, 5.0];
/// let x1 = vec![1.0, 2.0, 3.0, 4.0, 5.0];
/// let names = vec!["Intercept".to_string(), "X1".to_string()];
///
/// let result = ols_regression(&y, &[x1], &names).unwrap();
/// assert!(result.r_squared > 0.0);
/// assert_eq!(result.coefficients.len(), 2); // intercept + 1 predictor
///
/// // Save and load the model
/// # let path = "test_model.json";
/// # result.save(path).unwrap();
/// # let loaded = linreg_core::core::RegressionOutput::load(path).unwrap();
/// # assert_eq!(loaded.coefficients, result.coefficients);
/// # fs::remove_file(path).ok();
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegressionOutput {
    /// Regression coefficients (including intercept)
    pub coefficients: Vec<f64>,
    /// Standard errors of coefficients
    pub std_errors: Vec<f64>,
    /// t-statistics for coefficient significance tests
    pub t_stats: Vec<f64>,
    /// Two-tailed p-values for coefficients
    pub p_values: Vec<f64>,
    /// Lower bounds of 95% confidence intervals
    pub conf_int_lower: Vec<f64>,
    /// Upper bounds of 95% confidence intervals
    pub conf_int_upper: Vec<f64>,
    /// R-squared (coefficient of determination)
    pub r_squared: f64,
    /// Adjusted R-squared (accounts for number of predictors)
    pub adj_r_squared: f64,
    /// F-statistic for overall model significance
    pub f_statistic: f64,
    /// P-value for F-statistic
    pub f_p_value: f64,
    /// Mean squared error of residuals
    pub mse: f64,
    /// Root mean squared error (prediction error in original units)
    pub rmse: f64,
    /// Mean absolute error of residuals
    pub mae: f64,
    /// Standard error of the regression (residual standard deviation)
    pub std_error: f64,
    /// Raw residuals (observed - predicted)
    pub residuals: Vec<f64>,
    /// Standardized residuals (accounting for leverage)
    pub standardized_residuals: Vec<f64>,
    /// Fitted/predicted values
    pub predictions: Vec<f64>,
    /// Leverage values for each observation (diagonal of hat matrix)
    pub leverage: Vec<f64>,
    /// Variance Inflation Factors for detecting multicollinearity
    pub vif: Vec<VifResult>,
    /// Number of observations
    pub n: usize,
    /// Number of predictor variables (excluding intercept)
    pub k: usize,
    /// Degrees of freedom for residuals (n - k - 1)
    pub df: usize,
    /// Names of variables (including intercept)
    pub variable_names: Vec<String>,
    /// Log-likelihood of the model (useful for AIC/BIC calculation and model comparison)
    pub log_likelihood: f64,
    /// Akaike Information Criterion (lower = better model, penalizes complexity)
    pub aic: f64,
    /// Bayesian Information Criterion (lower = better model, penalizes complexity more heavily than AIC)
    pub bic: f64,
}

// ============================================================================
// Statistical Helper Functions
// ============================================================================
//
// Utility functions for computing p-values, critical values, and leverage.

/// Computes a two-tailed p-value from a t-statistic.
///
/// Uses the Student's t-distribution CDF to calculate the probability
/// of observing a t-statistic as extreme as the one provided.
///
/// # Arguments
///
/// * `t` - The t-statistic value
/// * `df` - Degrees of freedom
///
/// # Example
///
/// ```
/// # use linreg_core::core::two_tailed_p_value;
/// let p = two_tailed_p_value(2.0, 20.0);
/// assert!(p > 0.0 && p < 0.1);
/// ```
pub fn two_tailed_p_value(t: f64, df: f64) -> f64 {
    if t.abs() > 100.0 {
        return 0.0;
    }

    let cdf = student_t_cdf(t, df);
    if t >= 0.0 {
        2.0 * (1.0 - cdf)
    } else {
        2.0 * cdf
    }
}

/// Computes the critical t-value for a given significance level and degrees of freedom.
///
/// Returns the t-value such that the area under the t-distribution curve
/// to the right of it equals alpha/2 (two-tailed test).
///
/// # Arguments
///
/// * `df` - Degrees of freedom
/// * `alpha` - Significance level (typically 0.05 for 95% confidence)
///
/// # Example
///
/// ```
/// # use linreg_core::core::t_critical_quantile;
/// let t_crit = t_critical_quantile(20.0, 0.05);
/// assert!(t_crit > 2.0); // approximately 2.086 for df=20, alpha=0.05
/// ```
pub fn t_critical_quantile(df: f64, alpha: f64) -> f64 {
    let p = 1.0 - alpha / 2.0;
    student_t_inverse_cdf(p, df)
}

/// Computes a p-value from an F-statistic.
///
/// Uses the F-distribution CDF to calculate the probability of observing
/// an F-statistic as extreme as the one provided.
///
/// # Arguments
///
/// * `f_stat` - The F-statistic value
/// * `df1` - Numerator degrees of freedom
/// * `df2` - Denominator degrees of freedom
///
/// # Example
///
/// ```
/// # use linreg_core::core::f_p_value;
/// let p = f_p_value(5.0, 2.0, 20.0);
/// assert!(p > 0.0 && p < 0.05);
/// ```
pub fn f_p_value(f_stat: f64, df1: f64, df2: f64) -> f64 {
    if f_stat <= 0.0 {
        return 1.0;
    }
    1.0 - fisher_snedecor_cdf(f_stat, df1, df2)
}

/// Computes leverage values from the design matrix and its inverse.
///
/// Leverage measures how far an observation's predictor values are from
/// the center of the predictor space. High leverage points can have
/// disproportionate influence on the regression results.
///
/// # Arguments
///
/// * `x` - Design matrix (including intercept column)
/// * `xtx_inv` - Inverse of X'X matrix
///
/// # Example
///
/// ```
/// # use linreg_core::core::compute_leverage;
/// # use linreg_core::linalg::Matrix;
/// // Design matrix with intercept: [[1, 1], [1, 2], [1, 3]]
/// let x = Matrix::new(3, 2, vec![1.0, 1.0, 1.0, 2.0, 1.0, 3.0]);
/// let xtx = x.transpose().matmul(&x);
/// let xtx_inv = xtx.invert().unwrap();
///
/// let leverage = compute_leverage(&x, &xtx_inv);
/// assert_eq!(leverage.len(), 3);
/// // Leverage values should sum to the number of parameters (2)
/// assert!((leverage.iter().sum::<f64>() - 2.0).abs() < 0.01);
/// ```
#[allow(clippy::needless_range_loop)]
pub fn compute_leverage(x: &Matrix, xtx_inv: &Matrix) -> Vec<f64> {
    let n = x.rows;
    let mut leverage = vec![0.0; n];
    for i in 0..n {
        // h_i = x_row * (X'X)^{-1} * x_row^T, the i-th diagonal of the hat matrix.
        // x_row is (1, cols); temp = x_row * xtx_inv is (1, cols); h_i is a scalar.

        // Manual row extraction and multiplication
        let mut row_vec = vec![0.0; x.cols];
        for j in 0..x.cols {
            row_vec[j] = x.get(i, j);
        }

        let mut temp_vec = vec![0.0; x.cols];
        for c in 0..x.cols {
            let mut sum = 0.0;
            for k in 0..x.cols {
                sum += row_vec[k] * xtx_inv.get(k, c);
            }
            temp_vec[c] = sum;
        }

        leverage[i] = vec_dot(&temp_vec, &row_vec);
    }
    leverage
}

// ============================================================================
// Model Selection Criteria
// ============================================================================
//
// Log-likelihood, AIC, and BIC for model comparison.

/// Computes the log-likelihood of the OLS model.
///
/// For a linear regression with normally distributed errors, the log-likelihood is:
///
/// ```text
/// log L = -n/2 * ln(2π) - n/2 * ln(SSR/n) - n/2
///       = -n/2 * ln(2π*SSR/n) - n/2
/// ```
///
/// where SSR is the sum of squared residuals and n is the number of observations.
/// This matches R's `logLik.lm()` implementation.
///
/// # Arguments
///
/// * `n` - Number of observations
/// * `mse` - Mean squared error (estimate of σ², but NOT directly used in formula)
/// * `ssr` - Sum of squared residuals
///
/// # Example
///
/// ```
/// # use linreg_core::core::log_likelihood;
/// let ll = log_likelihood(100, 4.5, 450.0);
/// assert!(ll < 0.0);  // Log-likelihood is negative for typical data
/// ```
pub fn log_likelihood(n: usize, _mse: f64, ssr: f64) -> f64 {
    let n_f64 = n as f64;
    let two_pi = 2.0 * std::f64::consts::PI;

    // R's logLik.lm formula: -n/2 * log(2*pi*SSR/n) - n/2
    // This is equivalent to: -n/2 * (log(2*pi) + log(SSR/n) + 1)
    -0.5 * n_f64 * (two_pi * ssr / n_f64).ln() - n_f64 / 2.0
}

/// Computes the Akaike Information Criterion (AIC).
///
/// AIC = 2k - 2logL
///
/// where k is the number of estimated parameters and logL is the log-likelihood.
/// Lower AIC indicates a better model, with a penalty for additional parameters.
///
/// Note: R's AIC for lm models counts k as (n_coef + 1) where n_coef is the
/// number of coefficients (including intercept) and +1 is for the estimated
/// variance parameter. This implementation follows that convention.
///
/// # Arguments
///
/// * `log_likelihood` - Log-likelihood of the model
/// * `n_coef` - Number of coefficients (including intercept)
///
/// # Example
///
/// ```
/// # use linreg_core::core::aic;
/// let aic_value = aic(-150.5, 3);  // 3 coefficients
/// ```
pub fn aic(log_likelihood: f64, n_coef: usize) -> f64 {
    // R's AIC for lm: 2k - 2*logL
    // where k = n_coef + 1 (coefficients + variance parameter)
    let k = n_coef + 1;
    2.0 * k as f64 - 2.0 * log_likelihood
}

/// Computes the Bayesian Information Criterion (BIC).
///
/// BIC = k*ln(n) - 2logL
///
/// where k is the number of estimated parameters, n is the sample size, and
/// logL is the log-likelihood. BIC penalizes model complexity more heavily
/// than AIC for larger sample sizes.
///
/// Note: R's BIC for lm models counts k as (n_coef + 1) where n_coef is the
/// number of coefficients (including intercept) and +1 is for the estimated
/// variance parameter. This implementation follows that convention.
///
/// # Arguments
///
/// * `log_likelihood` - Log-likelihood of the model
/// * `n_coef` - Number of coefficients (including intercept)
/// * `n_obs` - Number of observations
///
/// # Example
///
/// ```
/// # use linreg_core::core::bic;
/// let bic_value = bic(-150.5, 3, 100);  // 3 coefficients, 100 obs
/// ```
pub fn bic(log_likelihood: f64, n_coef: usize, n_obs: usize) -> f64 {
    // R's BIC for lm: k * ln(n) - 2*logL
    // where k = n_coef + 1 (coefficients + variance parameter)
    let k = n_coef + 1;
    k as f64 * (n_obs as f64).ln() - 2.0 * log_likelihood
}

/// Computes AIC using Python/statsmodels convention.
///
/// AIC = 2k - 2logL
///
/// where k is the number of coefficients (NOT including variance parameter).
/// This matches Python's statsmodels OLS.aic behavior.
///
/// # Arguments
///
/// * `log_likelihood` - Log-likelihood of the model
/// * `n_coef` - Number of coefficients (including intercept)
///
/// # Example
///
/// ```
/// # use linreg_core::core::aic_python;
/// let aic_value = aic_python(-150.5, 3);  // 3 coefficients
/// ```
pub fn aic_python(log_likelihood: f64, n_coef: usize) -> f64 {
    // Python's statsmodels AIC: 2k - 2*logL
    // where k = n_coef (does NOT include variance parameter)
    2.0 * n_coef as f64 - 2.0 * log_likelihood
}

/// Computes BIC using Python/statsmodels convention.
///
/// BIC = k*ln(n) - 2logL
///
/// where k is the number of coefficients (NOT including variance parameter).
/// This matches Python's statsmodels OLS.bic behavior.
///
/// # Arguments
///
/// * `log_likelihood` - Log-likelihood of the model
/// * `n_coef` - Number of coefficients (including intercept)
/// * `n_obs` - Number of observations
///
/// # Example
///
/// ```
/// # use linreg_core::core::bic_python;
/// let bic_value = bic_python(-150.5, 3, 100);  // 3 coefficients, 100 obs
/// ```
pub fn bic_python(log_likelihood: f64, n_coef: usize, n_obs: usize) -> f64 {
    // Python's statsmodels BIC: k * ln(n) - 2*logL
    // where k = n_coef (does NOT include variance parameter)
    n_coef as f64 * (n_obs as f64).ln() - 2.0 * log_likelihood
}

// ============================================================================
// VIF Calculation
// ============================================================================
//
// Variance Inflation Factor analysis for detecting multicollinearity.

/// Calculates Variance Inflation Factors for all predictors.
///
/// VIF quantifies the severity of multicollinearity in a regression analysis.
/// A VIF > 10 indicates high multicollinearity that may need to be addressed.
///
/// # Arguments
///
/// * `x_vars` - Predictor variables (each of length n)
/// * `names` - Variable names (including intercept as first element)
/// * `n` - Number of observations
///
/// # Returns
///
/// Vector of VIF results for each predictor (excluding intercept).
///
/// # Example
///
/// ```
/// # use linreg_core::core::calculate_vif;
/// let x1 = vec![1.0, 2.0, 3.0, 4.0, 5.0];
/// let x2 = vec![2.0, 4.0, 6.0, 8.0, 10.0];
/// let names = vec!["Intercept".to_string(), "X1".to_string(), "X2".to_string()];
///
/// let vif_results = calculate_vif(&[x1, x2], &names, 5);
/// assert_eq!(vif_results.len(), 2);
/// // Perfectly collinear variables will have VIF = infinity
/// ```
pub fn calculate_vif(x_vars: &[Vec<f64>], names: &[String], n: usize) -> Vec<VifResult> {
    let k = x_vars.len();
    if k <= 1 {
        return vec![];
    }

    // Standardize predictors (Z-score)
    let mut z_data = vec![0.0; n * k];

    for (j, var) in x_vars.iter().enumerate() {
        let mean = vec_mean(var);
        let variance = var.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / ((n - 1) as f64);
        let std_dev = variance.sqrt();

        // Handle constant variables
        if std_dev.abs() < 1e-10 {
            return names
                .iter()
                .skip(1)
                .map(|name| VifResult {
                    variable: name.clone(),
                    vif: f64::INFINITY,
                    rsquared: 1.0,
                    interpretation: "Constant variable (undefined correlation)".to_string(),
                })
                .collect();
        }

        for i in 0..n {
            z_data[i * k + j] = (var[i] - mean) / std_dev;
        }
    }

    let z = Matrix::new(n, k, z_data);

    // Correlation matrix R = (1/(n-1)) * Z^T * Z
    let z_t = z.transpose();
    let zt_z = z_t.matmul(&z);

    // Scale by 1/(n-1)
    let mut r_corr = zt_z;
    let factor = 1.0 / ((n - 1) as f64);
    for val in &mut r_corr.data {
        *val *= factor;
    }

    // Invert the correlation matrix via QR: A = QR => A^{-1} = R^{-1} Q^T.
    // (A Cholesky factorization would also work since the matrix is symmetric
    // positive definite, but QR reuses the existing upper-triangular inversion.)
    let (q_corr, r_corr_tri) = r_corr.qr();

    let r_inv_opt = r_corr_tri.invert_upper_triangular();

    let r_inv = match r_inv_opt {
        Some(inv) => inv.matmul(&q_corr.transpose()),
        None => {
            return names
                .iter()
                .skip(1)
                .map(|name| VifResult {
                    variable: name.clone(),
                    vif: f64::INFINITY,
                    rsquared: 1.0,
                    interpretation: "Perfect multicollinearity (singular matrix)".to_string(),
                })
                .collect();
        },
    };

    // Extract diagonals: VIF_j is the j-th diagonal element of R^{-1},
    // and R²_j = 1 - 1/VIF_j.
    let mut results = vec![];
    for j in 0..k {
        let vif = r_inv.get(j, j);
        // Clamp to 1.0: diagonal entries below 1 can only arise from round-off.
        let vif = if vif < 1.0 { 1.0 } else { vif };
        let rsquared = 1.0 - 1.0 / vif;

        let interpretation = if vif.is_infinite() {
            "Perfect multicollinearity".to_string()
        } else if vif > 10.0 {
            "High multicollinearity - consider removing".to_string()
        } else if vif > 5.0 {
            "Moderate multicollinearity".to_string()
        } else {
            "Low multicollinearity".to_string()
        };

        results.push(VifResult {
            variable: names[j + 1].clone(),
            vif,
            rsquared,
            interpretation,
        });
    }

    results
}

// ============================================================================
// OLS Regression
// ============================================================================
//
// Ordinary Least Squares regression implementation using QR decomposition.

/// Performs Ordinary Least Squares regression using QR decomposition.
///
/// Uses a numerically stable QR decomposition approach to solve the normal
/// equations. Returns comprehensive statistics including coefficients,
/// standard errors, t-statistics, p-values, and diagnostic measures.
///
/// # Arguments
///
/// * `y` - Response variable (n observations)
/// * `x_vars` - Predictor variables (each of length n)
/// * `variable_names` - Names for variables (including intercept)
///
/// # Returns
///
/// A [`RegressionOutput`] containing all regression statistics and diagnostics.
///
/// # Errors
///
/// Returns [`Error::InsufficientData`] if n ≤ k + 1.
/// Returns [`Error::SingularMatrix`] if perfect multicollinearity exists.
/// Returns [`Error::InvalidInput`] if inputs contain NaN or infinite values.
///
/// # Example
///
/// ```
/// # use linreg_core::core::ols_regression;
/// # use linreg_core::Error;
/// let y = vec![2.5, 3.7, 4.2, 5.1, 6.3, 7.0];
/// let x1 = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
/// let x2 = vec![2.0, 4.0, 5.0, 4.0, 3.0, 2.0];
/// let names = vec![
///     "Intercept".to_string(),
///     "Temperature".to_string(),
///     "Pressure".to_string(),
/// ];
///
/// let result = ols_regression(&y, &[x1, x2], &names)?;
/// println!("R-squared: {}", result.r_squared);
/// # Ok::<(), Error>(())
/// ```
#[allow(clippy::needless_range_loop)]
pub fn ols_regression(
    y: &[f64],
    x_vars: &[Vec<f64>],
    variable_names: &[String],
) -> Result<RegressionOutput> {
    let n = y.len();
    let k = x_vars.len();
    let p = k + 1;

    // Validate inputs
    if n <= k + 1 {
        return Err(Error::InsufficientData {
            required: k + 2,
            available: n,
        });
    }

    // Validate dimensions and finite values using shared helper
    crate::diagnostics::validate_regression_data(y, x_vars)?;

    // Prepare variable names
    let mut names = variable_names.to_vec();
    while names.len() <= k {
        names.push(format!("X{}", names.len()));
    }

    // Create design matrix
    let mut x_data = vec![0.0; n * p];
    for row in 0..n {
        x_data[row * p] = 1.0; // intercept
        for (col, x_var) in x_vars.iter().enumerate() {
            x_data[row * p + col + 1] = x_var[row];
        }
    }

    let x_matrix = Matrix::new(n, p, x_data);

    // QR decomposition with column pivoting (LINPACK)
    // This handles rank-deficient matrices gracefully
    let qr_result = x_matrix.qr_linpack(None);
    let rank = qr_result.rank;

    // Solve for coefficients using the pivoted QR
    // For rank-deficient cases, dropped coefficients are set to NAN
    let beta = match x_matrix.qr_solve_linpack(&qr_result, y) {
        Some(b) => b,
        None => return Err(Error::SingularMatrix),
    };

    // Identify which coefficients are active (non-NAN) vs dropped
    let active: Vec<bool> = beta.iter().map(|b| !b.is_nan()).collect();
    let n_active = active.iter().filter(|&&a| a).count();

    // For predictions, treat NAN coefficients as 0
    let beta_for_pred: Vec<f64> = beta.iter().map(|&b| if b.is_nan() { 0.0 } else { b }).collect();
    let predictions = x_matrix.mul_vec(&beta_for_pred);
    let residuals = vec_sub(y, &predictions);

    // Degrees of freedom based on rank
    let df = n - rank;

    // Compute sums of squares
    let y_mean = vec_mean(y);
    let ss_total: f64 = y.iter().map(|&yi| (yi - y_mean).powi(2)).sum();
    let ss_residual: f64 = residuals.iter().map(|&r| r.powi(2)).sum();
    let ss_regression = ss_total - ss_residual;

    // R-squared and adjusted R-squared
    // For rank-deficient cases, use rank-1 as the number of effective predictors
    let k_effective = rank.saturating_sub(1); // rank minus intercept
    let r_squared = if ss_total == 0.0 {
        f64::NAN
    } else {
        1.0 - ss_residual / ss_total
    };

    let adj_r_squared = if ss_total == 0.0 || df == 0 {
        f64::NAN
    } else {
        1.0 - (1.0 - r_squared) * ((n - 1) as f64 / df as f64)
    };

    // Mean squared error and standard error
    let mse = if df > 0 { ss_residual / df as f64 } else { f64::NAN };
    let std_error = mse.sqrt();

    // Standard errors, t-stats, p-values, confidence intervals
    // For active coefficients: compute from (X'X)^{-1} of the active subset
    // For dropped coefficients: NAN (matching R's behavior)
    let mut std_errors = vec![f64::NAN; p];
    let mut t_stats = vec![f64::NAN; p];
    let mut p_values = vec![f64::NAN; p];

    // Build (X'X)^{-1} for the full model using the LINPACK R matrix
    // Extract the rank×rank upper triangular R and invert it
    let mut xtx_inv = Matrix::zeros(p, p);
    if rank > 0 && df > 0 {
        // Extract the rank×rank R sub-matrix (in pivoted order)
        let mut r_sub = Matrix::zeros(rank, rank);
        for i in 0..rank {
            for j in i..rank {
                r_sub.set(i, j, qr_result.qr.get(i, j));
            }
        }

        // Invert the R sub-matrix
        if let Some(r_inv_sub) = r_sub.invert_upper_triangular() {
            // Compute (R^{-1})(R^{-1})^T in pivoted space
            let xtx_inv_pivoted = r_inv_sub.matmul(&r_inv_sub.transpose());

            // Map back to original column order via pivot permutation
            for i in 0..rank {
                let orig_i = qr_result.pivot[i] - 1; // Convert to 0-based
                for j in 0..rank {
                    let orig_j = qr_result.pivot[j] - 1;
                    xtx_inv.set(orig_i, orig_j, xtx_inv_pivoted.get(i, j));
                }
            }

            // Compute standard errors for active coefficients
            for col in 0..p {
                if active[col] {
                    let se = (xtx_inv.get(col, col) * mse).sqrt();
                    if !se.is_nan() {
                        std_errors[col] = se;
                        t_stats[col] = beta[col] / se;
                        p_values[col] = two_tailed_p_value(t_stats[col], df as f64);
                    }
                }
            }
        }
    }

    // Confidence intervals
    let alpha = 0.05;
    let t_critical = if df > 0 { t_critical_quantile(df as f64, alpha) } else { f64::NAN };

    let conf_int_lower: Vec<f64> = beta
        .iter()
        .zip(&std_errors)
        .map(|(&b, &se)| if b.is_nan() || se.is_nan() { f64::NAN } else { b - t_critical * se })
        .collect();
    let conf_int_upper: Vec<f64> = beta
        .iter()
        .zip(&std_errors)
        .map(|(&b, &se)| if b.is_nan() || se.is_nan() { f64::NAN } else { b + t_critical * se })
        .collect();

    // Leverage (using the rank-restricted (X'X)^{-1})
    let leverage = compute_leverage(&x_matrix, &xtx_inv);

    // Standardized residuals
    let standardized_residuals: Vec<f64> = residuals
        .iter()
        .zip(&leverage)
        .map(|(&r, &h)| {
            let factor = (1.0 - h).max(MIN_LEVERAGE_DENOM).sqrt();
            let denom = std_error * factor;
            if denom > MIN_LEVERAGE_DENOM {
                r / denom
            } else {
                0.0
            }
        })
        .collect();

    // F-statistic (uses effective number of predictors)
    let f_statistic = if k_effective > 0 && df > 0 {
        (ss_regression / k_effective as f64) / mse
    } else {
        f64::NAN
    };
    let f_p_value = if f_statistic.is_nan() {
        f64::NAN
    } else {
        f_p_value(f_statistic, k_effective as f64, df as f64)
    };

    // RMSE (reported as the residual standard error, sqrt(SSR/df)) and MAE
    let rmse = std_error;
    let mae: f64 = residuals.iter().map(|&r| r.abs()).sum::<f64>() / n as f64;

    // VIF
    let vif = calculate_vif(x_vars, &names, n);

    // Model selection criteria (for model comparison)
    let ll = log_likelihood(n, mse, ss_residual);
    let n_coef = n_active; // only active coefficients
    let aic_val = aic(ll, n_coef);
    let bic_val = bic(ll, n_coef, n);

    Ok(RegressionOutput {
        coefficients: beta,
        std_errors,
        t_stats,
        p_values,
        conf_int_lower,
        conf_int_upper,
        r_squared,
        adj_r_squared,
        f_statistic,
        f_p_value,
        mse,
        rmse,
        mae,
        std_error,
        residuals,
        standardized_residuals,
        predictions,
        leverage,
        vif,
        n,
        k,
        df,
        variable_names: names,
        log_likelihood: ll,
        aic: aic_val,
        bic: bic_val,
    })
}
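
// A minimal convenience sketch (not part of the original API; the name
// `predict_row` is hypothetical): applying a fitted coefficient vector to one
// new observation. It assumes coefficients are ordered [intercept, x1, x2, ...]
// as produced by `ols_regression`, and treats NaN (dropped) coefficients as
// zero, mirroring the prediction step inside `ols_regression` above.
/// Applies OLS coefficients to a single new observation's predictor values.
pub fn predict_row(coefficients: &[f64], x_row: &[f64]) -> f64 {
    // Start from the intercept (first coefficient); NaN means it was dropped.
    let mut yhat = if coefficients[0].is_nan() { 0.0 } else { coefficients[0] };
    // Add each active slope times its predictor value.
    for (&b, &x) in coefficients[1..].iter().zip(x_row) {
        if !b.is_nan() {
            yhat += b * x;
        }
    }
    yhat
}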

// ============================================================================
// Model Serialization Traits
// ============================================================================

// Generate ModelSave and ModelLoad implementations using macro
impl_serialization!(RegressionOutput, ModelType::OLS, "OLS");

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_aic_bic_formulas_known_values() {
        // Test formulas with simple known inputs
        let ll = -100.0;
        let n_coef = 3; // 3 coefficients (e.g., intercept + 2 predictors)
        let n_obs = 100;

        let aic_val = aic(ll, n_coef);
        let bic_val = bic(ll, n_coef, n_obs);

        // AIC = 2k - 2logL where k = n_coef + 1 (variance parameter)
        // AIC = 2*4 - 2*(-100) = 8 + 200 = 208
        assert!((aic_val - 208.0).abs() < 1e-10);

        // BIC = k*ln(n) - 2logL where k = n_coef + 1
        // BIC = 4*ln(100) - 2*(-100) = 4*4.605... + 200
        let expected_bic = 4.0 * (100.0_f64).ln() + 200.0;
        assert!((bic_val - expected_bic).abs() < 1e-10);
    }

    #[test]
    fn test_bic_greater_than_aic_for_reasonable_n() {
        // For n >= 8, ln(n) > 2, so BIC > AIC (both have -2logL term)
        // BIC uses k*ln(n) while AIC uses 2k, so when ln(n) > 2, BIC > AIC
        let ll = -50.0;
        let n_coef = 2;

        let aic_val = aic(ll, n_coef);
        let bic_val = bic(ll, n_coef, 100); // n=100, ln(100) ≈ 4.6 > 2

        assert!(bic_val > aic_val);
    }
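
    // Additional sanity check (a sketch, not in the original suite): the
    // two-tailed p-value should be symmetric in t and lie strictly in (0, 1).
    #[test]
    fn test_two_tailed_p_value_symmetry() {
        let p_pos = two_tailed_p_value(2.0, 20.0);
        let p_neg = two_tailed_p_value(-2.0, 20.0);

        // By symmetry of the t-distribution, p(t) == p(-t).
        assert!((p_pos - p_neg).abs() < 1e-9);
        assert!(p_pos > 0.0 && p_pos < 1.0);
    }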

    #[test]
    fn test_log_likelihood_returns_finite() {
        // Ensure log_likelihood returns finite values for valid inputs
        let n = 10;
        let mse = 4.0;
        let ssr = mse * (n - 2) as f64;

        let ll = log_likelihood(n, mse, ssr);
        assert!(ll.is_finite());
    }

    #[test]
    fn test_log_likelihood_increases_with_better_fit() {
        // Lower SSR (better fit) should give higher log-likelihood
        let n = 10;

        // Worse fit (higher residuals)
        let ll_worse = log_likelihood(n, 10.0, 80.0);

        // Better fit (lower residuals)
        let ll_better = log_likelihood(n, 2.0, 16.0);

        assert!(ll_better > ll_worse);
    }

    #[test]
    fn test_model_selection_criteria_present_in_output() {
        // Basic sanity check that the fields are populated
        let y = vec![2.0, 4.0, 5.0, 4.0, 5.0];
        let x1 = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let names = vec!["Intercept".to_string(), "X1".to_string()];

        let result = ols_regression(&y, &[x1], &names).unwrap();

        // All three should be finite
        assert!(result.log_likelihood.is_finite());
        assert!(result.aic.is_finite());
        assert!(result.bic.is_finite());

        // AIC and BIC should be positive for typical cases
        // (since log_likelihood is usually negative and bounded)
        assert!(result.aic > 0.0);
        assert!(result.bic > 0.0);
    }
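
    // Additional sanity check (a sketch, not in the original suite): the
    // leverage values of a fitted model should sum to the number of model
    // parameters, since the trace of the hat matrix equals the rank of X.
    // This mirrors the property asserted in the compute_leverage doc example.
    #[test]
    fn test_leverage_sums_to_rank() {
        let y = vec![2.0, 4.0, 5.0, 4.0, 5.0];
        let x1 = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let names = vec!["Intercept".to_string(), "X1".to_string()];

        let result = ols_regression(&y, &[x1], &names).unwrap();

        // Intercept + one predictor => rank 2, so leverages sum to 2.
        let sum: f64 = result.leverage.iter().sum();
        assert!((sum - 2.0).abs() < 1e-6);
    }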

    #[test]
    fn test_regression_output_has_correct_dimensions() {
        // Verify AIC/BIC use k = n_coef + 1 (coefficients + variance parameter)
        let y = vec![3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0];
        let x1 = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
        let x2 = vec![3.0, 2.0, 4.0, 3.0, 5.0, 4.0, 6.0, 5.0];
        let names = vec!["Intercept".into(), "X1".into(), "X2".into()];

        let result = ols_regression(&y, &[x1, x2], &names).unwrap();

        // n_coef = 3 (intercept + 2 predictors)
        // k = n_coef + 1 = 4 (including variance parameter, following R convention)
        let n_coef = 3;
        let k = n_coef + 1; // R's convention includes variance parameter

        // Verify by recalculating AIC from log_likelihood
        let expected_aic = 2.0 * k as f64 - 2.0 * result.log_likelihood;
        assert!((result.aic - expected_aic).abs() < 1e-10);

        // Verify by recalculating BIC from log_likelihood
        let expected_bic = k as f64 * (result.n as f64).ln() - 2.0 * result.log_likelihood;
        assert!((result.bic - expected_bic).abs() < 1e-10);
    }

    #[test]
    fn test_aic_python_convention() {
        // Python's statsmodels uses k = n_coef (no variance parameter)
        let ll = -100.0;
        let n_coef = 3;

        let aic_py = aic_python(ll, n_coef);
        // AIC = 2k - 2logL where k = n_coef (Python convention)
        // AIC = 2*3 - 2*(-100) = 6 + 200 = 206
        assert!((aic_py - 206.0).abs() < 1e-10);
    }

    #[test]
    fn test_bic_python_convention() {
        // Python's statsmodels uses k = n_coef (no variance parameter)
        let ll = -100.0;
        let n_coef = 3;
        let n_obs = 100;

        let bic_py = bic_python(ll, n_coef, n_obs);
        // BIC = k*ln(n) - 2logL where k = n_coef (Python convention)
        // BIC = 3*ln(100) - 2*(-100) = 3*4.605... + 200
        let expected_bic = 3.0 * (100.0_f64).ln() + 200.0;
        assert!((bic_py - expected_bic).abs() < 1e-10);
    }
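
    // Additional sanity check (a sketch, not in the original suite):
    // f_p_value returns 1.0 for non-positive F and decreases as F grows,
    // following directly from the definition above.
    #[test]
    fn test_f_p_value_bounds_and_monotonicity() {
        // Non-positive F-statistics are defined to give p = 1.0.
        assert_eq!(f_p_value(0.0, 2.0, 20.0), 1.0);

        // A larger F-statistic is more extreme, so its p-value is smaller.
        let p_small_f = f_p_value(1.0, 2.0, 20.0);
        let p_large_f = f_p_value(10.0, 2.0, 20.0);
        assert!(p_large_f < p_small_f);
    }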

    #[test]
    fn test_python_aic_smaller_than_r_aic() {
        // Python convention uses k = n_coef, R uses k = n_coef + 1
        // So Python AIC should be 2 smaller than R AIC
        let ll = -50.0;
        let n_coef = 2;

        let aic_r = aic(ll, n_coef);
        let aic_py = aic_python(ll, n_coef);

        assert_eq!(aic_r - aic_py, 2.0);
    }
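
    // Companion check (a sketch, not in the original suite): the R and Python
    // BIC conventions differ by exactly ln(n), since their k differs by 1.
    #[test]
    fn test_python_bic_smaller_than_r_bic() {
        let ll = -50.0;
        let n_coef = 2;
        let n_obs = 100;

        let bic_r = bic(ll, n_coef, n_obs);
        let bic_py = bic_python(ll, n_coef, n_obs);

        assert!((bic_r - bic_py - (n_obs as f64).ln()).abs() < 1e-10);
    }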

    #[test]
    fn test_log_likelihood_formula_matches_r() {
        // Test against R's logLik.lm() formula
        // For a model with n=100, SSR=450, logL = -n/2 * log(2*pi*SSR/n) - n/2
        let n = 100;
        let ssr = 450.0;
        let mse = ssr / (n as f64 - 2.0); // 2 parameters

        let ll = log_likelihood(n, mse, ssr);

        // Calculate expected value manually
        let two_pi = 2.0 * std::f64::consts::PI;
        let expected = -0.5 * n as f64 * (two_pi * ssr / n as f64).ln() - n as f64 / 2.0;

        assert!((ll - expected).abs() < 1e-10);
    }
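
    // Additional sanity check (a sketch, not in the original suite): the
    // two-tailed critical value for df=20, alpha=0.05 is approximately 2.086,
    // the value quoted in the t_critical_quantile doc example. The tolerance
    // is loose to allow for the accuracy of the inverse-CDF implementation.
    #[test]
    fn test_t_critical_quantile_known_value() {
        let t_crit = t_critical_quantile(20.0, 0.05);
        assert!((t_crit - 2.086).abs() < 0.05);
    }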

    #[test]
    fn test_aic_bic_with_perfect_fit() {
        // Perfect fit (zero residuals) - edge case
        let n = 10;
        let ssr = 0.001; // Very small but non-zero to avoid log(0)
        let mse = ssr / (n as f64 - 2.0);

        let ll = log_likelihood(n, mse, ssr);
        let aic_val = aic(ll, 2);
        let bic_val = bic(ll, 2, n);

        // Perfect fit gives very high log-likelihood
        assert!(ll > 0.0);
        // AIC/BIC penalize complexity, so may be negative for very good fits
        assert!(aic_val.is_finite());
        assert!(bic_val.is_finite());
    }

    #[test]
    fn test_aic_bic_model_selection() {
        // Simulate model comparison: simpler model vs complex model
        // Both models fit same data with similar R² but different complexity
        let n = 100;

        // Simple model (2 coefficients): worse fit, but smaller complexity penalty
        let ll_simple = -150.0;
        let aic_simple = aic(ll_simple, 2);
        let bic_simple = bic(ll_simple, 2, n);

        // Complex model (5 coefficients): slightly better fit but more parameters
        let ll_complex = -148.0; // Better fit (less negative)
        let aic_complex = aic(ll_complex, 5);
        let bic_complex = bic(ll_complex, 5, n);

        // With k = n_coef + 1: simple AIC = 2*3 + 300 = 306,
        // complex AIC = 2*6 + 296 = 308, so the simpler model wins
        assert!(aic_simple < aic_complex);

        // BIC more heavily penalizes complexity, so simple should win
        assert!(bic_simple < bic_complex);
    }

    #[test]
    fn test_log_likelihood_scale_invariance() {
        // Log-likelihood scales with sample size for same per-observation fit quality
        let ssr_per_obs = 1.0;

        let n1 = 50;
        let ssr1 = ssr_per_obs * n1 as f64;
        let ll1 = log_likelihood(n1, ssr1 / (n1 as f64 - 2.0), ssr1);

        let n2 = 100;
        let ssr2 = ssr_per_obs * n2 as f64;
        let ll2 = log_likelihood(n2, ssr2 / (n2 as f64 - 2.0), ssr2);

        // The log-likelihood should become more negative with larger n for the same SSR/n ratio
        // because -n/2 * ln(2*pi*SSR/n) - n/2 becomes more negative as n increases
        assert!(ll2 < ll1);

        // But when normalized by n, they should be similar
        let ll_per_obs1 = ll1 / n1 as f64;
        let ll_per_obs2 = ll2 / n2 as f64;
        assert!((ll_per_obs1 - ll_per_obs2).abs() < 0.1);
    }
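
    // Additional sanity check (a sketch, not in the original suite): two
    // exactly orthogonal predictors have zero pairwise correlation, so both
    // VIFs should be 1 (no variance inflation) up to round-off.
    #[test]
    fn test_calculate_vif_orthogonal_predictors() {
        let x1 = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        // Chosen so that the centered dot product with x1 is exactly zero.
        let x2 = vec![4.0, 1.0, 3.0, 5.0, 2.0];
        let names = vec!["Intercept".to_string(), "X1".to_string(), "X2".to_string()];

        let vif_results = calculate_vif(&[x1, x2], &names, 5);
        assert_eq!(vif_results.len(), 2);
        for r in &vif_results {
            assert!((r.vif - 1.0).abs() < 1e-6);
        }
    }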

    #[test]
    fn test_regularized_regression_has_model_selection_criteria() {
        // Test that Ridge regression also calculates AIC/BIC/log_likelihood
        let y = vec![2.0, 4.0, 6.0, 8.0, 10.0];
        let x_data = vec![1.0, 1.0, 1.0, 2.0, 1.0, 3.0, 1.0, 4.0, 1.0, 5.0];
        let x = crate::linalg::Matrix::new(5, 2, x_data);

        let options = crate::regularized::ridge::RidgeFitOptions {
            lambda: 0.1,
            intercept: true,
            standardize: false,
            ..Default::default()
        };

        let fit = crate::regularized::ridge::ridge_fit(&x, &y, &options).unwrap();

        assert!(fit.log_likelihood.is_finite());
        assert!(fit.aic.is_finite());
        assert!(fit.bic.is_finite());
    }

    #[test]
    fn test_elastic_net_regression_has_model_selection_criteria() {
        // Test that Elastic Net regression also calculates AIC/BIC/log_likelihood
        let y = vec![2.0, 4.0, 6.0, 8.0, 10.0];
        let x_data = vec![1.0, 1.0, 1.0, 2.0, 1.0, 3.0, 1.0, 4.0, 1.0, 5.0];
        let x = crate::linalg::Matrix::new(5, 2, x_data);

        let options = crate::regularized::elastic_net::ElasticNetOptions {
            lambda: 0.1,
            alpha: 0.5,
            intercept: true,
            standardize: false,
            ..Default::default()
        };

        let fit = crate::regularized::elastic_net::elastic_net_fit(&x, &y, &options).unwrap();

        assert!(fit.log_likelihood.is_finite());
        assert!(fit.aic.is_finite());
        assert!(fit.bic.is_finite());
    }
}