// fdars_core/scalar_on_function.rs
1//! Scalar-on-function regression with mixed scalar/functional covariates.
2//!
3//! Implements models of the form:
4//! ```text
5//! y = α + ∫β(t)X(t)dt + γᵀz + ε
6//! ```
7//! where X(t) is a functional predictor, z is a vector of scalar covariates,
8//! β(t) is the functional coefficient, and γ is the vector of scalar coefficients.
9//!
10//! # Methods
11//!
12//! - [`fregre_lm`]: FPC-based functional linear model with optional scalar covariates
13//! - [`fregre_np_mixed`]: Nonparametric kernel regression with product kernels
14//! - [`functional_logistic`]: Logistic regression for binary outcomes
15//! - [`fregre_cv`]: Cross-validation for number of FPC components
16
17use crate::iter_maybe_parallel;
18use crate::matrix::FdMatrix;
19use crate::regression::{fdata_to_pc_1d, FpcaResult};
20use rand::prelude::*;
21#[cfg(feature = "parallel")]
22use rayon::iter::ParallelIterator;
23
24// ---------------------------------------------------------------------------
25// Result types
26// ---------------------------------------------------------------------------
27
/// Result of functional linear regression.
///
/// Produced by [`fregre_lm`]; holds the recovered functional coefficient,
/// scalar coefficients, fit diagnostics, and the FPCA basis needed by
/// [`predict_fregre_lm`] to score new observations.
pub struct FregreLmResult {
    /// Intercept α
    pub intercept: f64,
    /// Functional coefficient β(t), evaluated on the original grid (length m)
    pub beta_t: Vec<f64>,
    /// Pointwise standard errors of β(t) (length m)
    pub beta_se: Vec<f64>,
    /// Scalar coefficients γ (one per scalar covariate)
    pub gamma: Vec<f64>,
    /// Fitted values ŷ (length n)
    pub fitted_values: Vec<f64>,
    /// Residuals y - ŷ (length n)
    pub residuals: Vec<f64>,
    /// R² statistic
    pub r_squared: f64,
    /// Adjusted R²
    pub r_squared_adj: f64,
    /// Standard errors of all coefficients (intercept, FPC scores, scalar covariates)
    pub std_errors: Vec<f64>,
    /// Number of FPC components used
    pub ncomp: usize,
    /// FPCA result (for projecting new data)
    pub fpca: FpcaResult,
    /// Regression coefficients on (FPC scores, scalar covariates) — internal
    pub coefficients: Vec<f64>,
    /// Residual standard error
    pub residual_se: f64,
    /// GCV criterion value (if computed)
    pub gcv: f64,
}
59
/// Result of nonparametric functional regression with mixed predictors.
///
/// Produced by [`fregre_np_mixed`]. Fitted values are leave-one-out
/// predictions, so `cv_error` is an out-of-sample estimate.
pub struct FregreNpResult {
    /// Fitted values ŷ (length n)
    pub fitted_values: Vec<f64>,
    /// Residuals y - ŷ (length n)
    pub residuals: Vec<f64>,
    /// R² statistic
    pub r_squared: f64,
    /// Bandwidth for functional distance kernel
    pub h_func: f64,
    /// Bandwidth for scalar covariates kernel
    pub h_scalar: f64,
    /// Leave-one-out CV error
    pub cv_error: f64,
}
75
/// Result of functional logistic regression.
///
/// Holds the recovered functional coefficient, per-observation predicted
/// probabilities and classes, IRLS convergence information, and the FPCA
/// basis for projecting new data.
pub struct FunctionalLogisticResult {
    /// Intercept α
    pub intercept: f64,
    /// Functional coefficient β(t), evaluated on the original grid (length m)
    pub beta_t: Vec<f64>,
    /// Pointwise standard errors of β(t) (length m)
    pub beta_se: Vec<f64>,
    /// Scalar coefficients γ (one per scalar covariate)
    pub gamma: Vec<f64>,
    /// Predicted probabilities P(Y=1) (length n)
    pub probabilities: Vec<f64>,
    /// Predicted class labels (0 or 1)
    pub predicted_classes: Vec<u8>,
    /// Number of FPC components used
    pub ncomp: usize,
    /// Classification accuracy on training data
    pub accuracy: f64,
    /// Standard errors of all coefficients (intercept, FPC scores, scalar covariates)
    pub std_errors: Vec<f64>,
    /// Regression coefficients on (FPC scores, scalar covariates) — internal
    pub coefficients: Vec<f64>,
    /// Log-likelihood at convergence
    pub log_likelihood: f64,
    /// Number of IRLS iterations
    pub iterations: usize,
    /// FPCA result (for projecting new data)
    pub fpca: FpcaResult,
}
105
/// Result of cross-validation for K selection.
///
/// Produced by [`fregre_cv`]. `k_values` and `cv_errors` are parallel
/// vectors; K values whose folds all failed are omitted from both.
pub struct FregreCvResult {
    /// Candidate K values tested
    pub k_values: Vec<usize>,
    /// CV error for each K
    pub cv_errors: Vec<f64>,
    /// Optimal K (minimizing CV error)
    pub optimal_k: usize,
    /// Minimum CV error
    pub min_cv_error: f64,
}
117
118// ---------------------------------------------------------------------------
119// Shared linear algebra helpers
120// ---------------------------------------------------------------------------
121
122/// Compute X'X (symmetric, p×p stored flat row-major).
123pub(crate) fn compute_xtx(x: &FdMatrix) -> Vec<f64> {
124    let (n, p) = x.shape();
125    let mut xtx = vec![0.0; p * p];
126    for k in 0..p {
127        for j in k..p {
128            let mut s = 0.0;
129            for i in 0..n {
130                s += x[(i, k)] * x[(i, j)];
131            }
132            xtx[k * p + j] = s;
133            xtx[j * p + k] = s;
134        }
135    }
136    xtx
137}
138
139/// Compute X'y (length p).
140fn compute_xty(x: &FdMatrix, y: &[f64]) -> Vec<f64> {
141    let (n, p) = x.shape();
142    (0..p)
143        .map(|k| {
144            let mut s = 0.0;
145            for i in 0..n {
146                s += x[(i, k)] * y[i];
147            }
148            s
149        })
150        .collect()
151}
152
/// Cholesky factorization: A = LL'. Returns L (p×p flat row-major) or None if singular.
///
/// Rejects matrices whose pivot falls at or below 1e-12, which covers both
/// singular and (numerically) non-positive-definite inputs.
pub(crate) fn cholesky_factor(a: &[f64], p: usize) -> Option<Vec<f64>> {
    let mut l = vec![0.0; p * p];
    for j in 0..p {
        // Pivot: a_jj minus the squared entries already placed in row j.
        let sum_sq: f64 = (0..j).map(|k| l[j * p + k] * l[j * p + k]).sum();
        let pivot = a[j * p + j] - sum_sq;
        if pivot <= 1e-12 {
            return None;
        }
        let ljj = pivot.sqrt();
        l[j * p + j] = ljj;
        // Fill the sub-diagonal entries of column j.
        for i in (j + 1)..p {
            let dot: f64 = (0..j).map(|k| l[i * p + k] * l[j * p + k]).sum();
            l[i * p + j] = (a[i * p + j] - dot) / ljj;
        }
    }
    Some(l)
}
175
/// Solve Lz = b (forward) then L'x = z (back). L is p×p flat row-major.
pub(crate) fn cholesky_forward_back(l: &[f64], b: &[f64], p: usize) -> Vec<f64> {
    let mut x = b.to_vec();
    // Forward substitution against L.
    for row in 0..p {
        for k in 0..row {
            x[row] -= l[row * p + k] * x[k];
        }
        x[row] /= l[row * p + row];
    }
    // Back substitution against L', reading L through the transposed index.
    for row in (0..p).rev() {
        for k in (row + 1)..p {
            x[row] -= l[k * p + row] * x[k];
        }
        x[row] /= l[row * p + row];
    }
    x
}
193
194/// Solve Ax = b via Cholesky decomposition (A must be symmetric positive definite).
195fn cholesky_solve(a: &[f64], b: &[f64], p: usize) -> Option<Vec<f64>> {
196    let l = cholesky_factor(a, p)?;
197    Some(cholesky_forward_back(&l, b, p))
198}
199
/// Compute hat matrix diagonal: H_ii = x_i' (X'X)^{-1} x_i, given Cholesky factor L of X'X.
///
/// Uses the identity (X'X)^{-1} = L'^{-1} L^{-1}: forward-solving L v = x_i
/// gives H_ii = ||v||², so no explicit matrix inverse is ever formed.
pub(crate) fn compute_hat_diagonal(x: &FdMatrix, l: &[f64]) -> Vec<f64> {
    let (n, p) = x.shape();
    let mut hat_diag = vec![0.0; n];
    for i in 0..n {
        // Forward substitution: v = L^{-1} x_i.
        let mut v = vec![0.0; p];
        for j in 0..p {
            v[j] = x[(i, j)];
            for k in 0..j {
                v[j] -= l[j * p + k] * v[k];
            }
            v[j] /= l[j * p + j];
        }
        // Leverage of observation i is the squared norm of v.
        hat_diag[i] = v.iter().map(|vi| vi * vi).sum();
    }
    hat_diag
}
217
/// Compute diagonal of (X'X)^{-1} given Cholesky factor L, then SE = sqrt(sigma² * diag).
///
/// diag_j of (X'X)^{-1} equals ||L^{-1} e_j||², obtained by one forward
/// substitution per coefficient.
fn compute_ols_std_errors(l: &[f64], p: usize, sigma2: f64) -> Vec<f64> {
    (0..p)
        .map(|j| {
            // Forward-solve L v = e_j.
            let mut v = vec![0.0; p];
            v[j] = 1.0;
            for row in 0..p {
                for col in 0..row {
                    v[row] -= l[row * p + col] * v[col];
                }
                v[row] /= l[row * p + row];
            }
            let diag_j: f64 = v.iter().map(|vi| vi * vi).sum();
            (sigma2 * diag_j).sqrt()
        })
        .collect()
}
234
235// ---------------------------------------------------------------------------
236// Design matrix and coefficient recovery
237// ---------------------------------------------------------------------------
238
/// Validate inputs for fregre_lm / functional_logistic.
///
/// Requires at least 3 observations, a non-empty grid, a response of
/// matching length, and (when supplied) scalar covariates with one row per
/// observation. Returns `None` on any violation.
fn validate_fregre_inputs(
    n: usize,
    m: usize,
    y: &[f64],
    scalar_covariates: Option<&FdMatrix>,
) -> Option<()> {
    if n < 3 || m == 0 || y.len() != n {
        return None;
    }
    if let Some(sc) = scalar_covariates {
        // Scalar covariate rows must align one-to-one with the curves.
        if sc.nrows() != n {
            return None;
        }
    }
    Some(())
}
257
258/// Resolve ncomp: auto-select via CV if 0, otherwise clamp.
259fn resolve_ncomp(
260    ncomp: usize,
261    data: &FdMatrix,
262    y: &[f64],
263    scalar_covariates: Option<&FdMatrix>,
264    n: usize,
265    m: usize,
266) -> Option<usize> {
267    if ncomp == 0 {
268        let cv = fregre_cv(data, y, scalar_covariates, 1, m.min(n - 1).min(20), 5)?;
269        Some(cv.optimal_k)
270    } else {
271        Some(ncomp.min(n - 1).min(m))
272    }
273}
274
/// Build design matrix: \[1, ξ_1, ..., ξ_K, z_1, ..., z_p\].
///
/// Column 0 is the intercept, columns 1..=K hold the FPC scores, and any
/// remaining columns hold the optional scalar covariates, in order.
pub(crate) fn build_design_matrix(
    fpca_scores: &FdMatrix,
    ncomp: usize,
    scalar_covariates: Option<&FdMatrix>,
    n: usize,
) -> FdMatrix {
    let p_scalar = scalar_covariates.map_or(0, |sc| sc.ncols());
    let p_total = 1 + ncomp + p_scalar;
    let mut design = FdMatrix::zeros(n, p_total);
    for i in 0..n {
        // Intercept column.
        design[(i, 0)] = 1.0;
        // FPC score columns.
        for k in 0..ncomp {
            design[(i, 1 + k)] = fpca_scores[(i, k)];
        }
        // Trailing scalar covariate columns, if any.
        if let Some(sc) = scalar_covariates {
            for j in 0..p_scalar {
                design[(i, 1 + ncomp + j)] = sc[(i, j)];
            }
        }
    }
    design
}
297
298/// Recover functional coefficient β(t) = Σ_k γ_k φ_k(t).
299fn recover_beta_t(fpc_coeffs: &[f64], rotation: &FdMatrix, m: usize) -> Vec<f64> {
300    let ncomp = fpc_coeffs.len();
301    let mut beta_t = vec![0.0; m];
302    for k in 0..ncomp {
303        for j in 0..m {
304            beta_t[j] += fpc_coeffs[k] * rotation[(j, k)];
305        }
306    }
307    beta_t
308}
309
310/// Pointwise standard error of β(t) via error propagation through FPCA rotation.
311///
312/// SE[β(t_j)]² = Σ_k φ_k(t_j)² · SE[γ_k]²
313fn compute_beta_se(gamma_se: &[f64], rotation: &FdMatrix, m: usize) -> Vec<f64> {
314    let ncomp = gamma_se.len();
315    let mut beta_se = vec![0.0; m];
316    for j in 0..m {
317        let mut var_j = 0.0;
318        for k in 0..ncomp {
319            var_j += rotation[(j, k)].powi(2) * gamma_se[k].powi(2);
320        }
321        beta_se[j] = var_j.sqrt();
322    }
323    beta_se
324}
325
326/// Compute fitted values ŷ = X β.
327fn compute_fitted(design: &FdMatrix, coeffs: &[f64]) -> Vec<f64> {
328    let (n, p) = design.shape();
329    (0..n)
330        .map(|i| {
331            let mut yhat = 0.0;
332            for j in 0..p {
333                yhat += design[(i, j)] * coeffs[j];
334            }
335            yhat
336        })
337        .collect()
338}
339
/// Compute R² and adjusted R².
///
/// A constant response (zero total sum of squares) yields R² = 0, and the
/// adjustment is skipped when the residual degrees of freedom vanish.
fn compute_r_squared(y: &[f64], residuals: &[f64], p_total: usize) -> (f64, f64) {
    let n = y.len() as f64;
    let y_mean = y.iter().sum::<f64>() / n;
    let ss_tot: f64 = y.iter().map(|&yi| (yi - y_mean).powi(2)).sum();
    let ss_res: f64 = residuals.iter().map(|r| r * r).sum();
    let r_squared = if ss_tot > 0.0 { 1.0 - ss_res / ss_tot } else { 0.0 };
    // p_total counts the intercept, so the model uses p_total - 1 slopes.
    let df_model = (p_total - 1) as f64;
    let r_squared_adj = if n - df_model - 1.0 > 0.0 {
        1.0 - (1.0 - r_squared) * (n - 1.0) / (n - df_model - 1.0)
    } else {
        r_squared
    };
    (r_squared, r_squared_adj)
}
359
360// ---------------------------------------------------------------------------
361// OLS solver
362// ---------------------------------------------------------------------------
363
/// Solve ordinary least squares: min ||Xb - y||² via normal equations with Cholesky.
/// Returns (coefficients, hat_matrix_diagonal) or None if singular.
///
/// Requires n ≥ p ≥ 1 so the normal equations can be full rank.
fn ols_solve(x: &FdMatrix, y: &[f64]) -> Option<(Vec<f64>, Vec<f64>)> {
    let (n, p) = x.shape();
    if n < p || p == 0 {
        return None;
    }
    let xtx = compute_xtx(x);
    let xty = compute_xty(x, y);
    // A collinear design makes X'X singular; report failure to the caller.
    let l = cholesky_factor(&xtx, p)?;
    let b = cholesky_forward_back(&l, &xty, p);
    // The factor is reused for the leverages, avoiding a second factorization.
    let hat_diag = compute_hat_diagonal(x, &l);
    Some((b, hat_diag))
}
378
379// ---------------------------------------------------------------------------
380// fregre_lm: FPC-based functional linear model
381// ---------------------------------------------------------------------------
382
383/// Functional linear model with optional scalar covariates.
384///
385/// Fits the model: `y = α + Σ_k γ_k ξ_k + γ_z' z + ε`
386/// where ξ_k are FPC scores of the functional predictor X(t) and z are scalar covariates.
387/// The functional coefficient is recovered as `β(t) = Σ_k γ_k φ_k(t)`.
388///
389/// # Arguments
390/// * `data` - Functional predictor matrix (n × m)
391/// * `y` - Scalar response vector (length n)
392/// * `scalar_covariates` - Optional scalar covariates matrix (n × p), `None` for pure functional model
393/// * `ncomp` - Number of FPC components (if 0, selected by GCV)
394///
395/// # Returns
396/// [`FregreLmResult`] with estimated coefficients, fitted values, and diagnostics
397pub fn fregre_lm(
398    data: &FdMatrix,
399    y: &[f64],
400    scalar_covariates: Option<&FdMatrix>,
401    ncomp: usize,
402) -> Option<FregreLmResult> {
403    let (n, m) = data.shape();
404    validate_fregre_inputs(n, m, y, scalar_covariates)?;
405
406    let ncomp = resolve_ncomp(ncomp, data, y, scalar_covariates, n, m)?;
407
408    let fpca = fdata_to_pc_1d(data, ncomp)?;
409    let design = build_design_matrix(&fpca.scores, ncomp, scalar_covariates, n);
410    let p_total = design.ncols();
411    let (coeffs, hat_diag) = ols_solve(&design, y)?;
412
413    let fitted_values = compute_fitted(&design, &coeffs);
414    let residuals: Vec<f64> = y
415        .iter()
416        .zip(&fitted_values)
417        .map(|(&yi, &yh)| yi - yh)
418        .collect();
419    let (r_squared, r_squared_adj) = compute_r_squared(y, &residuals, p_total);
420
421    let ss_res: f64 = residuals.iter().map(|r| r * r).sum();
422    let df_resid = (n as f64 - p_total as f64).max(1.0);
423    let residual_se = (ss_res / df_resid).sqrt();
424    let sigma2 = ss_res / df_resid;
425
426    let xtx = compute_xtx(&design);
427    let l = cholesky_factor(&xtx, p_total).unwrap_or_else(|| vec![1.0; p_total * p_total]);
428    let std_errors = compute_ols_std_errors(&l, p_total, sigma2);
429
430    let gcv = hat_diag
431        .iter()
432        .zip(&residuals)
433        .map(|(&h, &r)| (r / (1.0 - h).max(1e-10)).powi(2))
434        .sum::<f64>()
435        / n as f64;
436
437    let beta_t = recover_beta_t(&coeffs[1..1 + ncomp], &fpca.rotation, m);
438    let beta_se = compute_beta_se(&std_errors[1..1 + ncomp], &fpca.rotation, m);
439    let gamma: Vec<f64> = coeffs[1 + ncomp..].to_vec();
440
441    Some(FregreLmResult {
442        intercept: coeffs[0],
443        beta_t,
444        beta_se,
445        gamma,
446        fitted_values,
447        residuals,
448        r_squared,
449        r_squared_adj,
450        std_errors,
451        ncomp,
452        fpca,
453        coefficients: coeffs,
454        residual_se,
455        gcv,
456    })
457}
458
459// ---------------------------------------------------------------------------
460// fregre_cv: Cross-validation for K selection
461// ---------------------------------------------------------------------------
462
463/// Copy a subset of rows from src into dst.
464fn copy_rows(dst: &mut FdMatrix, src: &FdMatrix, src_rows: &[usize]) {
465    let ncols = src.ncols();
466    for (dst_i, &src_i) in src_rows.iter().enumerate() {
467        for j in 0..ncols {
468            dst[(dst_i, j)] = src[(src_i, j)];
469        }
470    }
471}
472
/// Split data into train/test for a given fold.
///
/// The test set is the contiguous row range `[test_start, test_end)`;
/// all remaining rows form the training set. Scalar covariates, when
/// present, are partitioned with the same indices.
///
/// Returns `(train_data, train_y, test_data, test_y, train_sc, test_sc)`.
fn cv_split_fold(
    data: &FdMatrix,
    y: &[f64],
    scalar_covariates: Option<&FdMatrix>,
    test_start: usize,
    test_end: usize,
) -> (
    FdMatrix,
    Vec<f64>,
    FdMatrix,
    Vec<f64>,
    Option<FdMatrix>,
    Option<FdMatrix>,
) {
    let n = data.nrows();
    let ncols = data.ncols();

    // Training rows are everything outside the contiguous test window.
    let train_idx: Vec<usize> = (0..test_start).chain(test_end..n).collect();
    let test_idx: Vec<usize> = (test_start..test_end).collect();

    let mut train_data = FdMatrix::zeros(train_idx.len(), ncols);
    let mut test_data = FdMatrix::zeros(test_idx.len(), ncols);
    copy_rows(&mut train_data, data, &train_idx);
    copy_rows(&mut test_data, data, &test_idx);

    let train_y: Vec<f64> = train_idx.iter().map(|&i| y[i]).collect();
    let test_y: Vec<f64> = test_idx.iter().map(|&i| y[i]).collect();

    // Partition the optional scalar covariates with the same row indices.
    let train_sc = scalar_covariates.map(|sc| {
        let mut m = FdMatrix::zeros(train_idx.len(), sc.ncols());
        copy_rows(&mut m, sc, &train_idx);
        m
    });
    let test_sc = scalar_covariates.map(|sc| {
        let mut m = FdMatrix::zeros(test_idx.len(), sc.ncols());
        copy_rows(&mut m, sc, &test_idx);
        m
    });

    (train_data, train_y, test_data, test_y, train_sc, test_sc)
}
515
/// Compute CV error for a single K across all folds.
///
/// Folds are contiguous row blocks; the last fold absorbs the remainder of
/// the integer division. Folds whose training set is too small for K
/// components, or whose fit fails, are skipped. Returns the test-size
/// weighted mean squared prediction error, or `None` if every fold was
/// skipped.
fn cv_error_for_k(
    data: &FdMatrix,
    y: &[f64],
    scalar_covariates: Option<&FdMatrix>,
    k: usize,
    n_folds: usize,
) -> Option<f64> {
    let n = data.nrows();
    let fold_size = n / n_folds;
    let mut total_error = 0.0;
    let mut count = 0;

    for fold in 0..n_folds {
        let test_start = fold * fold_size;
        // The last fold takes any leftover rows.
        let test_end = if fold == n_folds - 1 {
            n
        } else {
            test_start + fold_size
        };
        let n_test = test_end - test_start;
        // Need at least k + 2 training rows to fit k components plus intercept.
        if n - n_test < k + 2 {
            continue;
        }

        let (train_data, train_y, test_data, test_y, train_sc, test_sc) =
            cv_split_fold(data, y, scalar_covariates, test_start, test_end);

        // A fold whose fit fails (e.g. singular design) is simply skipped.
        let fit = match fregre_lm(&train_data, &train_y, train_sc.as_ref(), k) {
            Some(f) => f,
            None => continue,
        };

        let predictions = predict_fregre_lm(&fit, &test_data, test_sc.as_ref());
        let fold_mse: f64 = predictions
            .iter()
            .zip(&test_y)
            .map(|(&yhat, &yi)| (yhat - yi).powi(2))
            .sum::<f64>()
            / n_test as f64;

        // Weight each fold by its size so the final average is per-observation.
        total_error += fold_mse * n_test as f64;
        count += n_test;
    }

    if count > 0 {
        Some(total_error / count as f64)
    } else {
        None
    }
}
567
568/// K-fold cross-validation for selecting the number of FPC components.
569///
570/// # Arguments
571/// * `data` - Functional predictor matrix (n × m)
572/// * `y` - Scalar response vector (length n)
573/// * `scalar_covariates` - Optional scalar covariates matrix
574/// * `k_min` - Minimum number of components to test
575/// * `k_max` - Maximum number of components to test
576/// * `n_folds` - Number of CV folds
577pub fn fregre_cv(
578    data: &FdMatrix,
579    y: &[f64],
580    scalar_covariates: Option<&FdMatrix>,
581    k_min: usize,
582    k_max: usize,
583    n_folds: usize,
584) -> Option<FregreCvResult> {
585    let n = data.nrows();
586    if n < n_folds || k_min < 1 || k_min > k_max {
587        return None;
588    }
589
590    let k_max = k_max.min(n - 2).min(data.ncols());
591
592    let mut k_values = Vec::new();
593    let mut cv_errors = Vec::new();
594
595    for k in k_min..=k_max {
596        if let Some(err) = cv_error_for_k(data, y, scalar_covariates, k, n_folds) {
597            k_values.push(k);
598            cv_errors.push(err);
599        }
600    }
601
602    if k_values.is_empty() {
603        return None;
604    }
605
606    let (optimal_idx, &min_cv_error) = cv_errors
607        .iter()
608        .enumerate()
609        .min_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
610        .unwrap();
611
612    Some(FregreCvResult {
613        k_values: k_values.clone(),
614        cv_errors,
615        optimal_k: k_values[optimal_idx],
616        min_cv_error,
617    })
618}
619
620/// Predict new responses using a fitted functional linear model.
621///
622/// # Arguments
623/// * `fit` - A fitted [`FregreLmResult`]
624/// * `new_data` - New functional predictor matrix (n_new × m)
625/// * `new_scalar` - Optional new scalar covariates (n_new × p)
626pub fn predict_fregre_lm(
627    fit: &FregreLmResult,
628    new_data: &FdMatrix,
629    new_scalar: Option<&FdMatrix>,
630) -> Vec<f64> {
631    let (n_new, m) = new_data.shape();
632    let ncomp = fit.ncomp;
633    let p_scalar = fit.gamma.len();
634
635    let mut predictions = vec![0.0; n_new];
636    for i in 0..n_new {
637        let mut yhat = fit.intercept;
638        for k in 0..ncomp {
639            let mut s = 0.0;
640            for j in 0..m {
641                s += (new_data[(i, j)] - fit.fpca.mean[j]) * fit.fpca.rotation[(j, k)];
642            }
643            yhat += fit.coefficients[1 + k] * s;
644        }
645        if let Some(sc) = new_scalar {
646            for j in 0..p_scalar {
647                yhat += fit.gamma[j] * sc[(i, j)];
648            }
649        }
650        predictions[i] = yhat;
651    }
652    predictions
653}
654
655// ---------------------------------------------------------------------------
656// Nonparametric kernel regression
657// ---------------------------------------------------------------------------
658
/// Gaussian kernel: K(d, h) = exp(-d² / (2h²))
fn gaussian_kernel(d: f64, h: f64) -> f64 {
    let scaled = d * d / (2.0 * h * h);
    (-scaled).exp()
}
663
664/// Compute symmetric pairwise distance matrix (flat n×n).
665fn compute_pairwise_distances(data: &FdMatrix, argvals: &[f64]) -> Vec<f64> {
666    let n = data.nrows();
667    let weights = crate::helpers::simpsons_weights(argvals);
668    let mut dists = vec![0.0; n * n];
669    for i in 0..n {
670        for j in (i + 1)..n {
671            let d = crate::helpers::l2_distance(&data.row(i), &data.row(j), &weights);
672            dists[i * n + j] = d;
673            dists[j * n + i] = d;
674        }
675    }
676    dists
677}
678
679/// Compute pairwise Euclidean distance matrix for scalar covariates.
680fn compute_scalar_distances(sc: &FdMatrix) -> Vec<f64> {
681    let n = sc.nrows();
682    let p = sc.ncols();
683    let mut dists = vec![0.0; n * n];
684    for i in 0..n {
685        for j in (i + 1)..n {
686            let mut d2 = 0.0;
687            for k in 0..p {
688                let diff = sc[(i, k)] - sc[(j, k)];
689                d2 += diff * diff;
690            }
691            let d = d2.sqrt();
692            dists[i * n + j] = d;
693            dists[j * n + i] = d;
694        }
695    }
696    dists
697}
698
699/// Nadaraya-Watson LOO prediction for one observation.
700fn nw_loo_predict(
701    i: usize,
702    n: usize,
703    y: &[f64],
704    func_dists: &[f64],
705    scalar_dists: &[f64],
706    h_func: f64,
707    h_scalar: f64,
708    has_scalar: bool,
709) -> f64 {
710    let mut num = 0.0;
711    let mut den = 0.0;
712    for j in 0..n {
713        if i == j {
714            continue;
715        }
716        let kf = gaussian_kernel(func_dists[i * n + j], h_func);
717        let ks = if has_scalar {
718            gaussian_kernel(scalar_dists[i * n + j], h_scalar)
719        } else {
720            1.0
721        };
722        let w = kf * ks;
723        num += w * y[j];
724        den += w;
725    }
726    if den > 1e-15 {
727        num / den
728    } else {
729        y[i]
730    }
731}
732
733/// LOO-CV error for Nadaraya-Watson with a single bandwidth.
734fn loo_cv_error(dists: &[f64], y: &[f64], n: usize, h: f64) -> f64 {
735    (0..n)
736        .map(|i| {
737            let mut num = 0.0;
738            let mut den = 0.0;
739            for j in 0..n {
740                if i == j {
741                    continue;
742                }
743                let w = gaussian_kernel(dists[i * n + j], h);
744                num += w * y[j];
745                den += w;
746            }
747            let yhat = if den > 1e-15 { num / den } else { y[i] };
748            (y[i] - yhat).powi(2)
749        })
750        .sum::<f64>()
751        / n as f64
752}
753
754/// Select bandwidth by minimizing LOO-CV error on a grid of distance quantiles.
755fn select_bandwidth_loo(dists: &[f64], y: &[f64], n: usize, _other_dists: Option<&[f64]>) -> f64 {
756    let mut nonzero_dists: Vec<f64> = (0..n)
757        .flat_map(|i| ((i + 1)..n).map(move |j| dists[i * n + j]))
758        .filter(|&d| d > 0.0)
759        .collect();
760    nonzero_dists.sort_by(|a, b| a.partial_cmp(b).unwrap());
761
762    if nonzero_dists.is_empty() {
763        return 1.0;
764    }
765
766    let n_cand = 20;
767    let mut best_h = nonzero_dists[nonzero_dists.len() / 2];
768    let mut best_cv = f64::INFINITY;
769
770    for qi in 1..=n_cand {
771        let q = qi as f64 / (n_cand + 1) as f64;
772        let idx = ((nonzero_dists.len() as f64 * q) as usize).min(nonzero_dists.len() - 1);
773        let h = nonzero_dists[idx].max(1e-10);
774        let cv = loo_cv_error(dists, y, n, h);
775        if cv < best_cv {
776            best_cv = cv;
777            best_h = h;
778        }
779    }
780    best_h
781}
782
/// Nonparametric kernel regression with mixed functional and scalar predictors.
///
/// Uses product kernels:
/// ```text
/// ŷ(x, z) = Σᵢ K_func(Xᵢ, x) · K_scalar(zᵢ, z) · yᵢ / Σᵢ K_func(Xᵢ, x) · K_scalar(zᵢ, z)
/// ```
///
/// Bandwidths are selected via leave-one-out CV if set to 0.
///
/// # Arguments
/// * `data` - Functional predictor matrix (n × m)
/// * `y` - Scalar response vector
/// * `argvals` - Grid points for integration (length m)
/// * `scalar_covariates` - Optional scalar covariates (n × p)
/// * `h_func` - Bandwidth for functional kernel (0 for automatic)
/// * `h_scalar` - Bandwidth for scalar kernel (0 for automatic)
pub fn fregre_np_mixed(
    data: &FdMatrix,
    y: &[f64],
    argvals: &[f64],
    scalar_covariates: Option<&FdMatrix>,
    h_func: f64,
    h_scalar: f64,
) -> Option<FregreNpResult> {
    let n = data.nrows();
    if n < 3 || data.ncols() == 0 || y.len() != n || argvals.len() != data.ncols() {
        return None;
    }

    // Precompute both distance matrices once; bandwidth search and fitting
    // below reuse them.
    let func_dists = compute_pairwise_distances(data, argvals);
    let has_scalar = scalar_covariates.is_some();
    let scalar_dists = scalar_covariates
        .map(compute_scalar_distances)
        .unwrap_or_default();

    // Non-positive bandwidths request automatic LOO-CV selection.
    let h_func = if h_func <= 0.0 {
        select_bandwidth_loo(&func_dists, y, n, None)
    } else {
        h_func
    };

    let h_scalar = if has_scalar && h_scalar <= 0.0 {
        select_bandwidth_loo(&scalar_dists, y, n, Some(&func_dists))
    } else {
        h_scalar
    };

    // Fitted values are leave-one-out predictions, so cv_error is an honest
    // out-of-sample estimate.
    let mut fitted_values = vec![0.0; n];
    let mut cv_error = 0.0;
    for i in 0..n {
        fitted_values[i] = nw_loo_predict(
            i,
            n,
            y,
            &func_dists,
            &scalar_dists,
            h_func,
            h_scalar,
            has_scalar,
        );
        cv_error += (y[i] - fitted_values[i]).powi(2);
    }
    cv_error /= n as f64;

    let residuals: Vec<f64> = y
        .iter()
        .zip(&fitted_values)
        .map(|(&yi, &yh)| yi - yh)
        .collect();
    // p_total = 1: no parametric coefficients; the adjusted R² is discarded.
    let (r_squared, _) = compute_r_squared(y, &residuals, 1);

    Some(FregreNpResult {
        fitted_values,
        residuals,
        r_squared,
        h_func,
        h_scalar,
        cv_error,
    })
}
863
/// Predict new responses using a fitted nonparametric model.
///
/// Recomputes Nadaraya-Watson weights between each new curve and every
/// training curve with the same product-kernel form as [`fregre_np_mixed`].
/// The scalar kernel contributes only when both `train_scalar` and
/// `new_scalar` are supplied. If all weights underflow for a new
/// observation, its prediction is 0.0.
///
/// # Arguments
/// * `train_data` - Training functional predictors (n_train × m)
/// * `y` - Training responses (length n_train)
/// * `train_scalar` - Optional training scalar covariates
/// * `new_data` - New functional predictors (n_new × m)
/// * `new_scalar` - Optional new scalar covariates
/// * `argvals` - Grid points for integration (length m)
/// * `h_func` - Bandwidth for the functional kernel
/// * `h_scalar` - Bandwidth for the scalar kernel
pub fn predict_fregre_np(
    train_data: &FdMatrix,
    y: &[f64],
    train_scalar: Option<&FdMatrix>,
    new_data: &FdMatrix,
    new_scalar: Option<&FdMatrix>,
    argvals: &[f64],
    h_func: f64,
    h_scalar: f64,
) -> Vec<f64> {
    let n_train = train_data.nrows();
    let n_new = new_data.nrows();
    let weights = crate::helpers::simpsons_weights(argvals);

    (0..n_new)
        .map(|i| {
            let new_row = new_data.row(i);
            let mut num = 0.0;
            let mut den = 0.0;
            for j in 0..n_train {
                // Functional kernel on the weighted L² distance between curves.
                let d_func = crate::helpers::l2_distance(&new_row, &train_data.row(j), &weights);
                let kf = gaussian_kernel(d_func, h_func);
                // Scalar kernel only when both sides provide covariates.
                let ks = match (new_scalar, train_scalar) {
                    (Some(ns), Some(ts)) => {
                        let d2: f64 = (0..ns.ncols())
                            .map(|k| (ns[(i, k)] - ts[(j, k)]).powi(2))
                            .sum();
                        gaussian_kernel(d2.sqrt(), h_scalar)
                    }
                    _ => 1.0,
                };
                let w = kf * ks;
                num += w * y[j];
                den += w;
            }
            if den > 1e-15 {
                num / den
            } else {
                // No training curve carries weight; no LOO fallback exists here.
                0.0
            }
        })
        .collect()
}
908
909// ---------------------------------------------------------------------------
910// Functional logistic regression
911// ---------------------------------------------------------------------------
912
913/// One IRLS step: compute working response and solve weighted least squares.
914/// Returns updated beta or None if system is singular.
915fn irls_step(design: &FdMatrix, y: &[f64], beta: &[f64]) -> Option<Vec<f64>> {
916    let (n, p) = design.shape();
917
918    // Linear predictor η = Xβ, probabilities μ = sigmoid(η)
919    let eta: Vec<f64> = (0..n)
920        .map(|i| (0..p).map(|j| design[(i, j)] * beta[j]).sum())
921        .collect();
922    let mu: Vec<f64> = eta.iter().map(|&e| sigmoid(e)).collect();
923    let w: Vec<f64> = mu.iter().map(|&p| (p * (1.0 - p)).max(1e-10)).collect();
924    let z_work: Vec<f64> = (0..n).map(|i| eta[i] + (y[i] - mu[i]) / w[i]).collect();
925
926    // Weighted normal equations: (X'WX) β = X'Wz
927    let mut xtwx = vec![0.0; p * p];
928    for k in 0..p {
929        for j in k..p {
930            let mut s = 0.0;
931            for i in 0..n {
932                s += design[(i, k)] * w[i] * design[(i, j)];
933            }
934            xtwx[k * p + j] = s;
935            xtwx[j * p + k] = s;
936        }
937    }
938
939    let mut xtwz = vec![0.0; p];
940    for k in 0..p {
941        let mut s = 0.0;
942        for i in 0..n {
943            s += design[(i, k)] * w[i] * z_work[i];
944        }
945        xtwz[k] = s;
946    }
947
948    cholesky_solve(&xtwx, &xtwz, p)
949}
950
/// Compute log-likelihood of logistic model.
///
/// Probabilities are clamped away from {0, 1} so both logarithms stay finite.
fn logistic_log_likelihood(probabilities: &[f64], y: &[f64]) -> f64 {
    let mut ll = 0.0;
    for (&prob, &yi) in probabilities.iter().zip(y) {
        let prob = prob.clamp(1e-15, 1.0 - 1e-15);
        ll += yi * prob.ln() + (1.0 - yi) * (1.0 - prob).ln();
    }
    ll
}
962
/// Sigmoid function: 1 / (1 + exp(-x))
///
/// Branches on the sign of x so the exponential argument is never positive,
/// avoiding overflow for large |x|.
pub(crate) fn sigmoid(x: f64) -> f64 {
    if x < 0.0 {
        let ex = x.exp();
        ex / (1.0 + ex)
    } else {
        1.0 / (1.0 + (-x).exp())
    }
}
972
973/// Run IRLS iteration loop and return (beta, iterations).
974fn irls_loop(design: &FdMatrix, y: &[f64], max_iter: usize, tol: f64) -> (Vec<f64>, usize) {
975    let p_total = design.ncols();
976    let mut beta = vec![0.0; p_total];
977    let mut iterations = 0;
978    for iter in 0..max_iter {
979        iterations = iter + 1;
980        let beta_new = match irls_step(design, y, &beta) {
981            Some(b) => b,
982            None => break,
983        };
984        let change: f64 = beta_new
985            .iter()
986            .zip(&beta)
987            .map(|(a, b)| (a - b).abs())
988            .fold(0.0_f64, f64::max);
989        beta = beta_new;
990        if change < tol {
991            break;
992        }
993    }
994    (beta, iterations)
995}
996
/// Assemble a [`FunctionalLogisticResult`] from a converged IRLS fit.
///
/// `beta` is laid out as `[intercept, FPC-score coefficients (ncomp),
/// scalar-covariate coefficients]`. The functional coefficient β(t) and its
/// pointwise standard errors are mapped back onto the original grid of
/// length `m` through the FPCA rotation.
fn build_logistic_result(
    design: &FdMatrix,
    beta: Vec<f64>,
    y: &[f64],
    fpca: FpcaResult,
    ncomp: usize,
    m: usize,
    iterations: usize,
) -> FunctionalLogisticResult {
    let (n, p) = design.shape();
    // Fitted linear predictor η = Xβ and probabilities μ = σ(η).
    let eta = compute_fitted(design, &beta);
    let probabilities: Vec<f64> = eta.iter().map(|&e| sigmoid(e)).collect();
    // Hard classification at the conventional 0.5 threshold.
    let predicted_classes: Vec<u8> = probabilities
        .iter()
        .map(|&p| if p >= 0.5 { 1 } else { 0 })
        .collect();
    let correct: usize = predicted_classes
        .iter()
        .zip(y)
        .filter(|(&pred, &actual)| pred as f64 == actual)
        .count();
    // FPC-score coefficients rotate back to β(t); the remaining entries
    // (after intercept + ncomp scores) are the scalar coefficients γ.
    let beta_t = recover_beta_t(&beta[1..1 + ncomp], &fpca.rotation, m);
    let gamma: Vec<f64> = beta[1 + ncomp..].to_vec();

    // Compute coefficient SEs from Fisher information matrix (X'WX)^{-1},
    // with IRLS weights w = μ(1−μ) at the fitted probabilities, floored so
    // the matrix stays positive definite.
    let w: Vec<f64> = probabilities
        .iter()
        .map(|&mu| (mu * (1.0 - mu)).max(1e-10))
        .collect();
    let mut xtwx = vec![0.0; p * p];
    for k in 0..p {
        for j in k..p {
            let mut s = 0.0;
            for i in 0..n {
                s += design[(i, k)] * w[i] * design[(i, j)];
            }
            xtwx[k * p + j] = s;
            xtwx[j * p + k] = s;
        }
    }
    // Dispersion is fixed at 1.0 (binomial family); if the information
    // matrix is not positive definite, report NaN standard errors.
    let std_errors = cholesky_factor(&xtwx, p)
        .map(|l| compute_ols_std_errors(&l, p, 1.0))
        .unwrap_or_else(|| vec![f64::NAN; p]);
    let beta_se = compute_beta_se(&std_errors[1..1 + ncomp], &fpca.rotation, m);

    FunctionalLogisticResult {
        intercept: beta[0],
        beta_t,
        beta_se,
        gamma,
        accuracy: correct as f64 / n as f64,
        log_likelihood: logistic_log_likelihood(&probabilities, y),
        probabilities,
        predicted_classes,
        ncomp,
        std_errors,
        coefficients: beta,
        iterations,
        fpca,
    }
}
1059
1060/// Functional logistic regression for binary outcomes.
1061///
1062/// Fits: `log(P(Y=1)/P(Y=0)) = α + ∫β(t)X(t)dt + γᵀz`
1063/// using IRLS (iteratively reweighted least squares) on FPC scores.
1064///
1065/// # Arguments
1066/// * `data` - Functional predictor matrix (n × m)
1067/// * `y` - Binary response vector (0.0 or 1.0, length n)
1068/// * `scalar_covariates` - Optional scalar covariates (n × p)
1069/// * `ncomp` - Number of FPC components
1070/// * `max_iter` - Maximum IRLS iterations (default: 25)
1071/// * `tol` - Convergence tolerance (default: 1e-6)
1072pub fn functional_logistic(
1073    data: &FdMatrix,
1074    y: &[f64],
1075    scalar_covariates: Option<&FdMatrix>,
1076    ncomp: usize,
1077    max_iter: usize,
1078    tol: f64,
1079) -> Option<FunctionalLogisticResult> {
1080    let (n, m) = data.shape();
1081    if n < 3 || m == 0 || y.len() != n {
1082        return None;
1083    }
1084    if y.iter().any(|&yi| yi != 0.0 && yi != 1.0) {
1085        return None;
1086    }
1087
1088    let ncomp = ncomp.min(n - 1).min(m);
1089    let fpca = fdata_to_pc_1d(data, ncomp)?;
1090    let design = build_design_matrix(&fpca.scores, ncomp, scalar_covariates, n);
1091
1092    let max_iter = if max_iter == 0 { 25 } else { max_iter };
1093    let tol = if tol <= 0.0 { 1e-6 } else { tol };
1094
1095    let (beta, iterations) = irls_loop(&design, y, max_iter, tol);
1096    Some(build_logistic_result(
1097        &design, beta, y, fpca, ncomp, m, iterations,
1098    ))
1099}
1100
1101// ---------------------------------------------------------------------------
1102// Bootstrap CIs for β(t)
1103// ---------------------------------------------------------------------------
1104
/// Result of bootstrap confidence intervals for β(t).
///
/// Produced by [`bootstrap_ci_fregre_lm`] and
/// [`bootstrap_ci_functional_logistic`] via the cases (row-resampling)
/// bootstrap. All vectors are evaluated on the original grid.
pub struct BootstrapCiResult {
    /// Pointwise lower bound (length m).
    pub lower: Vec<f64>,
    /// Pointwise upper bound (length m).
    pub upper: Vec<f64>,
    /// Original β(t) estimate (length m), the center of both bands.
    pub center: Vec<f64>,
    /// Simultaneous lower bound (sup-norm adjusted, length m).
    pub sim_lower: Vec<f64>,
    /// Simultaneous upper bound (sup-norm adjusted, length m).
    pub sim_upper: Vec<f64>,
    /// Number of bootstrap replicates whose refit succeeded.
    pub n_boot_success: usize,
}
1120
1121/// Gather rows from `src` by index (with replacement), returning a new matrix.
1122fn subsample_rows(src: &FdMatrix, indices: &[usize]) -> FdMatrix {
1123    let ncols = src.ncols();
1124    let mut out = FdMatrix::zeros(indices.len(), ncols);
1125    for (dst_i, &src_i) in indices.iter().enumerate() {
1126        for j in 0..ncols {
1127            out[(dst_i, j)] = src[(src_i, j)];
1128        }
1129    }
1130    out
1131}
1132
/// Linearly interpolated q-th quantile of an ascending-sorted slice.
///
/// `q` is clamped to `[0, 1]`: previously a negative `q` produced a negative
/// interpolation fraction and extrapolated *below* the minimum (and `q > 1`
/// relied on the index saturating), so out-of-range requests now return the
/// minimum/maximum instead. Returns NaN for an empty slice.
fn quantile(sorted: &[f64], q: f64) -> f64 {
    let len = sorted.len();
    if len == 0 {
        return f64::NAN;
    }
    // Robustness: keep the interpolation position inside [0, len-1].
    let q = q.clamp(0.0, 1.0);
    let pos = q * (len - 1) as f64;
    let lo = pos.floor() as usize;
    let hi = lo + 1;
    // q == 1.0 lands exactly on the last element (also covers len == 1).
    if hi >= len {
        return sorted[len - 1];
    }
    let frac = pos - lo as f64;
    sorted[lo] * (1.0 - frac) + sorted[hi] * frac
}
1151
/// Bootstrap confidence intervals for β(t) from a functional linear model.
///
/// Uses cases bootstrap (resampling observation indices with replacement) to
/// build pointwise and simultaneous confidence bands for the functional
/// coefficient β(t).
///
/// # Arguments
/// * `data` — Functional predictor matrix (n × m)
/// * `y` — Scalar response vector (length n)
/// * `scalar_covariates` — Optional scalar covariates (n × p)
/// * `ncomp` — Number of FPC components
/// * `n_boot` — Number of bootstrap replicates
/// * `alpha` — Significance level (e.g., 0.05 for 95% CI)
/// * `seed` — RNG seed for reproducibility
///
/// Returns `None` on invalid inputs, when the original fit fails, or when
/// fewer than 3 bootstrap replicates refit successfully.
pub fn bootstrap_ci_fregre_lm(
    data: &FdMatrix,
    y: &[f64],
    scalar_covariates: Option<&FdMatrix>,
    ncomp: usize,
    n_boot: usize,
    alpha: f64,
    seed: u64,
) -> Option<BootstrapCiResult> {
    let (n, m) = data.shape();
    if n < 3 || m == 0 || y.len() != n || n_boot == 0 || alpha <= 0.0 || alpha >= 1.0 {
        return None;
    }

    // Fit original model; its β(t) is the center of every band below.
    let original = fregre_lm(data, y, scalar_covariates, ncomp)?;
    let center = original.beta_t.clone();

    // Bootstrap replicates. Each replicate derives its own RNG from
    // `seed + b`, so results are reproducible regardless of (parallel)
    // scheduling; replicates whose refit fails are dropped by filter_map.
    let boot_betas: Vec<Vec<f64>> = iter_maybe_parallel!(0..n_boot)
        .filter_map(|b| {
            let mut rng = StdRng::seed_from_u64(seed.wrapping_add(b as u64));
            let indices: Vec<usize> = (0..n).map(|_| rng.gen_range(0..n)).collect();

            let boot_data = subsample_rows(data, &indices);
            let boot_y: Vec<f64> = indices.iter().map(|&i| y[i]).collect();
            let boot_sc = scalar_covariates.map(|sc| subsample_rows(sc, &indices));

            fregre_lm(&boot_data, &boot_y, boot_sc.as_ref(), ncomp).map(|fit| fit.beta_t)
        })
        .collect();

    let n_boot_success = boot_betas.len();
    if n_boot_success < 3 {
        return None;
    }

    // Pointwise bands: sort each grid point across replicates
    let lo_q = alpha / 2.0;
    let hi_q = 1.0 - alpha / 2.0;
    let mut lower = vec![0.0; m];
    let mut upper = vec![0.0; m];
    let mut boot_se = vec![0.0; m];

    for j in 0..m {
        let mut vals: Vec<f64> = boot_betas.iter().map(|b| b[j]).collect();
        vals.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        lower[j] = quantile(&vals, lo_q);
        upper[j] = quantile(&vals, hi_q);

        // Bootstrap SE at this grid point, floored so the studentized
        // sup-norm statistic below never divides by zero.
        let mean_j: f64 = vals.iter().sum::<f64>() / vals.len() as f64;
        let var_j: f64 =
            vals.iter().map(|&v| (v - mean_j).powi(2)).sum::<f64>() / (vals.len() - 1) as f64;
        boot_se[j] = var_j.sqrt().max(1e-15);
    }

    // Simultaneous bands: sup-norm bootstrap — per replicate, take the
    // maximum studentized deviation over the grid; the (1−α) quantile of
    // these maxima scales the band half-width.
    let mut t_stats: Vec<f64> = boot_betas
        .iter()
        .map(|b| {
            (0..m)
                .map(|j| ((b[j] - center[j]) / boot_se[j]).abs())
                .fold(0.0_f64, f64::max)
        })
        .collect();
    t_stats.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

    let c_alpha = quantile(&t_stats, 1.0 - alpha);
    let sim_lower: Vec<f64> = (0..m).map(|j| center[j] - c_alpha * boot_se[j]).collect();
    let sim_upper: Vec<f64> = (0..m).map(|j| center[j] + c_alpha * boot_se[j]).collect();

    Some(BootstrapCiResult {
        lower,
        upper,
        center,
        sim_lower,
        sim_upper,
        n_boot_success,
    })
}
1247
/// Bootstrap confidence intervals for β(t) from a functional logistic model.
///
/// Same algorithm as [`bootstrap_ci_fregre_lm`] but using [`functional_logistic`]
/// as the inner estimator. Degenerate resamples (all-0 or all-1 y) fail naturally.
///
/// # Arguments
/// * `data` — Functional predictor matrix (n × m)
/// * `y` — Binary response vector (0.0 or 1.0, length n)
/// * `scalar_covariates` — Optional scalar covariates (n × p)
/// * `ncomp` — Number of FPC components
/// * `n_boot` — Number of bootstrap replicates
/// * `alpha` — Significance level
/// * `seed` — RNG seed
/// * `max_iter` — Maximum IRLS iterations per replicate
/// * `tol` — IRLS convergence tolerance
///
/// Returns `None` on invalid inputs, when the original fit fails, or when
/// fewer than 3 bootstrap replicates refit successfully.
pub fn bootstrap_ci_functional_logistic(
    data: &FdMatrix,
    y: &[f64],
    scalar_covariates: Option<&FdMatrix>,
    ncomp: usize,
    n_boot: usize,
    alpha: f64,
    seed: u64,
    max_iter: usize,
    tol: f64,
) -> Option<BootstrapCiResult> {
    let (n, m) = data.shape();
    if n < 3 || m == 0 || y.len() != n || n_boot == 0 || alpha <= 0.0 || alpha >= 1.0 {
        return None;
    }

    // Fit original model; its β(t) centers both bands.
    let original = functional_logistic(data, y, scalar_covariates, ncomp, max_iter, tol)?;
    let center = original.beta_t.clone();

    // Bootstrap replicates: per-replicate RNG seeded from `seed + b` for
    // scheduling-independent reproducibility; failed refits (including
    // degenerate all-0/all-1 resamples) are dropped by filter_map.
    let boot_betas: Vec<Vec<f64>> = iter_maybe_parallel!(0..n_boot)
        .filter_map(|b| {
            let mut rng = StdRng::seed_from_u64(seed.wrapping_add(b as u64));
            let indices: Vec<usize> = (0..n).map(|_| rng.gen_range(0..n)).collect();

            let boot_data = subsample_rows(data, &indices);
            let boot_y: Vec<f64> = indices.iter().map(|&i| y[i]).collect();
            let boot_sc = scalar_covariates.map(|sc| subsample_rows(sc, &indices));

            functional_logistic(&boot_data, &boot_y, boot_sc.as_ref(), ncomp, max_iter, tol)
                .map(|fit| fit.beta_t)
        })
        .collect();

    let n_boot_success = boot_betas.len();
    if n_boot_success < 3 {
        return None;
    }

    // Pointwise bands: per-grid-point quantiles across replicates.
    let lo_q = alpha / 2.0;
    let hi_q = 1.0 - alpha / 2.0;
    let mut lower = vec![0.0; m];
    let mut upper = vec![0.0; m];
    let mut boot_se = vec![0.0; m];

    for j in 0..m {
        let mut vals: Vec<f64> = boot_betas.iter().map(|b| b[j]).collect();
        vals.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        lower[j] = quantile(&vals, lo_q);
        upper[j] = quantile(&vals, hi_q);

        // Bootstrap SE, floored to protect the division below.
        let mean_j: f64 = vals.iter().sum::<f64>() / vals.len() as f64;
        let var_j: f64 =
            vals.iter().map(|&v| (v - mean_j).powi(2)).sum::<f64>() / (vals.len() - 1) as f64;
        boot_se[j] = var_j.sqrt().max(1e-15);
    }

    // Simultaneous bands via the sup-norm (studentized maximum) bootstrap.
    let mut t_stats: Vec<f64> = boot_betas
        .iter()
        .map(|b| {
            (0..m)
                .map(|j| ((b[j] - center[j]) / boot_se[j]).abs())
                .fold(0.0_f64, f64::max)
        })
        .collect();
    t_stats.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

    let c_alpha = quantile(&t_stats, 1.0 - alpha);
    let sim_lower: Vec<f64> = (0..m).map(|j| center[j] - c_alpha * boot_se[j]).collect();
    let sim_upper: Vec<f64> = (0..m).map(|j| center[j] + c_alpha * boot_se[j]).collect();

    Some(BootstrapCiResult {
        lower,
        upper,
        center,
        sim_lower,
        sim_upper,
        n_boot_success,
    })
}
1344
1345// ---------------------------------------------------------------------------
1346// Tests
1347// ---------------------------------------------------------------------------
1348
1349#[cfg(test)]
1350mod tests {
1351    use super::*;
1352    use std::f64::consts::PI;
1353
1354    fn generate_test_data(n: usize, m: usize, seed: u64) -> (FdMatrix, Vec<f64>, Vec<f64>) {
1355        let t: Vec<f64> = (0..m).map(|j| j as f64 / (m - 1) as f64).collect();
1356        let mut data = FdMatrix::zeros(n, m);
1357        let mut y = vec![0.0; n];
1358
1359        for i in 0..n {
1360            let phase =
1361                (seed.wrapping_mul(17).wrapping_add(i as u64 * 31) % 1000) as f64 / 1000.0 * PI;
1362            let amplitude =
1363                ((seed.wrapping_mul(13).wrapping_add(i as u64 * 7) % 100) as f64 / 100.0) - 0.5;
1364            for j in 0..m {
1365                data[(i, j)] =
1366                    (2.0 * PI * t[j] + phase).sin() + amplitude * (4.0 * PI * t[j]).cos();
1367            }
1368            y[i] = 2.0 * phase + 3.0 * amplitude + 0.05 * (seed.wrapping_add(i as u64) % 10) as f64;
1369        }
1370        (data, y, t)
1371    }
1372
1373    // ----- fregre_lm tests -----
1374
1375    #[test]
1376    fn test_fregre_lm_basic() {
1377        let (data, y, _t) = generate_test_data(30, 50, 42);
1378        let result = fregre_lm(&data, &y, None, 3);
1379        assert!(result.is_some());
1380        let fit = result.unwrap();
1381        assert_eq!(fit.fitted_values.len(), 30);
1382        assert_eq!(fit.residuals.len(), 30);
1383        assert_eq!(fit.beta_t.len(), 50);
1384        assert_eq!(fit.ncomp, 3);
1385        assert!(fit.r_squared >= 0.0 && fit.r_squared <= 1.0 + 1e-10);
1386    }
1387
1388    #[test]
1389    fn test_fregre_lm_with_scalar_covariates() {
1390        let (data, y, _t) = generate_test_data(30, 50, 42);
1391        let mut sc = FdMatrix::zeros(30, 2);
1392        for i in 0..30 {
1393            sc[(i, 0)] = i as f64 / 30.0;
1394            sc[(i, 1)] = (i as f64 * 0.7).sin();
1395        }
1396        let result = fregre_lm(&data, &y, Some(&sc), 3);
1397        assert!(result.is_some());
1398        let fit = result.unwrap();
1399        assert_eq!(fit.gamma.len(), 2);
1400    }
1401
1402    #[test]
1403    fn test_fregre_lm_residuals_sum_near_zero() {
1404        let (data, y, _t) = generate_test_data(30, 50, 42);
1405        let fit = fregre_lm(&data, &y, None, 3).unwrap();
1406        let resid_sum: f64 = fit.residuals.iter().sum::<f64>();
1407        assert!(
1408            resid_sum.abs() < 1e-8,
1409            "Residuals should sum to ~0 with intercept, got {}",
1410            resid_sum
1411        );
1412    }
1413
1414    #[test]
1415    fn test_fregre_lm_fitted_plus_residuals_equals_y() {
1416        let (data, y, _t) = generate_test_data(30, 50, 42);
1417        let fit = fregre_lm(&data, &y, None, 3).unwrap();
1418        for i in 0..30 {
1419            let reconstructed = fit.fitted_values[i] + fit.residuals[i];
1420            assert!(
1421                (reconstructed - y[i]).abs() < 1e-10,
1422                "ŷ + r should equal y at index {}",
1423                i
1424            );
1425        }
1426    }
1427
1428    #[test]
1429    fn test_fregre_lm_more_components_higher_r2() {
1430        let (data, y, _t) = generate_test_data(50, 50, 42);
1431        let fit1 = fregre_lm(&data, &y, None, 1).unwrap();
1432        let fit3 = fregre_lm(&data, &y, None, 3).unwrap();
1433        assert!(
1434            fit3.r_squared >= fit1.r_squared - 1e-10,
1435            "More components should give >= R²: {} vs {}",
1436            fit3.r_squared,
1437            fit1.r_squared
1438        );
1439    }
1440
1441    #[test]
1442    fn test_fregre_lm_invalid_input() {
1443        let data = FdMatrix::zeros(2, 50);
1444        let y = vec![1.0, 2.0];
1445        assert!(fregre_lm(&data, &y, None, 1).is_none());
1446
1447        let data = FdMatrix::zeros(10, 50);
1448        let y = vec![1.0; 5];
1449        assert!(fregre_lm(&data, &y, None, 2).is_none());
1450    }
1451
1452    #[test]
1453    fn test_fregre_lm_std_errors_positive() {
1454        let (data, y, _t) = generate_test_data(30, 50, 42);
1455        let fit = fregre_lm(&data, &y, None, 3).unwrap();
1456        for (i, &se) in fit.std_errors.iter().enumerate() {
1457            assert!(
1458                se > 0.0 && se.is_finite(),
1459                "Std error {} should be positive finite, got {}",
1460                i,
1461                se
1462            );
1463        }
1464    }
1465
1466    // ----- predict_fregre_lm tests -----
1467
1468    #[test]
1469    fn test_predict_fregre_lm_on_training_data() {
1470        let (data, y, _t) = generate_test_data(30, 50, 42);
1471        let fit = fregre_lm(&data, &y, None, 3).unwrap();
1472        let preds = predict_fregre_lm(&fit, &data, None);
1473        for i in 0..30 {
1474            assert!(
1475                (preds[i] - fit.fitted_values[i]).abs() < 1e-6,
1476                "Prediction on training data should match fitted values"
1477            );
1478        }
1479    }
1480
1481    // ----- fregre_cv tests -----
1482
1483    #[test]
1484    fn test_fregre_cv_returns_result() {
1485        let (data, y, _t) = generate_test_data(30, 50, 42);
1486        let result = fregre_cv(&data, &y, None, 1, 8, 5);
1487        assert!(result.is_some());
1488        let cv = result.unwrap();
1489        assert!(cv.optimal_k >= 1 && cv.optimal_k <= 8);
1490        assert!(cv.min_cv_error >= 0.0);
1491    }
1492
1493    #[test]
1494    fn test_fregre_cv_with_scalar_covariates() {
1495        let (data, y, _t) = generate_test_data(30, 50, 42);
1496        let mut sc = FdMatrix::zeros(30, 1);
1497        for i in 0..30 {
1498            sc[(i, 0)] = i as f64;
1499        }
1500        let result = fregre_cv(&data, &y, Some(&sc), 1, 5, 3);
1501        assert!(result.is_some());
1502    }
1503
1504    // ----- fregre_np_mixed tests -----
1505
1506    #[test]
1507    fn test_fregre_np_mixed_basic() {
1508        let (data, y, t) = generate_test_data(30, 50, 42);
1509        let result = fregre_np_mixed(&data, &y, &t, None, 0.0, 0.0);
1510        assert!(result.is_some());
1511        let fit = result.unwrap();
1512        assert_eq!(fit.fitted_values.len(), 30);
1513        assert!(fit.h_func > 0.0);
1514        assert!(fit.cv_error >= 0.0);
1515    }
1516
1517    #[test]
1518    fn test_fregre_np_mixed_with_scalars() {
1519        let (data, y, t) = generate_test_data(30, 50, 42);
1520        let mut sc = FdMatrix::zeros(30, 1);
1521        for i in 0..30 {
1522            sc[(i, 0)] = i as f64 / 30.0;
1523        }
1524        let result = fregre_np_mixed(&data, &y, &t, Some(&sc), 0.0, 0.0);
1525        assert!(result.is_some());
1526        let fit = result.unwrap();
1527        assert!(fit.h_scalar > 0.0);
1528    }
1529
1530    #[test]
1531    fn test_fregre_np_mixed_manual_bandwidth() {
1532        let (data, y, t) = generate_test_data(30, 50, 42);
1533        let result = fregre_np_mixed(&data, &y, &t, None, 0.5, 0.0);
1534        assert!(result.is_some());
1535        let fit = result.unwrap();
1536        assert!(
1537            (fit.h_func - 0.5).abs() < 1e-10,
1538            "Should use provided bandwidth"
1539        );
1540    }
1541
1542    // ----- functional_logistic tests -----
1543
1544    #[test]
1545    fn test_functional_logistic_basic() {
1546        let (data, y_cont, _t) = generate_test_data(30, 50, 42);
1547        let y_median = {
1548            let mut sorted = y_cont.clone();
1549            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
1550            sorted[sorted.len() / 2]
1551        };
1552        let y_bin: Vec<f64> = y_cont
1553            .iter()
1554            .map(|&v| if v >= y_median { 1.0 } else { 0.0 })
1555            .collect();
1556
1557        let result = functional_logistic(&data, &y_bin, None, 3, 25, 1e-6);
1558        assert!(result.is_some());
1559        let fit = result.unwrap();
1560        assert_eq!(fit.probabilities.len(), 30);
1561        assert_eq!(fit.predicted_classes.len(), 30);
1562        assert!(fit.accuracy >= 0.0 && fit.accuracy <= 1.0);
1563        for &p in &fit.probabilities {
1564            assert!((0.0..=1.0).contains(&p), "Probability out of range: {}", p);
1565        }
1566    }
1567
1568    #[test]
1569    fn test_functional_logistic_with_scalar_covariates() {
1570        let (data, y_cont, _t) = generate_test_data(30, 50, 42);
1571        let y_median = {
1572            let mut sorted = y_cont.clone();
1573            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
1574            sorted[sorted.len() / 2]
1575        };
1576        let y_bin: Vec<f64> = y_cont
1577            .iter()
1578            .map(|&v| if v >= y_median { 1.0 } else { 0.0 })
1579            .collect();
1580
1581        let mut sc = FdMatrix::zeros(30, 1);
1582        for i in 0..30 {
1583            sc[(i, 0)] = i as f64 / 30.0;
1584        }
1585        let result = functional_logistic(&data, &y_bin, Some(&sc), 3, 25, 1e-6);
1586        assert!(result.is_some());
1587        let fit = result.unwrap();
1588        assert_eq!(fit.gamma.len(), 1);
1589    }
1590
1591    #[test]
1592    fn test_functional_logistic_invalid_response() {
1593        let (data, _, _) = generate_test_data(30, 50, 42);
1594        let y: Vec<f64> = (0..30).map(|i| i as f64).collect();
1595        assert!(functional_logistic(&data, &y, None, 3, 25, 1e-6).is_none());
1596    }
1597
1598    #[test]
1599    fn test_functional_logistic_convergence() {
1600        let (data, y_cont, _t) = generate_test_data(40, 50, 42);
1601        let y_median = {
1602            let mut sorted = y_cont.clone();
1603            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
1604            sorted[sorted.len() / 2]
1605        };
1606        let y_bin: Vec<f64> = y_cont
1607            .iter()
1608            .map(|&v| if v >= y_median { 1.0 } else { 0.0 })
1609            .collect();
1610
1611        let fit = functional_logistic(&data, &y_bin, None, 3, 100, 1e-8).unwrap();
1612        assert!(fit.iterations <= 100, "Should converge within max_iter");
1613    }
1614
1615    // ----- Edge cases -----
1616
1617    #[test]
1618    fn test_fregre_lm_single_component() {
1619        let (data, y, _t) = generate_test_data(20, 50, 42);
1620        let result = fregre_lm(&data, &y, None, 1);
1621        assert!(result.is_some());
1622        let fit = result.unwrap();
1623        assert_eq!(fit.ncomp, 1);
1624    }
1625
1626    #[test]
1627    fn test_fregre_lm_auto_k_selection() {
1628        let (data, y, _t) = generate_test_data(30, 50, 42);
1629        let result = fregre_lm(&data, &y, None, 0);
1630        assert!(result.is_some());
1631        let fit = result.unwrap();
1632        assert!(fit.ncomp >= 1);
1633    }
1634
1635    #[test]
1636    fn test_predict_fregre_np_basic() {
1637        let (data, y, t) = generate_test_data(30, 50, 42);
1638        let preds = predict_fregre_np(&data, &y, None, &data, None, &t, 0.5, 0.5);
1639        assert_eq!(preds.len(), 30);
1640        for &p in &preds {
1641            assert!(p.is_finite(), "Prediction should be finite");
1642        }
1643    }
1644
1645    #[test]
1646    fn test_sigmoid_properties() {
1647        assert!((sigmoid(0.0) - 0.5).abs() < 1e-10);
1648        assert!(sigmoid(10.0) > 0.999);
1649        assert!(sigmoid(-10.0) < 0.001);
1650        assert!((sigmoid(3.0) + sigmoid(-3.0) - 1.0).abs() < 1e-10);
1651    }
1652
1653    // ----- beta_se tests -----
1654
1655    #[test]
1656    fn test_fregre_lm_beta_se() {
1657        let (data, y, _t) = generate_test_data(30, 50, 42);
1658        let fit = fregre_lm(&data, &y, None, 3).unwrap();
1659        assert_eq!(fit.beta_se.len(), 50, "beta_se should have length m");
1660        for (j, &se) in fit.beta_se.iter().enumerate() {
1661            assert!(
1662                se > 0.0 && se.is_finite(),
1663                "beta_se[{}] should be positive finite, got {}",
1664                j,
1665                se
1666            );
1667        }
1668    }
1669
1670    #[test]
1671    fn test_fregre_lm_beta_se_coverage() {
1672        // Use generate_test_data which is known to produce valid FPCA, then check SE properties
1673        let (data, y, _t) = generate_test_data(50, 50, 99);
1674        let fit = fregre_lm(&data, &y, None, 3).unwrap();
1675        assert_eq!(fit.beta_se.len(), 50);
1676        // With valid data, beta_se should be positive everywhere
1677        for (j, &se) in fit.beta_se.iter().enumerate() {
1678            assert!(
1679                se > 0.0 && se.is_finite(),
1680                "beta_se[{}] should be positive finite, got {}",
1681                j,
1682                se
1683            );
1684        }
1685        // The CI band [beta_t ± 2·SE] should have non-zero width everywhere
1686        for j in 0..50 {
1687            let width = 4.0 * fit.beta_se[j];
1688            assert!(width > 0.0, "CI width should be positive at j={}", j);
1689        }
1690    }
1691
1692    #[test]
1693    fn test_functional_logistic_beta_se() {
1694        let (data, y_cont, _t) = generate_test_data(30, 50, 42);
1695        let y_median = {
1696            let mut sorted = y_cont.clone();
1697            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
1698            sorted[sorted.len() / 2]
1699        };
1700        let y_bin: Vec<f64> = y_cont
1701            .iter()
1702            .map(|&v| if v >= y_median { 1.0 } else { 0.0 })
1703            .collect();
1704
1705        let fit = functional_logistic(&data, &y_bin, None, 3, 25, 1e-6).unwrap();
1706        assert_eq!(fit.beta_se.len(), 50, "beta_se should have length m");
1707        assert_eq!(
1708            fit.std_errors.len(),
1709            1 + 3,
1710            "std_errors should have length 1 + ncomp"
1711        );
1712        for (j, &se) in fit.beta_se.iter().enumerate() {
1713            assert!(
1714                se > 0.0 && se.is_finite(),
1715                "beta_se[{}] should be positive finite, got {}",
1716                j,
1717                se
1718            );
1719        }
1720        for (j, &se) in fit.std_errors.iter().enumerate() {
1721            assert!(
1722                se > 0.0 && se.is_finite(),
1723                "std_errors[{}] should be positive finite, got {}",
1724                j,
1725                se
1726            );
1727        }
1728    }
1729
1730    #[test]
1731    fn test_beta_se_zero_for_constant() {
1732        // When all curves are nearly identical, β(t) ≈ 0 but SE should still be finite/positive
1733        let n = 30;
1734        let m = 20;
1735        let mut data = FdMatrix::zeros(n, m);
1736        let mut y = vec![0.0; n];
1737        for i in 0..n {
1738            for j in 0..m {
1739                // Nearly identical curves with tiny variation
1740                data[(i, j)] = 1.0 + 0.001 * (i as f64 / n as f64);
1741            }
1742            y[i] = i as f64 / n as f64;
1743        }
1744        let fit = fregre_lm(&data, &y, None, 1).unwrap();
1745        assert_eq!(fit.beta_se.len(), m);
1746        for (j, &se) in fit.beta_se.iter().enumerate() {
1747            assert!(
1748                se.is_finite() && se >= 0.0,
1749                "beta_se[{}] should be finite non-negative, got {}",
1750                j,
1751                se
1752            );
1753        }
1754    }
1755
1756    // ----- Bootstrap CI tests -----
1757
1758    #[test]
1759    fn test_bootstrap_ci_fregre_lm_shape() {
1760        let (data, y, _t) = generate_test_data(30, 20, 42);
1761        let result = bootstrap_ci_fregre_lm(&data, &y, None, 2, 50, 0.05, 123);
1762        assert!(result.is_some(), "bootstrap_ci_fregre_lm should succeed");
1763        let ci = result.unwrap();
1764        assert_eq!(ci.lower.len(), 20);
1765        assert_eq!(ci.upper.len(), 20);
1766        assert_eq!(ci.center.len(), 20);
1767        assert_eq!(ci.sim_lower.len(), 20);
1768        assert_eq!(ci.sim_upper.len(), 20);
1769        assert!(ci.n_boot_success > 0);
1770    }
1771
    #[test]
    // Verifies band ordering (lower ≤ center ≤ upper for both the pointwise
    // and simultaneous bands) and that the simultaneous band is at least as
    // wide as the pointwise band on average.
    fn test_bootstrap_ci_fregre_lm_ordering() {
        let (data, y, _t) = generate_test_data(30, 20, 42);
        let ci = bootstrap_ci_fregre_lm(&data, &y, None, 2, 100, 0.05, 42).unwrap();
        for j in 0..20 {
            // Pointwise: lower ≤ center ≤ upper
            assert!(
                ci.lower[j] <= ci.center[j] + 1e-10,
                "lower <= center at j={}: {} > {}",
                j,
                ci.lower[j],
                ci.center[j]
            );
            assert!(
                ci.center[j] <= ci.upper[j] + 1e-10,
                "center <= upper at j={}: {} > {}",
                j,
                ci.center[j],
                ci.upper[j]
            );
            // Simultaneous: sim_lower ≤ center ≤ sim_upper
            assert!(
                ci.sim_lower[j] <= ci.center[j] + 1e-10,
                "sim_lower <= center at j={}: {} > {}",
                j,
                ci.sim_lower[j],
                ci.center[j]
            );
            assert!(
                ci.center[j] <= ci.sim_upper[j] + 1e-10,
                "center <= sim_upper at j={}: {} > {}",
                j,
                ci.center[j],
                ci.sim_upper[j]
            );
        }
        // Simultaneous band should be wider on average
        let pw_width: f64 = (0..20).map(|j| ci.upper[j] - ci.lower[j]).sum::<f64>() / 20.0;
        let sim_width: f64 = (0..20)
            .map(|j| ci.sim_upper[j] - ci.sim_lower[j])
            .sum::<f64>()
            / 20.0;
        assert!(
            sim_width >= pw_width - 1e-10,
            "Simultaneous band should be wider on average: sim={}, pw={}",
            sim_width,
            pw_width
        );
    }
1821
1822    #[test]
1823    fn test_bootstrap_ci_fregre_lm_center_matches_fit() {
1824        let (data, y, _t) = generate_test_data(30, 20, 42);
1825        let fit = fregre_lm(&data, &y, None, 2).unwrap();
1826        let ci = bootstrap_ci_fregre_lm(&data, &y, None, 2, 50, 0.05, 42).unwrap();
1827        for j in 0..20 {
1828            assert!(
1829                (ci.center[j] - fit.beta_t[j]).abs() < 1e-12,
1830                "center should match original beta_t at j={}",
1831                j
1832            );
1833        }
1834    }
1835
1836    #[test]
1837    fn test_bootstrap_ci_functional_logistic_shape() {
1838        let (data, y_cont, _t) = generate_test_data(40, 20, 42);
1839        let y_median = {
1840            let mut sorted = y_cont.clone();
1841            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
1842            sorted[sorted.len() / 2]
1843        };
1844        let y_bin: Vec<f64> = y_cont
1845            .iter()
1846            .map(|&v| if v >= y_median { 1.0 } else { 0.0 })
1847            .collect();
1848
1849        let result =
1850            bootstrap_ci_functional_logistic(&data, &y_bin, None, 2, 50, 0.05, 123, 25, 1e-6);
1851        assert!(
1852            result.is_some(),
1853            "bootstrap_ci_functional_logistic should succeed"
1854        );
1855        let ci = result.unwrap();
1856        assert_eq!(ci.lower.len(), 20);
1857        assert_eq!(ci.upper.len(), 20);
1858        assert_eq!(ci.center.len(), 20);
1859        assert!(ci.n_boot_success > 0);
1860    }
1861
1862    #[test]
1863    fn test_bootstrap_ci_logistic_ordering() {
1864        let (data, y_cont, _t) = generate_test_data(40, 20, 42);
1865        let y_median = {
1866            let mut sorted = y_cont.clone();
1867            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
1868            sorted[sorted.len() / 2]
1869        };
1870        let y_bin: Vec<f64> = y_cont
1871            .iter()
1872            .map(|&v| if v >= y_median { 1.0 } else { 0.0 })
1873            .collect();
1874
1875        let ci = bootstrap_ci_functional_logistic(&data, &y_bin, None, 2, 100, 0.05, 42, 25, 1e-6)
1876            .unwrap();
1877        for j in 0..20 {
1878            assert!(
1879                ci.lower[j] <= ci.upper[j] + 1e-10,
1880                "lower <= upper at j={}",
1881                j
1882            );
1883        }
1884    }
1885}