// fdars_core/regression.rs
1//! Regression functions for functional data.
2//!
3//! This module provides functional PCA, PLS, and ridge regression.
4
5use crate::error::FdarError;
6use crate::helpers::simpsons_weights;
7use crate::matrix::FdMatrix;
8#[cfg(feature = "linalg")]
9use anofox_regression::solvers::RidgeRegressor;
10#[cfg(feature = "linalg")]
11use anofox_regression::{FittedRegressor, Regressor};
12use nalgebra::SVD;
13
/// Result of functional PCA.
///
/// Produced by [`fdata_to_pc_1d`]; carries the decomposition plus the mean
/// function and integration weights needed by [`FpcaResult::project`] and
/// [`FpcaResult::reconstruct`].
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub struct FpcaResult {
    /// Singular values of the weighted, centered data matrix
    pub singular_values: Vec<f64>,
    /// Rotation matrix (loadings / eigenfunctions on the original grid), m x ncomp
    pub rotation: FdMatrix,
    /// Scores matrix (U * S of the weighted SVD), n x ncomp
    pub scores: FdMatrix,
    /// Mean function (column means of the training data), length m
    pub mean: Vec<f64>,
    /// Centered data (input minus mean function), n x m
    pub centered: FdMatrix,
    /// Integration weights used for the functional inner product
    pub weights: Vec<f64>,
}
32
33impl FpcaResult {
34    /// Project new functional data onto the FPC score space.
35    ///
36    /// Centers the input data by subtracting the mean function estimated
37    /// during FPCA, then multiplies by the rotation (loadings) matrix to
38    /// obtain FPC scores for the new observations.
39    ///
40    /// # Arguments
41    /// * `data` - Matrix (n_new x m) of new observations
42    ///
43    /// # Errors
44    ///
45    /// Returns [`FdarError::InvalidDimension`] if the number of columns in
46    /// `data` does not match the length of the mean vector (i.e. the number
47    /// of evaluation points used during FPCA).
48    ///
49    /// # Examples
50    ///
51    /// ```
52    /// use fdars_core::matrix::FdMatrix;
53    /// use fdars_core::regression::fdata_to_pc_1d;
54    ///
55    /// let data = FdMatrix::from_column_major(
56    ///     (0..50).map(|i| (i as f64 * 0.1).sin()).collect(),
57    ///     5, 10,
58    /// ).unwrap();
59    /// let argvals: Vec<f64> = (0..10).map(|i| i as f64 / 9.0).collect();
60    /// let fpca = fdata_to_pc_1d(&data, 3, &argvals).unwrap();
61    ///
62    /// // Project the original data (scores should match)
63    /// let scores = fpca.project(&data).unwrap();
64    /// assert_eq!(scores.shape(), (5, 3));
65    ///
66    /// // Project new data
67    /// let new_data = FdMatrix::from_column_major(
68    ///     (0..20).map(|i| (i as f64 * 0.2).cos()).collect(),
69    ///     2, 10,
70    /// ).unwrap();
71    /// let new_scores = fpca.project(&new_data).unwrap();
72    /// assert_eq!(new_scores.shape(), (2, 3));
73    /// ```
74    pub fn project(&self, data: &FdMatrix) -> Result<FdMatrix, FdarError> {
75        let (n, m) = data.shape();
76        let ncomp = self.rotation.ncols();
77        if m != self.mean.len() {
78            return Err(FdarError::InvalidDimension {
79                parameter: "data",
80                expected: format!("{} columns", self.mean.len()),
81                actual: format!("{m} columns"),
82            });
83        }
84
85        let mut scores = FdMatrix::zeros(n, ncomp);
86        for i in 0..n {
87            for k in 0..ncomp {
88                let mut sum = 0.0;
89                for j in 0..m {
90                    sum += (data[(i, j)] - self.mean[j]) * self.rotation[(j, k)] * self.weights[j];
91                }
92                scores[(i, k)] = sum;
93            }
94        }
95        Ok(scores)
96    }
97
98    /// Reconstruct functional data from FPC scores.
99    ///
100    /// Computes the approximation of functional data using the first
101    /// `ncomp` principal components:
102    /// `data[i, j] = mean[j] + sum_k scores[i, k] * rotation[j, k]`
103    ///
104    /// # Arguments
105    /// * `scores` - Score matrix (n x p) where p >= `ncomp`
106    /// * `ncomp` - Number of components to use for reconstruction
107    ///
108    /// # Errors
109    ///
110    /// Returns [`FdarError::InvalidParameter`] if `ncomp` is zero or exceeds
111    /// the number of columns in `scores` or the number of available components
112    /// in the rotation matrix.
113    ///
114    /// # Examples
115    ///
116    /// ```
117    /// use fdars_core::matrix::FdMatrix;
118    /// use fdars_core::regression::fdata_to_pc_1d;
119    ///
120    /// let data = FdMatrix::from_column_major(
121    ///     (0..100).map(|i| (i as f64 * 0.1).sin()).collect(),
122    ///     10, 10,
123    /// ).unwrap();
124    /// let argvals: Vec<f64> = (0..10).map(|i| i as f64 / 9.0).collect();
125    /// let fpca = fdata_to_pc_1d(&data, 5, &argvals).unwrap();
126    ///
127    /// // Reconstruct using all 5 components
128    /// let recon = fpca.reconstruct(&fpca.scores, 5).unwrap();
129    /// assert_eq!(recon.shape(), (10, 10));
130    ///
131    /// // Reconstruct using fewer components
132    /// let recon2 = fpca.reconstruct(&fpca.scores, 2).unwrap();
133    /// assert_eq!(recon2.shape(), (10, 10));
134    /// ```
135    pub fn reconstruct(&self, scores: &FdMatrix, ncomp: usize) -> Result<FdMatrix, FdarError> {
136        let (n, p) = scores.shape();
137        let m = self.mean.len();
138        let max_comp = self.rotation.ncols().min(p);
139        if ncomp == 0 {
140            return Err(FdarError::InvalidParameter {
141                parameter: "ncomp",
142                message: "ncomp must be >= 1".to_string(),
143            });
144        }
145        if ncomp > max_comp {
146            return Err(FdarError::InvalidParameter {
147                parameter: "ncomp",
148                message: format!("ncomp={ncomp} exceeds available components ({max_comp})"),
149            });
150        }
151
152        let mut recon = FdMatrix::zeros(n, m);
153        for i in 0..n {
154            for j in 0..m {
155                let mut val = self.mean[j];
156                for k in 0..ncomp {
157                    val += scores[(i, k)] * self.rotation[(j, k)];
158                }
159                recon[(i, j)] = val;
160            }
161        }
162        Ok(recon)
163    }
164}
165
166/// Center columns of a matrix and return (centered_matrix, column_means).
167fn center_columns(data: &FdMatrix) -> (FdMatrix, Vec<f64>) {
168    let (n, m) = data.shape();
169    let mut centered = FdMatrix::zeros(n, m);
170    let mut means = vec![0.0; m];
171    for j in 0..m {
172        let col = data.column(j);
173        let mean = col.iter().sum::<f64>() / n as f64;
174        means[j] = mean;
175        let out_col = centered.column_mut(j);
176        for i in 0..n {
177            out_col[i] = col[i] - mean;
178        }
179    }
180    (centered, means)
181}
182
183/// Extract rotation (V) and scores (U*S) from SVD results.
184fn extract_pc_components(
185    svd: &SVD<f64, nalgebra::Dyn, nalgebra::Dyn>,
186    n: usize,
187    m: usize,
188    ncomp: usize,
189) -> Option<(Vec<f64>, FdMatrix, FdMatrix)> {
190    let singular_values: Vec<f64> = svd.singular_values.iter().take(ncomp).copied().collect();
191
192    let v_t = svd.v_t.as_ref()?;
193    let mut rotation = FdMatrix::zeros(m, ncomp);
194    for k in 0..ncomp {
195        for j in 0..m {
196            rotation[(j, k)] = v_t[(k, j)];
197        }
198    }
199
200    let u = svd.u.as_ref()?;
201    let mut scores = FdMatrix::zeros(n, ncomp);
202    for k in 0..ncomp {
203        let sv_k = singular_values[k];
204        for i in 0..n {
205            scores[(i, k)] = u[(i, k)] * sv_k;
206        }
207    }
208
209    Some((singular_values, rotation, scores))
210}
211
212/// Perform functional PCA via SVD on centered data with integration weights.
213///
214/// Uses Simpson's-rule weights derived from `argvals` so that the resulting
215/// scores represent functional inner products and are invariant to grid
216/// density.
217///
218/// # Arguments
219/// * `data` - Matrix (n x m): n observations, m evaluation points
220/// * `ncomp` - Number of components to extract
221/// * `argvals` - Evaluation grid points (length m)
222///
223/// # Errors
224///
225/// Returns [`FdarError::InvalidDimension`] if `data` has zero rows or zero
226/// columns, or if `argvals.len() != m`.
227/// Returns [`FdarError::InvalidParameter`] if `ncomp` is zero.
228/// Returns [`FdarError::ComputationFailed`] if the SVD decomposition fails to
229/// produce U or V_t matrices.
230///
231/// # Examples
232///
233/// ```
234/// use fdars_core::matrix::FdMatrix;
235/// use fdars_core::regression::fdata_to_pc_1d;
236///
237/// // 5 curves, each evaluated at 10 points
238/// let data = FdMatrix::from_column_major(
239///     (0..50).map(|i| (i as f64 * 0.1).sin()).collect(),
240///     5, 10,
241/// ).unwrap();
242/// let argvals: Vec<f64> = (0..10).map(|i| i as f64 / 9.0).collect();
243/// let result = fdata_to_pc_1d(&data, 3, &argvals).unwrap();
244/// assert_eq!(result.scores.shape(), (5, 3));
245/// assert_eq!(result.rotation.shape(), (10, 3));
246/// assert_eq!(result.mean.len(), 10);
247/// ```
248#[must_use = "expensive computation whose result should not be discarded"]
249pub fn fdata_to_pc_1d(
250    data: &FdMatrix,
251    ncomp: usize,
252    argvals: &[f64],
253) -> Result<FpcaResult, FdarError> {
254    let (n, m) = data.shape();
255    if n == 0 {
256        return Err(FdarError::InvalidDimension {
257            parameter: "data",
258            expected: "n > 0 rows".to_string(),
259            actual: format!("n = {n}"),
260        });
261    }
262    if m == 0 {
263        return Err(FdarError::InvalidDimension {
264            parameter: "data",
265            expected: "m > 0 columns".to_string(),
266            actual: format!("m = {m}"),
267        });
268    }
269    if argvals.len() != m {
270        return Err(FdarError::InvalidDimension {
271            parameter: "argvals",
272            expected: format!("{m} elements"),
273            actual: format!("{} elements", argvals.len()),
274        });
275    }
276    if ncomp < 1 {
277        return Err(FdarError::InvalidParameter {
278            parameter: "ncomp",
279            message: format!("ncomp must be >= 1, got {ncomp}"),
280        });
281    }
282
283    let ncomp = ncomp.min(n).min(m);
284    let (centered, means) = center_columns(data);
285
286    // Compute integration weights for functional inner product
287    let weights = simpsons_weights(argvals);
288    let sqrt_weights: Vec<f64> = weights.iter().map(|w| w.sqrt()).collect();
289
290    // Scale centered data by sqrt(weights) for weighted SVD
291    let mut weighted = centered.clone();
292    for i in 0..n {
293        for j in 0..m {
294            weighted[(i, j)] *= sqrt_weights[j];
295        }
296    }
297
298    let svd = SVD::new(weighted.to_dmatrix(), true, true);
299    let (singular_values, mut rotation, scores) =
300        extract_pc_components(&svd, n, m, ncomp).ok_or_else(|| FdarError::ComputationFailed {
301            operation: "SVD",
302            detail: "failed to extract U or V_t from SVD decomposition; try reducing ncomp or check for zero-variance columns in the data".to_string(),
303        })?;
304
305    // Unscale loadings: divide by sqrt(weights) to get actual eigenfunctions
306    for k in 0..ncomp {
307        for j in 0..m {
308            if sqrt_weights[j] > 1e-15 {
309                rotation[(j, k)] /= sqrt_weights[j];
310            }
311        }
312    }
313
314    Ok(FpcaResult {
315        singular_values,
316        rotation,
317        scores,
318        mean: means,
319        centered,
320        weights,
321    })
322}
323
/// Result of PLS regression.
///
/// Produced by [`fdata_to_pls_1d`]; stores the fitted components plus the
/// column means and integration weights needed by [`PlsResult::project`].
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub struct PlsResult {
    /// Weight vectors (one column per component), m x ncomp
    pub weights: FdMatrix,
    /// Score vectors (one column per component), n x ncomp
    pub scores: FdMatrix,
    /// Loading vectors used for deflation, m x ncomp
    pub loadings: FdMatrix,
    /// Column means of the training data, length m
    pub x_means: Vec<f64>,
    /// Integration weights for the functional inner product
    pub integration_weights: Vec<f64>,
}
340
341impl PlsResult {
342    /// Project new functional data onto the PLS score space.
343    ///
344    /// Centers the input data by subtracting the column means estimated
345    /// during PLS fitting, then iteratively projects and deflates through
346    /// each PLS component using the stored weight and loading vectors.
347    ///
348    /// # Arguments
349    /// * `data` - Matrix (n_new x m) of new observations
350    ///
351    /// # Errors
352    ///
353    /// Returns [`FdarError::InvalidDimension`] if the number of columns in
354    /// `data` does not match the number of predictor variables used during
355    /// PLS fitting.
356    ///
357    /// # Examples
358    ///
359    /// ```
360    /// use fdars_core::matrix::FdMatrix;
361    /// use fdars_core::regression::fdata_to_pls_1d;
362    ///
363    /// let x = FdMatrix::from_column_major(
364    ///     (0..100).map(|i| (i as f64 * 0.1).sin()).collect(),
365    ///     10, 10,
366    /// ).unwrap();
367    /// let y: Vec<f64> = (0..10).map(|i| i as f64 * 0.5).collect();
368    /// let argvals: Vec<f64> = (0..10).map(|i| i as f64 / 9.0).collect();
369    /// let pls = fdata_to_pls_1d(&x, &y, 3, &argvals).unwrap();
370    ///
371    /// // Project the original data
372    /// let scores = pls.project(&x).unwrap();
373    /// assert_eq!(scores.shape(), (10, 3));
374    ///
375    /// // Project new data
376    /// let new_x = FdMatrix::from_column_major(
377    ///     (0..20).map(|i| (i as f64 * 0.2).cos()).collect(),
378    ///     2, 10,
379    /// ).unwrap();
380    /// let new_scores = pls.project(&new_x).unwrap();
381    /// assert_eq!(new_scores.shape(), (2, 3));
382    /// ```
383    pub fn project(&self, data: &FdMatrix) -> Result<FdMatrix, FdarError> {
384        let (n, m) = data.shape();
385        let ncomp = self.weights.ncols();
386        if m != self.x_means.len() {
387            return Err(FdarError::InvalidDimension {
388                parameter: "data",
389                expected: format!("{} columns", self.x_means.len()),
390                actual: format!("{m} columns"),
391            });
392        }
393
394        // Center data
395        let mut x_cen = FdMatrix::zeros(n, m);
396        for j in 0..m {
397            for i in 0..n {
398                x_cen[(i, j)] = data[(i, j)] - self.x_means[j];
399            }
400        }
401
402        // Iteratively project and deflate through each component
403        let mut scores = FdMatrix::zeros(n, ncomp);
404        for k in 0..ncomp {
405            // Compute scores: t = X_cen * W * w_k (weighted inner product)
406            for i in 0..n {
407                let mut sum = 0.0;
408                for j in 0..m {
409                    sum += x_cen[(i, j)] * self.weights[(j, k)] * self.integration_weights[j];
410                }
411                scores[(i, k)] = sum;
412            }
413
414            // Deflate: X_cen -= t * p_k'
415            for j in 0..m {
416                let p_jk = self.loadings[(j, k)];
417                for i in 0..n {
418                    x_cen[(i, j)] -= scores[(i, k)] * p_jk;
419                }
420            }
421        }
422
423        Ok(scores)
424    }
425}
426
427/// Compute PLS weight vector: w = X'y / ||X'y|| (with integration weights)
428fn pls_compute_weights(x_cen: &FdMatrix, y_cen: &[f64], int_w: &[f64]) -> Vec<f64> {
429    let (n, m) = x_cen.shape();
430    let mut w: Vec<f64> = (0..m)
431        .map(|j| {
432            let mut sum = 0.0;
433            for i in 0..n {
434                sum += x_cen[(i, j)] * y_cen[i];
435            }
436            sum * int_w[j]
437        })
438        .collect();
439
440    let w_norm: f64 = w.iter().map(|&wi| wi * wi).sum::<f64>().sqrt();
441    if w_norm > 1e-10 {
442        for wi in &mut w {
443            *wi /= w_norm;
444        }
445    }
446    w
447}
448
449/// Compute PLS scores: t = X * W * w (weighted inner product)
450fn pls_compute_scores(x_cen: &FdMatrix, w: &[f64], int_w: &[f64]) -> Vec<f64> {
451    let (n, m) = x_cen.shape();
452    (0..n)
453        .map(|i| {
454            let mut sum = 0.0;
455            for j in 0..m {
456                sum += x_cen[(i, j)] * w[j] * int_w[j];
457            }
458            sum
459        })
460        .collect()
461}
462
463/// Compute PLS loadings: p = X't / (t't) (with integration weights)
464fn pls_compute_loadings(x_cen: &FdMatrix, t: &[f64], t_norm_sq: f64, int_w: &[f64]) -> Vec<f64> {
465    let (n, m) = x_cen.shape();
466    (0..m)
467        .map(|j| {
468            let mut sum = 0.0;
469            for i in 0..n {
470                sum += x_cen[(i, j)] * t[i];
471            }
472            sum * int_w[j] / t_norm_sq.max(1e-10)
473        })
474        .collect()
475}
476
477/// Deflate X by removing the rank-1 component t * p'
478fn pls_deflate_x(x_cen: &mut FdMatrix, t: &[f64], p: &[f64]) {
479    let (n, m) = x_cen.shape();
480    for j in 0..m {
481        for i in 0..n {
482            x_cen[(i, j)] -= t[i] * p[j];
483        }
484    }
485}
486
487/// Execute one NIPALS step: compute weights/scores/loadings and deflate X and y.
488fn pls_nipals_step(
489    k: usize,
490    x_cen: &mut FdMatrix,
491    y_cen: &mut [f64],
492    weights: &mut FdMatrix,
493    scores: &mut FdMatrix,
494    loadings: &mut FdMatrix,
495    int_w: &[f64],
496) {
497    let n = x_cen.nrows();
498    let m = x_cen.ncols();
499
500    let w = pls_compute_weights(x_cen, y_cen, int_w);
501    let t = pls_compute_scores(x_cen, &w, int_w);
502    let t_norm_sq: f64 = t.iter().map(|&ti| ti * ti).sum();
503    let p = pls_compute_loadings(x_cen, &t, t_norm_sq, int_w);
504
505    for j in 0..m {
506        weights[(j, k)] = w[j];
507        loadings[(j, k)] = p[j];
508    }
509    for i in 0..n {
510        scores[(i, k)] = t[i];
511    }
512
513    pls_deflate_x(x_cen, &t, &p);
514    let t_y: f64 = t.iter().zip(y_cen.iter()).map(|(&ti, &yi)| ti * yi).sum();
515    let q = t_y / t_norm_sq.max(1e-10);
516    for i in 0..n {
517        y_cen[i] -= t[i] * q;
518    }
519}
520
521/// Perform PLS via NIPALS algorithm with integration weights.
522///
523/// # Arguments
524/// * `data` - Matrix (n x m): n observations, m evaluation points
525/// * `y` - Response vector (length n)
526/// * `ncomp` - Number of components to extract
527/// * `argvals` - Evaluation grid points (length m)
528///
529/// # Errors
530///
531/// Returns [`FdarError::InvalidDimension`] if `data` has zero rows or zero
532/// columns, if `y.len()` does not equal the number of rows in `data`,
533/// or if `argvals.len() != m`.
534/// Returns [`FdarError::InvalidParameter`] if `ncomp` is zero.
535#[must_use = "expensive computation whose result should not be discarded"]
536pub fn fdata_to_pls_1d(
537    data: &FdMatrix,
538    y: &[f64],
539    ncomp: usize,
540    argvals: &[f64],
541) -> Result<PlsResult, FdarError> {
542    let (n, m) = data.shape();
543    if n == 0 {
544        return Err(FdarError::InvalidDimension {
545            parameter: "data",
546            expected: "n > 0 rows".to_string(),
547            actual: format!("n = {n}"),
548        });
549    }
550    if m == 0 {
551        return Err(FdarError::InvalidDimension {
552            parameter: "data",
553            expected: "m > 0 columns".to_string(),
554            actual: format!("m = {m}"),
555        });
556    }
557    if y.len() != n {
558        return Err(FdarError::InvalidDimension {
559            parameter: "y",
560            expected: format!("length {n}"),
561            actual: format!("length {}", y.len()),
562        });
563    }
564    if argvals.len() != m {
565        return Err(FdarError::InvalidDimension {
566            parameter: "argvals",
567            expected: format!("{m} elements"),
568            actual: format!("{} elements", argvals.len()),
569        });
570    }
571    if ncomp < 1 {
572        return Err(FdarError::InvalidParameter {
573            parameter: "ncomp",
574            message: format!("ncomp must be >= 1, got {ncomp}"),
575        });
576    }
577
578    let ncomp = ncomp.min(n).min(m);
579
580    // Compute integration weights
581    let int_w = simpsons_weights(argvals);
582
583    // Center X and y
584    let x_means: Vec<f64> = (0..m)
585        .map(|j| {
586            let col = data.column(j);
587            let sum: f64 = col.iter().sum();
588            sum / n as f64
589        })
590        .collect();
591
592    let y_mean: f64 = y.iter().sum::<f64>() / n as f64;
593
594    let mut x_cen = FdMatrix::zeros(n, m);
595    for j in 0..m {
596        for i in 0..n {
597            x_cen[(i, j)] = data[(i, j)] - x_means[j];
598        }
599    }
600
601    let mut y_cen: Vec<f64> = y.iter().map(|&yi| yi - y_mean).collect();
602
603    let mut weights = FdMatrix::zeros(m, ncomp);
604    let mut scores = FdMatrix::zeros(n, ncomp);
605    let mut loadings = FdMatrix::zeros(m, ncomp);
606
607    // NIPALS algorithm
608    for k in 0..ncomp {
609        pls_nipals_step(
610            k,
611            &mut x_cen,
612            &mut y_cen,
613            &mut weights,
614            &mut scores,
615            &mut loadings,
616            &int_w,
617        );
618    }
619
620    Ok(PlsResult {
621        weights,
622        scores,
623        loadings,
624        x_means,
625        integration_weights: int_w,
626    })
627}
628
/// Result of ridge regression fit.
///
/// Failures are reported via the `error` field rather than a `Result`, so
/// check `error.is_none()` before using the numeric fields.
#[cfg(feature = "linalg")]
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct RidgeResult {
    /// Estimated coefficients, one per predictor column
    pub coefficients: Vec<f64>,
    /// Intercept (0.0 when the model was fitted without one)
    pub intercept: f64,
    /// Fitted values on the training data
    pub fitted_values: Vec<f64>,
    /// Residuals (y - fitted)
    pub residuals: Vec<f64>,
    /// R-squared of the fit (0.0 when the total sum of squares is zero)
    pub r_squared: f64,
    /// Lambda used
    pub lambda: f64,
    /// Error message if any; `None` on success
    pub error: Option<String>,
}
648
/// Build a `RidgeResult` describing a failed fit: empty outputs, error set.
#[cfg(feature = "linalg")]
fn ridge_failure(lambda: f64, message: String) -> RidgeResult {
    RidgeResult {
        coefficients: Vec::new(),
        intercept: 0.0,
        fitted_values: Vec::new(),
        residuals: Vec::new(),
        r_squared: 0.0,
        lambda,
        error: Some(message),
    }
}

/// Fit ridge regression.
///
/// Never panics: dimension problems and solver failures are reported through
/// the `error` field of the returned [`RidgeResult`].
///
/// # Arguments
/// * `x` - Predictor matrix (n x m)
/// * `y` - Response vector (length n)
/// * `lambda` - Regularization parameter
/// * `with_intercept` - Whether to include intercept
#[cfg(feature = "linalg")]
#[must_use = "expensive computation whose result should not be discarded"]
pub fn ridge_regression_fit(
    x: &FdMatrix,
    y: &[f64],
    lambda: f64,
    with_intercept: bool,
) -> RidgeResult {
    let (n, m) = x.shape();
    if n == 0 || m == 0 || y.len() != n {
        return ridge_failure(lambda, "Invalid input dimensions".to_string());
    }

    // Convert to the faer types expected by the solver.
    let x_faer = faer::Mat::from_fn(n, m, |i, j| x[(i, j)]);
    let y_faer = faer::Col::from_fn(n, |i| y[i]);

    // Build and fit the ridge regressor.
    let regressor = RidgeRegressor::builder()
        .with_intercept(with_intercept)
        .lambda(lambda)
        .build();

    let fitted = match regressor.fit(&x_faer, &y_faer) {
        Ok(f) => f,
        Err(e) => return ridge_failure(lambda, format!("Fit failed: {e:?}")),
    };

    // Extract coefficients and intercept (0.0 when fitted without one).
    let coefs = fitted.coefficients();
    let coefficients: Vec<f64> = (0..coefs.nrows()).map(|i| coefs[i]).collect();
    let intercept = fitted.intercept().unwrap_or(0.0);

    // Fitted values: intercept + x * beta.
    let fitted_values: Vec<f64> = (0..n)
        .map(|i| {
            let mut pred = intercept;
            for j in 0..m {
                pred += x[(i, j)] * coefficients[j];
            }
            pred
        })
        .collect();

    // Residuals and training-data R^2.
    let residuals: Vec<f64> = y
        .iter()
        .zip(fitted_values.iter())
        .map(|(&yi, &yhat)| yi - yhat)
        .collect();

    let y_mean: f64 = y.iter().sum::<f64>() / n as f64;
    let ss_tot: f64 = y.iter().map(|&yi| (yi - y_mean).powi(2)).sum();
    let ss_res: f64 = residuals.iter().map(|&r| r.powi(2)).sum();
    // A constant response makes R^2 undefined; report 0.0 in that case.
    let r_squared = if ss_tot > 0.0 {
        1.0 - ss_res / ss_tot
    } else {
        0.0
    };

    RidgeResult {
        coefficients,
        intercept,
        fitted_values,
        residuals,
        r_squared,
        lambda,
        error: None,
    }
}
746
747#[cfg(test)]
748mod tests {
749    use super::*;
750    use std::f64::consts::PI;
751
752    /// Generate functional data with known structure for testing
753    fn generate_test_fdata(n: usize, m: usize) -> (FdMatrix, Vec<f64>) {
754        let t: Vec<f64> = (0..m).map(|j| j as f64 / (m - 1) as f64).collect();
755
756        // Create n curves: sine waves with varying phase
757        let mut data = FdMatrix::zeros(n, m);
758        for i in 0..n {
759            let phase = (i as f64 / n as f64) * PI;
760            for j in 0..m {
761                data[(i, j)] = (2.0 * PI * t[j] + phase).sin();
762            }
763        }
764
765        (data, t)
766    }
767
768    // ============== FPCA tests ==============
769
770    #[test]
771    fn test_fdata_to_pc_1d_basic() {
772        let n = 20;
773        let m = 50;
774        let ncomp = 3;
775        let (data, t) = generate_test_fdata(n, m);
776
777        let result = fdata_to_pc_1d(&data, ncomp, &t);
778        assert!(result.is_ok());
779
780        let fpca = result.unwrap();
781        assert_eq!(fpca.singular_values.len(), ncomp);
782        assert_eq!(fpca.rotation.shape(), (m, ncomp));
783        assert_eq!(fpca.scores.shape(), (n, ncomp));
784        assert_eq!(fpca.mean.len(), m);
785        assert_eq!(fpca.centered.shape(), (n, m));
786    }
787
788    #[test]
789    fn test_fdata_to_pc_1d_singular_values_decreasing() {
790        let n = 20;
791        let m = 50;
792        let ncomp = 5;
793        let (data, t) = generate_test_fdata(n, m);
794
795        let fpca = fdata_to_pc_1d(&data, ncomp, &t).unwrap();
796
797        // Singular values should be in decreasing order
798        for i in 1..fpca.singular_values.len() {
799            assert!(
800                fpca.singular_values[i] <= fpca.singular_values[i - 1] + 1e-10,
801                "Singular values should be decreasing"
802            );
803        }
804    }
805
806    #[test]
807    fn test_fdata_to_pc_1d_centered_has_zero_mean() {
808        let n = 20;
809        let m = 50;
810        let (data, t) = generate_test_fdata(n, m);
811
812        let fpca = fdata_to_pc_1d(&data, 3, &t).unwrap();
813
814        // Column means of centered data should be zero
815        for j in 0..m {
816            let col_mean: f64 = (0..n).map(|i| fpca.centered[(i, j)]).sum::<f64>() / n as f64;
817            assert!(
818                col_mean.abs() < 1e-10,
819                "Centered data should have zero column mean"
820            );
821        }
822    }
823
824    #[test]
825    fn test_fdata_to_pc_1d_ncomp_limits() {
826        let n = 10;
827        let m = 50;
828        let (data, t) = generate_test_fdata(n, m);
829
830        // Request more components than n - should cap at n
831        let fpca = fdata_to_pc_1d(&data, 20, &t).unwrap();
832        assert!(fpca.singular_values.len() <= n);
833    }
834
835    #[test]
836    fn test_fdata_to_pc_1d_invalid_input() {
837        // Empty data
838        let empty = FdMatrix::zeros(0, 50);
839        let t50: Vec<f64> = (0..50).map(|i| i as f64 / 49.0).collect();
840        let result = fdata_to_pc_1d(&empty, 3, &t50);
841        assert!(result.is_err());
842
843        // Zero components
844        let (data, t) = generate_test_fdata(10, 50);
845        let result = fdata_to_pc_1d(&data, 0, &t);
846        assert!(result.is_err());
847    }
848
849    #[test]
850    fn test_fdata_to_pc_1d_reconstruction() {
851        let n = 10;
852        let m = 30;
853        let (data, t) = generate_test_fdata(n, m);
854
855        // Use all components for perfect reconstruction
856        let ncomp = n.min(m);
857        let fpca = fdata_to_pc_1d(&data, ncomp, &t).unwrap();
858
859        // Reconstruct: X_centered = scores * rotation^T
860        for i in 0..n {
861            for j in 0..m {
862                let mut reconstructed = 0.0;
863                for k in 0..ncomp {
864                    let score = fpca.scores[(i, k)];
865                    let loading = fpca.rotation[(j, k)];
866                    reconstructed += score * loading;
867                }
868                let original_centered = fpca.centered[(i, j)];
869                assert!(
870                    (reconstructed - original_centered).abs() < 0.1,
871                    "Reconstruction error at ({}, {}): {} vs {}",
872                    i,
873                    j,
874                    reconstructed,
875                    original_centered
876                );
877            }
878        }
879    }
880
881    // ============== PLS tests ==============
882
883    #[test]
884    fn test_fdata_to_pls_1d_basic() {
885        let n = 20;
886        let m = 30;
887        let ncomp = 3;
888        let (x, t) = generate_test_fdata(n, m);
889
890        // Create y with some relationship to x
891        let y: Vec<f64> = (0..n).map(|i| (i as f64 / n as f64) + 0.1).collect();
892
893        let result = fdata_to_pls_1d(&x, &y, ncomp, &t);
894        assert!(result.is_ok());
895
896        let pls = result.unwrap();
897        assert_eq!(pls.weights.shape(), (m, ncomp));
898        assert_eq!(pls.scores.shape(), (n, ncomp));
899        assert_eq!(pls.loadings.shape(), (m, ncomp));
900    }
901
902    #[test]
903    fn test_fdata_to_pls_1d_weights_normalized() {
904        let n = 20;
905        let m = 30;
906        let ncomp = 2;
907        let (x, t) = generate_test_fdata(n, m);
908        let y: Vec<f64> = (0..n).map(|i| i as f64).collect();
909
910        let pls = fdata_to_pls_1d(&x, &y, ncomp, &t).unwrap();
911
912        // Weight vectors should be approximately unit norm
913        for k in 0..ncomp {
914            let norm: f64 = (0..m)
915                .map(|j| pls.weights[(j, k)].powi(2))
916                .sum::<f64>()
917                .sqrt();
918            assert!(
919                (norm - 1.0).abs() < 0.1,
920                "Weight vector {} should be unit norm, got {}",
921                k,
922                norm
923            );
924        }
925    }
926
927    #[test]
928    fn test_fdata_to_pls_1d_invalid_input() {
929        let (x, t) = generate_test_fdata(10, 30);
930
931        // Wrong y length
932        let result = fdata_to_pls_1d(&x, &[0.0; 5], 2, &t);
933        assert!(result.is_err());
934
935        // Zero components
936        let y = vec![0.0; 10];
937        let result = fdata_to_pls_1d(&x, &y, 0, &t);
938        assert!(result.is_err());
939    }
940
941    // ============== Ridge regression tests ==============
942
943    #[cfg(feature = "linalg")]
944    #[test]
945    fn test_ridge_regression_fit_basic() {
946        let n = 50;
947        let m = 5;
948
949        // Create X with known structure
950        let mut x = FdMatrix::zeros(n, m);
951        for i in 0..n {
952            for j in 0..m {
953                x[(i, j)] = (i as f64 + j as f64) / (n + m) as f64;
954            }
955        }
956
957        // Create y = sum of x columns + noise
958        let y: Vec<f64> = (0..n)
959            .map(|i| {
960                let mut sum = 0.0;
961                for j in 0..m {
962                    sum += x[(i, j)];
963                }
964                sum + 0.01 * (i as f64 % 10.0)
965            })
966            .collect();
967
968        let result = ridge_regression_fit(&x, &y, 0.1, true);
969
970        assert!(result.error.is_none(), "Ridge should fit without error");
971        assert_eq!(result.coefficients.len(), m);
972        assert_eq!(result.fitted_values.len(), n);
973        assert_eq!(result.residuals.len(), n);
974    }
975
976    #[cfg(feature = "linalg")]
977    #[test]
978    fn test_ridge_regression_fit_r_squared() {
979        let n = 50;
980        let m = 3;
981
982        let x = FdMatrix::from_column_major(
983            (0..n * m).map(|i| i as f64 / (n * m) as f64).collect(),
984            n,
985            m,
986        )
987        .unwrap();
988        let y: Vec<f64> = (0..n).map(|i| i as f64 / n as f64).collect();
989
990        let result = ridge_regression_fit(&x, &y, 0.01, true);
991
992        assert!(
993            result.r_squared > 0.5,
994            "R-squared should be high, got {}",
995            result.r_squared
996        );
997        assert!(result.r_squared <= 1.0 + 1e-10, "R-squared should be <= 1");
998    }
999
1000    #[cfg(feature = "linalg")]
1001    #[test]
1002    fn test_ridge_regression_fit_regularization() {
1003        let n = 30;
1004        let m = 10;
1005
1006        let x = FdMatrix::from_column_major(
1007            (0..n * m)
1008                .map(|i| ((i * 17) % 100) as f64 / 100.0)
1009                .collect(),
1010            n,
1011            m,
1012        )
1013        .unwrap();
1014        let y: Vec<f64> = (0..n).map(|i| (i as f64).sin()).collect();
1015
1016        let low_lambda = ridge_regression_fit(&x, &y, 0.001, true);
1017        let high_lambda = ridge_regression_fit(&x, &y, 100.0, true);
1018
1019        let norm_low: f64 = low_lambda
1020            .coefficients
1021            .iter()
1022            .map(|c| c.powi(2))
1023            .sum::<f64>()
1024            .sqrt();
1025        let norm_high: f64 = high_lambda
1026            .coefficients
1027            .iter()
1028            .map(|c| c.powi(2))
1029            .sum::<f64>()
1030            .sqrt();
1031
1032        assert!(
1033            norm_high <= norm_low + 1e-6,
1034            "Higher lambda should shrink coefficients: {} vs {}",
1035            norm_high,
1036            norm_low
1037        );
1038    }
1039
1040    #[cfg(feature = "linalg")]
1041    #[test]
1042    fn test_ridge_regression_fit_residuals() {
1043        let n = 20;
1044        let m = 3;
1045
1046        let x = FdMatrix::from_column_major(
1047            (0..n * m).map(|i| i as f64 / (n * m) as f64).collect(),
1048            n,
1049            m,
1050        )
1051        .unwrap();
1052        let y: Vec<f64> = (0..n).map(|i| i as f64 / n as f64).collect();
1053
1054        let result = ridge_regression_fit(&x, &y, 0.1, true);
1055
1056        for i in 0..n {
1057            let expected_resid = y[i] - result.fitted_values[i];
1058            assert!(
1059                (result.residuals[i] - expected_resid).abs() < 1e-10,
1060                "Residual mismatch at {}",
1061                i
1062            );
1063        }
1064    }
1065
1066    #[cfg(feature = "linalg")]
1067    #[test]
1068    fn test_ridge_regression_fit_no_intercept() {
1069        let n = 30;
1070        let m = 5;
1071
1072        let x = FdMatrix::from_column_major(
1073            (0..n * m).map(|i| i as f64 / (n * m) as f64).collect(),
1074            n,
1075            m,
1076        )
1077        .unwrap();
1078        let y: Vec<f64> = (0..n).map(|i| i as f64 / n as f64).collect();
1079
1080        let result = ridge_regression_fit(&x, &y, 0.1, false);
1081
1082        assert!(result.error.is_none());
1083        assert!(
1084            result.intercept.abs() < 1e-10,
1085            "Intercept should be 0, got {}",
1086            result.intercept
1087        );
1088    }
1089
1090    #[cfg(feature = "linalg")]
1091    #[test]
1092    fn test_ridge_regression_fit_invalid_input() {
1093        let empty = FdMatrix::zeros(0, 5);
1094        let result = ridge_regression_fit(&empty, &[], 0.1, true);
1095        assert!(result.error.is_some());
1096
1097        let x = FdMatrix::zeros(10, 10);
1098        let y = vec![0.0; 5];
1099        let result = ridge_regression_fit(&x, &y, 0.1, true);
1100        assert!(result.error.is_some());
1101    }
1102
1103    #[test]
1104    fn test_all_zero_fpca() {
1105        // All-zero data: centering leaves zeros, SVD should return trivial result
1106        let n = 5;
1107        let m = 20;
1108        let data = FdMatrix::zeros(n, m);
1109        let t: Vec<f64> = (0..m).map(|i| i as f64 / (m - 1) as f64).collect();
1110        let result = fdata_to_pc_1d(&data, 2, &t);
1111        // Should not panic; may return Ok with zero singular values
1112        if let Ok(res) = result {
1113            assert_eq!(res.scores.nrows(), n);
1114            for &sv in &res.singular_values {
1115                assert!(
1116                    sv.abs() < 1e-10,
1117                    "All-zero data should have zero singular values"
1118                );
1119            }
1120        }
1121    }
1122
1123    #[test]
1124    fn test_n1_pca() {
1125        // Single observation: centering leaves all zeros, SVD may return trivial result
1126        let data = FdMatrix::from_column_major(vec![1.0, 2.0, 3.0], 1, 3).unwrap();
1127        let t = vec![0.0, 0.5, 1.0];
1128        let result = fdata_to_pc_1d(&data, 1, &t);
1129        // With n=1, centering leaves all zeros, so SVD may fail or return trivial result
1130        // Just ensure no panic
1131        let _ = result;
1132    }
1133
1134    #[test]
1135    fn test_constant_y_pls() {
1136        let n = 10;
1137        let m = 20;
1138        let data_vec: Vec<f64> = (0..n * m).map(|i| (i as f64 * 0.1).sin()).collect();
1139        let data = FdMatrix::from_column_major(data_vec, n, m).unwrap();
1140        let t: Vec<f64> = (0..m).map(|i| i as f64 / (m - 1) as f64).collect();
1141        let y = vec![5.0; n]; // Constant response
1142        let result = fdata_to_pls_1d(&data, &y, 2, &t);
1143        // Constant y → centering makes y all zeros, PLS may fail
1144        // Just ensure no panic
1145        let _ = result;
1146    }
1147
1148    // ============== FpcaResult::project tests ==============
1149
1150    #[test]
1151    fn test_fpca_project_shape() {
1152        let n = 20;
1153        let m = 30;
1154        let ncomp = 3;
1155        let (data, t) = generate_test_fdata(n, m);
1156        let fpca = fdata_to_pc_1d(&data, ncomp, &t).unwrap();
1157
1158        let new_data = FdMatrix::zeros(5, m);
1159        let scores = fpca.project(&new_data).unwrap();
1160        assert_eq!(scores.shape(), (5, ncomp));
1161    }
1162
1163    #[test]
1164    fn test_fpca_project_reproduces_training_scores() {
1165        let n = 20;
1166        let m = 30;
1167        let ncomp = 3;
1168        let (data, t) = generate_test_fdata(n, m);
1169        let fpca = fdata_to_pc_1d(&data, ncomp, &t).unwrap();
1170
1171        // Projecting the training data should reproduce the original scores
1172        let scores = fpca.project(&data).unwrap();
1173        for i in 0..n {
1174            for k in 0..ncomp {
1175                assert!(
1176                    (scores[(i, k)] - fpca.scores[(i, k)]).abs() < 1e-8,
1177                    "Score mismatch at ({}, {}): {} vs {}",
1178                    i,
1179                    k,
1180                    scores[(i, k)],
1181                    fpca.scores[(i, k)]
1182                );
1183            }
1184        }
1185    }
1186
1187    #[test]
1188    fn test_fpca_project_dimension_mismatch() {
1189        let (data, t) = generate_test_fdata(20, 30);
1190        let fpca = fdata_to_pc_1d(&data, 3, &t).unwrap();
1191
1192        let wrong_m = FdMatrix::zeros(5, 20); // wrong number of columns
1193        assert!(fpca.project(&wrong_m).is_err());
1194    }
1195
1196    // ============== FpcaResult::reconstruct tests ==============
1197
1198    #[test]
1199    fn test_fpca_reconstruct_shape() {
1200        let n = 10;
1201        let m = 30;
1202        let ncomp = 5;
1203        let (data, t) = generate_test_fdata(n, m);
1204        let fpca = fdata_to_pc_1d(&data, ncomp, &t).unwrap();
1205
1206        let recon = fpca.reconstruct(&fpca.scores, 3).unwrap();
1207        assert_eq!(recon.shape(), (n, m));
1208    }
1209
1210    #[test]
1211    fn test_fpca_reconstruct_full_matches_original() {
1212        let n = 10;
1213        let m = 30;
1214        let ncomp = n.min(m);
1215        let (data, t) = generate_test_fdata(n, m);
1216        let fpca = fdata_to_pc_1d(&data, ncomp, &t).unwrap();
1217
1218        // Full reconstruction should recover original data
1219        let recon = fpca.reconstruct(&fpca.scores, ncomp).unwrap();
1220        for i in 0..n {
1221            for j in 0..m {
1222                assert!(
1223                    (recon[(i, j)] - data[(i, j)]).abs() < 0.1,
1224                    "Reconstruction error at ({}, {}): {} vs {}",
1225                    i,
1226                    j,
1227                    recon[(i, j)],
1228                    data[(i, j)]
1229                );
1230            }
1231        }
1232    }
1233
1234    #[test]
1235    fn test_fpca_reconstruct_fewer_components() {
1236        let n = 20;
1237        let m = 30;
1238        let ncomp = 5;
1239        let (data, t) = generate_test_fdata(n, m);
1240        let fpca = fdata_to_pc_1d(&data, ncomp, &t).unwrap();
1241
1242        let recon2 = fpca.reconstruct(&fpca.scores, 2).unwrap();
1243        let recon5 = fpca.reconstruct(&fpca.scores, 5).unwrap();
1244        assert_eq!(recon2.shape(), (n, m));
1245        assert_eq!(recon5.shape(), (n, m));
1246    }
1247
1248    #[test]
1249    fn test_fpca_reconstruct_invalid_ncomp() {
1250        let (data, t) = generate_test_fdata(10, 30);
1251        let fpca = fdata_to_pc_1d(&data, 3, &t).unwrap();
1252
1253        // Zero components
1254        assert!(fpca.reconstruct(&fpca.scores, 0).is_err());
1255        // More components than available
1256        assert!(fpca.reconstruct(&fpca.scores, 10).is_err());
1257    }
1258
1259    // ============== PlsResult::project tests ==============
1260
1261    #[test]
1262    fn test_pls_project_shape() {
1263        let n = 20;
1264        let m = 30;
1265        let ncomp = 3;
1266        let (x, t) = generate_test_fdata(n, m);
1267        let y: Vec<f64> = (0..n).map(|i| i as f64).collect();
1268        let pls = fdata_to_pls_1d(&x, &y, ncomp, &t).unwrap();
1269
1270        let new_x = FdMatrix::zeros(5, m);
1271        let scores = pls.project(&new_x).unwrap();
1272        assert_eq!(scores.shape(), (5, ncomp));
1273    }
1274
1275    #[test]
1276    fn test_pls_project_reproduces_training_scores() {
1277        let n = 20;
1278        let m = 30;
1279        let ncomp = 3;
1280        let (x, t) = generate_test_fdata(n, m);
1281        let y: Vec<f64> = (0..n).map(|i| (i as f64 / n as f64) + 0.1).collect();
1282        let pls = fdata_to_pls_1d(&x, &y, ncomp, &t).unwrap();
1283
1284        // Projecting the training data should reproduce the original scores
1285        let scores = pls.project(&x).unwrap();
1286        for i in 0..n {
1287            for k in 0..ncomp {
1288                assert!(
1289                    (scores[(i, k)] - pls.scores[(i, k)]).abs() < 1e-8,
1290                    "Score mismatch at ({}, {}): {} vs {}",
1291                    i,
1292                    k,
1293                    scores[(i, k)],
1294                    pls.scores[(i, k)]
1295                );
1296            }
1297        }
1298    }
1299
1300    #[test]
1301    fn test_pls_project_dimension_mismatch() {
1302        let (x, t) = generate_test_fdata(20, 30);
1303        let y: Vec<f64> = (0..20).map(|i| i as f64).collect();
1304        let pls = fdata_to_pls_1d(&x, &y, 3, &t).unwrap();
1305
1306        let wrong_m = FdMatrix::zeros(5, 20); // wrong number of columns
1307        assert!(pls.project(&wrong_m).is_err());
1308    }
1309
1310    #[test]
1311    fn test_pls_x_means_stored() {
1312        let n = 20;
1313        let m = 30;
1314        let (x, t) = generate_test_fdata(n, m);
1315        let y: Vec<f64> = (0..n).map(|i| i as f64).collect();
1316        let pls = fdata_to_pls_1d(&x, &y, 3, &t).unwrap();
1317
1318        // x_means should be stored and have correct length
1319        assert_eq!(pls.x_means.len(), m);
1320    }
1321
1322    // ============== Regression tests for issue #22 ==============
1323
1324    /// Regression test: projection of original data recovers original scores.
1325    #[test]
1326    fn fpca_project_recovers_original_scores() {
1327        let n = 15;
1328        let m = 40;
1329        let argvals: Vec<f64> = (0..m).map(|i| i as f64 / (m - 1) as f64).collect();
1330        let vals: Vec<f64> = (0..n)
1331            .flat_map(|i| {
1332                argvals
1333                    .iter()
1334                    .map(move |&t| (2.0 * PI * t).sin() + 0.3 * i as f64 * t)
1335            })
1336            .collect();
1337        let data = FdMatrix::from_column_major(vals, n, m).unwrap();
1338        let fpca = fdata_to_pc_1d(&data, 3, &argvals).unwrap();
1339
1340        // project the training data — should match original scores
1341        let projected = fpca.project(&data).unwrap();
1342        for i in 0..n {
1343            for k in 0..3 {
1344                let diff = (fpca.scores[(i, k)] - projected[(i, k)]).abs();
1345                assert!(
1346                    diff < 1e-8,
1347                    "project score [{i},{k}] mismatch: orig={:.6}, proj={:.6}",
1348                    fpca.scores[(i, k)],
1349                    projected[(i, k)]
1350                );
1351            }
1352        }
1353    }
1354
1355    /// Regression test: weights are stored and have correct properties.
1356    #[test]
1357    fn fpca_weights_are_stored() {
1358        let m = 50;
1359        let argvals: Vec<f64> = (0..m).map(|i| i as f64 / (m - 1) as f64).collect();
1360        let data =
1361            FdMatrix::from_column_major((0..150).map(|i| (i as f64 * 0.1).sin()).collect(), 3, m)
1362                .unwrap();
1363        let fpca = fdata_to_pc_1d(&data, 2, &argvals).unwrap();
1364
1365        // Weights should exist and be positive
1366        assert_eq!(fpca.weights.len(), m);
1367        assert!(fpca.weights.iter().all(|&w| w > 0.0));
1368
1369        // Weights should sum to approximately the domain length (1.0 for [0,1])
1370        let sum: f64 = fpca.weights.iter().sum();
1371        assert!(
1372            (sum - 1.0).abs() < 0.01,
1373            "weight sum should ≈ 1.0, got {sum}"
1374        );
1375    }
1376
1377    /// Regression test: variance explained is consistent across grid densities.
1378    #[test]
1379    fn fpca_variance_explained_grid_invariant() {
1380        use rand::rngs::StdRng;
1381        use rand::{Rng, SeedableRng};
1382
1383        let n = 20;
1384        let mut rng = StdRng::seed_from_u64(99);
1385        let coeffs: Vec<(f64, f64)> = (0..n)
1386            .map(|_| (rng.gen_range(-1.0..1.0), rng.gen_range(-1.0..1.0)))
1387            .collect();
1388
1389        let make_data = |m: usize| -> (FdMatrix, Vec<f64>) {
1390            let t: Vec<f64> = (0..m).map(|i| i as f64 / (m - 1) as f64).collect();
1391            let mut vals = vec![0.0; n * m];
1392            for (i, &(a, b)) in coeffs.iter().enumerate() {
1393                for (j, &tj) in t.iter().enumerate() {
1394                    vals[i + j * n] = a * (2.0 * PI * tj).sin() + b * (4.0 * PI * tj).cos();
1395                }
1396            }
1397            (FdMatrix::from_column_major(vals, n, m).unwrap(), t)
1398        };
1399
1400        let (d1, t1) = make_data(41);
1401        let (d2, t2) = make_data(201);
1402        let f1 = fdata_to_pc_1d(&d1, 2, &t1).unwrap();
1403        let f2 = fdata_to_pc_1d(&d2, 2, &t2).unwrap();
1404
1405        let total1: f64 = f1.singular_values.iter().map(|s| s * s).sum();
1406        let total2: f64 = f2.singular_values.iter().map(|s| s * s).sum();
1407        let pve1 = f1.singular_values[0].powi(2) / total1;
1408        let pve2 = f2.singular_values[0].powi(2) / total2;
1409
1410        assert!(
1411            (pve1 - pve2).abs() < 0.05,
1412            "variance explained differs: coarse={pve1:.4}, fine={pve2:.4}"
1413        );
1414    }
1415
1416    #[test]
1417    fn fpca_scores_invariant_to_grid_density() {
1418        use rand::rngs::StdRng;
1419        use rand::{Rng, SeedableRng};
1420
1421        let n = 20;
1422
1423        // Generate random coefficients once
1424        let mut rng = StdRng::seed_from_u64(42);
1425        let coeffs: Vec<(f64, f64)> = (0..n)
1426            .map(|_| (rng.gen_range(-1.0..1.0), rng.gen_range(-1.0..1.0)))
1427            .collect();
1428
1429        // Helper: generate data on a given grid from the same analytic expression
1430        let make_data = |m: usize| -> (FdMatrix, Vec<f64>) {
1431            let t: Vec<f64> = (0..m).map(|i| i as f64 / (m - 1) as f64).collect();
1432            let mut vals = vec![0.0; n * m];
1433            for (i, &(a, b)) in coeffs.iter().enumerate() {
1434                for (j, &tj) in t.iter().enumerate() {
1435                    vals[i + j * n] = a * (2.0 * PI * tj).sin() + b * (4.0 * PI * tj).cos();
1436                }
1437            }
1438            let data = FdMatrix::from_column_major(vals, n, m).unwrap();
1439            (data, t)
1440        };
1441
1442        let (data1, t1) = make_data(51);
1443        let (data2, t2) = make_data(201);
1444
1445        let fpca1 = fdata_to_pc_1d(&data1, 2, &t1).unwrap();
1446        let fpca2 = fdata_to_pc_1d(&data2, 2, &t2).unwrap();
1447
1448        // Scores should be approximately the same (allow sign flip per component)
1449        for k in 0..2 {
1450            // Determine sign: use the sign of the dot product between score vectors
1451            let dot: f64 = (0..n)
1452                .map(|i| fpca1.scores[(i, k)] * fpca2.scores[(i, k)])
1453                .sum();
1454            let sign = if dot >= 0.0 { 1.0 } else { -1.0 };
1455
1456            for i in 0..n {
1457                let s1 = fpca1.scores[(i, k)];
1458                let s2 = sign * fpca2.scores[(i, k)];
1459                let rel_diff = if s1.abs() > 1e-6 {
1460                    (s1 - s2).abs() / s1.abs()
1461                } else {
1462                    (s1 - s2).abs()
1463                };
1464                assert!(
1465                    rel_diff < 0.10,
1466                    "score [{i},{k}] differs: coarse={s1:.4}, fine={s2:.4}, rel_diff={rel_diff:.4}"
1467                );
1468            }
1469        }
1470    }
1471}