Skip to main content

fdars_core/
elastic_fpca.rs

1//! Vertical, horizontal, and joint FPCA for elastic functional data.
2//!
3//! These FPCA variants decompose amplitude vs phase variability after elastic
4//! alignment. They correspond to `vert.fpca`, `horiz.fpca`, and `jointFPCA`
5//! from the R fdasrvf package.
6//!
7//! Key capabilities:
8//! - [`vert_fpca`] — Amplitude FPCA in augmented SRSF space
9//! - [`horiz_fpca`] — Phase FPCA via shooting vectors on the Hilbert sphere
10//! - [`joint_fpca`] — Combined amplitude + phase FPCA
11
12use crate::alignment::{srsf_inverse, srsf_transform, KarcherMeanResult};
13
14use crate::matrix::FdMatrix;
15use crate::warping::{exp_map_sphere, inv_exp_map_sphere, l2_norm_l2, psi_to_gam};
16use nalgebra::SVD;
17
18// ─── Types ──────────────────────────────────────────────────────────────────
19
/// Result of vertical (amplitude) FPCA.
///
/// Produced by [`vert_fpca`]. Dimensions use n = number of curves,
/// m = number of evaluation points, ncomp = retained components.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct VertFpcaResult {
    /// PC scores (n × ncomp): projection of each centered augmented SRSF
    /// onto the retained covariance eigenvectors.
    pub scores: FdMatrix,
    /// Eigenfunctions in augmented SRSF space (ncomp × (m+1)); the extra
    /// column corresponds to the function-value augmentation dimension.
    pub eigenfunctions_q: FdMatrix,
    /// Eigenfunctions mapped back to function space via `srsf_inverse`
    /// (ncomp × m).
    pub eigenfunctions_f: FdMatrix,
    /// Eigenvalues in decreasing order (variance explained per component).
    pub eigenvalues: Vec<f64>,
    /// Cumulative proportion of variance explained, relative to the sum of
    /// the retained eigenvalues.
    pub cumulative_variance: Vec<f64>,
    /// Augmented mean SRSF (length m+1).
    pub mean_q: Vec<f64>,
}
37
/// Result of horizontal (phase) FPCA.
///
/// Produced by [`horiz_fpca`]. Phase variability is analyzed in the tangent
/// space of the Hilbert sphere at the Karcher mean of the ψ representations.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct HorizFpcaResult {
    /// PC scores (n × ncomp) from PCA on the centered shooting vectors.
    pub scores: FdMatrix,
    /// Eigenfunctions in ψ (tangent) space (ncomp × m).
    pub eigenfunctions_psi: FdMatrix,
    /// Eigenfunctions mapped to warping functions on the original domain
    /// (ncomp × m).
    pub eigenfunctions_gam: FdMatrix,
    /// Eigenvalues in decreasing order.
    pub eigenvalues: Vec<f64>,
    /// Cumulative proportion of variance explained, relative to the sum of
    /// the retained eigenvalues.
    pub cumulative_variance: Vec<f64>,
    /// Karcher mean ψ on the sphere (length m).
    pub mean_psi: Vec<f64>,
    /// Shooting vectors v_i = log_μ(ψ_i) (n × m).
    pub shooting_vectors: FdMatrix,
}
57
/// Result of joint (amplitude + phase) FPCA.
///
/// Produced by [`joint_fpca`], which runs PCA on the concatenation of
/// centered augmented SRSFs and `balance_c`-scaled shooting vectors.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct JointFpcaResult {
    /// PC scores (n × ncomp) of the combined representation.
    pub scores: FdMatrix,
    /// Eigenvalues in decreasing order.
    pub eigenvalues: Vec<f64>,
    /// Cumulative proportion of variance explained, relative to the sum of
    /// the retained eigenvalues.
    pub cumulative_variance: Vec<f64>,
    /// Phase-vs-amplitude balance weight actually used (supplied or optimized).
    pub balance_c: f64,
    /// Vertical (amplitude) component of each eigenvector (ncomp × (m+1)).
    pub vert_component: FdMatrix,
    /// Horizontal (phase) component of each eigenvector (ncomp × m).
    pub horiz_component: FdMatrix,
}
75
76// ─── Vertical FPCA ──────────────────────────────────────────────────────────
77
78/// Perform vertical (amplitude) FPCA on elastically aligned curves.
79///
80/// 1. Compute SRSFs of aligned curves
81/// 2. Augment with `sign(f_i(t0)) * sqrt(|f_i(t0)|)` as extra dimension
82/// 3. Center, compute covariance, SVD
83/// 4. Project onto eigenvectors and convert back to function space
84///
85/// # Arguments
86/// * `karcher` — Pre-computed Karcher mean result (with aligned data and gammas)
87/// * `argvals` — Evaluation points (length m)
88/// * `ncomp` — Number of principal components to extract
89pub fn vert_fpca(
90    karcher: &KarcherMeanResult,
91    argvals: &[f64],
92    ncomp: usize,
93) -> Result<VertFpcaResult, crate::FdarError> {
94    let (n, m) = karcher.aligned_data.shape();
95    if n < 2 || m < 2 || ncomp < 1 || argvals.len() != m {
96        return Err(crate::FdarError::InvalidDimension {
97            parameter: "aligned_data/argvals",
98            expected: "n >= 2, m >= 2, ncomp >= 1, argvals.len() == m".to_string(),
99            actual: format!(
100                "n={}, m={}, ncomp={}, argvals.len()={}",
101                n,
102                m,
103                ncomp,
104                argvals.len()
105            ),
106        });
107    }
108    let ncomp = ncomp.min(n - 1).min(m);
109    let m_aug = m + 1;
110
111    let qn = match &karcher.aligned_srsfs {
112        Some(srsfs) => srsfs.clone(),
113        None => srsf_transform(&karcher.aligned_data, argvals),
114    };
115
116    let q_aug = build_augmented_srsfs(&qn, &karcher.aligned_data, n, m);
117
118    // Covariance matrix K (m_aug × m_aug) and SVD
119    let (_, mean_q) = center_matrix(&q_aug, n, m_aug);
120    let k_mat = build_symmetric_covariance(&q_aug, &mean_q, n, m_aug);
121
122    let svd = SVD::new(k_mat, true, true);
123    let u_cov = svd
124        .u
125        .as_ref()
126        .ok_or_else(|| crate::FdarError::ComputationFailed {
127            operation: "SVD",
128            detail: "SVD failed to compute U matrix; check for constant or zero-variance aligned functions".to_string(),
129        })?;
130
131    let eigenvalues: Vec<f64> = svd.singular_values.iter().take(ncomp).copied().collect();
132    let cumulative_variance = cumulative_variance_from_eigenvalues(&eigenvalues);
133
134    // Eigenfunctions = columns of U from svd(K)
135    let mut eigenfunctions_q = FdMatrix::zeros(ncomp, m_aug);
136    for k in 0..ncomp {
137        for j in 0..m_aug {
138            eigenfunctions_q[(k, j)] = u_cov[(j, k)];
139        }
140    }
141
142    // Scores: project centered data onto eigenvectors
143    let scores = project_onto_eigenvectors(&q_aug, &mean_q, u_cov, n, m_aug, ncomp);
144
145    // Convert eigenfunctions to function domain via srsf_inverse
146    let mut eigenfunctions_f = FdMatrix::zeros(ncomp, m);
147    for k in 0..ncomp {
148        let q_k: Vec<f64> = (0..m)
149            .map(|j| mean_q[j] + eigenfunctions_q[(k, j)])
150            .collect();
151        let aug_val = mean_q[m] + eigenfunctions_q[(k, m)];
152        let f0 = aug_val.signum() * aug_val * aug_val;
153        let f_k = srsf_inverse(&q_k, argvals, f0);
154        for j in 0..m {
155            eigenfunctions_f[(k, j)] = f_k[j];
156        }
157    }
158
159    Ok(VertFpcaResult {
160        scores,
161        eigenfunctions_q,
162        eigenfunctions_f,
163        eigenvalues,
164        cumulative_variance,
165        mean_q,
166    })
167}
168
169// ─── Horizontal FPCA ────────────────────────────────────────────────────────
170
171/// Perform horizontal (phase) FPCA on warping functions.
172///
173/// 1. Convert warps to ψ space (Hilbert sphere)
174/// 2. Compute Karcher mean on sphere via iterative exp/log maps
175/// 3. Compute shooting vectors (log map at mean)
176/// 4. PCA on shooting vectors
177///
178/// # Arguments
179/// * `karcher` — Pre-computed Karcher mean result
180/// * `argvals` — Evaluation points (length m)
181/// * `ncomp` — Number of principal components
182pub fn horiz_fpca(
183    karcher: &KarcherMeanResult,
184    argvals: &[f64],
185    ncomp: usize,
186) -> Result<HorizFpcaResult, crate::FdarError> {
187    let (n, m) = karcher.gammas.shape();
188    if n < 2 || m < 2 || ncomp < 1 || argvals.len() != m {
189        return Err(crate::FdarError::InvalidDimension {
190            parameter: "gammas/argvals",
191            expected: "n >= 2, m >= 2, ncomp >= 1, argvals.len() == m".to_string(),
192            actual: format!(
193                "n={}, m={}, ncomp={}, argvals.len()={}",
194                n,
195                m,
196                ncomp,
197                argvals.len()
198            ),
199        });
200    }
201    let ncomp = ncomp.min(n - 1).min(m);
202
203    let t0 = argvals[0];
204    let domain = argvals[m - 1] - t0;
205    let time: Vec<f64> = (0..m).map(|i| i as f64 / (m - 1) as f64).collect();
206
207    let psis = warps_to_normalized_psi(&karcher.gammas, argvals);
208    let mu_psi = sphere_karcher_mean(&psis, &time, 50);
209    let shooting = shooting_vectors_from_psis(&psis, &mu_psi, &time);
210
211    // PCA on shooting vectors (tangent space → standard PCA)
212    let (centered, _mean_v) = center_matrix(&shooting, n, m);
213
214    let svd = SVD::new(centered.to_dmatrix(), true, true);
215    let v_t = svd
216        .v_t
217        .as_ref()
218        .ok_or_else(|| crate::FdarError::ComputationFailed {
219            operation: "SVD",
220            detail:
221                "SVD failed to compute V^T matrix; check for constant or zero-variance functions"
222                    .to_string(),
223        })?;
224    let (scores, eigenvalues) = svd_scores_and_eigenvalues(&svd, ncomp, n).ok_or_else(|| {
225        crate::FdarError::ComputationFailed {
226            operation: "SVD",
227            detail: "SVD failed to compute scores; try reducing ncomp or check for degenerate input data".to_string(),
228        }
229    })?;
230    let cumulative_variance = cumulative_variance_from_eigenvalues(&eigenvalues);
231
232    // Eigenfunctions in ψ space
233    let mut eigenfunctions_psi = FdMatrix::zeros(ncomp, m);
234    for k in 0..ncomp {
235        for j in 0..m {
236            eigenfunctions_psi[(k, j)] = v_t[(k, j)];
237        }
238    }
239
240    // Convert eigenfunctions to warping functions
241    let mut eigenfunctions_gam = FdMatrix::zeros(ncomp, m);
242    for k in 0..ncomp {
243        let v_k: Vec<f64> = (0..m).map(|j| eigenfunctions_psi[(k, j)]).collect();
244        let psi_k = exp_map_sphere(&mu_psi, &v_k, &time);
245        let gam_k = psi_to_gam(&psi_k, &time);
246        for j in 0..m {
247            eigenfunctions_gam[(k, j)] = t0 + gam_k[j] * domain;
248        }
249    }
250
251    Ok(HorizFpcaResult {
252        scores,
253        eigenfunctions_psi,
254        eigenfunctions_gam,
255        eigenvalues,
256        cumulative_variance,
257        mean_psi: mu_psi,
258        shooting_vectors: shooting,
259    })
260}
261
262// ─── Joint FPCA ─────────────────────────────────────────────────────────────
263
264/// Perform joint (amplitude + phase) FPCA.
265///
266/// Concatenates augmented SRSFs and scaled shooting vectors, then does PCA
267/// on the combined representation.
268///
269/// # Arguments
270/// * `karcher` — Pre-computed Karcher mean result
271/// * `argvals` — Evaluation points (length m)
272/// * `ncomp` — Number of principal components
273/// * `balance_c` — Weight for phase component (if None, optimized via golden section)
274pub fn joint_fpca(
275    karcher: &KarcherMeanResult,
276    argvals: &[f64],
277    ncomp: usize,
278    balance_c: Option<f64>,
279) -> Result<JointFpcaResult, crate::FdarError> {
280    let (n, m) = karcher.aligned_data.shape();
281    if n < 2 || m < 2 || ncomp < 1 || argvals.len() != m {
282        return Err(crate::FdarError::InvalidDimension {
283            parameter: "aligned_data/argvals",
284            expected: "n >= 2, m >= 2, ncomp >= 1, argvals.len() == m".to_string(),
285            actual: format!(
286                "n={}, m={}, ncomp={}, argvals.len()={}",
287                n,
288                m,
289                ncomp,
290                argvals.len()
291            ),
292        });
293    }
294
295    let _vert = vert_fpca(karcher, argvals, ncomp)?;
296    let horiz = horiz_fpca(karcher, argvals, ncomp)?;
297
298    let m_aug = m + 1;
299    let ncomp = ncomp.min(n - 1);
300
301    let qn = match &karcher.aligned_srsfs {
302        Some(srsfs) => srsfs.clone(),
303        None => srsf_transform(&karcher.aligned_data, argvals),
304    };
305    let q_aug = build_augmented_srsfs(&qn, &karcher.aligned_data, n, m);
306    let (q_centered, _mean_q) = center_matrix(&q_aug, n, m_aug);
307
308    let shooting = &horiz.shooting_vectors;
309    let c = match balance_c {
310        Some(c) => c,
311        None => optimize_balance_c(karcher, argvals, &q_centered, shooting, ncomp),
312    };
313
314    // Concatenate: g_i = [qn_aug_centered_i; C * v_i]
315    let combined = build_combined_representation(&q_centered, shooting, c, n, m_aug, m);
316
317    let svd = SVD::new(combined.to_dmatrix(), true, true);
318    let v_t = svd
319        .v_t
320        .as_ref()
321        .ok_or_else(|| crate::FdarError::ComputationFailed {
322            operation: "SVD",
323            detail:
324                "SVD failed to compute V^T matrix; check for constant or zero-variance functions"
325                    .to_string(),
326        })?;
327    let (scores, eigenvalues) = svd_scores_and_eigenvalues(&svd, ncomp, n).ok_or_else(|| {
328        crate::FdarError::ComputationFailed {
329            operation: "SVD",
330            detail: "SVD failed to compute scores; try reducing ncomp or check for degenerate input data".to_string(),
331        }
332    })?;
333    let cumulative_variance = cumulative_variance_from_eigenvalues(&eigenvalues);
334
335    // Split eigenvectors into amplitude and phase parts
336    let (vert_component, horiz_component) = split_joint_eigenvectors(v_t, ncomp, m_aug, m);
337
338    Ok(JointFpcaResult {
339        scores,
340        eigenvalues,
341        cumulative_variance,
342        balance_c: c,
343        vert_component,
344        horiz_component,
345    })
346}
347
348// ─── Shared Helpers ────────────────────────────────────────────────────────
349
/// Compute the cumulative proportion of variance explained from eigenvalues.
///
/// Proportions are relative to the sum of the given (retained) eigenvalues;
/// if that sum is not positive, every entry is 0.
fn cumulative_variance_from_eigenvalues(eigenvalues: &[f64]) -> Vec<f64> {
    let total: f64 = eigenvalues.iter().sum();
    eigenvalues
        .iter()
        .scan(0.0_f64, |running, &ev| {
            *running += ev;
            Some(if total > 0.0 { *running / total } else { 0.0 })
        })
        .collect()
}
365
366/// Convert warping functions to normalized ψ vectors on the Hilbert sphere.
367pub(crate) fn warps_to_normalized_psi(gammas: &FdMatrix, argvals: &[f64]) -> Vec<Vec<f64>> {
368    let (n, m) = gammas.shape();
369    let t0 = argvals[0];
370    let domain = argvals[m - 1] - t0;
371    let time: Vec<f64> = (0..m).map(|i| i as f64 / (m - 1) as f64).collect();
372    let binsize = 1.0 / (m - 1) as f64;
373
374    (0..n)
375        .map(|i| {
376            let gam_01: Vec<f64> = (0..m).map(|j| (gammas[(i, j)] - t0) / domain).collect();
377            let mut grad = vec![0.0; m];
378            grad[0] = (gam_01[1] - gam_01[0]) / binsize;
379            for j in 1..m - 1 {
380                grad[j] = (gam_01[j + 1] - gam_01[j - 1]) / (2.0 * binsize);
381            }
382            grad[m - 1] = (gam_01[m - 1] - gam_01[m - 2]) / binsize;
383            let mut psi: Vec<f64> = grad.iter().map(|&g| g.max(0.0).sqrt()).collect();
384            let norm = l2_norm_l2(&psi, &time);
385            if norm > 1e-10 {
386                for v in &mut psi {
387                    *v /= norm;
388                }
389            }
390            psi
391        })
392        .collect()
393}
394
395/// Compute Karcher mean on the Hilbert sphere via iterative exp/log maps.
396pub(crate) fn sphere_karcher_mean(psis: &[Vec<f64>], time: &[f64], max_iter: usize) -> Vec<f64> {
397    let n = psis.len();
398    let m = psis[0].len();
399
400    // Initial mean: normalized arithmetic mean
401    let mut mu_psi = vec![0.0; m];
402    for psi in psis {
403        for j in 0..m {
404            mu_psi[j] += psi[j];
405        }
406    }
407    for j in 0..m {
408        mu_psi[j] /= n as f64;
409    }
410    normalize_to_sphere(&mut mu_psi, time);
411
412    // Iterative refinement
413    for _ in 0..max_iter {
414        let mean_v = mean_tangent_vector(psis, &mu_psi, time);
415        let step_norm = l2_norm_l2(&mean_v, time);
416        if step_norm < 1e-8 {
417            break;
418        }
419        mu_psi = exp_map_sphere(&mu_psi, &mean_v, time);
420        normalize_to_sphere(&mut mu_psi, time);
421    }
422
423    mu_psi
424}
425
426/// Compute shooting vectors v_i = log_μ(ψ_i) from ψ vectors and Karcher mean.
427pub(crate) fn shooting_vectors_from_psis(
428    psis: &[Vec<f64>],
429    mu_psi: &[f64],
430    time: &[f64],
431) -> FdMatrix {
432    let n = psis.len();
433    let m = psis[0].len();
434    let mut shooting = FdMatrix::zeros(n, m);
435    for i in 0..n {
436        let v = inv_exp_map_sphere(mu_psi, &psis[i], time);
437        for j in 0..m {
438            shooting[(i, j)] = v[j];
439        }
440    }
441    shooting
442}
443
444/// Build augmented SRSF matrix: original SRSFs + sign(f(id))*sqrt(|f(id)|) column.
445pub(crate) fn build_augmented_srsfs(
446    qn: &FdMatrix,
447    aligned_data: &FdMatrix,
448    n: usize,
449    m: usize,
450) -> FdMatrix {
451    let id = m / 2;
452    let m_aug = m + 1;
453    let mut q_aug = FdMatrix::zeros(n, m_aug);
454    for i in 0..n {
455        for j in 0..m {
456            q_aug[(i, j)] = qn[(i, j)];
457        }
458        let fid = aligned_data[(i, id)];
459        q_aug[(i, m)] = fid.signum() * fid.abs().sqrt();
460    }
461    q_aug
462}
463
464/// Center a matrix and return the mean vector.
465pub(crate) fn center_matrix(mat: &FdMatrix, n: usize, m: usize) -> (FdMatrix, Vec<f64>) {
466    let mut mean = vec![0.0; m];
467    for j in 0..m {
468        for i in 0..n {
469            mean[j] += mat[(i, j)];
470        }
471        mean[j] /= n as f64;
472    }
473    let mut centered = FdMatrix::zeros(n, m);
474    for i in 0..n {
475        for j in 0..m {
476            centered[(i, j)] = mat[(i, j)] - mean[j];
477        }
478    }
479    (centered, mean)
480}
481
482/// Extract eigenvalues and scores from SVD of centered data.
483fn svd_scores_and_eigenvalues(
484    svd: &SVD<f64, nalgebra::Dyn, nalgebra::Dyn>,
485    ncomp: usize,
486    n: usize,
487) -> Option<(FdMatrix, Vec<f64>)> {
488    let u = svd.u.as_ref()?;
489    let eigenvalues: Vec<f64> = svd
490        .singular_values
491        .iter()
492        .take(ncomp)
493        .map(|&s| s * s / (n - 1) as f64)
494        .collect();
495    let mut scores = FdMatrix::zeros(n, ncomp);
496    for k in 0..ncomp {
497        let sv = svd.singular_values[k];
498        for i in 0..n {
499            scores[(i, k)] = u[(i, k)] * sv;
500        }
501    }
502    Some((scores, eigenvalues))
503}
504
505/// Split joint eigenvectors into vertical (amplitude) and horizontal (phase) components.
506fn split_joint_eigenvectors(
507    v_t: &nalgebra::DMatrix<f64>,
508    ncomp: usize,
509    m_aug: usize,
510    m: usize,
511) -> (FdMatrix, FdMatrix) {
512    let mut vert_component = FdMatrix::zeros(ncomp, m_aug);
513    let mut horiz_component = FdMatrix::zeros(ncomp, m);
514    for k in 0..ncomp {
515        for j in 0..m_aug {
516            vert_component[(k, j)] = v_t[(k, j)];
517        }
518        for j in 0..m {
519            horiz_component[(k, j)] = v_t[(k, m_aug + j)];
520        }
521    }
522    (vert_component, horiz_component)
523}
524
525/// Build symmetric covariance matrix K (d × d) from data and mean.
526fn build_symmetric_covariance(
527    data: &FdMatrix,
528    mean: &[f64],
529    n: usize,
530    d: usize,
531) -> nalgebra::DMatrix<f64> {
532    let nf = (n - 1) as f64;
533    let mut k_mat = nalgebra::DMatrix::zeros(d, d);
534    for i in 0..n {
535        for p in 0..d {
536            let dp = data[(i, p)] - mean[p];
537            for q in p..d {
538                k_mat[(p, q)] += dp * (data[(i, q)] - mean[q]);
539            }
540        }
541    }
542    for p in 0..d {
543        k_mat[(p, p)] /= nf;
544        for q in (p + 1)..d {
545            k_mat[(p, q)] /= nf;
546            k_mat[(q, p)] = k_mat[(p, q)];
547        }
548    }
549    k_mat
550}
551
552/// Project centered data onto covariance eigenvectors to get scores.
553fn project_onto_eigenvectors(
554    data: &FdMatrix,
555    mean: &[f64],
556    u_cov: &nalgebra::DMatrix<f64>,
557    n: usize,
558    d: usize,
559    ncomp: usize,
560) -> FdMatrix {
561    let mut scores = FdMatrix::zeros(n, ncomp);
562    for k in 0..ncomp {
563        for i in 0..n {
564            let mut s = 0.0;
565            for j in 0..d {
566                s += (data[(i, j)] - mean[j]) * u_cov[(j, k)];
567            }
568            scores[(i, k)] = s;
569        }
570    }
571    scores
572}
573
574/// Normalize a vector to unit L2 norm on sphere. Returns whether normalization happened.
575fn normalize_to_sphere(mu: &mut [f64], time: &[f64]) {
576    let norm = l2_norm_l2(mu, time);
577    if norm > 1e-10 {
578        for v in mu.iter_mut() {
579            *v /= norm;
580        }
581    }
582}
583
584/// Compute mean tangent vector on sphere from ψ vectors at current mean.
585fn mean_tangent_vector(psis: &[Vec<f64>], mu_psi: &[f64], time: &[f64]) -> Vec<f64> {
586    let n = psis.len();
587    let m = mu_psi.len();
588    let mut mean_v = vec![0.0; m];
589    for psi in psis {
590        let v = inv_exp_map_sphere(mu_psi, psi, time);
591        for j in 0..m {
592            mean_v[j] += v[j];
593        }
594    }
595    for j in 0..m {
596        mean_v[j] /= n as f64;
597    }
598    mean_v
599}
600
601/// Build combined representation: [q_centered | c * shooting] for joint FPCA.
602fn build_combined_representation(
603    q_centered: &FdMatrix,
604    shooting: &FdMatrix,
605    c: f64,
606    n: usize,
607    m_aug: usize,
608    m: usize,
609) -> FdMatrix {
610    let combined_dim = m_aug + m;
611    let mut combined = FdMatrix::zeros(n, combined_dim);
612    for i in 0..n {
613        for j in 0..m_aug {
614            combined[(i, j)] = q_centered[(i, j)];
615        }
616        for j in 0..m {
617            combined[(i, m_aug + j)] = c * shooting[(i, j)];
618        }
619    }
620    combined
621}
622
623/// Optimize the balance parameter C via golden section search.
624///
625/// Minimizes reconstruction error of the joint representation.
626fn optimize_balance_c(
627    _karcher: &KarcherMeanResult,
628    _argvals: &[f64],
629    q_centered: &FdMatrix,
630    shooting: &FdMatrix,
631    ncomp: usize,
632) -> f64 {
633    let (n, m) = shooting.shape();
634    let m_aug = q_centered.ncols();
635    let combined_dim = m_aug + m;
636
637    let golden_ratio = (5.0_f64.sqrt() - 1.0) / 2.0;
638    let mut a = 0.0_f64;
639    let mut b = 10.0_f64;
640
641    let eval_c = |c: f64| -> f64 {
642        let mut combined = FdMatrix::zeros(n, combined_dim);
643        for i in 0..n {
644            for j in 0..m_aug {
645                combined[(i, j)] = q_centered[(i, j)];
646            }
647            for j in 0..m {
648                combined[(i, m_aug + j)] = c * shooting[(i, j)];
649            }
650        }
651
652        let svd = SVD::new(combined.to_dmatrix(), true, true);
653        if let (Some(_u), Some(_v_t)) = (svd.u.as_ref(), svd.v_t.as_ref()) {
654            let nc = ncomp.min(svd.singular_values.len());
655            // Reconstruction error = total variance - explained variance
656            let total_var: f64 = svd.singular_values.iter().map(|&s| s * s).sum();
657            let explained: f64 = svd.singular_values.iter().take(nc).map(|&s| s * s).sum();
658            total_var - explained
659        } else {
660            f64::INFINITY
661        }
662    };
663
664    for _ in 0..20 {
665        let c1 = b - golden_ratio * (b - a);
666        let c2 = a + golden_ratio * (b - a);
667        if eval_c(c1) < eval_c(c2) {
668            b = c2;
669        } else {
670            a = c1;
671        }
672    }
673
674    (a + b) / 2.0
675}
676
677#[cfg(test)]
678mod tests {
679    use super::*;
680    use crate::alignment::karcher_mean;
681    use std::f64::consts::PI;
682
683    fn generate_test_data(n: usize, m: usize) -> (FdMatrix, Vec<f64>) {
684        let t: Vec<f64> = (0..m).map(|j| j as f64 / (m - 1) as f64).collect();
685        let mut data = FdMatrix::zeros(n, m);
686        for i in 0..n {
687            let shift = 0.1 * (i as f64 - n as f64 / 2.0);
688            let scale = 1.0 + 0.2 * (i as f64 / n as f64);
689            for j in 0..m {
690                data[(i, j)] = scale * (2.0 * PI * (t[j] + shift)).sin();
691            }
692        }
693        (data, t)
694    }
695
696    #[test]
697    fn test_vert_fpca_basic() {
698        let (data, t) = generate_test_data(15, 51);
699        let km = karcher_mean(&data, &t, 10, 1e-4, 0.0);
700        let result = vert_fpca(&km, &t, 3);
701        assert!(result.is_ok(), "vert_fpca should succeed");
702
703        let res = result.unwrap();
704        assert_eq!(res.scores.shape(), (15, 3));
705        assert_eq!(res.eigenvalues.len(), 3);
706        assert_eq!(res.eigenfunctions_q.shape(), (3, 52)); // m+1
707        assert_eq!(res.eigenfunctions_f.shape(), (3, 51));
708
709        // Eigenvalues should be non-negative and decreasing
710        for ev in &res.eigenvalues {
711            assert!(*ev >= -1e-10, "Eigenvalue should be non-negative: {}", ev);
712        }
713        for i in 1..res.eigenvalues.len() {
714            assert!(
715                res.eigenvalues[i] <= res.eigenvalues[i - 1] + 1e-10,
716                "Eigenvalues should be decreasing"
717            );
718        }
719
720        // Cumulative variance should be increasing and <= 1
721        for i in 1..res.cumulative_variance.len() {
722            assert!(res.cumulative_variance[i] >= res.cumulative_variance[i - 1] - 1e-10);
723        }
724        assert!(*res.cumulative_variance.last().unwrap() <= 1.0 + 1e-10);
725    }
726
727    #[test]
728    fn test_horiz_fpca_basic() {
729        let (data, t) = generate_test_data(15, 51);
730        let km = karcher_mean(&data, &t, 10, 1e-4, 0.0);
731        let result = horiz_fpca(&km, &t, 3);
732        assert!(result.is_ok(), "horiz_fpca should succeed");
733
734        let res = result.unwrap();
735        assert_eq!(res.scores.shape(), (15, 3));
736        assert_eq!(res.eigenvalues.len(), 3);
737        assert_eq!(res.eigenfunctions_psi.shape(), (3, 51));
738        assert_eq!(res.shooting_vectors.shape(), (15, 51));
739    }
740
741    #[test]
742    fn test_joint_fpca_basic() {
743        let (data, t) = generate_test_data(15, 51);
744        let km = karcher_mean(&data, &t, 10, 1e-4, 0.0);
745        let result = joint_fpca(&km, &t, 3, Some(1.0));
746        assert!(result.is_ok(), "joint_fpca should succeed");
747
748        let res = result.unwrap();
749        assert_eq!(res.scores.shape(), (15, 3));
750        assert_eq!(res.eigenvalues.len(), 3);
751        assert!(res.balance_c >= 0.0);
752    }
753
754    #[test]
755    fn test_joint_fpca_optimize_c() {
756        let (data, t) = generate_test_data(15, 51);
757        let km = karcher_mean(&data, &t, 10, 1e-4, 0.0);
758        let result = joint_fpca(&km, &t, 3, None);
759        assert!(
760            result.is_ok(),
761            "joint_fpca with C optimization should succeed"
762        );
763    }
764
765    #[test]
766    fn test_vert_fpca_invalid_input() {
767        let data = FdMatrix::zeros(1, 10); // n < 2
768        let t: Vec<f64> = (0..10).map(|i| i as f64 / 9.0).collect();
769        let km = KarcherMeanResult {
770            mean: vec![0.0; 10],
771            mean_srsf: vec![0.0; 10],
772            gammas: FdMatrix::zeros(1, 10),
773            aligned_data: data,
774            n_iter: 0,
775            converged: true,
776            aligned_srsfs: None,
777        };
778        assert!(vert_fpca(&km, &t, 3).is_err());
779    }
780
781    #[test]
782    fn test_horiz_fpca_eigenvalue_properties() {
783        let (data, t) = generate_test_data(15, 51);
784        let km = karcher_mean(&data, &t, 10, 1e-4, 0.0);
785        let res = horiz_fpca(&km, &t, 3).expect("horiz_fpca should succeed");
786
787        // Eigenvalues non-negative
788        for ev in &res.eigenvalues {
789            assert!(*ev >= -1e-10, "Eigenvalue should be non-negative: {}", ev);
790        }
791        // Eigenvalues decreasing
792        for i in 1..res.eigenvalues.len() {
793            assert!(
794                res.eigenvalues[i] <= res.eigenvalues[i - 1] + 1e-10,
795                "Eigenvalues should be decreasing"
796            );
797        }
798        // Cumulative variance increasing and <= 1
799        for i in 1..res.cumulative_variance.len() {
800            assert!(res.cumulative_variance[i] >= res.cumulative_variance[i - 1] - 1e-10);
801        }
802        assert!(*res.cumulative_variance.last().unwrap() <= 1.0 + 1e-10);
803    }
804
805    #[test]
806    fn test_joint_fpca_eigenvalue_properties() {
807        let (data, t) = generate_test_data(15, 51);
808        let km = karcher_mean(&data, &t, 10, 1e-4, 0.0);
809        let res = joint_fpca(&km, &t, 3, Some(1.0)).expect("joint_fpca should succeed");
810
811        // Eigenvalues non-negative
812        for ev in &res.eigenvalues {
813            assert!(*ev >= -1e-10, "Eigenvalue should be non-negative: {}", ev);
814        }
815        // Eigenvalues decreasing
816        for i in 1..res.eigenvalues.len() {
817            assert!(
818                res.eigenvalues[i] <= res.eigenvalues[i - 1] + 1e-10,
819                "Eigenvalues should be decreasing"
820            );
821        }
822        // Cumulative variance increasing and <= 1
823        for i in 1..res.cumulative_variance.len() {
824            assert!(res.cumulative_variance[i] >= res.cumulative_variance[i - 1] - 1e-10);
825        }
826        assert!(*res.cumulative_variance.last().unwrap() <= 1.0 + 1e-10);
827    }
828
829    #[test]
830    fn test_vert_fpca_ncomp_sensitivity() -> Result<(), crate::error::FdarError> {
831        let (data, t) = generate_test_data(15, 51);
832        let km = karcher_mean(&data, &t, 10, 1e-4, 0.0);
833
834        for &ncomp in &[1, 2, 5, 10] {
835            let res = vert_fpca(&km, &t, ncomp)?;
836            assert_eq!(res.scores.shape(), (15, ncomp));
837            assert_eq!(res.eigenvalues.len(), ncomp);
838            assert_eq!(res.eigenfunctions_q.shape(), (ncomp, 52));
839            assert_eq!(res.eigenfunctions_f.shape(), (ncomp, 51));
840            assert_eq!(res.cumulative_variance.len(), ncomp);
841        }
842        Ok(())
843    }
844
845    #[test]
846    fn test_vert_fpca_score_orthogonality() {
847        let (data, t) = generate_test_data(15, 51);
848        let km = karcher_mean(&data, &t, 10, 1e-4, 0.0);
849        let res = vert_fpca(&km, &t, 3).expect("vert_fpca should succeed");
850
851        let n = 15;
852        // Check approximate orthogonality of score vectors
853        for k1 in 0..3 {
854            for k2 in (k1 + 1)..3 {
855                let dot: f64 = (0..n)
856                    .map(|i| res.scores[(i, k1)] * res.scores[(i, k2)])
857                    .sum();
858                let norm1: f64 = (0..n)
859                    .map(|i| res.scores[(i, k1)].powi(2))
860                    .sum::<f64>()
861                    .sqrt();
862                let norm2: f64 = (0..n)
863                    .map(|i| res.scores[(i, k2)].powi(2))
864                    .sum::<f64>()
865                    .sqrt();
866                if norm1 > 1e-10 && norm2 > 1e-10 {
867                    let cos_angle = (dot / (norm1 * norm2)).abs();
868                    assert!(
869                        cos_angle < 0.15,
870                        "Score components {} and {} should be approximately orthogonal, cos={}",
871                        k1,
872                        k2,
873                        cos_angle
874                    );
875                }
876            }
877        }
878    }
879}