// fdars_core/classification.rs
//! Functional classification with mixed scalar/functional predictors.
//!
//! Implements supervised classification for functional data using:
//! - [`fclassif_lda`] / [`fclassif_qda`] — FPC + LDA/QDA pipeline
//! - [`fclassif_knn`] — FPC + k-NN classifier
//! - [`fclassif_kernel`] — Nonparametric kernel classifier with mixed predictors
//! - [`fclassif_dd`] — Depth-based DD-classifier
//! - [`fclassif_cv`] — Cross-validated error rate
10use crate::depth::fraiman_muniz_1d;
11use crate::helpers::{l2_distance, simpsons_weights};
12use crate::matrix::FdMatrix;
13use crate::regression::fdata_to_pc_1d;
14
/// Classification result.
pub struct ClassifResult {
    /// Predicted class labels, remapped to the dense range 0..G-1 (length n)
    pub predicted: Vec<usize>,
    /// Posterior/membership probabilities (n x G) — if available
    /// (currently only the DD-classifier fills this, with depth scores)
    pub probabilities: Option<FdMatrix>,
    /// Training accuracy (in-sample; leave-one-out for the knn/kernel methods)
    pub accuracy: f64,
    /// Confusion matrix (G x G): row = true, col = predicted
    pub confusion: Vec<Vec<usize>>,
    /// Number of classes
    pub n_classes: usize,
    /// Number of FPC components used (0 for methods that do not use FPCA)
    pub ncomp: usize,
}
30
/// Cross-validation result.
pub struct ClassifCvResult {
    /// Mean error rate across folds
    pub error_rate: f64,
    /// Per-fold error rates (a failed fold is recorded as 1.0)
    pub fold_errors: Vec<f64>,
    /// Best ncomp (if tuned) — currently echoes the requested ncomp
    pub best_ncomp: usize,
}
40
41// ---------------------------------------------------------------------------
42// Utility helpers
43// ---------------------------------------------------------------------------
44
/// Remap arbitrary class labels onto the dense range 0..G-1.
///
/// Returns the remapped labels plus the number of distinct classes G.
/// The mapping is order-preserving: the smallest original label becomes 0.
fn remap_labels(y: &[usize]) -> (Vec<usize>, usize) {
    // Sorted, deduplicated list of distinct labels.
    let mut distinct: Vec<usize> = y.to_vec();
    distinct.sort_unstable();
    distinct.dedup();

    // Each label is replaced by its rank within the distinct set.
    let remapped: Vec<usize> = y
        .iter()
        .map(|v| distinct.iter().position(|d| d == v).unwrap_or(0))
        .collect();
    let g = distinct.len();
    (remapped, g)
}
57
/// Build a G × G confusion matrix; rows index true labels, columns predictions.
fn confusion_matrix(true_labels: &[usize], pred_labels: &[usize], g: usize) -> Vec<Vec<usize>> {
    let mut cm = vec![vec![0usize; g]; g];
    true_labels
        .iter()
        .zip(pred_labels)
        .filter(|(&t, &p)| t < g && p < g) // defensively ignore out-of-range labels
        .for_each(|(&t, &p)| cm[t][p] += 1);
    cm
}
68
/// Fraction of matching label pairs; 0.0 for empty input.
fn compute_accuracy(true_labels: &[usize], pred_labels: &[usize]) -> f64 {
    if true_labels.is_empty() {
        return 0.0;
    }
    let mut correct = 0usize;
    for (t, p) in true_labels.iter().zip(pred_labels) {
        if t == p {
            correct += 1;
        }
    }
    correct as f64 / true_labels.len() as f64
}
82
83/// Extract FPC scores and append optional scalar covariates.
84fn build_feature_matrix(
85    data: &FdMatrix,
86    covariates: Option<&FdMatrix>,
87    ncomp: usize,
88) -> Option<(FdMatrix, Vec<f64>, FdMatrix)> {
89    let fpca = fdata_to_pc_1d(data, ncomp)?;
90    let n = data.nrows();
91    let d_pc = fpca.scores.ncols();
92    let d_cov = covariates.map_or(0, |c| c.ncols());
93    let d = d_pc + d_cov;
94
95    let mut features = FdMatrix::zeros(n, d);
96    for i in 0..n {
97        for j in 0..d_pc {
98            features[(i, j)] = fpca.scores[(i, j)];
99        }
100        if let Some(cov) = covariates {
101            for j in 0..d_cov {
102                features[(i, d_pc + j)] = cov[(i, j)];
103            }
104        }
105    }
106
107    Some((features, fpca.mean, fpca.rotation))
108}
109
110// ---------------------------------------------------------------------------
111// LDA: Linear Discriminant Analysis
112// ---------------------------------------------------------------------------
113
114/// Compute per-class means, counts, and priors from labeled features.
115fn class_means_and_priors(
116    features: &FdMatrix,
117    labels: &[usize],
118    g: usize,
119) -> (Vec<Vec<f64>>, Vec<usize>, Vec<f64>) {
120    let n = features.nrows();
121    let d = features.ncols();
122    let mut counts = vec![0usize; g];
123    let mut class_means = vec![vec![0.0; d]; g];
124    for i in 0..n {
125        let c = labels[i];
126        counts[c] += 1;
127        for j in 0..d {
128            class_means[c][j] += features[(i, j)];
129        }
130    }
131    for c in 0..g {
132        if counts[c] > 0 {
133            for j in 0..d {
134                class_means[c][j] /= counts[c] as f64;
135            }
136        }
137    }
138    let priors: Vec<f64> = counts.iter().map(|&c| c as f64 / n as f64).collect();
139    (class_means, counts, priors)
140}
141
142/// Compute pooled within-class covariance (symmetric, regularized).
143fn pooled_within_cov(
144    features: &FdMatrix,
145    labels: &[usize],
146    class_means: &[Vec<f64>],
147    g: usize,
148) -> Vec<f64> {
149    let n = features.nrows();
150    let d = features.ncols();
151    let mut cov = vec![0.0; d * d];
152    for i in 0..n {
153        let c = labels[i];
154        for r in 0..d {
155            let dr = features[(i, r)] - class_means[c][r];
156            for s in r..d {
157                let val = dr * (features[(i, s)] - class_means[c][s]);
158                cov[r * d + s] += val;
159                if r != s {
160                    cov[s * d + r] += val;
161                }
162            }
163        }
164    }
165    let scale = (n - g).max(1) as f64;
166    for v in cov.iter_mut() {
167        *v /= scale;
168    }
169    for j in 0..d {
170        cov[j * d + j] += 1e-6;
171    }
172    cov
173}
174
175/// Compute per-class means and pooled within-class covariance.
176fn lda_params(
177    features: &FdMatrix,
178    labels: &[usize],
179    g: usize,
180) -> (Vec<Vec<f64>>, Vec<f64>, Vec<f64>) {
181    let (class_means, _counts, priors) = class_means_and_priors(features, labels, g);
182    let cov = pooled_within_cov(features, labels, &class_means, g);
183    (class_means, cov, priors)
184}
185
/// Cholesky factorization (lower-triangular L with mat = L·Lᵀ) of a d×d
/// row-major matrix. Returns `None` when the matrix is not positive definite.
fn cholesky_d(mat: &[f64], d: usize) -> Option<Vec<f64>> {
    let mut low = vec![0.0; d * d];
    for col in 0..d {
        // Diagonal entry: sqrt(a_jj - Σ_k L_jk²).
        let sq_sum: f64 = (0..col).map(|k| low[col * d + k] * low[col * d + k]).sum();
        let pivot = mat[col * d + col] - sq_sum;
        if pivot <= 0.0 {
            return None; // not positive definite
        }
        let diag = pivot.sqrt();
        low[col * d + col] = diag;
        // Fill the rest of the column below the diagonal.
        for row in (col + 1)..d {
            let dot: f64 = (0..col).map(|k| low[row * d + k] * low[col * d + k]).sum();
            low[row * d + col] = (mat[row * d + col] - dot) / diag;
        }
    }
    Some(low)
}
209
/// Forward substitution: solve L * x = b for lower-triangular L (row-major).
fn forward_solve(l: &[f64], b: &[f64], d: usize) -> Vec<f64> {
    let mut x = vec![0.0; d];
    for row in 0..d {
        let partial: f64 = (0..row).map(|col| l[row * d + col] * x[col]).sum();
        x[row] = (b[row] - partial) / l[row * d + row];
    }
    x
}
222
223/// Mahalanobis distance squared: (x-mu)^T Sigma^{-1} (x-mu) via Cholesky.
224fn mahalanobis_sq(x: &[f64], mu: &[f64], chol: &[f64], d: usize) -> f64 {
225    let diff: Vec<f64> = x.iter().zip(mu.iter()).map(|(&a, &b)| a - b).collect();
226    let y = forward_solve(chol, &diff, d);
227    y.iter().map(|&v| v * v).sum()
228}
229
230/// LDA prediction: assign to class minimizing Mahalanobis distance (with prior).
231fn lda_predict(
232    features: &FdMatrix,
233    class_means: &[Vec<f64>],
234    cov_chol: &[f64],
235    priors: &[f64],
236    g: usize,
237) -> Vec<usize> {
238    let n = features.nrows();
239    let d = features.ncols();
240
241    (0..n)
242        .map(|i| {
243            let xi: Vec<f64> = (0..d).map(|j| features[(i, j)]).collect();
244            let mut best_class = 0;
245            let mut best_score = f64::NEG_INFINITY;
246            for c in 0..g {
247                let maha = mahalanobis_sq(&xi, &class_means[c], cov_chol, d);
248                let score = priors[c].max(1e-15).ln() - 0.5 * maha;
249                if score > best_score {
250                    best_score = score;
251                    best_class = c;
252                }
253            }
254            best_class
255        })
256        .collect()
257}
258
259/// FPC + LDA classification.
260///
261/// # Arguments
262/// * `data` — Functional data (n × m)
263/// * `y` — Class labels (length n)
264/// * `covariates` — Optional scalar covariates (n × p)
265/// * `ncomp` — Number of FPC components
266pub fn fclassif_lda(
267    data: &FdMatrix,
268    y: &[usize],
269    covariates: Option<&FdMatrix>,
270    ncomp: usize,
271) -> Option<ClassifResult> {
272    let n = data.nrows();
273    if n == 0 || y.len() != n || ncomp == 0 {
274        return None;
275    }
276
277    let (labels, g) = remap_labels(y);
278    if g < 2 {
279        return None;
280    }
281
282    let (features, _mean, _rotation) = build_feature_matrix(data, covariates, ncomp)?;
283    let d = features.ncols();
284    let (class_means, cov, priors) = lda_params(&features, &labels, g);
285    let chol = cholesky_d(&cov, d)?;
286
287    let predicted = lda_predict(&features, &class_means, &chol, &priors, g);
288    let accuracy = compute_accuracy(&labels, &predicted);
289    let confusion = confusion_matrix(&labels, &predicted, g);
290
291    Some(ClassifResult {
292        predicted,
293        probabilities: None,
294        accuracy,
295        confusion,
296        n_classes: g,
297        ncomp: features.ncols().min(ncomp),
298    })
299}
300
301// ---------------------------------------------------------------------------
302// QDA: Quadratic Discriminant Analysis
303// ---------------------------------------------------------------------------
304
305/// Accumulate symmetric covariance from feature rows.
306fn accumulate_class_cov(
307    features: &FdMatrix,
308    members: &[usize],
309    mean: &[f64],
310    d: usize,
311) -> Vec<f64> {
312    let mut cov = vec![0.0; d * d];
313    for &i in members {
314        for r in 0..d {
315            let dr = features[(i, r)] - mean[r];
316            for s in r..d {
317                let val = dr * (features[(i, s)] - mean[s]);
318                cov[r * d + s] += val;
319                if r != s {
320                    cov[s * d + r] += val;
321                }
322            }
323        }
324    }
325    cov
326}
327
328/// Per-class covariance matrices.
329fn qda_class_covariances(
330    features: &FdMatrix,
331    labels: &[usize],
332    class_means: &[Vec<f64>],
333    g: usize,
334) -> Vec<Vec<f64>> {
335    let n = features.nrows();
336    let d = features.ncols();
337
338    (0..g)
339        .map(|c| {
340            let members: Vec<usize> = (0..n).filter(|&i| labels[i] == c).collect();
341            let nc = members.len().max(1) as f64;
342            let mut cov = accumulate_class_cov(features, &members, &class_means[c], d);
343            for v in cov.iter_mut() {
344                *v /= nc;
345            }
346            for j in 0..d {
347                cov[j * d + j] += 1e-6;
348            }
349            cov
350        })
351        .collect()
352}
353
354/// Compute QDA parameters: class means, Cholesky factors, log-dets, priors.
355fn build_qda_params(
356    features: &FdMatrix,
357    labels: &[usize],
358    g: usize,
359) -> Option<(Vec<Vec<f64>>, Vec<Vec<f64>>, Vec<f64>, Vec<f64>)> {
360    let d = features.ncols();
361    let (class_means, _counts, priors) = class_means_and_priors(features, labels, g);
362    let class_covs = qda_class_covariances(features, labels, &class_means, g);
363    let mut class_chols = Vec::with_capacity(g);
364    let mut class_log_dets = Vec::with_capacity(g);
365    for cov in &class_covs {
366        let chol = cholesky_d(cov, d)?;
367        class_log_dets.push(log_det_cholesky(&chol, d));
368        class_chols.push(chol);
369    }
370    Some((class_means, class_chols, class_log_dets, priors))
371}
372
/// Log-determinant of Σ from its Cholesky factor L: 2·Σ ln(L_ii).
fn log_det_cholesky(l: &[f64], d: usize) -> f64 {
    let diag_log_sum: f64 = (0..d).map(|i| l[i * d + i].ln()).sum();
    2.0 * diag_log_sum
}
381
382/// QDA prediction: per-class covariance.
383fn qda_predict(
384    features: &FdMatrix,
385    class_means: &[Vec<f64>],
386    class_chols: &[Vec<f64>],
387    class_log_dets: &[f64],
388    priors: &[f64],
389    g: usize,
390) -> Vec<usize> {
391    let n = features.nrows();
392    let d = features.ncols();
393
394    (0..n)
395        .map(|i| {
396            let xi: Vec<f64> = (0..d).map(|j| features[(i, j)]).collect();
397            let mut best_class = 0;
398            let mut best_score = f64::NEG_INFINITY;
399            for c in 0..g {
400                let maha = mahalanobis_sq(&xi, &class_means[c], &class_chols[c], d);
401                let score = priors[c].max(1e-15).ln() - 0.5 * (class_log_dets[c] + maha);
402                if score > best_score {
403                    best_score = score;
404                    best_class = c;
405                }
406            }
407            best_class
408        })
409        .collect()
410}
411
412/// FPC + QDA classification.
413pub fn fclassif_qda(
414    data: &FdMatrix,
415    y: &[usize],
416    covariates: Option<&FdMatrix>,
417    ncomp: usize,
418) -> Option<ClassifResult> {
419    let n = data.nrows();
420    if n == 0 || y.len() != n || ncomp == 0 {
421        return None;
422    }
423
424    let (labels, g) = remap_labels(y);
425    if g < 2 {
426        return None;
427    }
428
429    let (features, _mean, _rotation) = build_feature_matrix(data, covariates, ncomp)?;
430
431    let (class_means, class_chols, class_log_dets, priors) =
432        build_qda_params(&features, &labels, g)?;
433
434    let predicted = qda_predict(
435        &features,
436        &class_means,
437        &class_chols,
438        &class_log_dets,
439        &priors,
440        g,
441    );
442    let accuracy = compute_accuracy(&labels, &predicted);
443    let confusion = confusion_matrix(&labels, &predicted, g);
444
445    Some(ClassifResult {
446        predicted,
447        probabilities: None,
448        accuracy,
449        confusion,
450        n_classes: g,
451        ncomp: features.ncols().min(ncomp),
452    })
453}
454
455// ---------------------------------------------------------------------------
456// k-NN classifier
457// ---------------------------------------------------------------------------
458
459/// FPC + k-NN classification.
460///
461/// # Arguments
462/// * `data` — Functional data (n × m)
463/// * `y` — Class labels
464/// * `covariates` — Optional scalar covariates
465/// * `ncomp` — Number of FPC components
466/// * `k_nn` — Number of nearest neighbors
467pub fn fclassif_knn(
468    data: &FdMatrix,
469    y: &[usize],
470    covariates: Option<&FdMatrix>,
471    ncomp: usize,
472    k_nn: usize,
473) -> Option<ClassifResult> {
474    let n = data.nrows();
475    if n == 0 || y.len() != n || ncomp == 0 || k_nn == 0 {
476        return None;
477    }
478
479    let (labels, g) = remap_labels(y);
480    if g < 2 {
481        return None;
482    }
483
484    let (features, _mean, _rotation) = build_feature_matrix(data, covariates, ncomp)?;
485    let d = features.ncols();
486
487    let predicted = knn_predict_loo(&features, &labels, g, d, k_nn);
488    let accuracy = compute_accuracy(&labels, &predicted);
489    let confusion = confusion_matrix(&labels, &predicted, g);
490
491    Some(ClassifResult {
492        predicted,
493        probabilities: None,
494        accuracy,
495        confusion,
496        n_classes: g,
497        ncomp: d.min(ncomp),
498    })
499}
500
501/// Leave-one-out k-NN prediction.
502fn knn_predict_loo(
503    features: &FdMatrix,
504    labels: &[usize],
505    g: usize,
506    d: usize,
507    k_nn: usize,
508) -> Vec<usize> {
509    let n = features.nrows();
510    let k_nn = k_nn.min(n - 1);
511
512    (0..n)
513        .map(|i| {
514            let xi: Vec<f64> = (0..d).map(|j| features[(i, j)]).collect();
515            let mut dists: Vec<(f64, usize)> = (0..n)
516                .filter(|&j| j != i)
517                .map(|j| {
518                    let xj: Vec<f64> = (0..d).map(|jj| features[(j, jj)]).collect();
519                    let d_sq: f64 = xi.iter().zip(&xj).map(|(&a, &b)| (a - b).powi(2)).sum();
520                    (d_sq, labels[j])
521                })
522                .collect();
523            dists.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal));
524
525            // Majority vote among k nearest
526            let mut votes = vec![0usize; g];
527            for &(_, label) in dists.iter().take(k_nn) {
528                votes[label] += 1;
529            }
530            votes
531                .iter()
532                .enumerate()
533                .max_by_key(|&(_, &v)| v)
534                .map(|(c, _)| c)
535                .unwrap_or(0)
536        })
537        .collect()
538}
539
540// ---------------------------------------------------------------------------
541// Nonparametric kernel classifier with mixed predictors
542// ---------------------------------------------------------------------------
543
/// Index of the maximum score; 0 for an empty slice.
/// On ties the last maximal index wins (semantics of `Iterator::max_by`).
fn argmax_class(scores: &[f64]) -> usize {
    let cmp = |a: &f64, b: &f64| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal);
    scores
        .iter()
        .enumerate()
        .max_by(|x, y| cmp(x.1, y.1))
        .map_or(0, |(idx, _)| idx)
}
553
554/// Compute marginal rank-based scalar depth of observation i w.r.t. class c.
555fn scalar_depth_for_obs(cov: &FdMatrix, i: usize, class_indices: &[usize], p: usize) -> f64 {
556    let nc = class_indices.len() as f64;
557    if nc < 1.0 || p == 0 {
558        return 0.0;
559    }
560    let mut depth = 0.0;
561    for j in 0..p {
562        let val = cov[(i, j)];
563        let rank = class_indices
564            .iter()
565            .filter(|&&k| cov[(k, j)] <= val)
566            .count() as f64;
567        let u = rank / nc.max(1.0);
568        depth += u.min(1.0 - u).min(0.5);
569    }
570    depth / p as f64
571}
572
/// Generate up to 20 bandwidth candidates from the 5%..100% percentiles of
/// the strictly-upper-triangular pairwise distances.
///
/// Returns an empty vector when there are no pairwise distances (n < 2) or
/// all candidates are degenerate (≤ 1e-15); callers fall back to a default.
fn bandwidth_candidates(dists: &[f64], n: usize) -> Vec<f64> {
    // Collect the strict upper triangle (i < j) of the n×n distance matrix.
    let mut all_dists: Vec<f64> = Vec::new();
    for i in 0..n {
        for j in (i + 1)..n {
            all_dists.push(dists[i * n + j]);
        }
    }
    // BUGFIX: with no distances, `all_dists.len() - 1` below underflows usize
    // and panics; return early instead.
    if all_dists.is_empty() {
        return Vec::new();
    }
    all_dists.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

    (1..=20)
        .map(|p| {
            let idx = (p as f64 / 20.0 * (all_dists.len() - 1) as f64) as usize;
            all_dists[idx.min(all_dists.len() - 1)]
        })
        .filter(|&h| h > 1e-15) // drop zero/degenerate bandwidths
        .collect()
}
591
592/// LOO classification accuracy for a single bandwidth.
593fn loo_accuracy_for_bandwidth(dists: &[f64], labels: &[usize], g: usize, n: usize, h: f64) -> f64 {
594    let mut correct = 0;
595    for i in 0..n {
596        let mut votes = vec![0.0; g];
597        for j in 0..n {
598            if j != i {
599                votes[labels[j]] += gaussian_kernel(dists[i * n + j], h);
600            }
601        }
602        if argmax_class(&votes) == labels[i] {
603            correct += 1;
604        }
605    }
606    correct as f64 / n as f64
607}
608
/// Gaussian kernel exp(−d²/(2h²)); returns 0 for a degenerate bandwidth.
fn gaussian_kernel(dist: f64, h: f64) -> f64 {
    if h < 1e-15 {
        0.0
    } else {
        let exponent = -dist * dist / (2.0 * h * h);
        exponent.exp()
    }
}
616
/// Nonparametric kernel classifier for functional data with optional scalar covariates.
///
/// Uses product kernel: K_func × K_scalar. Bandwidth selected by LOO-CV.
/// Functional distances are weighted L2 distances between curves (Simpson's
/// rule weights on `argvals`); scalar distances are Euclidean over covariate
/// rows. Predictions are leave-one-out kernel-weighted majority votes.
///
/// Returns `None` on empty input, a label/argvals length mismatch, or fewer
/// than two classes.
///
/// # Arguments
/// * `data` — Functional data (n × m)
/// * `argvals` — Evaluation points
/// * `y` — Class labels
/// * `covariates` — Optional scalar covariates (n × p)
/// * `h_func` — Functional bandwidth (0 = auto via LOO-CV)
/// * `h_scalar` — Scalar bandwidth (0 = auto)
pub fn fclassif_kernel(
    data: &FdMatrix,
    argvals: &[f64],
    y: &[usize],
    covariates: Option<&FdMatrix>,
    h_func: f64,
    h_scalar: f64,
) -> Option<ClassifResult> {
    let n = data.nrows();
    let m = data.ncols();
    if n == 0 || y.len() != n || argvals.len() != m {
        return None;
    }

    let (labels, g) = remap_labels(y);
    if g < 2 {
        return None;
    }

    let weights = simpsons_weights(argvals);

    // Compute pairwise functional distances
    let func_dists = compute_pairwise_l2(data, &weights);

    // Compute pairwise scalar distances if covariates exist
    let scalar_dists = covariates.map(compute_pairwise_scalar);

    // Select bandwidths via LOO if needed
    let h_f = if h_func > 0.0 {
        h_func
    } else {
        select_bandwidth_loo(&func_dists, &labels, g, n, true)
    };
    // Auto-select the scalar bandwidth only when covariates are present;
    // a non-positive h_scalar otherwise disables the scalar kernel factor.
    let h_s = match &scalar_dists {
        Some(sd) if h_scalar <= 0.0 => select_bandwidth_loo(sd, &labels, g, n, false),
        _ => h_scalar,
    };

    let predicted = kernel_classify_loo(
        &func_dists,
        scalar_dists.as_deref(),
        &labels,
        g,
        n,
        h_f,
        h_s,
    );
    let accuracy = compute_accuracy(&labels, &predicted);
    let confusion = confusion_matrix(&labels, &predicted, g);

    Some(ClassifResult {
        predicted,
        probabilities: None,
        accuracy,
        confusion,
        n_classes: g,
        // No FPCA is involved in the kernel method.
        ncomp: 0,
    })
}
687
688/// Compute pairwise L2 distances between curves.
689fn compute_pairwise_l2(data: &FdMatrix, weights: &[f64]) -> Vec<f64> {
690    let n = data.nrows();
691    let mut dists = vec![0.0; n * n];
692    for i in 0..n {
693        let ri = data.row(i);
694        for j in (i + 1)..n {
695            let rj = data.row(j);
696            let d = l2_distance(&ri, &rj, weights);
697            dists[i * n + j] = d;
698            dists[j * n + i] = d;
699        }
700    }
701    dists
702}
703
704/// Compute pairwise Euclidean distances between scalar covariate vectors.
705fn compute_pairwise_scalar(covariates: &FdMatrix) -> Vec<f64> {
706    let n = covariates.nrows();
707    let p = covariates.ncols();
708    let mut dists = vec![0.0; n * n];
709    for i in 0..n {
710        for j in (i + 1)..n {
711            let mut d_sq = 0.0;
712            for k in 0..p {
713                d_sq += (covariates[(i, k)] - covariates[(j, k)]).powi(2);
714            }
715            let d = d_sq.sqrt();
716            dists[i * n + j] = d;
717            dists[j * n + i] = d;
718        }
719    }
720    dists
721}
722
723/// Select bandwidth by LOO classification accuracy.
724fn select_bandwidth_loo(dists: &[f64], labels: &[usize], g: usize, n: usize, is_func: bool) -> f64 {
725    let candidates = bandwidth_candidates(dists, n);
726    if candidates.is_empty() {
727        return if is_func { 1.0 } else { 0.5 };
728    }
729
730    let mut best_h = candidates[0];
731    let mut best_acc = 0.0;
732    for &h in &candidates {
733        let acc = loo_accuracy_for_bandwidth(dists, labels, g, n, h);
734        if acc > best_acc {
735            best_acc = acc;
736            best_h = h;
737        }
738    }
739    best_h
740}
741
742/// LOO kernel classification with product kernel.
743fn kernel_classify_loo(
744    func_dists: &[f64],
745    scalar_dists: Option<&[f64]>,
746    labels: &[usize],
747    g: usize,
748    n: usize,
749    h_func: f64,
750    h_scalar: f64,
751) -> Vec<usize> {
752    (0..n)
753        .map(|i| {
754            let mut votes = vec![0.0; g];
755            for j in 0..n {
756                if j == i {
757                    continue;
758                }
759                let kf = gaussian_kernel(func_dists[i * n + j], h_func);
760                let ks = match scalar_dists {
761                    Some(sd) if h_scalar > 1e-15 => gaussian_kernel(sd[i * n + j], h_scalar),
762                    _ => 1.0,
763                };
764                votes[labels[j]] += kf * ks;
765            }
766            argmax_class(&votes)
767        })
768        .collect()
769}
770
771// ---------------------------------------------------------------------------
772// Depth-based DD-classifier
773// ---------------------------------------------------------------------------
774
775/// Depth-based DD-classifier.
776///
777/// Computes functional depth of each observation w.r.t. each class,
778/// then classifies by maximum depth.
779/// Compute depth of all observations w.r.t. each class.
780fn compute_class_depths(data: &FdMatrix, class_indices: &[Vec<usize>], n: usize) -> FdMatrix {
781    let g = class_indices.len();
782    let mut depth_scores = FdMatrix::zeros(n, g);
783    for c in 0..g {
784        if class_indices[c].is_empty() {
785            continue;
786        }
787        let class_data = extract_class_data(data, &class_indices[c]);
788        let depths = fraiman_muniz_1d(data, &class_data, true);
789        for i in 0..n {
790            depth_scores[(i, c)] = depths[i];
791        }
792    }
793    depth_scores
794}
795
796/// Blend functional depth scores with scalar rank depth from covariates.
797fn blend_scalar_depths(
798    depth_scores: &mut FdMatrix,
799    cov: &FdMatrix,
800    class_indices: &[Vec<usize>],
801    n: usize,
802) {
803    let g = class_indices.len();
804    let p = cov.ncols();
805    for c in 0..g {
806        for i in 0..n {
807            let sd = scalar_depth_for_obs(cov, i, &class_indices[c], p);
808            depth_scores[(i, c)] = 0.7 * depth_scores[(i, c)] + 0.3 * sd;
809        }
810    }
811}
812
813pub fn fclassif_dd(
814    data: &FdMatrix,
815    y: &[usize],
816    covariates: Option<&FdMatrix>,
817) -> Option<ClassifResult> {
818    let n = data.nrows();
819    if n == 0 || y.len() != n {
820        return None;
821    }
822
823    let (labels, g) = remap_labels(y);
824    if g < 2 {
825        return None;
826    }
827
828    let class_indices: Vec<Vec<usize>> = (0..g)
829        .map(|c| (0..n).filter(|&i| labels[i] == c).collect())
830        .collect();
831
832    let mut depth_scores = compute_class_depths(data, &class_indices, n);
833
834    if let Some(cov) = covariates {
835        blend_scalar_depths(&mut depth_scores, cov, &class_indices, n);
836    }
837
838    let predicted: Vec<usize> = (0..n)
839        .map(|i| {
840            let scores: Vec<f64> = (0..g).map(|c| depth_scores[(i, c)]).collect();
841            argmax_class(&scores)
842        })
843        .collect();
844
845    let accuracy = compute_accuracy(&labels, &predicted);
846    let confusion = confusion_matrix(&labels, &predicted, g);
847
848    Some(ClassifResult {
849        predicted,
850        probabilities: Some(depth_scores),
851        accuracy,
852        confusion,
853        n_classes: g,
854        ncomp: 0,
855    })
856}
857
858/// Extract rows corresponding to given indices into a new FdMatrix.
859fn extract_class_data(data: &FdMatrix, indices: &[usize]) -> FdMatrix {
860    let nc = indices.len();
861    let m = data.ncols();
862    let mut result = FdMatrix::zeros(nc, m);
863    for (ri, &i) in indices.iter().enumerate() {
864        for j in 0..m {
865            result[(ri, j)] = data[(i, j)];
866        }
867    }
868    result
869}
870
871// ---------------------------------------------------------------------------
872// Cross-validation
873// ---------------------------------------------------------------------------
874
/// K-fold cross-validated error rate for functional classification.
///
/// Shuffles observations into `nfold` folds (seeded, reproducible), trains
/// the chosen method on each training split via [`cv_fold_predict`], and
/// averages the per-fold test error. A fold whose prediction fails outright
/// contributes an error rate of 1.0.
///
/// Returns `None` when n < nfold, nfold < 2, or fewer than two classes.
///
/// # Arguments
/// * `data` — Functional data (n × m)
/// * `argvals` — Evaluation points
/// * `y` — Class labels
/// * `covariates` — Optional scalar covariates
/// * `method` — "lda", "qda", "knn", "kernel", "dd"
/// * `ncomp` — Number of FPC components (for lda/qda/knn)
/// * `nfold` — Number of CV folds
/// * `seed` — Random seed for fold assignment
pub fn fclassif_cv(
    data: &FdMatrix,
    argvals: &[f64],
    y: &[usize],
    covariates: Option<&FdMatrix>,
    method: &str,
    ncomp: usize,
    nfold: usize,
    seed: u64,
) -> Option<ClassifCvResult> {
    let n = data.nrows();
    if n < nfold || nfold < 2 {
        return None;
    }

    let (labels, g) = remap_labels(y);
    if g < 2 {
        return None;
    }

    // Assign folds
    let folds = assign_folds(n, nfold, seed);

    let mut fold_errors = Vec::with_capacity(nfold);

    for fold in 0..nfold {
        let (train_idx, test_idx) = fold_split(&folds, fold);
        // Row subsets for this fold (extract_class_data is a generic row picker).
        let train_data = extract_class_data(data, &train_idx);
        let test_data = extract_class_data(data, &test_idx);
        let train_labels: Vec<usize> = train_idx.iter().map(|&i| labels[i]).collect();
        let test_labels: Vec<usize> = test_idx.iter().map(|&i| labels[i]).collect();

        let train_cov = covariates.map(|c| extract_class_data(c, &train_idx));
        let test_cov = covariates.map(|c| extract_class_data(c, &test_idx));

        let predictions = cv_fold_predict(
            &train_data,
            &test_data,
            argvals,
            &train_labels,
            train_cov.as_ref(),
            test_cov.as_ref(),
            method,
            ncomp,
        );

        let n_test = test_labels.len();
        // Fold error = fraction misclassified; 1.0 if prediction failed.
        let errors = match predictions {
            Some(pred) => {
                let wrong = pred
                    .iter()
                    .zip(&test_labels)
                    .filter(|(&p, &t)| p != t)
                    .count();
                wrong as f64 / n_test as f64
            }
            None => 1.0,
        };
        fold_errors.push(errors);
    }

    let error_rate = fold_errors.iter().sum::<f64>() / nfold as f64;

    Some(ClassifCvResult {
        error_rate,
        fold_errors,
        // No ncomp tuning is performed here — this echoes the input.
        best_ncomp: ncomp,
    })
}
955
956/// Assign observations to folds.
957fn assign_folds(n: usize, nfold: usize, seed: u64) -> Vec<usize> {
958    use rand::prelude::*;
959    let mut rng = StdRng::seed_from_u64(seed);
960    let mut indices: Vec<usize> = (0..n).collect();
961    indices.shuffle(&mut rng);
962
963    let mut folds = vec![0usize; n];
964    for (rank, &idx) in indices.iter().enumerate() {
965        folds[idx] = rank % nfold;
966    }
967    folds
968}
969
/// Partition observation indices into (train, test) for the given fold.
fn fold_split(folds: &[usize], fold: usize) -> (Vec<usize>, Vec<usize>) {
    let mut train = Vec::new();
    let mut test = Vec::new();
    for (i, &f) in folds.iter().enumerate() {
        if f == fold {
            test.push(i);
        } else {
            train.push(i);
        }
    }
    (train, test)
}
976
/// Predict on the test set for one CV fold.
///
/// Dispatches on `method`. For "lda"/"qda"/"knn" the training FPCA basis is
/// recomputed and test curves are projected onto it before classification.
///
/// NOTE(review): the "kernel" and "dd" branches are placeholders — they fit
/// on the training data but then replicate the first training prediction for
/// every test row instead of scoring the test curves, so their CV error
/// rates are not meaningful yet.
/// NOTE(review): `_test_cov` is accepted but unused, so test-set scalar
/// covariates are currently ignored by every method — confirm intended.
fn cv_fold_predict(
    train_data: &FdMatrix,
    test_data: &FdMatrix,
    argvals: &[f64],
    train_labels: &[usize],
    train_cov: Option<&FdMatrix>,
    _test_cov: Option<&FdMatrix>,
    method: &str,
    ncomp: usize,
) -> Option<Vec<usize>> {
    match method {
        "lda" => {
            // Verifies LDA can be fit on the training fold; failure -> None.
            let _result = fclassif_lda(train_data, train_labels, train_cov, ncomp)?;
            // Re-predict on test data using training FPCA
            let fpca = fdata_to_pc_1d(train_data, ncomp)?;
            let predictions =
                project_and_classify_lda(test_data, &fpca, train_labels, train_cov, ncomp);
            Some(predictions)
        }
        "qda" => {
            let _result = fclassif_qda(train_data, train_labels, train_cov, ncomp)?;
            let fpca = fdata_to_pc_1d(train_data, ncomp)?;
            let predictions =
                project_and_classify_qda(test_data, &fpca, train_labels, train_cov, ncomp);
            Some(predictions)
        }
        "knn" => {
            let fpca = fdata_to_pc_1d(train_data, ncomp)?;
            // k is hard-coded to 5 here; fclassif_knn's k_nn is not threaded through.
            let predictions =
                project_and_classify_knn(test_data, &fpca, train_labels, train_cov, ncomp, 5);
            Some(predictions)
        }
        "kernel" => {
            // For kernel, combine train+test and predict test subset
            let result = fclassif_kernel(train_data, argvals, train_labels, train_cov, 0.0, 0.0)?;
            // Simple: use same bandwidth to predict test
            Some(vec![result.predicted[0]; test_data.nrows()]) // placeholder
        }
        "dd" => {
            let result = fclassif_dd(train_data, train_labels, train_cov)?;
            Some(vec![result.predicted[0]; test_data.nrows()]) // placeholder
        }
        _ => None,
    }
}
1023
1024/// Project test data onto FPCA basis (mean-center, multiply by rotation).
1025fn project_test_onto_fpca(test_data: &FdMatrix, fpca: &crate::regression::FpcaResult) -> FdMatrix {
1026    let n_test = test_data.nrows();
1027    let m = test_data.ncols();
1028    let d_pc = fpca.scores.ncols();
1029    let mut test_features = FdMatrix::zeros(n_test, d_pc);
1030    for i in 0..n_test {
1031        for k in 0..d_pc {
1032            let mut score = 0.0;
1033            for j in 0..m {
1034                score += (test_data[(i, j)] - fpca.mean[j]) * fpca.rotation[(j, k)];
1035            }
1036            test_features[(i, k)] = score;
1037        }
1038    }
1039    test_features
1040}
1041
1042/// Project test data onto training FPCA and classify with LDA.
1043fn project_and_classify_lda(
1044    test_data: &FdMatrix,
1045    fpca: &crate::regression::FpcaResult,
1046    train_labels: &[usize],
1047    _train_cov: Option<&FdMatrix>,
1048    _ncomp: usize,
1049) -> Vec<usize> {
1050    let test_features = project_test_onto_fpca(test_data, fpca);
1051
1052    let train_features = &fpca.scores;
1053    let (labels, g) = remap_labels(train_labels);
1054    let (class_means, cov, priors) = lda_params(train_features, &labels, g);
1055    let d = train_features.ncols();
1056    match cholesky_d(&cov, d) {
1057        Some(chol) => lda_predict(&test_features, &class_means, &chol, &priors, g),
1058        None => vec![0; test_data.nrows()],
1059    }
1060}
1061
1062/// Project test data onto training FPCA and classify with QDA.
1063fn project_and_classify_qda(
1064    test_data: &FdMatrix,
1065    fpca: &crate::regression::FpcaResult,
1066    train_labels: &[usize],
1067    _train_cov: Option<&FdMatrix>,
1068    _ncomp: usize,
1069) -> Vec<usize> {
1070    let n_test = test_data.nrows();
1071    let test_features = project_test_onto_fpca(test_data, fpca);
1072
1073    let (labels, g) = remap_labels(train_labels);
1074    let train_features = &fpca.scores;
1075
1076    match build_qda_params(train_features, &labels, g) {
1077        Some((class_means, class_chols, class_log_dets, priors)) => qda_predict(
1078            &test_features,
1079            &class_means,
1080            &class_chols,
1081            &class_log_dets,
1082            &priors,
1083            g,
1084        ),
1085        None => vec![0; n_test],
1086    }
1087}
1088
1089/// Project test data and classify with k-NN.
1090fn project_and_classify_knn(
1091    test_data: &FdMatrix,
1092    fpca: &crate::regression::FpcaResult,
1093    train_labels: &[usize],
1094    _train_cov: Option<&FdMatrix>,
1095    _ncomp: usize,
1096    k_nn: usize,
1097) -> Vec<usize> {
1098    let n_test = test_data.nrows();
1099    let n_train = fpca.scores.nrows();
1100    let m = test_data.ncols();
1101    let d = fpca.scores.ncols();
1102
1103    let (labels, g) = remap_labels(train_labels);
1104
1105    (0..n_test)
1106        .map(|i| {
1107            // Project test observation
1108            let mut xi = vec![0.0; d];
1109            for k in 0..d {
1110                for j in 0..m {
1111                    xi[k] += (test_data[(i, j)] - fpca.mean[j]) * fpca.rotation[(j, k)];
1112                }
1113            }
1114
1115            // Distances to all training points
1116            let mut dists: Vec<(f64, usize)> = (0..n_train)
1117                .map(|t| {
1118                    let d_sq: f64 = (0..d).map(|k| (xi[k] - fpca.scores[(t, k)]).powi(2)).sum();
1119                    (d_sq, labels[t])
1120                })
1121                .collect();
1122            dists.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal));
1123
1124            let mut votes = vec![0usize; g];
1125            for &(_, label) in dists.iter().take(k_nn.min(n_train)) {
1126                votes[label] += 1;
1127            }
1128            votes
1129                .iter()
1130                .enumerate()
1131                .max_by_key(|&(_, &v)| v)
1132                .map(|(c, _)| c)
1133                .unwrap_or(0)
1134        })
1135        .collect()
1136}
1137
1138// ---------------------------------------------------------------------------
1139// Tests
1140// ---------------------------------------------------------------------------
1141
#[cfg(test)]
mod tests {
    use super::*;
    use std::f64::consts::PI;

    // Evenly spaced grid of m points on [0, 1].
    fn uniform_grid(m: usize) -> Vec<f64> {
        (0..m).map(|i| i as f64 / (m - 1) as f64).collect()
    }

    /// Generate two well-separated classes of curves.
    ///
    /// Class 0 is `sin(2*pi*t)`, class 1 is `-sin(2*pi*t)`, each perturbed by
    /// a small deterministic pseudo-noise term so tests are reproducible
    /// without an RNG. Data is laid out column-major (n x m).
    fn generate_two_class_data(n_per: usize, m: usize) -> (FdMatrix, Vec<usize>, Vec<f64>) {
        let t = uniform_grid(m);
        let n = 2 * n_per;
        let mut col_major = vec![0.0; n * m];

        for i in 0..n_per {
            for (j, &tj) in t.iter().enumerate() {
                // Class 0: sin
                col_major[i + j * n] =
                    (2.0 * PI * tj).sin() + 0.05 * ((i * 7 + j * 3) % 100) as f64 / 100.0;
            }
        }
        for i in 0..n_per {
            for (j, &tj) in t.iter().enumerate() {
                // Class 1: -sin (opposite phase)
                col_major[(i + n_per) + j * n] =
                    -(2.0 * PI * tj).sin() + 0.05 * ((i * 7 + j * 3) % 100) as f64 / 100.0;
            }
        }

        let data = FdMatrix::from_column_major(col_major, n, m).unwrap();
        let labels: Vec<usize> = (0..n).map(|i| if i < n_per { 0 } else { 1 }).collect();
        (data, labels, t)
    }

    #[test]
    fn test_fclassif_lda_basic() {
        let (data, labels, _t) = generate_two_class_data(20, 50);
        let result = fclassif_lda(&data, &labels, None, 3).unwrap();

        assert_eq!(result.predicted.len(), 40);
        assert_eq!(result.n_classes, 2);
        assert!(
            result.accuracy > 0.8,
            "LDA accuracy should be high: {}",
            result.accuracy
        );
    }

    #[test]
    fn test_fclassif_qda_basic() {
        let (data, labels, _t) = generate_two_class_data(20, 50);
        let result = fclassif_qda(&data, &labels, None, 3).unwrap();

        assert_eq!(result.predicted.len(), 40);
        assert!(
            result.accuracy > 0.8,
            "QDA accuracy should be high: {}",
            result.accuracy
        );
    }

    #[test]
    fn test_fclassif_knn_basic() {
        let (data, labels, _t) = generate_two_class_data(20, 50);
        let result = fclassif_knn(&data, &labels, None, 3, 5).unwrap();

        assert_eq!(result.predicted.len(), 40);
        assert!(
            result.accuracy > 0.7,
            "k-NN accuracy should be reasonable: {}",
            result.accuracy
        );
    }

    #[test]
    fn test_fclassif_kernel_basic() {
        let (data, labels, t) = generate_two_class_data(20, 50);
        // Bandwidths 0.0 trigger automatic (LOO) selection.
        let result = fclassif_kernel(&data, &t, &labels, None, 0.0, 0.0).unwrap();

        assert_eq!(result.predicted.len(), 40);
        assert!(
            result.accuracy > 0.7,
            "Kernel accuracy should be reasonable: {}",
            result.accuracy
        );
    }

    #[test]
    fn test_fclassif_dd_basic() {
        let (data, labels, _t) = generate_two_class_data(20, 50);
        let result = fclassif_dd(&data, &labels, None).unwrap();

        assert_eq!(result.predicted.len(), 40);
        assert_eq!(result.n_classes, 2);
        // DD-classifier should work on well-separated data
        assert!(
            result.accuracy > 0.6,
            "DD accuracy should be reasonable: {}",
            result.accuracy
        );
        assert!(result.probabilities.is_some());
    }

    #[test]
    fn test_confusion_matrix_shape() {
        let (data, labels, _t) = generate_two_class_data(15, 50);
        let result = fclassif_lda(&data, &labels, None, 2).unwrap();

        assert_eq!(result.confusion.len(), 2);
        assert_eq!(result.confusion[0].len(), 2);
        assert_eq!(result.confusion[1].len(), 2);

        // Total should equal n
        let total: usize = result.confusion.iter().flat_map(|row| row.iter()).sum();
        assert_eq!(total, 30);
    }

    #[test]
    fn test_fclassif_cv_lda() {
        let (data, labels, t) = generate_two_class_data(25, 50);
        let result = fclassif_cv(&data, &t, &labels, None, "lda", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        assert!(
            result.error_rate < 0.3,
            "CV error should be low: {}",
            result.error_rate
        );
    }

    #[test]
    fn test_fclassif_invalid_input() {
        let data = FdMatrix::zeros(0, 0);
        assert!(fclassif_lda(&data, &[], None, 1).is_none());

        let data = FdMatrix::zeros(10, 50);
        let labels = vec![0; 10]; // single class
        assert!(fclassif_lda(&data, &labels, None, 1).is_none());
    }

    #[test]
    fn test_remap_labels() {
        // Arbitrary label values are remapped to dense 0..G-1 indices.
        let (mapped, g) = remap_labels(&[5, 5, 10, 10, 20]);
        assert_eq!(g, 3);
        assert_eq!(mapped, vec![0, 0, 1, 1, 2]);
    }

    #[test]
    fn test_fclassif_lda_with_covariates() {
        let n_per = 15;
        let n = 2 * n_per;
        let m = 50;
        let t = uniform_grid(m);

        // Curves are identical across classes
        let mut col_major = vec![0.0; n * m];
        for i in 0..n {
            for (j, &tj) in t.iter().enumerate() {
                col_major[i + j * n] = (2.0 * PI * tj).sin();
            }
        }
        let data = FdMatrix::from_column_major(col_major, n, m).unwrap();

        // But covariate separates: 0 vs 10
        let mut cov_data = vec![0.0; n];
        for i in n_per..n {
            cov_data[i] = 10.0;
        }
        let covariates = FdMatrix::from_column_major(cov_data, n, 1).unwrap();

        let labels: Vec<usize> = (0..n).map(|i| if i < n_per { 0 } else { 1 }).collect();

        let result = fclassif_lda(&data, &labels, Some(&covariates), 2).unwrap();
        assert!(
            result.accuracy > 0.9,
            "Covariate should enable separation: {}",
            result.accuracy
        );
    }

    // -----------------------------------------------------------------------
    // Additional coverage tests
    // -----------------------------------------------------------------------

    /// Helper: generate two-class data with scalar covariates.
    fn generate_two_class_with_covariates(
        n_per: usize,
        m: usize,
        p_cov: usize,
    ) -> (FdMatrix, Vec<usize>, Vec<f64>, FdMatrix) {
        let (data, labels, t) = generate_two_class_data(n_per, m);
        let n = 2 * n_per;
        // Covariates: class 0 → low values, class 1 → high values
        let mut cov_data = vec![0.0; n * p_cov];
        for i in 0..n {
            for j in 0..p_cov {
                let base = if labels[i] == 0 { 0.0 } else { 5.0 };
                cov_data[i + j * n] = base + 0.1 * ((i * 3 + j * 7) % 50) as f64 / 50.0;
            }
        }
        let covariates = FdMatrix::from_column_major(cov_data, n, p_cov).unwrap();
        (data, labels, t, covariates)
    }

    #[test]
    fn test_fclassif_cv_qda() {
        let (data, labels, t) = generate_two_class_data(25, 50);
        let result = fclassif_cv(&data, &t, &labels, None, "qda", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        assert!(
            result.error_rate < 0.4,
            "QDA CV error should be low: {}",
            result.error_rate
        );
        assert_eq!(result.best_ncomp, 3);
    }

    #[test]
    fn test_fclassif_cv_knn() {
        let (data, labels, t) = generate_two_class_data(25, 50);
        let result = fclassif_cv(&data, &t, &labels, None, "knn", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        assert!(
            result.error_rate < 0.4,
            "k-NN CV error should be low: {}",
            result.error_rate
        );
    }

    #[test]
    fn test_fclassif_cv_kernel() {
        let (data, labels, t) = generate_two_class_data(25, 50);
        let result = fclassif_cv(&data, &t, &labels, None, "kernel", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        // Kernel CV: placeholder prediction may not be accurate, just ensure it runs
        assert!(result.error_rate >= 0.0 && result.error_rate <= 1.0);
    }

    #[test]
    fn test_fclassif_cv_dd() {
        let (data, labels, t) = generate_two_class_data(25, 50);
        let result = fclassif_cv(&data, &t, &labels, None, "dd", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        assert!(result.error_rate >= 0.0 && result.error_rate <= 1.0);
    }

    #[test]
    fn test_fclassif_cv_invalid_method() {
        let (data, labels, t) = generate_two_class_data(25, 50);
        // "bogus" method hits the `_ => None` arm in cv_fold_predict
        let result = fclassif_cv(&data, &t, &labels, None, "bogus", 3, 5, 42);

        // Should still return Some — fold errors will be 1.0 for each fold
        let r = result.unwrap();
        assert!((r.error_rate - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_fclassif_cv_too_few_folds() {
        let (data, labels, t) = generate_two_class_data(10, 50);
        // nfold < 2 → None
        assert!(fclassif_cv(&data, &t, &labels, None, "lda", 3, 1, 42).is_none());
        // n < nfold → None
        assert!(fclassif_cv(&data, &t, &labels, None, "lda", 3, 100, 42).is_none());
    }

    #[test]
    fn test_fclassif_cv_single_class() {
        let (data, _labels, t) = generate_two_class_data(10, 50);
        let single = vec![0usize; 20]; // only one class
        assert!(fclassif_cv(&data, &t, &single, None, "lda", 3, 5, 42).is_none());
    }

    #[test]
    fn test_fclassif_kernel_with_covariates() {
        let (data, labels, t, covariates) = generate_two_class_with_covariates(20, 50, 2);
        let result = fclassif_kernel(&data, &t, &labels, Some(&covariates), 0.0, 0.0).unwrap();

        assert_eq!(result.predicted.len(), 40);
        assert!(
            result.accuracy > 0.5,
            "Kernel+cov accuracy should be reasonable: {}",
            result.accuracy
        );
        assert_eq!(result.ncomp, 0); // kernel doesn't use ncomp
    }

    #[test]
    fn test_fclassif_kernel_with_covariates_manual_bandwidth() {
        let (data, labels, t, covariates) = generate_two_class_with_covariates(15, 50, 1);
        // Provide explicit bandwidths (>0 skips LOO selection)
        let result = fclassif_kernel(&data, &t, &labels, Some(&covariates), 1.0, 1.0).unwrap();

        assert_eq!(result.predicted.len(), 30);
        assert!(result.accuracy >= 0.0 && result.accuracy <= 1.0);
    }

    #[test]
    fn test_fclassif_dd_with_covariates() {
        let (data, labels, _t, covariates) = generate_two_class_with_covariates(20, 50, 2);
        let result = fclassif_dd(&data, &labels, Some(&covariates)).unwrap();

        assert_eq!(result.predicted.len(), 40);
        assert_eq!(result.n_classes, 2);
        assert!(
            result.accuracy > 0.5,
            "DD+cov accuracy should be reasonable: {}",
            result.accuracy
        );
        assert!(result.probabilities.is_some());
    }

    #[test]
    fn test_fclassif_dd_with_single_covariate() {
        // Curves are identical; only the covariate separates classes
        let n_per = 15;
        let n = 2 * n_per;
        let m = 50;
        let t = uniform_grid(m);

        let mut col_major = vec![0.0; n * m];
        for i in 0..n {
            for (j, &tj) in t.iter().enumerate() {
                col_major[i + j * n] = (2.0 * PI * tj).sin();
            }
        }
        let data = FdMatrix::from_column_major(col_major, n, m).unwrap();
        let labels: Vec<usize> = (0..n).map(|i| if i < n_per { 0 } else { 1 }).collect();

        // Covariate: class 0 → [0..1], class 1 → [10..11]
        let mut cov_data = vec![0.0; n];
        for i in 0..n_per {
            cov_data[i] = i as f64 / n_per as f64;
        }
        for i in n_per..n {
            cov_data[i] = 10.0 + (i - n_per) as f64 / n_per as f64;
        }
        let covariates = FdMatrix::from_column_major(cov_data, n, 1).unwrap();

        let result = fclassif_dd(&data, &labels, Some(&covariates)).unwrap();
        // The scalar blending should help even when curves are identical
        assert!(
            result.accuracy >= 0.5,
            "DD with scalar covariate: {}",
            result.accuracy
        );
    }

    #[test]
    fn test_scalar_depth_for_obs_edge_cases() {
        // Empty class indices → depth = 0
        let cov = FdMatrix::from_column_major(vec![1.0, 2.0, 3.0, 4.0], 4, 1).unwrap();
        assert_eq!(scalar_depth_for_obs(&cov, 0, &[], 1), 0.0);

        // p=0 → depth = 0
        let cov0 = FdMatrix::zeros(4, 0);
        assert_eq!(scalar_depth_for_obs(&cov0, 0, &[0, 1, 2, 3], 0), 0.0);

        // Normal case: all indices
        let depth = scalar_depth_for_obs(&cov, 1, &[0, 1, 2, 3], 1);
        assert!(depth > 0.0 && depth <= 0.5, "depth={}", depth);

        // Observation is at the extremes
        let depth_min = scalar_depth_for_obs(&cov, 0, &[0, 1, 2, 3], 1);
        let depth_max = scalar_depth_for_obs(&cov, 3, &[0, 1, 2, 3], 1);
        // Extreme observations should have low depth
        assert!(depth_min <= 0.5, "depth_min={}", depth_min);
        assert!(depth_max <= 0.5, "depth_max={}", depth_max);
    }

    #[test]
    fn test_scalar_depth_for_obs_multivariate() {
        // 2 covariates
        let cov =
            FdMatrix::from_column_major(vec![1.0, 2.0, 3.0, 4.0, 10.0, 20.0, 30.0, 40.0], 4, 2)
                .unwrap();
        let depth = scalar_depth_for_obs(&cov, 1, &[0, 1, 2, 3], 2);
        assert!(depth > 0.0 && depth <= 0.5, "multivar depth={}", depth);
    }

    #[test]
    fn test_blend_scalar_depths_modifies_scores() {
        let n = 6;
        let g = 2;
        let mut depth_scores = FdMatrix::zeros(n, g);
        // Fill with some values
        for i in 0..n {
            depth_scores[(i, 0)] = 0.5;
            depth_scores[(i, 1)] = 0.3;
        }

        let cov = FdMatrix::from_column_major(vec![1.0, 2.0, 3.0, 10.0, 20.0, 30.0], n, 1).unwrap();
        let class_indices = vec![vec![0, 1, 2], vec![3, 4, 5]];

        let original_00 = depth_scores[(0, 0)];
        blend_scalar_depths(&mut depth_scores, &cov, &class_indices, n);

        // Scores should have been modified (blended with 0.7 / 0.3 weights)
        let blended_00 = depth_scores[(0, 0)];
        // blended = 0.7 * 0.5 + 0.3 * scalar_depth
        assert!(
            (blended_00 - original_00).abs() > 1e-10,
            "blend should change scores: original={}, blended={}",
            original_00,
            blended_00
        );
    }

    #[test]
    fn test_compute_pairwise_scalar() {
        let n = 4;
        // 2 covariates
        let cov = FdMatrix::from_column_major(vec![0.0, 1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 0.0], n, 2)
            .unwrap();
        let dists = compute_pairwise_scalar(&cov);
        assert_eq!(dists.len(), n * n);

        // Diagonal should be zero
        for i in 0..n {
            assert!((dists[i * n + i]).abs() < 1e-15);
        }
        // Symmetry
        for i in 0..n {
            for j in 0..n {
                assert!((dists[i * n + j] - dists[j * n + i]).abs() < 1e-15);
            }
        }
        // d(0,1) = sqrt(1^2 + 0^2) = 1.0
        assert!((dists[1] - 1.0).abs() < 1e-10);
        // d(0,3) = sqrt(3^2 + 0^2) = 3.0
        assert!((dists[3] - 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_fclassif_cv_lda_with_covariates() {
        let (data, labels, t, covariates) = generate_two_class_with_covariates(25, 50, 1);
        let result = fclassif_cv(&data, &t, &labels, Some(&covariates), "lda", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        assert!(result.error_rate >= 0.0 && result.error_rate <= 1.0);
    }

    #[test]
    fn test_fclassif_cv_qda_with_covariates() {
        let (data, labels, t, covariates) = generate_two_class_with_covariates(25, 50, 1);
        let result = fclassif_cv(&data, &t, &labels, Some(&covariates), "qda", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        assert!(result.error_rate >= 0.0 && result.error_rate <= 1.0);
    }

    #[test]
    fn test_fclassif_cv_knn_with_covariates() {
        let (data, labels, t, covariates) = generate_two_class_with_covariates(25, 50, 1);
        let result = fclassif_cv(&data, &t, &labels, Some(&covariates), "knn", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        assert!(result.error_rate >= 0.0 && result.error_rate <= 1.0);
    }

    #[test]
    fn test_fclassif_cv_kernel_with_covariates() {
        let (data, labels, t, covariates) = generate_two_class_with_covariates(25, 50, 1);
        let result =
            fclassif_cv(&data, &t, &labels, Some(&covariates), "kernel", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        assert!(result.error_rate >= 0.0 && result.error_rate <= 1.0);
    }

    #[test]
    fn test_fclassif_cv_dd_with_covariates() {
        let (data, labels, t, covariates) = generate_two_class_with_covariates(25, 50, 2);
        let result = fclassif_cv(&data, &t, &labels, Some(&covariates), "dd", 3, 5, 42).unwrap();

        assert_eq!(result.fold_errors.len(), 5);
        assert!(result.error_rate >= 0.0 && result.error_rate <= 1.0);
    }

    #[test]
    fn test_fclassif_kernel_invalid_inputs() {
        let data = FdMatrix::zeros(0, 0);
        assert!(fclassif_kernel(&data, &[], &[], None, 0.0, 0.0).is_none());

        let data = FdMatrix::zeros(5, 10);
        let t = uniform_grid(10);
        let labels = vec![0; 5]; // single class
        assert!(fclassif_kernel(&data, &t, &labels, None, 0.0, 0.0).is_none());

        // Mismatched argvals length
        let labels2 = vec![0, 0, 0, 1, 1];
        let wrong_t = vec![0.0, 1.0]; // wrong length
        assert!(fclassif_kernel(&data, &wrong_t, &labels2, None, 0.0, 0.0).is_none());
    }

    #[test]
    fn test_fclassif_dd_invalid_inputs() {
        let data = FdMatrix::zeros(0, 0);
        assert!(fclassif_dd(&data, &[], None).is_none());

        let data = FdMatrix::zeros(5, 10);
        let labels = vec![0; 5]; // single class
        assert!(fclassif_dd(&data, &labels, None).is_none());
    }

    #[test]
    fn test_argmax_class_empty() {
        // Empty input defaults to class 0.
        assert_eq!(argmax_class(&[]), 0);
        assert_eq!(argmax_class(&[0.1]), 0);
        assert_eq!(argmax_class(&[0.1, 0.9, 0.5]), 1);
    }

    #[test]
    fn test_gaussian_kernel_values() {
        // h=0 → 0
        assert_eq!(gaussian_kernel(1.0, 0.0), 0.0);
        // dist=0 → 1
        assert!((gaussian_kernel(0.0, 1.0) - 1.0).abs() < 1e-15);
        // Normal case
        let k = gaussian_kernel(1.0, 1.0);
        let expected = (-0.5_f64).exp();
        assert!((k - expected).abs() < 1e-10);
    }

    #[test]
    fn test_fclassif_qda_with_covariates() {
        let (data, labels, _t, covariates) = generate_two_class_with_covariates(20, 50, 1);
        let result = fclassif_qda(&data, &labels, Some(&covariates), 3).unwrap();

        assert_eq!(result.predicted.len(), 40);
        assert!(
            result.accuracy > 0.5,
            "QDA+cov accuracy: {}",
            result.accuracy
        );
    }

    #[test]
    fn test_fclassif_knn_with_covariates() {
        let (data, labels, _t, covariates) = generate_two_class_with_covariates(20, 50, 1);
        let result = fclassif_knn(&data, &labels, Some(&covariates), 3, 5).unwrap();

        assert_eq!(result.predicted.len(), 40);
        assert!(
            result.accuracy > 0.5,
            "k-NN+cov accuracy: {}",
            result.accuracy
        );
    }

    #[test]
    fn test_fclassif_knn_invalid_k() {
        let (data, labels, _t) = generate_two_class_data(10, 50);
        // k_nn == 0 → None
        assert!(fclassif_knn(&data, &labels, None, 3, 0).is_none());
    }

    #[test]
    fn test_bandwidth_candidates_empty_distances() {
        // All distances zero → candidates filtered out
        let dists = vec![0.0; 9];
        let cands = bandwidth_candidates(&dists, 3);
        assert!(cands.is_empty());
    }

    #[test]
    fn test_select_bandwidth_loo_empty_candidates() {
        // All distances zero → empty candidates → default bandwidth
        let dists = vec![0.0; 9];
        let labels = vec![0, 0, 1];
        // `true` = functional bandwidth, `false` = scalar-covariate bandwidth.
        let h = select_bandwidth_loo(&dists, &labels, 2, 3, true);
        assert!((h - 1.0).abs() < 1e-10, "default func bandwidth: {}", h);

        let h2 = select_bandwidth_loo(&dists, &labels, 2, 3, false);
        assert!((h2 - 0.5).abs() < 1e-10, "default scalar bandwidth: {}", h2);
    }

    #[test]
    fn test_fold_split() {
        let folds = vec![0, 1, 2, 0, 1, 2];
        let (train, test) = fold_split(&folds, 1);
        assert_eq!(train, vec![0, 2, 3, 5]);
        assert_eq!(test, vec![1, 4]);
    }

    #[test]
    fn test_assign_folds_deterministic() {
        // Same seed must produce the same partition.
        let f1 = assign_folds(10, 3, 42);
        let f2 = assign_folds(10, 3, 42);
        assert_eq!(f1, f2);

        // All fold indices in [0, nfold)
        for &f in &f1 {
            assert!(f < 3);
        }
    }

    #[test]
    fn test_project_test_onto_fpca() {
        let n_train = 20;
        let m = 50;
        let ncomp = 3;
        let (data, _labels, _t) = generate_two_class_data(n_train / 2, m);

        let fpca = fdata_to_pc_1d(&data, ncomp).unwrap();

        // Create small "test" matrix
        let n_test = 5;
        let mut test_col = vec![0.0; n_test * m];
        for i in 0..n_test {
            for j in 0..m {
                test_col[i + j * n_test] = data[(i, j)] + 0.01;
            }
        }
        let test_data = FdMatrix::from_column_major(test_col, n_test, m).unwrap();

        let projected = project_test_onto_fpca(&test_data, &fpca);
        assert_eq!(projected.nrows(), n_test);
        assert_eq!(projected.ncols(), ncomp);
    }

    #[test]
    fn test_fclassif_three_classes() {
        let n_per = 15;
        let n = 3 * n_per;
        let m = 50;
        let t = uniform_grid(m);

        let mut col_major = vec![0.0; n * m];
        // Class 0: sin
        for i in 0..n_per {
            for (j, &tj) in t.iter().enumerate() {
                col_major[i + j * n] =
                    (2.0 * PI * tj).sin() + 0.02 * ((i * 7 + j * 3) % 100) as f64 / 100.0;
            }
        }
        // Class 1: cos
        for i in 0..n_per {
            for (j, &tj) in t.iter().enumerate() {
                col_major[(i + n_per) + j * n] =
                    (2.0 * PI * tj).cos() + 0.02 * ((i * 7 + j * 3) % 100) as f64 / 100.0;
            }
        }
        // Class 2: constant
        for i in 0..n_per {
            for (j, _) in t.iter().enumerate() {
                col_major[(i + 2 * n_per) + j * n] =
                    3.0 + 0.02 * ((i * 7 + j * 3) % 100) as f64 / 100.0;
            }
        }

        let data = FdMatrix::from_column_major(col_major, n, m).unwrap();
        let labels: Vec<usize> = (0..n)
            .map(|i| {
                if i < n_per {
                    0
                } else if i < 2 * n_per {
                    1
                } else {
                    2
                }
            })
            .collect();

        let result = fclassif_lda(&data, &labels, None, 3).unwrap();
        assert_eq!(result.n_classes, 3);
        assert!(
            result.accuracy > 0.8,
            "Three-class accuracy: {}",
            result.accuracy
        );
    }
}