// fdars_core/simulation.rs
1//! Simulation functions for functional data.
2//!
3//! This module provides tools for generating synthetic functional data using
4//! the Karhunen-Loève expansion and various eigenfunction/eigenvalue configurations.
5//!
6//! ## Overview
7//!
8//! Functional data can be simulated using the truncated Karhunen-Loève representation:
9//! ```text
10//! f_i(t) = μ(t) + Σ_{k=1}^{M} ξ_{ik} φ_k(t)
11//! ```
12//! where:
13//! - μ(t) is the mean function
14//! - φ_k(t) are orthonormal eigenfunctions
15//! - ξ_{ik} ~ N(0, λ_k) are random scores with variances given by eigenvalues
16//!
17//! ## Eigenfunction Types
18//!
19//! - **Fourier**: sin/cos basis functions, suitable for periodic data
20//! - **Legendre**: Orthonormal Legendre polynomials on \[0,1\]
21//! - **Wiener**: Eigenfunctions of the Wiener process
22
23use crate::matrix::FdMatrix;
24use crate::maybe_par_chunks_mut_enumerate;
25use rand::prelude::*;
26use rand_distr::Normal;
27use std::f64::consts::PI;
28
/// Eigenfunction type enum for simulation.
///
/// Crosses the FFI boundary as an integer; see [`EFunType::from_i32`].
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EFunType {
    /// Fourier basis: 1, sqrt(2)*sin(2πkt), sqrt(2)*cos(2πkt)
    /// (per frequency, the sine column precedes the cosine column)
    Fourier = 0,
    /// Orthonormal Legendre polynomials on \[0,1\]
    Poly = 1,
    /// Higher-order Legendre polynomials (starting at degree 2)
    PolyHigh = 2,
    /// Wiener process eigenfunctions: sqrt(2)*sin((k-0.5)πt)
    Wiener = 3,
}
41
42impl EFunType {
43    /// Create from integer (for FFI)
44    pub fn from_i32(value: i32) -> Option<Self> {
45        match value {
46            0 => Some(EFunType::Fourier),
47            1 => Some(EFunType::Poly),
48            2 => Some(EFunType::PolyHigh),
49            3 => Some(EFunType::Wiener),
50            _ => None,
51        }
52    }
53}
54
/// Eigenvalue decay type for simulation.
///
/// Crosses the FFI boundary as an integer; see [`EValType::from_i32`].
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EValType {
    /// Linear decay: λ_k = 1/k
    Linear = 0,
    /// Exponential decay: λ_k = exp(-k)
    Exponential = 1,
    /// Wiener eigenvalues: λ_k = 1/((k-0.5)π)²
    Wiener = 2,
}
65
66impl EValType {
67    /// Create from integer (for FFI)
68    pub fn from_i32(value: i32) -> Option<Self> {
69        match value {
70            0 => Some(EValType::Linear),
71            1 => Some(EValType::Exponential),
72            2 => Some(EValType::Wiener),
73            _ => None,
74        }
75    }
76}
77
78// =============================================================================
79// Eigenfunction Computation
80// =============================================================================
81
82/// Compute Fourier eigenfunctions on \[0,1\].
83///
84/// The Fourier basis consists of:
85/// - φ_1(t) = 1
86/// - φ_{2k}(t) = √2 cos(2πkt) for k = 1, 2, ...
87/// - φ_{2k+1}(t) = √2 sin(2πkt) for k = 1, 2, ...
88///
89/// # Arguments
90/// * `t` - Evaluation points in \[0,1\]
91/// * `m` - Number of eigenfunctions
92///
93/// # Returns
94/// `FdMatrix` of size `len(t) × m`
95pub fn fourier_eigenfunctions(t: &[f64], m: usize) -> FdMatrix {
96    let n = t.len();
97    let mut phi = FdMatrix::zeros(n, m);
98    let sqrt2 = 2.0_f64.sqrt();
99
100    for (i, &ti) in t.iter().enumerate() {
101        // φ_1(t) = 1
102        phi[(i, 0)] = 1.0;
103
104        let mut k = 1; // current eigenfunction index
105        let mut freq = 1; // frequency index
106
107        while k < m {
108            // sin term: sqrt(2) * sin(2*pi*freq*t)
109            if k < m {
110                phi[(i, k)] = sqrt2 * (2.0 * PI * freq as f64 * ti).sin();
111                k += 1;
112            }
113            // cos term: sqrt(2) * cos(2*pi*freq*t)
114            if k < m {
115                phi[(i, k)] = sqrt2 * (2.0 * PI * freq as f64 * ti).cos();
116                k += 1;
117            }
118            freq += 1;
119        }
120    }
121    phi
122}
123
124/// Compute Legendre polynomial eigenfunctions on \[0,1\].
125///
126/// Uses orthonormalized Legendre polynomials. The normalization factor is
127/// √(2n+1) where n is the polynomial degree, which ensures unit L² norm on \[0,1\].
128///
129/// # Arguments
130/// * `t` - Evaluation points in \[0,1\]
131/// * `m` - Number of eigenfunctions
132/// * `high` - If true, start at degree 2 (PolyHigh), otherwise start at degree 0
133///
134/// # Returns
135/// `FdMatrix` of size `len(t) × m`
136pub fn legendre_eigenfunctions(t: &[f64], m: usize, high: bool) -> FdMatrix {
137    let n = t.len();
138    let mut phi = FdMatrix::zeros(n, m);
139    let start_deg = if high { 2 } else { 0 };
140
141    for (i, &ti) in t.iter().enumerate() {
142        // Transform from \[0,1\] to \[-1,1\]
143        let x = 2.0 * ti - 1.0;
144
145        for j in 0..m {
146            let deg = start_deg + j;
147            // Compute Legendre polynomial P_deg(x)
148            let p = legendre_p(x, deg);
149            // Normalize: ||P_n||² on \[-1,1\] = 2/(2n+1), on \[0,1\] = 1/(2n+1)
150            let norm = ((2 * deg + 1) as f64).sqrt();
151            phi[(i, j)] = p * norm;
152        }
153    }
154    phi
155}
156
/// Evaluate the Legendre polynomial P_n(x) by Bonnet's three-term recurrence:
/// (n+1)P_{n+1}(x) = (2n+1)xP_n(x) - nP_{n-1}(x)
fn legendre_p(x: f64, n: usize) -> f64 {
    // Seed the recurrence with P_0(x) = 1 and P_1(x) = x.
    let (mut prev, mut curr) = (1.0, x);
    match n {
        0 => prev,
        1 => curr,
        _ => {
            for k in 2..=n {
                let next =
                    ((2 * k - 1) as f64 * x * curr - (k - 1) as f64 * prev) / k as f64;
                prev = curr;
                curr = next;
            }
            curr
        }
    }
}
179
180/// Compute Wiener process eigenfunctions on \[0,1\].
181///
182/// The Wiener (Brownian motion) eigenfunctions are:
183/// φ_k(t) = √2 sin((k - 0.5)πt)
184///
185/// These are the eigenfunctions of the covariance kernel K(s,t) = min(s,t).
186///
187/// # Arguments
188/// * `t` - Evaluation points in \[0,1\]
189/// * `m` - Number of eigenfunctions
190///
191/// # Returns
192/// `FdMatrix` of size `len(t) × m`
193pub fn wiener_eigenfunctions(t: &[f64], m: usize) -> FdMatrix {
194    let n = t.len();
195    let mut phi = FdMatrix::zeros(n, m);
196    let sqrt2 = 2.0_f64.sqrt();
197
198    for (i, &ti) in t.iter().enumerate() {
199        for j in 0..m {
200            let k = (j + 1) as f64;
201            // φ_k(t) = sqrt(2) * sin((k - 0.5) * pi * t)
202            phi[(i, j)] = sqrt2 * ((k - 0.5) * PI * ti).sin();
203        }
204    }
205    phi
206}
207
/// Unified eigenfunction computation.
///
/// Dispatches to the concrete basis implementation selected by `efun_type`.
///
/// # Arguments
/// * `t` - Evaluation points
/// * `m` - Number of eigenfunctions
/// * `efun_type` - Type of eigenfunction basis
///
/// # Returns
/// `FdMatrix` of size `len(t) × m`
pub fn eigenfunctions(t: &[f64], m: usize, efun_type: EFunType) -> FdMatrix {
    match efun_type {
        EFunType::Fourier => fourier_eigenfunctions(t, m),
        // Poly starts at degree 0; PolyHigh starts at degree 2.
        EFunType::Poly => legendre_eigenfunctions(t, m, false),
        EFunType::PolyHigh => legendre_eigenfunctions(t, m, true),
        EFunType::Wiener => wiener_eigenfunctions(t, m),
    }
}
225
226// =============================================================================
227// Eigenvalue Computation
228// =============================================================================
229
/// Generate eigenvalue sequence with linear decay.
///
/// λ_k = 1/k for k = 1, ..., m
pub fn eigenvalues_linear(m: usize) -> Vec<f64> {
    let mut vals = Vec::with_capacity(m);
    for k in 1..=m {
        vals.push(1.0 / k as f64);
    }
    vals
}
236
/// Generate eigenvalue sequence with exponential decay.
///
/// λ_k = exp(-k) for k = 1, ..., m
pub fn eigenvalues_exponential(m: usize) -> Vec<f64> {
    let mut vals = Vec::with_capacity(m);
    for k in 1..=m {
        vals.push((-(k as f64)).exp());
    }
    vals
}
243
/// Generate Wiener process eigenvalues.
///
/// λ_k = 1/((k - 0.5)π)² for k = 1, ..., m
///
/// These are the eigenvalues of the covariance kernel K(s,t) = min(s,t).
pub fn eigenvalues_wiener(m: usize) -> Vec<f64> {
    (0..m)
        .map(|j| {
            // j is zero-based, so k - 0.5 == j + 0.5.
            let denom = (j as f64 + 0.5) * PI;
            1.0 / (denom * denom)
        })
        .collect()
}
257
/// Unified eigenvalue computation.
///
/// Dispatches to the concrete decay sequence selected by `eval_type`.
///
/// # Arguments
/// * `m` - Number of eigenvalues
/// * `eval_type` - Type of eigenvalue decay
///
/// # Returns
/// Vector of m eigenvalues in decreasing order
pub fn eigenvalues(m: usize, eval_type: EValType) -> Vec<f64> {
    match eval_type {
        EValType::Linear => eigenvalues_linear(m),
        EValType::Exponential => eigenvalues_exponential(m),
        EValType::Wiener => eigenvalues_wiener(m),
    }
}
273
274// =============================================================================
275// Karhunen-Loève Simulation
276// =============================================================================
277
/// Simulate functional data via Karhunen-Loève expansion.
///
/// Generates n curves using the truncated KL representation:
/// f_i(t) = Σ_{k=1}^{M} ξ_{ik} φ_k(t)
/// where ξ_{ik} ~ N(0, λ_k)
///
/// # Arguments
/// * `n` - Number of curves to generate
/// * `phi` - Eigenfunctions matrix (m × big_m) as `FdMatrix`; rows index the
///   m evaluation points, columns index the eigenfunctions
/// * `big_m` - Number of eigenfunctions
/// * `lambda` - Eigenvalues (length big_m)
/// * `seed` - Optional random seed for reproducibility
///
/// # Panics
/// Panics if `lambda.len() < big_m` (indexed up to `big_m` below) or if `phi`
/// has fewer than `big_m` columns.
///
/// # Returns
/// Data `FdMatrix` of size `n × m`
pub fn sim_kl(
    n: usize,
    phi: &FdMatrix,
    big_m: usize,
    lambda: &[f64],
    seed: Option<u64>,
) -> FdMatrix {
    // Number of evaluation points per curve.
    let m = phi.nrows();

    // Create RNG: seeded for reproducibility, otherwise from OS entropy.
    let mut rng = match seed {
        Some(s) => StdRng::seed_from_u64(s),
        None => StdRng::from_entropy(),
    };

    // Standard normal; per-component variance λ_k is applied via scaling below.
    let normal = Normal::new(0.0, 1.0).unwrap();

    // Generate scores ξ ~ N(0, λ) for all curves.
    // xi is n × big_m in column-major format (curves down the rows).
    let mut xi = vec![0.0; n * big_m];
    for k in 0..big_m {
        // ξ_{ik} = √λ_k · z with z ~ N(0,1), so Var(ξ_{ik}) = λ_k.
        let sd = lambda[k].sqrt();
        for i in 0..n {
            xi[i + k * n] = rng.sample::<f64, _>(normal) * sd;
        }
    }

    // Compute data = xi * phi^T
    // xi: n × big_m, phi: m × big_m -> data: n × m
    let mut data = vec![0.0; n * m];

    // Parallelize over columns (evaluation points): each length-n column of
    // `data` is filled independently.
    // NOTE(review): assumes `maybe_par_chunks_mut_enumerate!` splits `data`
    // into chunks of size n and hands each closure (chunk_index, chunk) —
    // confirm against the macro definition in the crate root.
    maybe_par_chunks_mut_enumerate!(data, n, |(j, col)| {
        for i in 0..n {
            let mut sum = 0.0;
            for k in 0..big_m {
                // phi[(j, k)] is φ_k(t_j)
                // xi[i + k*n] is ξ_{ik}
                sum += xi[i + k * n] * phi[(j, k)];
            }
            col[i] = sum;
        }
    });

    FdMatrix::from_column_major(data, n, m).unwrap()
}
339
340/// Simulate functional data with specified eigenfunction and eigenvalue types.
341///
342/// Convenience function that combines eigenfunction and eigenvalue generation
343/// with KL simulation.
344///
345/// # Arguments
346/// * `n` - Number of curves to generate
347/// * `t` - Evaluation points
348/// * `big_m` - Number of eigenfunctions/eigenvalues to use
349/// * `efun_type` - Type of eigenfunction basis
350/// * `eval_type` - Type of eigenvalue decay
351/// * `seed` - Optional random seed
352///
353/// # Returns
354/// Data `FdMatrix` of size `n × len(t)`
355pub fn sim_fundata(
356    n: usize,
357    t: &[f64],
358    big_m: usize,
359    efun_type: EFunType,
360    eval_type: EValType,
361    seed: Option<u64>,
362) -> FdMatrix {
363    let phi = eigenfunctions(t, big_m, efun_type);
364    let lambda = eigenvalues(big_m, eval_type);
365    sim_kl(n, &phi, big_m, &lambda, seed)
366}
367
368// =============================================================================
369// Noise Addition
370// =============================================================================
371
372/// Add pointwise Gaussian noise to functional data.
373///
374/// Adds independent N(0, σ²) noise to each point.
375///
376/// # Arguments
377/// * `data` - Data `FdMatrix` (n × m)
378/// * `sd` - Standard deviation of noise
379/// * `seed` - Optional random seed
380///
381/// # Returns
382/// Noisy data `FdMatrix` (n × m)
383pub fn add_error_pointwise(data: &FdMatrix, sd: f64, seed: Option<u64>) -> FdMatrix {
384    let mut rng = match seed {
385        Some(s) => StdRng::seed_from_u64(s),
386        None => StdRng::from_entropy(),
387    };
388
389    let normal = Normal::new(0.0, sd).unwrap();
390
391    let noisy: Vec<f64> = data
392        .as_slice()
393        .iter()
394        .map(|&x| x + rng.sample::<f64, _>(normal))
395        .collect();
396
397    FdMatrix::from_column_major(noisy, data.nrows(), data.ncols()).unwrap()
398}
399
400/// Add curve-level Gaussian noise to functional data.
401///
402/// Adds a constant noise term per curve: each observation in curve i
403/// has the same noise value.
404///
405/// # Arguments
406/// * `data` - Data `FdMatrix` (n × m)
407/// * `sd` - Standard deviation of noise
408/// * `seed` - Optional random seed
409///
410/// # Returns
411/// Noisy data `FdMatrix` (n × m)
412pub fn add_error_curve(data: &FdMatrix, sd: f64, seed: Option<u64>) -> FdMatrix {
413    let n = data.nrows();
414    let m = data.ncols();
415
416    let mut rng = match seed {
417        Some(s) => StdRng::seed_from_u64(s),
418        None => StdRng::from_entropy(),
419    };
420
421    let normal = Normal::new(0.0, sd).unwrap();
422
423    // Generate one noise value per curve
424    let curve_noise: Vec<f64> = (0..n).map(|_| rng.sample::<f64, _>(normal)).collect();
425
426    // Add to data
427    let mut result = data.as_slice().to_vec();
428    for j in 0..m {
429        for i in 0..n {
430            result[i + j * n] += curve_noise[i];
431        }
432    }
433    FdMatrix::from_column_major(result, n, m).unwrap()
434}
435
// Unit tests for eigenfunction generation, eigenvalue sequences,
// Karhunen-Loève simulation, and noise addition.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_fourier_eigenfunctions_dimensions() {
        let t: Vec<f64> = (0..100).map(|i| i as f64 / 99.0).collect();
        let phi = fourier_eigenfunctions(&t, 5);
        assert_eq!(phi.nrows(), 100);
        assert_eq!(phi.ncols(), 5);
        assert_eq!(phi.len(), 100 * 5);
    }

    #[test]
    fn test_fourier_eigenfunctions_first_is_constant() {
        let t: Vec<f64> = (0..100).map(|i| i as f64 / 99.0).collect();
        let phi = fourier_eigenfunctions(&t, 3);

        // First eigenfunction should be constant 1
        for i in 0..100 {
            assert!((phi[(i, 0)] - 1.0).abs() < 1e-10);
        }
    }

    #[test]
    fn test_eigenvalues_linear() {
        // λ_k = 1/k
        let lambda = eigenvalues_linear(5);
        assert_eq!(lambda.len(), 5);
        assert!((lambda[0] - 1.0).abs() < 1e-10);
        assert!((lambda[1] - 0.5).abs() < 1e-10);
        assert!((lambda[2] - 1.0 / 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_eigenvalues_exponential() {
        // λ_k = exp(-k)
        let lambda = eigenvalues_exponential(3);
        assert_eq!(lambda.len(), 3);
        assert!((lambda[0] - (-1.0_f64).exp()).abs() < 1e-10);
        assert!((lambda[1] - (-2.0_f64).exp()).abs() < 1e-10);
    }

    #[test]
    fn test_sim_kl_dimensions() {
        // n curves × len(t) points, regardless of basis size
        let t: Vec<f64> = (0..50).map(|i| i as f64 / 49.0).collect();
        let phi = fourier_eigenfunctions(&t, 5);
        let lambda = eigenvalues_linear(5);

        let data = sim_kl(10, &phi, 5, &lambda, Some(42));
        assert_eq!(data.nrows(), 10);
        assert_eq!(data.ncols(), 50);
        assert_eq!(data.len(), 10 * 50);
    }

    #[test]
    fn test_sim_fundata_dimensions() {
        let t: Vec<f64> = (0..100).map(|i| i as f64 / 99.0).collect();
        let data = sim_fundata(20, &t, 5, EFunType::Fourier, EValType::Linear, Some(42));
        assert_eq!(data.nrows(), 20);
        assert_eq!(data.ncols(), 100);
        assert_eq!(data.len(), 20 * 100);
    }

    #[test]
    fn test_add_error_pointwise() {
        let raw = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]; // 2 x 3 matrix
        let data = FdMatrix::from_column_major(raw.clone(), 2, 3).unwrap();
        let noisy = add_error_pointwise(&data, 0.1, Some(42));
        assert_eq!(noisy.len(), 6);
        // Check that values changed but not by too much
        // (sd = 0.1, so a deviation of 1.0 would be a 10-sigma event).
        let noisy_slice = noisy.as_slice();
        for i in 0..6 {
            assert!((noisy_slice[i] - raw[i]).abs() < 1.0);
        }
    }

    #[test]
    fn test_legendre_orthonormality() {
        // Test that Legendre eigenfunctions are approximately orthonormal
        // via a Riemann-sum approximation of the L² inner product on [0,1].
        let n = 1000;
        let t: Vec<f64> = (0..n).map(|i| i as f64 / (n - 1) as f64).collect();
        let m = 5;
        let phi = legendre_eigenfunctions(&t, m, false);
        let dt = 1.0 / (n - 1) as f64;

        // Check orthonormality
        for j1 in 0..m {
            for j2 in 0..m {
                let mut integral = 0.0;
                for i in 0..n {
                    integral += phi[(i, j1)] * phi[(i, j2)] * dt;
                }
                let expected = if j1 == j2 { 1.0 } else { 0.0 };
                assert!(
                    (integral - expected).abs() < 0.05,
                    "Orthonormality check failed for ({}, {}): {} vs {}",
                    j1,
                    j2,
                    integral,
                    expected
                );
            }
        }
    }

    // ========================================================================
    // Wiener eigenfunction tests
    // ========================================================================

    #[test]
    fn test_wiener_eigenfunctions_dimensions() {
        let t: Vec<f64> = (0..100).map(|i| i as f64 / 99.0).collect();
        let phi = wiener_eigenfunctions(&t, 7);
        assert_eq!(phi.nrows(), 100);
        assert_eq!(phi.ncols(), 7);
        assert_eq!(phi.len(), 100 * 7);
    }

    #[test]
    fn test_wiener_eigenfunctions_orthonormality() {
        // Wiener eigenfunctions: sqrt(2)*sin((k-0.5)*pi*t)
        // Should be orthonormal on [0,1]
        let n = 1000;
        let t: Vec<f64> = (0..n).map(|i| i as f64 / (n - 1) as f64).collect();
        let m = 5;
        let phi = wiener_eigenfunctions(&t, m);
        let dt = 1.0 / (n - 1) as f64;

        for j1 in 0..m {
            for j2 in 0..m {
                let mut integral = 0.0;
                for i in 0..n {
                    integral += phi[(i, j1)] * phi[(i, j2)] * dt;
                }
                let expected = if j1 == j2 { 1.0 } else { 0.0 };
                assert!(
                    (integral - expected).abs() < 0.05,
                    "Wiener orthonormality failed for ({}, {}): {} vs {}",
                    j1,
                    j2,
                    integral,
                    expected
                );
            }
        }
    }

    #[test]
    fn test_wiener_eigenfunctions_analytical_form() {
        // φ_k(t) = sqrt(2) * sin((k - 0.5) * pi * t)
        let t = vec![0.0, 0.25, 0.5, 0.75, 1.0];
        let phi = wiener_eigenfunctions(&t, 2);
        let sqrt2 = 2.0_f64.sqrt();

        // First eigenfunction: k=1, freq = 0.5*pi
        for (i, &ti) in t.iter().enumerate() {
            let expected = sqrt2 * (0.5 * PI * ti).sin();
            assert!(
                (phi[(i, 0)] - expected).abs() < 1e-10,
                "k=1 at t={}: got {} expected {}",
                ti,
                phi[(i, 0)],
                expected
            );
        }

        // Second eigenfunction: k=2, freq = 1.5*pi
        for (i, &ti) in t.iter().enumerate() {
            let expected = sqrt2 * (1.5 * PI * ti).sin();
            assert!(
                (phi[(i, 1)] - expected).abs() < 1e-10,
                "k=2 at t={}: got {} expected {}",
                ti,
                phi[(i, 1)],
                expected
            );
        }
    }

    // ========================================================================
    // Wiener eigenvalue tests
    // ========================================================================

    #[test]
    fn test_eigenvalues_wiener_decay_rate() {
        // λ_k = 1/((k - 0.5)*pi)^2
        let lambda = eigenvalues_wiener(5);
        assert_eq!(lambda.len(), 5);

        for k in 1..=5 {
            let denom = (k as f64 - 0.5) * PI;
            let expected = 1.0 / (denom * denom);
            assert!(
                (lambda[k - 1] - expected).abs() < 1e-12,
                "Wiener eigenvalue k={}: got {} expected {}",
                k,
                lambda[k - 1],
                expected
            );
        }
    }

    #[test]
    fn test_eigenvalues_wiener_decreasing() {
        // Wiener eigenvalues should decrease monotonically
        let lambda = eigenvalues_wiener(10);

        for i in 1..lambda.len() {
            assert!(
                lambda[i] < lambda[i - 1],
                "Eigenvalues not decreasing at {}: {} >= {}",
                i,
                lambda[i],
                lambda[i - 1]
            );
        }
    }

    // ========================================================================
    // add_error_curve tests
    // ========================================================================

    #[test]
    fn test_add_error_curve_properties() {
        // Curve-level noise: each observation in same curve gets same noise
        let raw = vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]; // 2 curves x 3 points
        let n = 2;
        let data = FdMatrix::from_column_major(raw.clone(), n, 3).unwrap();
        let noisy = add_error_curve(&data, 0.5, Some(42));

        assert_eq!(noisy.len(), 6);

        // Compute the difference for curve 0 at each point
        // (raw indices follow column-major layout: entry (i, j) is raw[i + j*n])
        let diff0_j0 = noisy[(0, 0)] - raw[0]; // curve 0, point 0
        let diff0_j1 = noisy[(0, 1)] - raw[n]; // curve 0, point 1
        let diff0_j2 = noisy[(0, 2)] - raw[2 * n]; // curve 0, point 2

        // All differences for same curve should be equal (same noise added)
        assert!(
            (diff0_j0 - diff0_j1).abs() < 1e-10,
            "Curve 0 noise differs: {} vs {}",
            diff0_j0,
            diff0_j1
        );
        assert!(
            (diff0_j0 - diff0_j2).abs() < 1e-10,
            "Curve 0 noise differs: {} vs {}",
            diff0_j0,
            diff0_j2
        );

        // Curve 1 should have different noise
        let diff1_j0 = noisy[(1, 0)] - raw[1];
        // Different curves should (with high probability) have different noise
        // We can't guarantee this, but with seed=42 they should differ
        assert!(
            (diff0_j0 - diff1_j0).abs() > 1e-10,
            "Different curves got same noise"
        );
    }

    #[test]
    fn test_add_error_curve_reproducibility() {
        // Same seed must produce identical output.
        let raw = vec![1.0, 2.0, 3.0, 4.0];
        let data = FdMatrix::from_column_major(raw, 2, 2).unwrap();
        let noisy1 = add_error_curve(&data, 1.0, Some(123));
        let noisy2 = add_error_curve(&data, 1.0, Some(123));

        let s1 = noisy1.as_slice();
        let s2 = noisy2.as_slice();
        for i in 0..4 {
            assert!(
                (s1[i] - s2[i]).abs() < 1e-10,
                "Reproducibility failed at {}: {} vs {}",
                i,
                s1[i],
                s2[i]
            );
        }
    }

    // ========================================================================
    // Enum dispatcher tests
    // ========================================================================

    #[test]
    fn test_efun_type_from_i32() {
        assert_eq!(EFunType::from_i32(0), Some(EFunType::Fourier));
        assert_eq!(EFunType::from_i32(1), Some(EFunType::Poly));
        assert_eq!(EFunType::from_i32(2), Some(EFunType::PolyHigh));
        assert_eq!(EFunType::from_i32(3), Some(EFunType::Wiener));
        assert_eq!(EFunType::from_i32(-1), None);
        assert_eq!(EFunType::from_i32(4), None);
        assert_eq!(EFunType::from_i32(100), None);
    }

    #[test]
    fn test_eval_type_from_i32() {
        assert_eq!(EValType::from_i32(0), Some(EValType::Linear));
        assert_eq!(EValType::from_i32(1), Some(EValType::Exponential));
        assert_eq!(EValType::from_i32(2), Some(EValType::Wiener));
        assert_eq!(EValType::from_i32(-1), None);
        assert_eq!(EValType::from_i32(3), None);
        assert_eq!(EValType::from_i32(99), None);
    }

    #[test]
    fn test_eigenfunctions_dispatcher() {
        let t: Vec<f64> = (0..50).map(|i| i as f64 / 49.0).collect();
        let m = 4;

        // Test that dispatcher returns correct results for each type
        let phi_fourier = eigenfunctions(&t, m, EFunType::Fourier);
        let phi_fourier_direct = fourier_eigenfunctions(&t, m);
        assert_eq!(phi_fourier, phi_fourier_direct);

        let phi_poly = eigenfunctions(&t, m, EFunType::Poly);
        let phi_poly_direct = legendre_eigenfunctions(&t, m, false);
        assert_eq!(phi_poly, phi_poly_direct);

        let phi_poly_high = eigenfunctions(&t, m, EFunType::PolyHigh);
        let phi_poly_high_direct = legendre_eigenfunctions(&t, m, true);
        assert_eq!(phi_poly_high, phi_poly_high_direct);

        let phi_wiener = eigenfunctions(&t, m, EFunType::Wiener);
        let phi_wiener_direct = wiener_eigenfunctions(&t, m);
        assert_eq!(phi_wiener, phi_wiener_direct);
    }
}