fdars_core/
depth.rs

1//! Depth measures for functional data.
2//!
3//! This module provides various depth measures for assessing the centrality
4//! of functional observations within a reference sample.
5
6use crate::helpers::simpsons_weights;
7use rand::prelude::*;
8use rand_distr::StandardNormal;
9use rayon::prelude::*;
10
11/// Compute Fraiman-Muniz depth for 1D functional data.
12///
13/// Uses the FM1 formula: d = 1 - |0.5 - Fn(x)|
14/// With scale=true: d = 2 * min(Fn(x), 1-Fn(x))
15///
16/// # Arguments
17/// * `data_obj` - Data to compute depth for (column-major, nobj x n_points)
18/// * `data_ori` - Reference data (column-major, nori x n_points)
19/// * `nobj` - Number of objects
20/// * `nori` - Number of reference observations
21/// * `n_points` - Number of evaluation points
22/// * `scale` - Whether to scale the depth values
23pub fn fraiman_muniz_1d(
24    data_obj: &[f64],
25    data_ori: &[f64],
26    nobj: usize,
27    nori: usize,
28    n_points: usize,
29    scale: bool,
30) -> Vec<f64> {
31    if nobj == 0 || nori == 0 || n_points == 0 {
32        return Vec::new();
33    }
34
35    let scale_factor = if scale { 2.0 } else { 1.0 };
36
37    (0..nobj)
38        .into_par_iter()
39        .map(|i| {
40            let mut depth_sum = 0.0;
41
42            for t in 0..n_points {
43                let x_t = data_obj[i + t * nobj];
44                let mut le_count = 0;
45
46                for j in 0..nori {
47                    let y_t = data_ori[j + t * nori];
48                    if y_t <= x_t {
49                        le_count += 1;
50                    }
51                }
52
53                let fn_x = le_count as f64 / nori as f64;
54                let univariate_depth = fn_x.min(1.0 - fn_x) * scale_factor;
55                depth_sum += univariate_depth;
56            }
57
58            depth_sum / n_points as f64
59        })
60        .collect()
61}
62
63/// Compute Fraiman-Muniz depth for 2D functional data (surfaces).
64pub fn fraiman_muniz_2d(
65    data_obj: &[f64],
66    data_ori: &[f64],
67    nobj: usize,
68    nori: usize,
69    n_points: usize,
70    scale: bool,
71) -> Vec<f64> {
72    // Same implementation as 1D - iterate over all grid points
73    fraiman_muniz_1d(data_obj, data_ori, nobj, nori, n_points, scale)
74}
75
76/// Compute modal depth for 1D functional data.
77///
78/// Uses a Gaussian kernel to measure density around each curve.
79///
80/// # Arguments
81/// * `data_obj` - Data to compute depth for
82/// * `data_ori` - Reference data
83/// * `nobj` - Number of objects
84/// * `nori` - Number of reference observations
85/// * `n_points` - Number of evaluation points
86/// * `h` - Bandwidth parameter
87pub fn modal_1d(
88    data_obj: &[f64],
89    data_ori: &[f64],
90    nobj: usize,
91    nori: usize,
92    n_points: usize,
93    h: f64,
94) -> Vec<f64> {
95    if nobj == 0 || nori == 0 || n_points == 0 {
96        return Vec::new();
97    }
98
99    (0..nobj)
100        .into_par_iter()
101        .map(|i| {
102            let mut depth = 0.0;
103
104            for j in 0..nori {
105                let mut dist_sq = 0.0;
106                for t in 0..n_points {
107                    let diff = data_obj[i + t * nobj] - data_ori[j + t * nori];
108                    dist_sq += diff * diff;
109                }
110                let dist = (dist_sq / n_points as f64).sqrt();
111                let kernel_val = (-0.5 * (dist / h).powi(2)).exp();
112                depth += kernel_val;
113            }
114
115            depth / nori as f64
116        })
117        .collect()
118}
119
120/// Compute modal depth for 2D functional data.
121pub fn modal_2d(
122    data_obj: &[f64],
123    data_ori: &[f64],
124    nobj: usize,
125    nori: usize,
126    n_points: usize,
127    h: f64,
128) -> Vec<f64> {
129    modal_1d(data_obj, data_ori, nobj, nori, n_points, h)
130}
131
132/// Compute random projection depth for 1D functional data.
133///
134/// Projects curves to scalars using random projections and computes
135/// average univariate depth.
136///
137/// # Arguments
138/// * `data_obj` - Data to compute depth for
139/// * `data_ori` - Reference data
140/// * `nobj` - Number of objects
141/// * `nori` - Number of reference observations
142/// * `n_points` - Number of evaluation points
143/// * `nproj` - Number of random projections
144pub fn random_projection_1d(
145    data_obj: &[f64],
146    data_ori: &[f64],
147    nobj: usize,
148    nori: usize,
149    n_points: usize,
150    nproj: usize,
151) -> Vec<f64> {
152    if nobj == 0 || nori == 0 || n_points == 0 || nproj == 0 {
153        return Vec::new();
154    }
155
156    let mut rng = rand::thread_rng();
157    let projections: Vec<Vec<f64>> = (0..nproj)
158        .map(|_| {
159            let mut proj: Vec<f64> = (0..n_points).map(|_| rng.sample(StandardNormal)).collect();
160            let norm: f64 = proj.iter().map(|x| x * x).sum::<f64>().sqrt();
161            proj.iter_mut().for_each(|x| *x /= norm);
162            proj
163        })
164        .collect();
165
166    (0..nobj)
167        .into_par_iter()
168        .map(|i| {
169            let mut total_depth = 0.0;
170
171            for proj in &projections {
172                let mut proj_i = 0.0;
173                for t in 0..n_points {
174                    proj_i += data_obj[i + t * nobj] * proj[t];
175                }
176
177                let mut proj_ori: Vec<f64> = (0..nori)
178                    .map(|j| {
179                        let mut p = 0.0;
180                        for t in 0..n_points {
181                            p += data_ori[j + t * nori] * proj[t];
182                        }
183                        p
184                    })
185                    .collect();
186
187                proj_ori.sort_by(|a, b| a.partial_cmp(b).unwrap());
188
189                let below = proj_ori.iter().filter(|&&x| x < proj_i).count();
190                let above = proj_ori.iter().filter(|&&x| x > proj_i).count();
191                let depth = (below.min(above) as f64 + 1.0) / (nori as f64 + 1.0);
192
193                total_depth += depth;
194            }
195
196            total_depth / nproj as f64
197        })
198        .collect()
199}
200
201/// Compute random projection depth for 2D functional data.
202pub fn random_projection_2d(
203    data_obj: &[f64],
204    data_ori: &[f64],
205    nobj: usize,
206    nori: usize,
207    n_points: usize,
208    nproj: usize,
209) -> Vec<f64> {
210    random_projection_1d(data_obj, data_ori, nobj, nori, n_points, nproj)
211}
212
213/// Compute random Tukey depth for 1D functional data.
214///
215/// Takes the minimum over all random projections (more conservative than RP depth).
216pub fn random_tukey_1d(
217    data_obj: &[f64],
218    data_ori: &[f64],
219    nobj: usize,
220    nori: usize,
221    n_points: usize,
222    nproj: usize,
223) -> Vec<f64> {
224    if nobj == 0 || nori == 0 || n_points == 0 || nproj == 0 {
225        return Vec::new();
226    }
227
228    let mut rng = rand::thread_rng();
229    let projections: Vec<Vec<f64>> = (0..nproj)
230        .map(|_| {
231            let mut proj: Vec<f64> = (0..n_points).map(|_| rng.sample(StandardNormal)).collect();
232            let norm: f64 = proj.iter().map(|x| x * x).sum::<f64>().sqrt();
233            proj.iter_mut().for_each(|x| *x /= norm);
234            proj
235        })
236        .collect();
237
238    (0..nobj)
239        .into_par_iter()
240        .map(|i| {
241            let mut min_depth = f64::INFINITY;
242
243            for proj in &projections {
244                let mut proj_i = 0.0;
245                for t in 0..n_points {
246                    proj_i += data_obj[i + t * nobj] * proj[t];
247                }
248
249                let proj_ori: Vec<f64> = (0..nori)
250                    .map(|j| {
251                        let mut p = 0.0;
252                        for t in 0..n_points {
253                            p += data_ori[j + t * nori] * proj[t];
254                        }
255                        p
256                    })
257                    .collect();
258
259                let below = proj_ori.iter().filter(|&&x| x < proj_i).count();
260                let above = proj_ori.iter().filter(|&&x| x > proj_i).count();
261                let depth = (below.min(above) as f64 + 1.0) / (nori as f64 + 1.0);
262
263                min_depth = min_depth.min(depth);
264            }
265
266            min_depth
267        })
268        .collect()
269}
270
271/// Compute random Tukey depth for 2D functional data.
272pub fn random_tukey_2d(
273    data_obj: &[f64],
274    data_ori: &[f64],
275    nobj: usize,
276    nori: usize,
277    n_points: usize,
278    nproj: usize,
279) -> Vec<f64> {
280    random_tukey_1d(data_obj, data_ori, nobj, nori, n_points, nproj)
281}
282
283/// Compute Functional Spatial Depth for 1D functional data.
284pub fn functional_spatial_1d(
285    data_obj: &[f64],
286    data_ori: &[f64],
287    nobj: usize,
288    nori: usize,
289    n_points: usize,
290) -> Vec<f64> {
291    if nobj == 0 || nori == 0 || n_points == 0 {
292        return Vec::new();
293    }
294
295    (0..nobj)
296        .into_par_iter()
297        .map(|i| {
298            let mut sum_unit = vec![0.0; n_points];
299
300            for j in 0..nori {
301                let mut direction = vec![0.0; n_points];
302                let mut norm_sq = 0.0;
303
304                for t in 0..n_points {
305                    direction[t] = data_ori[j + t * nori] - data_obj[i + t * nobj];
306                    norm_sq += direction[t] * direction[t];
307                }
308
309                let norm = norm_sq.sqrt();
310                if norm > 1e-10 {
311                    for t in 0..n_points {
312                        sum_unit[t] += direction[t] / norm;
313                    }
314                }
315            }
316
317            let mut avg_norm_sq = 0.0;
318            for t in 0..n_points {
319                let avg = sum_unit[t] / nori as f64;
320                avg_norm_sq += avg * avg;
321            }
322
323            1.0 - avg_norm_sq.sqrt()
324        })
325        .collect()
326}
327
328/// Compute Functional Spatial Depth for 2D functional data.
329pub fn functional_spatial_2d(
330    data_obj: &[f64],
331    data_ori: &[f64],
332    nobj: usize,
333    nori: usize,
334    n_points: usize,
335) -> Vec<f64> {
336    functional_spatial_1d(data_obj, data_ori, nobj, nori, n_points)
337}
338
339/// Compute Kernel Functional Spatial Depth (KFSD) for 1D functional data.
340///
341/// Implements the RKHS-based formulation.
342pub fn kernel_functional_spatial_1d(
343    data_obj: &[f64],
344    data_ori: &[f64],
345    nobj: usize,
346    nori: usize,
347    n_points: usize,
348    argvals: &[f64],
349    h: f64,
350) -> Vec<f64> {
351    if nobj == 0 || nori == 0 || n_points == 0 {
352        return Vec::new();
353    }
354
355    let weights = simpsons_weights(argvals);
356    let h_sq = h * h;
357
358    // Helper function to compute integrated L2 norm squared
359    let norm_sq_integrated = |data1: &[f64],
360                              row1: usize,
361                              nrow1: usize,
362                              data2: &[f64],
363                              row2: usize,
364                              nrow2: usize|
365     -> f64 {
366        let mut sum = 0.0;
367        for t in 0..n_points {
368            let diff = data1[row1 + t * nrow1] - data2[row2 + t * nrow2];
369            sum += weights[t] * diff * diff;
370        }
371        sum
372    };
373
374    let kern = |dist_sq: f64| -> f64 { (-dist_sq / h_sq).exp() };
375
376    // Pre-compute M1[j,k] = K(X_j, X_k) for all pairs in reference data
377    let m1_upper: Vec<(usize, usize, f64)> = (0..nori)
378        .into_par_iter()
379        .flat_map(|j| {
380            ((j + 1)..nori)
381                .map(|k| {
382                    let mut sum = 0.0;
383                    for t in 0..n_points {
384                        let diff = data_ori[j + t * nori] - data_ori[k + t * nori];
385                        sum += weights[t] * diff * diff;
386                    }
387                    let kval = (-sum / h_sq).exp();
388                    (j, k, kval)
389                })
390                .collect::<Vec<_>>()
391        })
392        .collect();
393
394    let mut m1 = vec![vec![0.0; nori]; nori];
395    for j in 0..nori {
396        m1[j][j] = 1.0;
397    }
398    for (j, k, kval) in m1_upper {
399        m1[j][k] = kval;
400        m1[k][j] = kval;
401    }
402
403    let nori_f64 = nori as f64;
404
405    (0..nobj)
406        .into_par_iter()
407        .map(|i| {
408            let k02_i = 1.0;
409
410            let m2: Vec<f64> = (0..nori)
411                .map(|j| {
412                    let d_sq = norm_sq_integrated(data_obj, i, nobj, data_ori, j, nori);
413                    kern(d_sq)
414                })
415                .collect();
416
417            let mut total_sum = 0.0;
418            let mut valid_count = 0;
419
420            for j in 0..nori {
421                let k01_j = 1.0;
422                let denom_j_sq = k02_i + k01_j - 2.0 * m2[j];
423
424                if denom_j_sq < 1e-20 {
425                    continue;
426                }
427                let denom_j = denom_j_sq.sqrt();
428
429                for k in 0..nori {
430                    let k01_k = 1.0;
431                    let denom_k_sq = k02_i + k01_k - 2.0 * m2[k];
432
433                    if denom_k_sq < 1e-20 {
434                        continue;
435                    }
436                    let denom_k = denom_k_sq.sqrt();
437
438                    let numerator = k02_i + m1[j][k] - m2[j] - m2[k];
439                    let denom = denom_j * denom_k;
440
441                    if denom > 1e-20 {
442                        let m_ijk = numerator / denom;
443                        if m_ijk.is_finite() {
444                            total_sum += m_ijk;
445                            valid_count += 1;
446                        }
447                    }
448                }
449            }
450
451            if valid_count > 0 && total_sum >= 0.0 {
452                1.0 - total_sum.sqrt() / nori_f64
453            } else if total_sum < 0.0 {
454                1.0
455            } else {
456                0.0
457            }
458        })
459        .collect()
460}
461
462/// Compute Kernel Functional Spatial Depth (KFSD) for 2D functional data.
463pub fn kernel_functional_spatial_2d(
464    data_obj: &[f64],
465    data_ori: &[f64],
466    nobj: usize,
467    nori: usize,
468    n_points: usize,
469    h: f64,
470) -> Vec<f64> {
471    if nobj == 0 || nori == 0 || n_points == 0 {
472        return Vec::new();
473    }
474
475    let h_sq = h * h;
476
477    let norm_sq = |data1: &[f64],
478                   row1: usize,
479                   nrow1: usize,
480                   data2: &[f64],
481                   row2: usize,
482                   nrow2: usize|
483     -> f64 {
484        let mut sum = 0.0;
485        for t in 0..n_points {
486            let diff = data1[row1 + t * nrow1] - data2[row2 + t * nrow2];
487            sum += diff * diff;
488        }
489        sum
490    };
491
492    let kern = |dist_sq: f64| -> f64 { (-dist_sq / h_sq).exp() };
493
494    // Pre-compute M1 matrix
495    let m1_upper: Vec<(usize, usize, f64)> = (0..nori)
496        .into_par_iter()
497        .flat_map(|j| {
498            ((j + 1)..nori)
499                .map(|k| {
500                    let d_sq = norm_sq(data_ori, j, nori, data_ori, k, nori);
501                    let kval = kern(d_sq);
502                    (j, k, kval)
503                })
504                .collect::<Vec<_>>()
505        })
506        .collect();
507
508    let mut m1_mat = vec![vec![0.0; nori]; nori];
509    for j in 0..nori {
510        m1_mat[j][j] = 1.0;
511    }
512    for (j, k, kval) in m1_upper {
513        m1_mat[j][k] = kval;
514        m1_mat[k][j] = kval;
515    }
516
517    let nori_f64 = nori as f64;
518
519    (0..nobj)
520        .into_par_iter()
521        .map(|i| {
522            let k02_i = 1.0;
523
524            let m2: Vec<f64> = (0..nori)
525                .map(|j| {
526                    let d_sq = norm_sq(data_obj, i, nobj, data_ori, j, nori);
527                    kern(d_sq)
528                })
529                .collect();
530
531            let mut total_sum = 0.0;
532            let mut valid_count = 0;
533
534            for j in 0..nori {
535                let k01_j = 1.0;
536                let denom_j_sq = k02_i + k01_j - 2.0 * m2[j];
537
538                if denom_j_sq < 1e-20 {
539                    continue;
540                }
541                let denom_j = denom_j_sq.sqrt();
542
543                for k in 0..nori {
544                    let k01_k = 1.0;
545                    let denom_k_sq = k02_i + k01_k - 2.0 * m2[k];
546
547                    if denom_k_sq < 1e-20 {
548                        continue;
549                    }
550                    let denom_k = denom_k_sq.sqrt();
551
552                    let numerator = k02_i + m1_mat[j][k] - m2[j] - m2[k];
553                    let denom = denom_j * denom_k;
554
555                    if denom > 1e-20 {
556                        let m_ijk = numerator / denom;
557                        if m_ijk.is_finite() {
558                            total_sum += m_ijk;
559                            valid_count += 1;
560                        }
561                    }
562                }
563            }
564
565            if valid_count > 0 && total_sum >= 0.0 {
566                1.0 - total_sum.sqrt() / nori_f64
567            } else if total_sum < 0.0 {
568                1.0
569            } else {
570                0.0
571            }
572        })
573        .collect()
574}
575
576/// Compute Band Depth (BD) for 1D functional data.
577///
578/// BD(x) = proportion of pairs (i,j) where x lies within the band formed by curves i and j.
579pub fn band_1d(
580    data_obj: &[f64],
581    data_ori: &[f64],
582    nobj: usize,
583    nori: usize,
584    n_points: usize,
585) -> Vec<f64> {
586    if nobj == 0 || nori < 2 || n_points == 0 {
587        return Vec::new();
588    }
589
590    let n_pairs = (nori * (nori - 1)) / 2;
591
592    (0..nobj)
593        .into_par_iter()
594        .map(|i| {
595            let mut count_in_band = 0usize;
596
597            for j in 0..nori {
598                for k in (j + 1)..nori {
599                    let mut inside_band = true;
600
601                    for t in 0..n_points {
602                        let x_t = data_obj[i + t * nobj];
603                        let y_j_t = data_ori[j + t * nori];
604                        let y_k_t = data_ori[k + t * nori];
605
606                        let band_min = y_j_t.min(y_k_t);
607                        let band_max = y_j_t.max(y_k_t);
608
609                        if x_t < band_min || x_t > band_max {
610                            inside_band = false;
611                            break;
612                        }
613                    }
614
615                    if inside_band {
616                        count_in_band += 1;
617                    }
618                }
619            }
620
621            count_in_band as f64 / n_pairs as f64
622        })
623        .collect()
624}
625
626/// Compute Modified Band Depth (MBD) for 1D functional data.
627///
628/// MBD(x) = average over pairs of the proportion of the domain where x is inside the band.
629pub fn modified_band_1d(
630    data_obj: &[f64],
631    data_ori: &[f64],
632    nobj: usize,
633    nori: usize,
634    n_points: usize,
635) -> Vec<f64> {
636    if nobj == 0 || nori < 2 || n_points == 0 {
637        return Vec::new();
638    }
639
640    let n_pairs = (nori * (nori - 1)) / 2;
641
642    (0..nobj)
643        .into_par_iter()
644        .map(|i| {
645            let mut total_proportion = 0.0;
646
647            for j in 0..nori {
648                for k in (j + 1)..nori {
649                    let mut count_inside = 0usize;
650
651                    for t in 0..n_points {
652                        let x_t = data_obj[i + t * nobj];
653                        let y_j_t = data_ori[j + t * nori];
654                        let y_k_t = data_ori[k + t * nori];
655
656                        let band_min = y_j_t.min(y_k_t);
657                        let band_max = y_j_t.max(y_k_t);
658
659                        if x_t >= band_min && x_t <= band_max {
660                            count_inside += 1;
661                        }
662                    }
663
664                    total_proportion += count_inside as f64 / n_points as f64;
665                }
666            }
667
668            total_proportion / n_pairs as f64
669        })
670        .collect()
671}
672
673/// Compute Modified Epigraph Index (MEI) for 1D functional data.
674///
675/// MEI measures the proportion of time a curve is below other curves.
676pub fn modified_epigraph_index_1d(
677    data_obj: &[f64],
678    data_ori: &[f64],
679    nobj: usize,
680    nori: usize,
681    n_points: usize,
682) -> Vec<f64> {
683    if nobj == 0 || nori == 0 || n_points == 0 {
684        return Vec::new();
685    }
686
687    (0..nobj)
688        .into_par_iter()
689        .map(|i| {
690            let mut total = 0.0;
691
692            for j in 0..nori {
693                let mut count = 0.0;
694
695                for t in 0..n_points {
696                    let xi = data_obj[i + t * nobj];
697                    let xj = data_ori[j + t * nori];
698
699                    if xi < xj {
700                        count += 1.0;
701                    } else if (xi - xj).abs() < 1e-12 {
702                        count += 0.5;
703                    }
704                }
705
706                total += count / n_points as f64;
707            }
708
709            total / nori as f64
710        })
711        .collect()
712}
713
714#[cfg(test)]
715mod tests {
716    use super::*;
717    use std::f64::consts::PI;
718
719    fn uniform_grid(n: usize) -> Vec<f64> {
720        (0..n).map(|i| i as f64 / (n - 1) as f64).collect()
721    }
722
723    fn generate_centered_data(n: usize, m: usize) -> Vec<f64> {
724        let argvals = uniform_grid(m);
725        let mut data = vec![0.0; n * m];
726        for i in 0..n {
727            let offset = (i as f64 - n as f64 / 2.0) / (n as f64);
728            for j in 0..m {
729                data[i + j * n] = (2.0 * PI * argvals[j]).sin() + offset;
730            }
731        }
732        data
733    }
734
735    // ============== Fraiman-Muniz tests ==============
736
737    #[test]
738    fn test_fraiman_muniz() {
739        // Simple test: identical data should give maximum depth
740        let data = vec![1.0, 1.0, 2.0, 2.0]; // 2 identical curves, 2 points each
741        let depths = fraiman_muniz_1d(&data, &data, 2, 2, 2, true);
742        assert_eq!(depths.len(), 2);
743    }
744
745    #[test]
746    fn test_fraiman_muniz_central_deeper() {
747        let n = 20;
748        let m = 30;
749        let data = generate_centered_data(n, m);
750        let depths = fraiman_muniz_1d(&data, &data, n, n, m, true);
751
752        // Central curve (index n/2) should have higher depth than extreme curves
753        let central_depth = depths[n / 2];
754        let edge_depth = depths[0];
755        assert!(
756            central_depth > edge_depth,
757            "Central curve should be deeper: {} > {}",
758            central_depth,
759            edge_depth
760        );
761    }
762
763    #[test]
764    fn test_fraiman_muniz_range() {
765        let n = 15;
766        let m = 20;
767        let data = generate_centered_data(n, m);
768        let depths = fraiman_muniz_1d(&data, &data, n, n, m, true);
769
770        for d in &depths {
771            assert!(*d >= 0.0 && *d <= 1.0, "Depth should be in [0, 1]");
772        }
773    }
774
775    #[test]
776    fn test_fraiman_muniz_invalid() {
777        assert!(fraiman_muniz_1d(&[], &[], 0, 0, 0, true).is_empty());
778    }
779
780    // ============== Modal depth tests ==============
781
782    #[test]
783    fn test_modal_central_deeper() {
784        let n = 20;
785        let m = 30;
786        let data = generate_centered_data(n, m);
787        let depths = modal_1d(&data, &data, n, n, m, 0.5);
788
789        let central_depth = depths[n / 2];
790        let edge_depth = depths[0];
791        assert!(central_depth > edge_depth, "Central curve should be deeper");
792    }
793
794    #[test]
795    fn test_modal_positive() {
796        let n = 10;
797        let m = 20;
798        let data = generate_centered_data(n, m);
799        let depths = modal_1d(&data, &data, n, n, m, 0.5);
800
801        for d in &depths {
802            assert!(*d > 0.0, "Modal depth should be positive");
803        }
804    }
805
806    #[test]
807    fn test_modal_invalid() {
808        assert!(modal_1d(&[], &[], 0, 0, 0, 0.5).is_empty());
809    }
810
811    // ============== Random projection depth tests ==============
812
813    #[test]
814    fn test_rp_depth_range() {
815        let n = 15;
816        let m = 20;
817        let data = generate_centered_data(n, m);
818        let depths = random_projection_1d(&data, &data, n, n, m, 50);
819
820        for d in &depths {
821            assert!(*d >= 0.0 && *d <= 1.0, "RP depth should be in [0, 1]");
822        }
823    }
824
825    #[test]
826    fn test_rp_depth_invalid() {
827        assert!(random_projection_1d(&[], &[], 0, 0, 0, 10).is_empty());
828    }
829
830    // ============== Random Tukey depth tests ==============
831
832    #[test]
833    fn test_random_tukey_range() {
834        let n = 15;
835        let m = 20;
836        let data = generate_centered_data(n, m);
837        let depths = random_tukey_1d(&data, &data, n, n, m, 50);
838
839        for d in &depths {
840            assert!(*d >= 0.0 && *d <= 1.0, "Tukey depth should be in [0, 1]");
841        }
842    }
843
844    #[test]
845    fn test_random_tukey_invalid() {
846        assert!(random_tukey_1d(&[], &[], 0, 0, 0, 10).is_empty());
847    }
848
849    // ============== Functional spatial depth tests ==============
850
851    #[test]
852    fn test_functional_spatial_range() {
853        let n = 15;
854        let m = 20;
855        let data = generate_centered_data(n, m);
856        let depths = functional_spatial_1d(&data, &data, n, n, m);
857
858        for d in &depths {
859            assert!(*d >= 0.0 && *d <= 1.0, "Spatial depth should be in [0, 1]");
860        }
861    }
862
863    #[test]
864    fn test_functional_spatial_invalid() {
865        assert!(functional_spatial_1d(&[], &[], 0, 0, 0).is_empty());
866    }
867
868    // ============== Band depth tests ==============
869
870    #[test]
871    fn test_band_depth_central_deeper() {
872        let n = 10;
873        let m = 20;
874        let data = generate_centered_data(n, m);
875        let depths = band_1d(&data, &data, n, n, m);
876
877        // Central curve should be in more bands
878        let central_depth = depths[n / 2];
879        let edge_depth = depths[0];
880        assert!(
881            central_depth >= edge_depth,
882            "Central curve should have higher band depth"
883        );
884    }
885
886    #[test]
887    fn test_band_depth_range() {
888        let n = 10;
889        let m = 20;
890        let data = generate_centered_data(n, m);
891        let depths = band_1d(&data, &data, n, n, m);
892
893        for d in &depths {
894            assert!(*d >= 0.0 && *d <= 1.0, "Band depth should be in [0, 1]");
895        }
896    }
897
898    #[test]
899    fn test_band_depth_invalid() {
900        assert!(band_1d(&[], &[], 0, 0, 0).is_empty());
901        assert!(band_1d(&[1.0, 2.0], &[1.0, 2.0], 1, 1, 2).is_empty()); // need at least 2 ref curves
902    }
903
904    // ============== Modified band depth tests ==============
905
906    #[test]
907    fn test_modified_band_depth_range() {
908        let n = 10;
909        let m = 20;
910        let data = generate_centered_data(n, m);
911        let depths = modified_band_1d(&data, &data, n, n, m);
912
913        for d in &depths {
914            assert!(*d >= 0.0 && *d <= 1.0, "MBD should be in [0, 1]");
915        }
916    }
917
918    #[test]
919    fn test_modified_band_depth_invalid() {
920        assert!(modified_band_1d(&[], &[], 0, 0, 0).is_empty());
921    }
922
923    // ============== Modified epigraph index tests ==============
924
925    #[test]
926    fn test_mei_range() {
927        let n = 15;
928        let m = 20;
929        let data = generate_centered_data(n, m);
930        let mei = modified_epigraph_index_1d(&data, &data, n, n, m);
931
932        for d in &mei {
933            assert!(*d >= 0.0 && *d <= 1.0, "MEI should be in [0, 1]");
934        }
935    }
936
937    #[test]
938    fn test_mei_invalid() {
939        assert!(modified_epigraph_index_1d(&[], &[], 0, 0, 0).is_empty());
940    }
941}