scirs2_core/ndarray_ext/reduction.rs

//! Array reduction operations with SIMD acceleration
//!
//! This module provides SIMD-accelerated reduction functions for 1D arrays:
//! index reductions (`argmin_simd`, `argmax_simd`, `argmin_k`, `argmax_k`),
//! cumulative operations (`cumsum_simd`, `cumprod_simd`), and scalar and
//! statistical reductions (`min_simd`, `max_simd`, `sum_simd`, `mean_simd`,
//! `variance_simd`, `std_simd`). These are fundamental operations for
//! optimization, statistics, and data analysis.

use crate::numeric::Float;
use crate::simd_ops::{AutoOptimizer, SimdUnifiedOps};
use ::ndarray::{Array1, ArrayView1};

/// Find the index of the minimum element in a 1D array with SIMD acceleration.
///
/// This function uses SIMD operations when beneficial for performance, providing
/// significant speedups for large arrays on supported platforms (AVX2, NEON).
///
/// # Arguments
///
/// * `x` - Input 1D array
///
/// # Returns
///
/// * `Some(index)` - Index of the minimum element
/// * `None` - If the array is empty
///
/// # Performance
///
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
/// - Automatically selects SIMD or scalar based on array size and platform capabilities
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::argmin_simd;
///
/// let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
/// let idx = argmin_simd(&x.view()).unwrap();
/// assert_eq!(idx, 1); // First occurrence of minimum value (1.0)
/// ```
#[allow(dead_code)]
pub fn argmin_simd<F>(x: &ArrayView1<F>) -> Option<usize>
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() {
        return None;
    }

    let optimizer = AutoOptimizer::new();

    // Use SIMD fast path for large arrays
    if optimizer.should_use_simd(x.len()) {
        return F::simd_argmin(x);
    }

    // Scalar fallback for small arrays or unsupported platforms
    let mut min_idx = 0;
    let mut min_val = x[0];

    for (i, &val) in x.iter().enumerate().skip(1) {
        if val < min_val {
            min_idx = i;
            min_val = val;
        }
    }

    Some(min_idx)
}

/// Find the index of the maximum element in a 1D array with SIMD acceleration.
///
/// This function uses SIMD operations when beneficial for performance, providing
/// significant speedups for large arrays on supported platforms (AVX2, NEON).
///
/// # Arguments
///
/// * `x` - Input 1D array
///
/// # Returns
///
/// * `Some(index)` - Index of the maximum element
/// * `None` - If the array is empty
///
/// # Performance
///
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
/// - Automatically selects SIMD or scalar based on array size and platform capabilities
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::argmax_simd;
///
/// let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
/// let idx = argmax_simd(&x.view()).unwrap();
/// assert_eq!(idx, 5); // Index of maximum value (9.0)
/// ```
#[allow(dead_code)]
pub fn argmax_simd<F>(x: &ArrayView1<F>) -> Option<usize>
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() {
        return None;
    }

    let optimizer = AutoOptimizer::new();

    // Use SIMD fast path for large arrays
    if optimizer.should_use_simd(x.len()) {
        return F::simd_argmax(x);
    }

    // Scalar fallback for small arrays or unsupported platforms
    let mut max_idx = 0;
    let mut max_val = x[0];

    for (i, &val) in x.iter().enumerate().skip(1) {
        if val > max_val {
            max_idx = i;
            max_val = val;
        }
    }

    Some(max_idx)
}

/// Find indices of top-k minimum elements in a 1D array.
///
/// Returns the indices of the k smallest elements in ascending order of their values.
///
/// # Arguments
///
/// * `x` - Input 1D array
/// * `k` - Number of minimum elements to find
///
/// # Returns
///
/// * Array of indices of the k minimum elements, sorted by value (ascending)
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::argmin_k;
///
/// let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
/// let indices = argmin_k(&x.view(), 3).unwrap();
/// // Returns indices of the 3 smallest values (1.0, 1.0, 2.0), typically [1, 3, 6];
/// // the relative order of tied values is not guaranteed.
/// assert_eq!(indices.len(), 3);
/// ```
#[allow(dead_code)]
pub fn argmin_k<F>(x: &ArrayView1<F>, k: usize) -> Option<Array1<usize>>
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() || k == 0 {
        return None;
    }

    let k = k.min(x.len()); // Cap k at array length

    // Create (index, value) pairs
    let mut indexed: Vec<(usize, F)> = x.iter().enumerate().map(|(i, &v)| (i, v)).collect();

    // Partial sort to find k smallest elements
    indexed.select_nth_unstable_by(k - 1, |a, b| {
        a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal)
    });

    // Extract and sort the k smallest indices by value
    let mut result: Vec<usize> = indexed[..k].iter().map(|(i, _)| *i).collect();
    result.sort_unstable_by(|&a, &b| x[a].partial_cmp(&x[b]).unwrap_or(std::cmp::Ordering::Equal));

    Some(Array1::from_vec(result))
}

/// Find indices of top-k maximum elements in a 1D array.
///
/// Returns the indices of the k largest elements in descending order of their values.
///
/// # Arguments
///
/// * `x` - Input 1D array
/// * `k` - Number of maximum elements to find
///
/// # Returns
///
/// * Array of indices of the k maximum elements, sorted by value (descending)
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::argmax_k;
///
/// let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
/// let indices = argmax_k(&x.view(), 3).unwrap();
/// // Returns indices of 3 largest values: [5, 4, 2] (values 9.0, 5.0, 4.0)
/// assert_eq!(indices.len(), 3);
/// ```
#[allow(dead_code)]
pub fn argmax_k<F>(x: &ArrayView1<F>, k: usize) -> Option<Array1<usize>>
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() || k == 0 {
        return None;
    }

    let k = k.min(x.len()); // Cap k at array length

    // Create (index, value) pairs
    let mut indexed: Vec<(usize, F)> = x.iter().enumerate().map(|(i, &v)| (i, v)).collect();

    // Partial sort to find k largest elements
    indexed.select_nth_unstable_by(k - 1, |a, b| {
        b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal) // Reverse comparison for max
    });

    // Extract and sort the k largest indices by value (descending)
    let mut result: Vec<usize> = indexed[..k].iter().map(|(i, _)| *i).collect();
    result.sort_unstable_by(|&a, &b| {
        x[b].partial_cmp(&x[a]).unwrap_or(std::cmp::Ordering::Equal) // Reverse for descending
    });

    Some(Array1::from_vec(result))
}

/// Compute cumulative sum of array elements with SIMD acceleration.
///
/// Returns a new array where each element is the sum of all elements up to and
/// including that position in the input array. This is also known as a prefix
/// sum or running total.
///
/// # Arguments
///
/// * `x` - Input 1D array
///
/// # Returns
///
/// * Array of cumulative sums with the same length as input
///
/// # Performance
///
/// - Uses SIMD acceleration for large arrays (> threshold)
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::cumsum_simd;
///
/// let x = array![1.0f32, 2.0, 3.0, 4.0, 5.0];
/// let result = cumsum_simd(&x.view());
/// // result = [1.0, 3.0, 6.0, 10.0, 15.0]
/// assert_eq!(result[0], 1.0);
/// assert_eq!(result[4], 15.0);
/// ```
#[allow(dead_code)]
pub fn cumsum_simd<F>(x: &ArrayView1<F>) -> Array1<F>
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() {
        return Array1::zeros(0);
    }

    let optimizer = AutoOptimizer::new();

    // Use SIMD fast path for large arrays
    if optimizer.should_use_simd(x.len()) {
        return F::simd_cumsum(x);
    }

    // Scalar fallback for small arrays
    let mut cumsum = x[0];
    let mut result = Array1::zeros(x.len());
    result[0] = cumsum;

    for i in 1..x.len() {
        cumsum = cumsum + x[i];
        result[i] = cumsum;
    }

    result
}

/// Compute cumulative product of array elements with SIMD acceleration.
///
/// Returns a new array where each element is the product of all elements up to
/// and including that position in the input array. This is also known as a
/// prefix product or running product.
///
/// # Arguments
///
/// * `x` - Input 1D array
///
/// # Returns
///
/// * Array of cumulative products with the same length as input
///
/// # Performance
///
/// - Uses SIMD acceleration for large arrays (> threshold)
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::cumprod_simd;
///
/// let x = array![1.0f32, 2.0, 3.0, 4.0, 5.0];
/// let result = cumprod_simd(&x.view());
/// // result = [1.0, 2.0, 6.0, 24.0, 120.0]
/// assert_eq!(result[0], 1.0);
/// assert_eq!(result[4], 120.0);
/// ```
#[allow(dead_code)]
pub fn cumprod_simd<F>(x: &ArrayView1<F>) -> Array1<F>
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() {
        return Array1::zeros(0);
    }

    let optimizer = AutoOptimizer::new();

    // Use SIMD fast path for large arrays
    if optimizer.should_use_simd(x.len()) {
        return F::simd_cumprod(x);
    }

    // Scalar fallback for small arrays
    let mut cumprod = x[0];
    let mut result = Array1::zeros(x.len());
    result[0] = cumprod;

    for i in 1..x.len() {
        cumprod = cumprod * x[i];
        result[i] = cumprod;
    }

    result
}

// ============================================================================
// Scalar Reduction Operations (Phase 29 - min/max values)
// ============================================================================

/// Find the minimum value in a 1D array with SIMD acceleration.
///
/// This function uses SIMD operations when beneficial for performance, providing
/// significant speedups for large arrays on supported platforms (AVX2, NEON).
///
/// # Arguments
///
/// * `x` - Input 1D array
///
/// # Returns
///
/// * `Some(value)` - The minimum value in the array
/// * `None` - If the array is empty
///
/// # Performance
///
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
/// - Automatically selects SIMD or scalar based on array size and platform capabilities
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::min_simd;
///
/// let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
/// let min_val = min_simd(&x.view()).unwrap();
/// assert_eq!(min_val, 1.0);
/// ```
#[allow(dead_code)]
pub fn min_simd<F>(x: &ArrayView1<F>) -> Option<F>
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() {
        return None;
    }

    let optimizer = AutoOptimizer::new();

    // Use SIMD fast path for large arrays
    if optimizer.should_use_simd(x.len()) {
        Some(F::simd_min_element(x))
    } else {
        // Scalar fallback for small arrays
        let mut min_val = x[0];
        for &val in x.iter().skip(1) {
            if val < min_val {
                min_val = val;
            }
        }
        Some(min_val)
    }
}

/// Find the maximum value in a 1D array with SIMD acceleration.
///
/// This function uses SIMD operations when beneficial for performance, providing
/// significant speedups for large arrays on supported platforms (AVX2, NEON).
///
/// # Arguments
///
/// * `x` - Input 1D array
///
/// # Returns
///
/// * `Some(value)` - The maximum value in the array
/// * `None` - If the array is empty
///
/// # Performance
///
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
/// - Automatically selects SIMD or scalar based on array size and platform capabilities
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::max_simd;
///
/// let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
/// let max_val = max_simd(&x.view()).unwrap();
/// assert_eq!(max_val, 9.0);
/// ```
#[allow(dead_code)]
pub fn max_simd<F>(x: &ArrayView1<F>) -> Option<F>
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() {
        return None;
    }

    let optimizer = AutoOptimizer::new();

    // Use SIMD fast path for large arrays
    if optimizer.should_use_simd(x.len()) {
        Some(F::simd_max_element(x))
    } else {
        // Scalar fallback for small arrays
        let mut max_val = x[0];
        for &val in x.iter().skip(1) {
            if val > max_val {
                max_val = val;
            }
        }
        Some(max_val)
    }
}

// ============================================================================
// Statistical Reduction Operations (Phase 30 - mean/variance/std)
// ============================================================================

/// Compute the arithmetic mean of a 1D array with SIMD acceleration.
///
/// This function uses SIMD operations when beneficial for performance, providing
/// significant speedups for large arrays on supported platforms (AVX2, NEON).
///
/// # Arguments
///
/// * `x` - Input 1D array
///
/// # Returns
///
/// * `Some(mean)` - The arithmetic mean of the array
/// * `None` - If the array is empty
///
/// # Performance
///
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
/// - Uses SIMD sum followed by division
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::mean_simd;
///
/// let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
/// let mean = mean_simd(&x.view()).unwrap();
/// assert!((mean - 3.0).abs() < 1e-10);
/// ```
#[allow(dead_code)]
pub fn mean_simd<F>(x: &ArrayView1<F>) -> Option<F>
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() {
        return None;
    }

    Some(F::simd_mean(x))
}

/// Compute the variance of a 1D array with SIMD acceleration.
///
/// This function uses SIMD operations for computing the variance with Welford's
/// algorithm for numerical stability. The variance is the sum of squared
/// deviations from the mean divided by `n - ddof`.
///
/// # Arguments
///
/// * `x` - Input 1D array
/// * `ddof` - Delta degrees of freedom (0 for population variance, 1 for sample variance)
///
/// # Returns
///
/// * `Some(variance)` - The variance of the array
/// * `None` - If the array is empty or has insufficient data (length <= ddof)
///
/// # Performance
///
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
/// - Uses SIMD operations for mean and sum of squared deviations
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::variance_simd;
///
/// let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
/// let var = variance_simd(&x.view(), 1).unwrap(); // Sample variance (ddof=1)
/// assert!((var - 2.5).abs() < 1e-10);
/// ```
#[allow(dead_code)]
pub fn variance_simd<F>(x: &ArrayView1<F>, ddof: usize) -> Option<F>
where
    F: Float + SimdUnifiedOps,
{
    let n = x.len();
    if n == 0 || n <= ddof {
        return None;
    }

    // Note: F::simd_variance computes SAMPLE variance (ddof=1, divides by n-1)
    // We need to adjust for different ddof values

    if ddof == 1 {
        // Use the built-in simd_variance which already computes sample variance
        return Some(F::simd_variance(x));
    }

    // For other ddof values, we need to adjust
    // simd_variance computes: sum_sq_dev / (n - 1)
    // We want: sum_sq_dev / (n - ddof)
    let sample_var = F::simd_variance(x); // This is sum_sq_dev / (n-1)
    let n_f = F::from(n).unwrap();
    let ddof_f = F::from(ddof).unwrap();

    // Convert: var(ddof) = var(ddof=1) * (n-1) / (n-ddof)
    Some(sample_var * (n_f - F::one()) / (n_f - ddof_f))
}

/// Compute the standard deviation of a 1D array with SIMD acceleration.
///
/// This function computes the square root of the variance using SIMD-accelerated
/// variance computation.
///
/// # Arguments
///
/// * `x` - Input 1D array
/// * `ddof` - Delta degrees of freedom (0 for population std, 1 for sample std)
///
/// # Returns
///
/// * `Some(std)` - The standard deviation of the array
/// * `None` - If the array is empty or has insufficient data (length <= ddof)
///
/// # Performance
///
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
/// - Uses SIMD variance followed by sqrt
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::std_simd;
///
/// let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
/// let std = std_simd(&x.view(), 1).unwrap(); // Sample std (ddof=1)
/// assert!((std - 1.5811388300841898).abs() < 1e-10);
/// ```
#[allow(dead_code)]
pub fn std_simd<F>(x: &ArrayView1<F>, ddof: usize) -> Option<F>
where
    F: Float + SimdUnifiedOps,
{
    variance_simd(x, ddof).map(|var| var.sqrt())
}

/// Compute sum of a 1D array with SIMD acceleration.
///
/// This function uses SIMD operations when beneficial for performance, providing
/// significant speedups for large arrays on supported platforms (AVX2, NEON).
///
/// # Arguments
///
/// * `x` - Input 1D array
///
/// # Returns
///
/// * The sum of all elements in the array (0 for empty array)
///
/// # Performance
///
/// - f32: ~2-3x faster than scalar for arrays > 1000 elements
/// - f64: ~2-3x faster than scalar for arrays > 1000 elements
///
/// # Examples
///
/// ```
/// use scirs2_core::ndarray::array;
/// use scirs2_core::ndarray_ext::reduction::sum_simd;
///
/// let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
/// let sum = sum_simd(&x.view());
/// assert_eq!(sum, 15.0);
/// ```
#[allow(dead_code)]
pub fn sum_simd<F>(x: &ArrayView1<F>) -> F
where
    F: Float + SimdUnifiedOps,
{
    if x.is_empty() {
        return F::zero();
    }

    let optimizer = AutoOptimizer::new();

    if optimizer.should_use_simd(x.len()) {
        F::simd_sum(x)
    } else {
        x.iter().fold(F::zero(), |acc, &val| acc + val)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use ::ndarray::array;

    #[test]
    fn test_argmin_simd_f64_basic() {
        let x = array![3.0f64, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let idx = argmin_simd(&x.view()).unwrap();
        assert_eq!(idx, 1, "Should find first occurrence of minimum");
    }

    #[test]
    fn test_argmin_simd_f32_basic() {
        let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let idx = argmin_simd(&x.view()).unwrap();
        assert_eq!(idx, 1, "Should find first occurrence of minimum");
    }

    #[test]
    fn test_argmax_simd_f64_basic() {
        let x = array![3.0f64, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let idx = argmax_simd(&x.view()).unwrap();
        assert_eq!(idx, 5, "Should find maximum element");
    }

    #[test]
    fn test_argmax_simd_f32_basic() {
        let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let idx = argmax_simd(&x.view()).unwrap();
        assert_eq!(idx, 5, "Should find maximum element");
    }

    #[test]
    fn test_argmin_simd_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(argmin_simd(&x.view()), None);
    }

    #[test]
    fn test_argmax_simd_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(argmax_simd(&x.view()), None);
    }

    #[test]
    fn test_argmin_simd_single() {
        let x = array![42.0f64];
        assert_eq!(argmin_simd(&x.view()), Some(0));
    }

    #[test]
    fn test_argmax_simd_single() {
        let x = array![42.0f64];
        assert_eq!(argmax_simd(&x.view()), Some(0));
    }

    #[test]
    fn test_argmin_simd_negative() {
        let x = array![1.0f64, -5.0, 3.0, -2.0];
        assert_eq!(argmin_simd(&x.view()), Some(1));
    }

    #[test]
    fn test_argmax_simd_negative() {
        let x = array![-10.0f64, -5.0, -20.0, -2.0];
        assert_eq!(argmax_simd(&x.view()), Some(3));
    }

    #[test]
    fn test_argmin_simd_large_f32() {
        let x: Array1<f32> = Array1::from_vec((0..10000).map(|i| (i as f32) % 100.0).collect());
        assert_eq!(argmin_simd(&x.view()), Some(0));
    }

    #[test]
    fn test_argmax_simd_large_f64() {
        let x: Array1<f64> = Array1::from_vec((0..10000).map(|i| (i as f64) % 100.0).collect());
        assert_eq!(argmax_simd(&x.view()), Some(99));
    }

    #[test]
    fn test_argmin_k_basic() {
        let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let indices = argmin_k(&x.view(), 3).unwrap();
        assert_eq!(indices.len(), 3);
        // Should be indices of the 3 smallest values (1.0, 1.0, 2.0)
        // Values at these indices should be <= 2.0
        for &idx in indices.iter() {
            assert!(x[idx] <= 2.0);
        }
    }

    #[test]
    fn test_argmax_k_basic() {
        let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let indices = argmax_k(&x.view(), 3).unwrap();
        assert_eq!(indices.len(), 3);
        // Should be indices of the 3 largest values (9.0, 5.0, 4.0)
        // Values at these indices should be >= 4.0
        for &idx in indices.iter() {
            assert!(x[idx] >= 4.0);
        }
    }

    #[test]
    fn test_argmin_k_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(argmin_k(&x.view(), 3), None);
    }

    #[test]
    fn test_argmax_k_zero() {
        let x = array![1.0f64, 2.0, 3.0];
        assert_eq!(argmax_k(&x.view(), 0), None);
    }

    #[test]
    fn test_argmin_k_exceeds_length() {
        let x = array![1.0f64, 2.0, 3.0];
        let indices = argmin_k(&x.view(), 10).unwrap();
        assert_eq!(indices.len(), 3); // Should cap at array length
    }
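
    // Added checks (not in the original suite): the doc comments state that
    // argmin_k / argmax_k return indices sorted by value; these sketches assert
    // that ordering on concrete data. With tied values (the two 1.0 entries)
    // the relative index order is not guaranteed, so only values are compared
    // at the tied positions.
    #[test]
    fn test_argmin_k_value_ordering() {
        let x = array![3.0f64, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let indices = argmin_k(&x.view(), 3).unwrap();
        // Values at the returned indices should be ascending: 1.0, 1.0, 2.0
        assert_eq!(x[indices[0]], 1.0);
        assert_eq!(x[indices[1]], 1.0);
        assert_eq!(x[indices[2]], 2.0);
        assert_eq!(indices[2], 6); // 2.0 is unique, so its index is determined
    }

    #[test]
    fn test_argmax_k_value_ordering() {
        let x = array![3.0f64, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let indices = argmax_k(&x.view(), 3).unwrap();
        // The 3 largest values (9.0, 5.0, 4.0) are distinct, so the order is fixed
        assert_eq!(indices[0], 5);
        assert_eq!(indices[1], 4);
        assert_eq!(indices[2], 2);
    }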

    // ========================================================================
    // Tests for Phase 29: min_simd and max_simd
    // ========================================================================

    #[test]
    fn test_min_simd_f64_basic() {
        let x = array![3.0f64, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let min_val = min_simd(&x.view()).unwrap();
        assert_eq!(min_val, 1.0);
    }

    #[test]
    fn test_min_simd_f32_basic() {
        let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let min_val = min_simd(&x.view()).unwrap();
        assert_eq!(min_val, 1.0);
    }

    #[test]
    fn test_max_simd_f64_basic() {
        let x = array![3.0f64, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let max_val = max_simd(&x.view()).unwrap();
        assert_eq!(max_val, 9.0);
    }

    #[test]
    fn test_max_simd_f32_basic() {
        let x = array![3.0f32, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0];
        let max_val = max_simd(&x.view()).unwrap();
        assert_eq!(max_val, 9.0);
    }

    #[test]
    fn test_min_simd_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(min_simd(&x.view()), None);
    }

    #[test]
    fn test_max_simd_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(max_simd(&x.view()), None);
    }

    #[test]
    fn test_min_simd_single() {
        let x = array![42.0f64];
        assert_eq!(min_simd(&x.view()), Some(42.0));
    }

    #[test]
    fn test_max_simd_single() {
        let x = array![42.0f64];
        assert_eq!(max_simd(&x.view()), Some(42.0));
    }

    #[test]
    fn test_min_simd_negative() {
        let x = array![1.0f64, -5.0, 3.0, -2.0];
        assert_eq!(min_simd(&x.view()), Some(-5.0));
    }

    #[test]
    fn test_max_simd_negative() {
        let x = array![-10.0f64, -5.0, -20.0, -2.0];
        assert_eq!(max_simd(&x.view()), Some(-2.0));
    }

    #[test]
    fn test_min_simd_large_f32() {
        let x: Array1<f32> = Array1::from_vec((0..10000).map(|i| (i as f32) % 100.0).collect());
        assert_eq!(min_simd(&x.view()), Some(0.0));
    }

    #[test]
    fn test_max_simd_large_f64() {
        let x: Array1<f64> = Array1::from_vec((0..10000).map(|i| (i as f64) % 100.0).collect());
        assert_eq!(max_simd(&x.view()), Some(99.0));
    }

    // ========================================================================
    // Tests for Phase 30: mean_simd, variance_simd, std_simd
    // ========================================================================

    #[test]
    fn test_mean_simd_f64_basic() {
        let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
        let mean = mean_simd(&x.view()).unwrap();
        assert!((mean - 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_mean_simd_f32_basic() {
        let x = array![1.0f32, 2.0, 3.0, 4.0, 5.0];
        let mean = mean_simd(&x.view()).unwrap();
        assert!((mean - 3.0).abs() < 1e-6);
    }

    #[test]
    fn test_mean_simd_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(mean_simd(&x.view()), None);
    }

    #[test]
    fn test_mean_simd_single() {
        let x = array![42.0f64];
        assert_eq!(mean_simd(&x.view()), Some(42.0));
    }

    #[test]
    fn test_mean_simd_negative() {
        let x = array![-2.0f64, -4.0, -6.0, -8.0, -10.0];
        let mean = mean_simd(&x.view()).unwrap();
        assert!((mean - (-6.0)).abs() < 1e-10);
    }

    #[test]
    fn test_mean_simd_large() {
        let x: Array1<f64> = Array1::from_vec((1..=10000).map(|i| i as f64).collect());
        let mean = mean_simd(&x.view()).unwrap();
        let expected = 5000.5; // Mean of 1..=10000
        assert!((mean - expected).abs() < 1e-6);
    }

    #[test]
    fn test_variance_simd_f64_population() {
        let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
        let var = variance_simd(&x.view(), 0).unwrap(); // Population variance
        assert!((var - 2.0).abs() < 1e-10);
    }

    #[test]
    fn test_variance_simd_f64_sample() {
        let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
        let var = variance_simd(&x.view(), 1).unwrap(); // Sample variance
        assert!((var - 2.5).abs() < 1e-10);
    }

    #[test]
    fn test_variance_simd_f32_population() {
        let x = array![1.0f32, 2.0, 3.0, 4.0, 5.0];
        let var = variance_simd(&x.view(), 0).unwrap();
        assert!((var - 2.0).abs() < 1e-6);
    }

    #[test]
    fn test_variance_simd_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(variance_simd(&x.view(), 0), None);
    }

    #[test]
    fn test_variance_simd_insufficient_data() {
        let x = array![42.0f64];
        assert_eq!(variance_simd(&x.view(), 1), None); // n=1, ddof=1 -> insufficient
        assert!(variance_simd(&x.view(), 0).is_some()); // n=1, ddof=0 -> OK (but var=0)
    }

    #[test]
    fn test_variance_simd_constant() {
        let x = array![5.0f64, 5.0, 5.0, 5.0, 5.0];
        let var = variance_simd(&x.view(), 0).unwrap();
        assert!(var.abs() < 1e-10); // Variance of constant array is 0
    }
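
    // Added check (not in the original suite): exercises the ddof-conversion
    // branch of variance_simd for ddof > 1, assuming simd_variance divides the
    // sum of squared deviations by (n - 1) as documented above. For
    // x = [1, 2, 3, 4, 5] the sum of squared deviations is 10, so the expected
    // result for ddof = 2 is 10 / (5 - 2) = 2.5 * 4 / 3.
    #[test]
    fn test_variance_simd_ddof_two() {
        let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
        let var = variance_simd(&x.view(), 2).unwrap();
        let expected = 10.0 / 3.0;
        assert!((var - expected).abs() < 1e-10);
    }

    // Added check: population standard deviation (ddof = 0) should be sqrt(2.0)
    // for the same data, mirroring test_variance_simd_f64_population.
    #[test]
    fn test_std_simd_f64_population() {
        let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
        let std = std_simd(&x.view(), 0).unwrap();
        assert!((std - 2.0f64.sqrt()).abs() < 1e-10);
    }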

    #[test]
    fn test_std_simd_f64_sample() {
        let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
        let std = std_simd(&x.view(), 1).unwrap();
        let expected = 2.5_f64.sqrt(); // sqrt(2.5) ≈ 1.5811388300841898
        assert!((std - expected).abs() < 1e-10);
    }

    #[test]
    fn test_std_simd_f32_sample() {
        let x = array![1.0f32, 2.0, 3.0, 4.0, 5.0];
        let std = std_simd(&x.view(), 1).unwrap();
        let expected = 2.5_f32.sqrt();
        assert!((std - expected).abs() < 1e-6);
    }

    #[test]
    fn test_std_simd_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(std_simd(&x.view(), 0), None);
    }

    #[test]
    fn test_std_simd_constant() {
        let x = array![7.0f64, 7.0, 7.0, 7.0, 7.0];
        let std = std_simd(&x.view(), 0).unwrap();
        assert!(std.abs() < 1e-10); // Std of constant array is 0
    }

    #[test]
    fn test_sum_simd_f64_basic() {
        let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
        let sum = sum_simd(&x.view());
        assert_eq!(sum, 15.0);
    }

    #[test]
    fn test_sum_simd_f32_basic() {
        let x = array![1.0f32, 2.0, 3.0, 4.0, 5.0];
        let sum = sum_simd(&x.view());
        assert_eq!(sum, 15.0);
    }

    #[test]
    fn test_sum_simd_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(sum_simd(&x.view()), 0.0);
    }

    #[test]
    fn test_sum_simd_negative() {
        let x = array![-1.0f64, -2.0, -3.0, -4.0, -5.0];
        let sum = sum_simd(&x.view());
        assert_eq!(sum, -15.0);
    }

    #[test]
    fn test_sum_simd_large() {
        let x: Array1<f64> = Array1::from_vec((1..=10000).map(|i| i as f64).collect());
        let sum = sum_simd(&x.view());
        let expected = 50005000.0; // Sum of 1..=10000
        assert!((sum - expected).abs() < 1e-6);
    }
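
    // Added checks (not in the original suite): cumsum_simd and cumprod_simd
    // currently have no unit tests. These small inputs are expected to take the
    // scalar fallback path; the SIMD path for large inputs is assumed to match
    // the documented prefix-sum / prefix-product definitions.
    #[test]
    fn test_cumsum_simd_basic() {
        let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
        let result = cumsum_simd(&x.view());
        assert_eq!(result.to_vec(), vec![1.0, 3.0, 6.0, 10.0, 15.0]);
    }

    #[test]
    fn test_cumprod_simd_basic() {
        let x = array![1.0f64, 2.0, 3.0, 4.0, 5.0];
        let result = cumprod_simd(&x.view());
        assert_eq!(result.to_vec(), vec![1.0, 2.0, 6.0, 24.0, 120.0]);
    }

    #[test]
    fn test_cumsum_simd_empty() {
        let x: Array1<f64> = array![];
        assert_eq!(cumsum_simd(&x.view()).len(), 0);
    }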
}