sklears_compose/execution/
metrics.rs

//! Metrics collection and analysis for the composable execution engine
//!
//! This module provides comprehensive metrics collection, aggregation, and analysis
//! with SIMD-accelerated operations for high-performance monitoring.

use std::collections::HashMap;
use std::time::{Duration, SystemTime};

use super::resources::ResourceMetrics;
use super::strategies::StrategyMetrics;
use super::tasks::{TaskPriority, TaskResult, TaskStatus, TaskType};

/// Overall execution metrics for the engine
#[derive(Debug, Clone)]
pub struct ExecutionMetrics {
    /// Execution start time
    pub start_time: SystemTime,
    /// Strategy metrics by name
    pub strategy_metrics: HashMap<String, StrategyMetrics>,
    /// Scheduler metrics
    pub scheduler_metrics: SchedulerMetrics,
    /// Resource metrics
    pub resource_metrics: ResourceMetrics,
    /// Error statistics
    pub error_statistics: ErrorStatistics,
}

impl Default for ExecutionMetrics {
    fn default() -> Self {
        Self::new()
    }
}

impl ExecutionMetrics {
    /// Create new execution metrics
    #[must_use]
    pub fn new() -> Self {
        Self {
            start_time: SystemTime::now(),
            strategy_metrics: HashMap::new(),
            scheduler_metrics: SchedulerMetrics::default(),
            resource_metrics: ResourceMetrics::default(),
            error_statistics: ErrorStatistics::default(),
        }
    }

    /// Update strategy metrics
    pub fn update_strategy_metrics(&mut self, strategy_name: String, metrics: StrategyMetrics) {
        self.strategy_metrics.insert(strategy_name, metrics);
    }

    /// Update scheduler metrics
    pub fn update_scheduler_metrics(&mut self, metrics: SchedulerMetrics) {
        self.scheduler_metrics = metrics;
    }

    /// Update resource metrics
    pub fn update_resource_metrics(&mut self, metrics: ResourceMetrics) {
        self.resource_metrics = metrics;
    }

    /// Record error
    pub fn record_error(&mut self, error_type: String) {
        self.error_statistics.total_errors += 1;
        *self
            .error_statistics
            .errors_by_type
            .entry(error_type)
            .or_insert(0) += 1;

        // Update error rate (simple calculation)
        let total_operations = self
            .strategy_metrics
            .values()
            .map(|m| m.tasks_executed)
            .sum::<u64>();

        if total_operations > 0 {
            self.error_statistics.error_rate =
                self.error_statistics.total_errors as f64 / total_operations as f64;
        }
    }

    /// Get execution duration
    #[must_use]
    pub fn execution_duration(&self) -> Duration {
        self.start_time.elapsed().unwrap_or_default()
    }
}

/// Scheduler metrics for performance tracking
#[derive(Debug, Clone)]
pub struct SchedulerMetrics {
    /// Tasks scheduled
    pub tasks_scheduled: u64,
    /// Average scheduling time
    pub avg_scheduling_time: Duration,
    /// Queue length
    pub queue_length: usize,
    /// Scheduling efficiency
    pub efficiency: f64,
}

impl Default for SchedulerMetrics {
    fn default() -> Self {
        Self {
            tasks_scheduled: 0,
            avg_scheduling_time: Duration::ZERO,
            queue_length: 0,
            efficiency: 1.0,
        }
    }
}

/// Error statistics for monitoring failures
#[derive(Debug, Clone)]
pub struct ErrorStatistics {
    /// Total errors
    pub total_errors: u64,
    /// Errors by type
    pub errors_by_type: HashMap<String, u64>,
    /// Error rate
    pub error_rate: f64,
    /// Recovery rate
    pub recovery_rate: f64,
}

impl Default for ErrorStatistics {
    fn default() -> Self {
        Self {
            total_errors: 0,
            errors_by_type: HashMap::new(),
            error_rate: 0.0,
            recovery_rate: 0.0,
        }
    }
}

/// SIMD-accelerated metrics calculations for high-performance resource monitoring
pub mod simd_metrics {
    use super::{
        AggregatedPerformanceMetrics, Duration, PerformanceBounds, ResourceUtilizationStatistics,
        TaskPriority, TaskType,
    };

    /// Aggregate CPU utilization across multiple cores using SIMD operations
    #[must_use]
    pub fn aggregate_cpu_utilization(per_core_utilizations: &[f64]) -> f64 {
        if per_core_utilizations.is_empty() {
            return 0.0;
        }

        // Convert to f32 for SIMD operations (scalar fallback)
        let core_utils_f32: Vec<f32> = per_core_utilizations.iter().map(|&x| x as f32).collect();

        // Use SIMD mean calculation (scalar fallback)
        f64::from(mean_vec(&core_utils_f32))
    }

    /// Calculate resource utilization statistics using SIMD operations
    #[must_use]
    pub fn calculate_resource_statistics(
        cpu_values: &[f64],
        memory_values: &[f64],
        io_values: &[f64],
        network_values: &[f64],
    ) -> ResourceUtilizationStatistics {
        if cpu_values.is_empty() {
            return ResourceUtilizationStatistics::default();
        }

        // Convert all arrays to f32 for SIMD processing (scalar fallback)
        let cpu_f32: Vec<f32> = cpu_values.iter().map(|&x| x as f32).collect();
        let memory_f32: Vec<f32> = memory_values.iter().map(|&x| x as f32).collect();
        let io_f32: Vec<f32> = io_values.iter().map(|&x| x as f32).collect();
        let network_f32: Vec<f32> = network_values.iter().map(|&x| x as f32).collect();

        ResourceUtilizationStatistics {
            cpu_mean: f64::from(mean_vec(&cpu_f32)),
            cpu_variance: f64::from(variance_vec(&cpu_f32)),
            memory_mean: f64::from(mean_vec(&memory_f32)),
            memory_variance: f64::from(variance_vec(&memory_f32)),
            io_mean: f64::from(mean_vec(&io_f32)),
            io_variance: f64::from(variance_vec(&io_f32)),
            network_mean: f64::from(mean_vec(&network_f32)),
            network_variance: f64::from(variance_vec(&network_f32)),
            sample_count: cpu_values.len(),
        }
    }

    /// Aggregate task performance metrics across multiple tasks using SIMD
    #[must_use]
    pub fn aggregate_task_performance(
        throughputs: &[f64],
        latencies: &[f64],
        cache_hit_rates: &[f64],
        error_rates: &[f64],
    ) -> AggregatedPerformanceMetrics {
        if throughputs.is_empty() {
            return AggregatedPerformanceMetrics::default();
        }

        // Convert to f32 for SIMD processing (scalar fallback)
        let throughput_f32: Vec<f32> = throughputs.iter().map(|&x| x as f32).collect();
        let latency_f32: Vec<f32> = latencies.iter().map(|&x| x as f32).collect();
        let cache_f32: Vec<f32> = cache_hit_rates.iter().map(|&x| x as f32).collect();
        let error_f32: Vec<f32> = error_rates.iter().map(|&x| x as f32).collect();

        // Use SIMD operations for aggregation (scalar fallback)
        let total_throughput = f64::from(sum_vec(&throughput_f32));
        let avg_latency = f64::from(mean_vec(&latency_f32));
        let avg_cache_hit_rate = f64::from(mean_vec(&cache_f32));
        let avg_error_rate = f64::from(mean_vec(&error_f32));

        AggregatedPerformanceMetrics {
            total_throughput,
            average_latency: avg_latency,
            average_cache_hit_rate: avg_cache_hit_rate,
            average_error_rate: avg_error_rate,
            task_count: throughputs.len(),
        }
    }

    /// Calculate weighted resource allocation using SIMD dot product
    #[must_use]
    pub fn calculate_weighted_allocation(
        resource_demands: &[f64],
        priority_weights: &[f64],
    ) -> f64 {
        if resource_demands.len() != priority_weights.len() || resource_demands.is_empty() {
            return 0.0;
        }

        // Convert to f32 for SIMD processing (scalar fallback)
        let demands_f32: Vec<f32> = resource_demands.iter().map(|&x| x as f32).collect();
        let weights_f32: Vec<f32> = priority_weights.iter().map(|&x| x as f32).collect();

        // Use SIMD dot product for weighted sum (scalar fallback)
        f64::from(dot_product(&demands_f32, &weights_f32))
    }

    /// Vectorized min-max analysis for performance bounds
    #[must_use]
    pub fn analyze_performance_bounds(values: &[f64]) -> PerformanceBounds {
        if values.is_empty() {
            return PerformanceBounds::default();
        }

        // Convert to f32 for SIMD processing (scalar fallback)
        let values_f32: Vec<f32> = values.iter().map(|&x| x as f32).collect();

        // Use SIMD min-max operations (scalar fallback)
        let (min_val, max_val) = min_max_vec(&values_f32);

        PerformanceBounds {
            minimum: f64::from(min_val),
            maximum: f64::from(max_val),
            range: f64::from(max_val - min_val),
            mean: f64::from(mean_vec(&values_f32)),
        }
    }

    /// Batch process task priorities using SIMD operations for efficient scheduling
    #[must_use]
    pub fn optimize_batch_scheduling(
        task_priorities: &[TaskPriority],
        estimated_durations: &[Duration],
        resource_requirements: &[f64],
    ) -> Vec<usize> {
        if task_priorities.is_empty() {
            return Vec::new();
        }

        // Convert priorities to numeric values for SIMD processing
        let priority_values: Vec<f32> = task_priorities
            .iter()
            .map(|p| match p {
                TaskPriority::Critical => 4.0,
                TaskPriority::High => 3.0,
                TaskPriority::Normal => 2.0,
                TaskPriority::Low => 1.0,
            })
            .collect();

        // Convert durations to seconds as f32
        let duration_values: Vec<f32> = estimated_durations
            .iter()
            .map(std::time::Duration::as_secs_f32)
            .collect();

        // Ensure resource_requirements is in f32 format
        let resource_f32: Vec<f32> = resource_requirements.iter().map(|&x| x as f32).collect();

        // Calculate weighted priority using SIMD dot product (scalar fallback):
        // tasks with higher priority and lower duration/resource usage get higher scores
        let mut duration_weights = vec![1.0f32; duration_values.len()];
        divide_vec_inplace(&mut duration_weights, &duration_values);

        let mut resource_weights = vec![1.0f32; resource_f32.len()];
        divide_vec_inplace(&mut resource_weights, &resource_f32);

        // Combine all factors using SIMD operations (scalar fallback)
        let mut priority_scores = vec![0.0f32; priority_values.len()];
        multiply_vec(&priority_values, &duration_weights, &mut priority_scores);
        multiply_vec_inplace(&mut priority_scores, &resource_weights);

        // Sort indices by priority scores (descending order)
        let mut indexed_scores: Vec<(usize, f32)> = priority_scores
            .iter()
            .enumerate()
            .map(|(i, &score)| (i, score))
            .collect();

        indexed_scores.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));

        indexed_scores.into_iter().map(|(i, _)| i).collect()
    }

    /// Vectorized load balancing calculation for distributed execution
    #[must_use]
    pub fn calculate_load_distribution(
        node_capacities: &[f64],
        current_loads: &[f64],
        _task_weights: &[f64],
    ) -> Vec<f64> {
        if node_capacities.is_empty() || current_loads.is_empty() {
            return Vec::new();
        }

        // Convert to f32 for SIMD processing (scalar fallback)
        let capacities_f32: Vec<f32> = node_capacities.iter().map(|&x| x as f32).collect();
        let loads_f32: Vec<f32> = current_loads.iter().map(|&x| x as f32).collect();

        // Calculate available capacity using SIMD subtraction (scalar fallback)
        let mut available_capacity = vec![0.0f32; capacities_f32.len()];
        subtract_vec(&capacities_f32, &loads_f32, &mut available_capacity);

        // Normalize available capacities
        let total_available = sum_vec(&available_capacity);
        if total_available > 0.0 {
            let mut distribution = vec![0.0f32; available_capacity.len()];
            let total_vec = vec![total_available; available_capacity.len()];
            divide_vec(&available_capacity, &total_vec, &mut distribution);
            distribution.into_iter().map(f64::from).collect()
        } else {
            vec![0.0; node_capacities.len()]
        }
    }

    /// SIMD-accelerated task clustering for batch optimization
    #[must_use]
    pub fn cluster_tasks_for_batch_processing(
        task_types: &[TaskType],
        resource_requirements: &[f64],
        similarity_threshold: f64,
    ) -> Vec<Vec<usize>> {
        if task_types.is_empty() {
            return Vec::new();
        }

        let mut clusters = Vec::new();
        let mut assigned = vec![false; task_types.len()];

        for i in 0..task_types.len() {
            if assigned[i] {
                continue;
            }

            let mut cluster = vec![i];
            assigned[i] = true;

            // Find similar tasks using SIMD operations
            for j in (i + 1)..task_types.len() {
                if assigned[j] {
                    continue;
                }

                // Check type similarity
                let type_similar = matches!(
                    (&task_types[i], &task_types[j]),
                    (TaskType::Computation, TaskType::Computation)
                        | (TaskType::IoOperation, TaskType::IoOperation)
                        | (TaskType::NetworkOperation, TaskType::NetworkOperation)
                        | (TaskType::Custom, TaskType::Custom)
                );

                // Check resource similarity using SIMD (scalar fallback)
                if type_similar {
                    let resource_diff = (resource_requirements[i] - resource_requirements[j]).abs();
                    let resource_avg = (resource_requirements[i] + resource_requirements[j]) / 2.0;
                    let similarity = if resource_avg > 0.0 {
                        1.0 - (resource_diff / resource_avg)
                    } else {
                        1.0
                    };

                    if similarity >= similarity_threshold {
                        cluster.push(j);
                        assigned[j] = true;
                    }
                }
            }

            clusters.push(cluster);
        }

        clusters
    }

    // Scalar fallback implementations for SIMD operations
    // These would be replaced with actual SIMD implementations in production

    /// Calculate mean of vector (scalar fallback)
    #[must_use]
    pub fn mean_vec(values: &[f32]) -> f32 {
        if values.is_empty() {
            return 0.0;
        }
        values.iter().sum::<f32>() / values.len() as f32
    }

    /// Calculate variance of vector (scalar fallback)
    #[must_use]
    pub fn variance_vec(values: &[f32]) -> f32 {
        if values.len() <= 1 {
            return 0.0;
        }
        let mean = mean_vec(values);
        let sum_sq_diff: f32 = values.iter().map(|&x| (x - mean).powi(2)).sum();
        sum_sq_diff / (values.len() - 1) as f32
    }

    /// Calculate sum of vector (scalar fallback)
    #[must_use]
    pub fn sum_vec(values: &[f32]) -> f32 {
        values.iter().sum()
    }

    /// Calculate dot product (scalar fallback)
    #[must_use]
    pub fn dot_product(a: &[f32], b: &[f32]) -> f32 {
        if a.len() != b.len() {
            return 0.0;
        }
        a.iter().zip(b.iter()).map(|(&x, &y)| x * y).sum()
    }

    /// Calculate min and max of vector (scalar fallback)
    #[must_use]
    pub fn min_max_vec(values: &[f32]) -> (f32, f32) {
        if values.is_empty() {
            return (0.0, 0.0);
        }
        let min_val = values.iter().fold(f32::INFINITY, |acc, &x| acc.min(x));
        let max_val = values.iter().fold(f32::NEG_INFINITY, |acc, &x| acc.max(x));
        (min_val, max_val)
    }

    /// Element-wise division with in-place mutation (scalar fallback)
    pub fn divide_vec_inplace(numerator: &mut [f32], denominator: &[f32]) {
        for (n, &d) in numerator.iter_mut().zip(denominator.iter()) {
            if d != 0.0 {
                *n /= d;
            }
        }
    }

    /// Element-wise multiplication (scalar fallback)
    pub fn multiply_vec(a: &[f32], b: &[f32], result: &mut [f32]) {
        for ((r, &a_val), &b_val) in result.iter_mut().zip(a.iter()).zip(b.iter()) {
            *r = a_val * b_val;
        }
    }

    /// Element-wise multiplication with in-place mutation (scalar fallback)
    pub fn multiply_vec_inplace(a: &mut [f32], b: &[f32]) {
        for (a_val, &b_val) in a.iter_mut().zip(b.iter()) {
            *a_val *= b_val;
        }
    }

    /// Element-wise subtraction (scalar fallback)
    pub fn subtract_vec(a: &[f32], b: &[f32], result: &mut [f32]) {
        for ((r, &a_val), &b_val) in result.iter_mut().zip(a.iter()).zip(b.iter()) {
            *r = a_val - b_val;
        }
    }

    /// Element-wise division (scalar fallback)
    pub fn divide_vec(numerator: &[f32], denominator: &[f32], result: &mut [f32]) {
        for ((r, &n), &d) in result
            .iter_mut()
            .zip(numerator.iter())
            .zip(denominator.iter())
        {
            *r = if d == 0.0 { 0.0 } else { n / d };
        }
    }
}

/// Resource utilization statistics computed using SIMD operations
#[derive(Debug, Clone)]
pub struct ResourceUtilizationStatistics {
    /// CPU mean utilization
    pub cpu_mean: f64,
    /// CPU utilization variance
    pub cpu_variance: f64,
    /// Memory mean utilization
    pub memory_mean: f64,
    /// Memory utilization variance
    pub memory_variance: f64,
    /// I/O mean utilization
    pub io_mean: f64,
    /// I/O utilization variance
    pub io_variance: f64,
    /// Network mean utilization
    pub network_mean: f64,
    /// Network utilization variance
    pub network_variance: f64,
    /// Number of samples
    pub sample_count: usize,
}

impl Default for ResourceUtilizationStatistics {
    fn default() -> Self {
        Self {
            cpu_mean: 0.0,
            cpu_variance: 0.0,
            memory_mean: 0.0,
            memory_variance: 0.0,
            io_mean: 0.0,
            io_variance: 0.0,
            network_mean: 0.0,
            network_variance: 0.0,
            sample_count: 0,
        }
    }
}

/// Aggregated performance metrics computed using SIMD operations
#[derive(Debug, Clone)]
pub struct AggregatedPerformanceMetrics {
    /// Total throughput across all tasks
    pub total_throughput: f64,
    /// Average latency
    pub average_latency: f64,
    /// Average cache hit rate
    pub average_cache_hit_rate: f64,
    /// Average error rate
    pub average_error_rate: f64,
    /// Number of tasks analyzed
    pub task_count: usize,
}

impl Default for AggregatedPerformanceMetrics {
    fn default() -> Self {
        Self {
            total_throughput: 0.0,
            average_latency: 0.0,
            average_cache_hit_rate: 0.0,
            average_error_rate: 0.0,
            task_count: 0,
        }
    }
}

/// Performance bounds analysis using SIMD operations
#[derive(Debug, Clone)]
pub struct PerformanceBounds {
    /// Minimum performance value
    pub minimum: f64,
    /// Maximum performance value
    pub maximum: f64,
    /// Performance range (max - min)
    pub range: f64,
    /// Mean performance value
    pub mean: f64,
}

impl Default for PerformanceBounds {
    fn default() -> Self {
        Self {
            minimum: 0.0,
            maximum: 0.0,
            range: 0.0,
            mean: 0.0,
        }
    }
}

/// Metrics collector for gathering execution metrics
pub struct MetricsCollector {
    /// Collected metrics
    metrics: ExecutionMetrics,
    /// Collection interval
    interval: Duration,
    /// Enable detailed collection
    detailed: bool,
}

impl MetricsCollector {
    /// Create a new metrics collector
    #[must_use]
    pub fn new(interval: Duration, detailed: bool) -> Self {
        Self {
            metrics: ExecutionMetrics::new(),
            interval,
            detailed,
        }
    }

    /// Collect current metrics
    pub fn collect(&mut self) -> ExecutionMetrics {
        // In a real implementation, this would gather metrics from various sources
        self.metrics.clone()
    }

    /// Record task completion
    pub fn record_task_completion(&mut self, task_result: &TaskResult) {
        if let Some(strategy) = self.metrics.strategy_metrics.get_mut("default") {
            strategy.tasks_executed += 1;
            match task_result.status {
                TaskStatus::Completed => {
                    // Update success metrics
                }
                TaskStatus::Failed => {
                    self.metrics
                        .record_error("task_execution_error".to_string());
                }
                _ => {}
            }
        }
    }

    /// Update configuration
    pub fn update_config(&mut self, interval: Duration, detailed: bool) {
        self.interval = interval;
        self.detailed = detailed;
    }

    /// Get metrics interval
    #[must_use]
    pub fn get_interval(&self) -> Duration {
        self.interval
    }

    /// Check if detailed collection is enabled
    #[must_use]
    pub fn is_detailed(&self) -> bool {
        self.detailed
    }
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_execution_metrics_creation() {
        let metrics = ExecutionMetrics::new();
        assert!(metrics.start_time <= SystemTime::now());
        assert_eq!(metrics.strategy_metrics.len(), 0);
        assert_eq!(metrics.error_statistics.total_errors, 0);
    }
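
    // Sanity check for `record_error` with illustrative error labels; with no
    // strategy metrics registered, the error rate keeps its default of 0.0.
    #[test]
    fn test_record_error_statistics() {
        let mut metrics = ExecutionMetrics::new();

        metrics.record_error("timeout".to_string());
        metrics.record_error("timeout".to_string());
        metrics.record_error("io_failure".to_string());

        assert_eq!(metrics.error_statistics.total_errors, 3);
        assert_eq!(metrics.error_statistics.errors_by_type["timeout"], 2);
        assert_eq!(metrics.error_statistics.errors_by_type["io_failure"], 1);
        // No tasks have been executed, so the rate is not recalculated.
        assert_eq!(metrics.error_statistics.error_rate, 0.0);
    }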

    #[test]
    fn test_simd_metrics_cpu_aggregation() {
        let per_core_utils = vec![75.0, 80.0, 85.0, 70.0, 90.0, 88.0, 72.0, 78.0];
        let avg_util = simd_metrics::aggregate_cpu_utilization(&per_core_utils);

        // Should be close to the arithmetic mean (79.75)
        assert!((avg_util - 79.75).abs() < 0.1);
    }
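
    // The aggregation helpers above fall back to zeroed or empty results on
    // empty input; a quick illustrative check of those paths.
    #[test]
    fn test_simd_metrics_empty_inputs() {
        assert_eq!(simd_metrics::aggregate_cpu_utilization(&[]), 0.0);

        let stats = simd_metrics::calculate_resource_statistics(&[], &[], &[], &[]);
        assert_eq!(stats.sample_count, 0);

        let bounds = simd_metrics::analyze_performance_bounds(&[]);
        assert_eq!(bounds.minimum, 0.0);
        assert_eq!(bounds.maximum, 0.0);

        assert!(simd_metrics::optimize_batch_scheduling(&[], &[], &[]).is_empty());
        assert!(simd_metrics::calculate_load_distribution(&[], &[], &[]).is_empty());
    }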

    #[test]
    fn test_simd_metrics_resource_statistics() {
        let cpu_values = vec![75.0, 80.0, 85.0, 70.0];
        let memory_values = vec![60.0, 65.0, 70.0, 55.0];
        let io_values = vec![40.0, 45.0, 35.0, 50.0];
        let network_values = vec![20.0, 25.0, 30.0, 15.0];

        let stats = simd_metrics::calculate_resource_statistics(
            &cpu_values,
            &memory_values,
            &io_values,
            &network_values,
        );

        assert_eq!(stats.sample_count, 4);
        assert!((stats.cpu_mean - 77.5).abs() < 0.1);
        assert!((stats.memory_mean - 62.5).abs() < 0.1);
        assert!(stats.cpu_variance > 0.0); // Should have some variance
    }
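
    // Illustrative aggregation of per-task metrics; the expected values are the
    // plain sum and means of the inputs (computed in f32, hence the tolerances).
    #[test]
    fn test_simd_task_performance_aggregation() {
        let throughputs = vec![100.0, 200.0, 300.0];
        let latencies = vec![10.0, 20.0, 30.0];
        let cache_hit_rates = vec![0.9, 0.8, 0.7];
        let error_rates = vec![0.01, 0.02, 0.03];

        let aggregated = simd_metrics::aggregate_task_performance(
            &throughputs,
            &latencies,
            &cache_hit_rates,
            &error_rates,
        );

        assert_eq!(aggregated.task_count, 3);
        assert!((aggregated.total_throughput - 600.0).abs() < 0.1);
        assert!((aggregated.average_latency - 20.0).abs() < 0.1);
        assert!((aggregated.average_cache_hit_rate - 0.8).abs() < 0.01);
        assert!((aggregated.average_error_rate - 0.02).abs() < 0.01);
    }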

    #[test]
    fn test_simd_batch_scheduling_optimization() {
        let priorities = vec![
            TaskPriority::High,
            TaskPriority::Low,
            TaskPriority::Critical,
            TaskPriority::Normal,
        ];
        let durations = vec![
            Duration::from_secs(10),
            Duration::from_secs(60),
            Duration::from_secs(5),
            Duration::from_secs(30),
        ];
        let resources = vec![100.0, 500.0, 50.0, 200.0];

        let optimized_order =
            simd_metrics::optimize_batch_scheduling(&priorities, &durations, &resources);

        // Critical priority task with short duration should be first
        assert_eq!(optimized_order[0], 2); // Critical task with 5 second duration
        assert_eq!(optimized_order.len(), 4);
    }
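
    // Weighted allocation is the dot product of demands and priority weights;
    // the inputs are illustrative: 1*4 + 2*2 + 3*1 = 11.
    #[test]
    fn test_simd_weighted_allocation() {
        let demands = vec![1.0, 2.0, 3.0];
        let weights = vec![4.0, 2.0, 1.0];

        let allocation = simd_metrics::calculate_weighted_allocation(&demands, &weights);
        assert!((allocation - 11.0).abs() < 0.1);

        // Mismatched input lengths fall back to 0.0.
        let truncated = simd_metrics::calculate_weighted_allocation(&demands, &weights[..2]);
        assert_eq!(truncated, 0.0);
    }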

    #[test]
    fn test_simd_load_distribution() {
        let capacities = vec![100.0, 150.0, 120.0, 80.0];
        let current_loads = vec![20.0, 30.0, 40.0, 10.0];
        let task_weights = vec![10.0, 15.0, 12.0, 8.0];

        let distribution =
            simd_metrics::calculate_load_distribution(&capacities, &current_loads, &task_weights);

        assert_eq!(distribution.len(), 4);

        // Sum of distribution should be approximately 1.0 (normalized)
        let total: f64 = distribution.iter().sum();
        assert!((total - 1.0).abs() < 0.1);
    }
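
    // Illustrative clustering: two computation tasks with similar resource needs
    // end up in one cluster, the I/O task in another (threshold chosen loosely).
    #[test]
    fn test_simd_task_clustering() {
        let task_types = vec![
            TaskType::Computation,
            TaskType::Computation,
            TaskType::IoOperation,
        ];
        let resource_requirements = vec![10.0, 11.0, 10.0];

        let clusters = simd_metrics::cluster_tasks_for_batch_processing(
            &task_types,
            &resource_requirements,
            0.8,
        );

        assert_eq!(clusters.len(), 2);
        assert_eq!(clusters[0], vec![0, 1]);
        assert_eq!(clusters[1], vec![2]);
    }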

    #[test]
    fn test_simd_performance_bounds_analysis() {
        let performance_values = vec![95.5, 87.2, 91.8, 89.4, 93.1, 88.7, 94.3];

        let bounds = simd_metrics::analyze_performance_bounds(&performance_values);

        assert!(bounds.minimum < bounds.maximum);
        assert!(bounds.range > 0.0);
        assert!(bounds.mean > 0.0);
        assert!(bounds.mean >= bounds.minimum && bounds.mean <= bounds.maximum);
    }
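
    // Direct checks of the scalar fallback helpers against hand-computed values.
    #[test]
    fn test_scalar_fallback_helpers() {
        let values = vec![1.0f32, 2.0, 3.0, 4.0];

        assert!((simd_metrics::mean_vec(&values) - 2.5).abs() < 1e-6);
        // Sample variance of 1..4 is 5/3.
        assert!((simd_metrics::variance_vec(&values) - 5.0 / 3.0).abs() < 1e-5);
        assert!((simd_metrics::sum_vec(&values) - 10.0).abs() < 1e-6);
        assert_eq!(simd_metrics::min_max_vec(&values), (1.0, 4.0));

        let weights = vec![1.0f32, 0.0, 1.0, 0.0];
        assert!((simd_metrics::dot_product(&values, &weights) - 4.0).abs() < 1e-6);

        // Empty and mismatched inputs fall back to zero.
        assert_eq!(simd_metrics::mean_vec(&[]), 0.0);
        assert_eq!(simd_metrics::dot_product(&values, &weights[..2]), 0.0);
    }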

    #[test]
    fn test_metrics_collector() {
        let mut collector = MetricsCollector::new(Duration::from_secs(1), false);

        assert_eq!(collector.get_interval(), Duration::from_secs(1));
        assert!(!collector.is_detailed());

        let metrics = collector.collect();
        assert!(metrics.start_time <= SystemTime::now());

        collector.update_config(Duration::from_secs(5), true);
        assert_eq!(collector.get_interval(), Duration::from_secs(5));
        assert!(collector.is_detailed());
    }
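
    // Division helpers guard against zero denominators: `divide_vec` writes 0.0,
    // while `divide_vec_inplace` leaves the numerator untouched.
    #[test]
    fn test_division_helpers_zero_denominator() {
        let numerators = vec![10.0f32, 20.0, 30.0];
        let denominators = vec![2.0f32, 0.0, 5.0];

        let mut result = vec![0.0f32; 3];
        simd_metrics::divide_vec(&numerators, &denominators, &mut result);
        assert_eq!(result, vec![5.0, 0.0, 6.0]);

        let mut in_place = numerators.clone();
        simd_metrics::divide_vec_inplace(&mut in_place, &denominators);
        assert_eq!(in_place, vec![5.0, 20.0, 6.0]);
    }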
}