scirs2_core/performance_optimization.rs

1//! Performance optimization utilities for critical paths
2//!
3//! This module provides tools and utilities for optimizing performance-critical
4//! sections of scirs2-core based on profiling data. Enhanced with AI-driven
5//! adaptive optimization and ML-based performance modeling for Advanced mode.
6//!
7//! # Advanced Mode Features
8//!
9//! - **AI-Driven Strategy Selection**: Machine learning models predict optimal strategies
10//! - **Neural Performance Modeling**: Deep learning for performance prediction
11//! - **Adaptive Hyperparameter Tuning**: Automatic optimization parameter adjustment
12//! - **Real-time Performance Learning**: Continuous improvement from execution data
//! - **Multi-Objective Optimization**: Balance performance, memory, and energy efficiency
14//! - **Context-Aware Optimization**: Environment and workload-specific adaptations
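//!
//! # Example
//!
//! A minimal usage sketch (illustrative only; it assumes this module is
//! publicly reachable as `scirs2_core::performance_optimization`):
//!
//! ```ignore
//! use scirs2_core::performance_optimization::AdaptiveOptimizer;
//!
//! let mut optimizer = AdaptiveOptimizer::new();
//!
//! // Ask for a strategy for a 50k-element element-wise operation.
//! let strategy = optimizer.select_for_operation("vector_add", 50_000);
//!
//! // ... run and time the operation, then feed the measurement back so the
//! // thresholds and strategy weights can adapt (duration is in nanoseconds).
//! optimizer.record_performance("vector_add", 50_000, strategy, 1_200_000);
//! ```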
15
16use std::sync::atomic::{AtomicUsize, Ordering};
17
18/// Cache locality hint for prefetch operations
19#[allow(dead_code)]
20#[derive(Debug, Clone, Copy, PartialEq, Eq)]
21pub enum Locality {
22    /// High locality - data likely to be reused soon (L1 cache)
23    High,
24    /// Medium locality - data may be reused (L2 cache)
25    Medium,
26    /// Low locality - data unlikely to be reused soon (L3 cache)
27    Low,
28    /// No temporal locality - streaming access (bypass cache)
29    None,
30}
31
32/// Performance hints for critical code paths
33pub struct PerformanceHints;
34
35impl PerformanceHints {
36    /// Hint that a branch is likely to be taken
37    ///
    /// Note: this is a best-effort hint. The unstable `likely` intrinsic was
    /// removed for Beta 1 stability; on x86_64 an empty inline-asm marker is
    /// emitted instead, and the condition is always returned unchanged.
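    ///
    /// # Example
    ///
    /// Illustrative sketch; the hint is advisory and the return value is always
    /// the original condition:
    ///
    /// ```ignore
    /// use scirs2_core::performance_optimization::PerformanceHints;
    ///
    /// fn checked_div(a: f64, b: f64) -> Option<f64> {
    ///     if PerformanceHints::likely(b != 0.0) {
    ///         Some(a / b)
    ///     } else {
    ///         None
    ///     }
    /// }
    /// assert_eq!(checked_div(6.0, 2.0), Some(3.0));
    /// ```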
40    #[inline(always)]
41    pub fn likely(cond: bool) -> bool {
42        // Use platform-specific assembly hints where available
43        #[cfg(target_arch = "x86_64")]
44        {
45            if cond {
46                // x86_64 specific: use assembly hint for branch prediction
47                unsafe {
48                    std::arch::asm!("# likely branch", options(nomem, nostack));
49                }
50            }
51        }
52        cond
53    }
54
55    /// Hint that a branch is unlikely to be taken
56    ///
    /// Note: this is a best-effort hint. The unstable `unlikely` intrinsic was
    /// removed for Beta 1 stability; on x86_64 an empty inline-asm marker is
    /// emitted instead, and the condition is always returned unchanged.
59    #[inline(always)]
60    pub fn unlikely(cond: bool) -> bool {
61        // Use platform-specific assembly hints where available
62        #[cfg(target_arch = "x86_64")]
63        {
64            if !cond {
65                // x86_64 specific: use assembly hint for branch prediction
66                unsafe {
67                    std::arch::asm!("# unlikely branch", options(nomem, nostack));
68                }
69            }
70        }
71        cond
72    }
73
74    /// Prefetch data for read access
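    ///
    /// # Example
    ///
    /// Illustrative sketch: prefetch a few elements ahead while streaming over a
    /// slice (the look-ahead distance of 8 is an arbitrary choice for the example):
    ///
    /// ```ignore
    /// use scirs2_core::performance_optimization::PerformanceHints;
    ///
    /// let data = vec![1.0f64; 1024];
    /// let mut sum = 0.0;
    /// for i in 0..data.len() {
    ///     if i + 8 < data.len() {
    ///         PerformanceHints::prefetch_read(&data[i + 8]);
    ///     }
    ///     sum += data[i];
    /// }
    /// assert_eq!(sum, 1024.0);
    /// ```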
75    #[inline(always)]
76    pub fn prefetch_read<T>(data: &T) {
77        let ptr = data as *const T as *const u8;
78
79        #[cfg(target_arch = "x86_64")]
80        {
81            unsafe {
82                // Prefetch into all cache levels for read
83                std::arch::asm!(
84                    "prefetcht0 [{}]",
85                    in(reg) ptr,
86                    options(readonly, nostack)
87                );
88            }
89        }
90        #[cfg(target_arch = "aarch64")]
91        {
92            unsafe {
93                // ARMv8 prefetch for load
94                std::arch::asm!(
95                    "prfm pldl1keep, [{}]",
96                    in(reg) ptr,
97                    options(readonly, nostack)
98                );
99            }
100        }
101        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
102        {
103            // Fallback: use black_box to prevent optimization but don't prefetch
104            std::hint::black_box(data);
105        }
106    }
107
108    /// Prefetch data for write access
109    #[inline(always)]
110    pub fn prefetch_write<T>(data: &mut T) {
111        let ptr = data as *mut T as *mut u8;
112
113        #[cfg(target_arch = "x86_64")]
114        {
115            unsafe {
                // prefetchw is not assumed to be available, so prefetch for read into all cache levels instead
117                std::arch::asm!(
118                    "prefetcht0 [{}]",
119                    in(reg) ptr,
120                    options(nostack)
121                );
122            }
123        }
124        #[cfg(target_arch = "aarch64")]
125        {
126            unsafe {
127                // ARMv8 prefetch for store
128                std::arch::asm!(
129                    "prfm pstl1keep, [{}]",
130                    in(reg) ptr,
131                    options(nostack)
132                );
133            }
134        }
135        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
136        {
137            // Fallback: use black_box to prevent optimization but don't prefetch
138            std::hint::black_box(data);
139        }
140    }
141
142    /// Advanced prefetch with locality hint
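    ///
    /// # Example
    ///
    /// Illustrative sketch showing how the locality hint expresses intent:
    ///
    /// ```ignore
    /// use scirs2_core::performance_optimization::{Locality, PerformanceHints};
    ///
    /// let hot = 1.0f64;      // reused many times soon
    /// let streamed = 2.0f64; // touched once, then discarded
    ///
    /// PerformanceHints::prefetch_with_locality(&hot, Locality::High);
    /// PerformanceHints::prefetch_with_locality(&streamed, Locality::None);
    /// ```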
143    #[inline(always)]
144    pub fn prefetch_with_locality<T>(data: &T, locality: Locality) {
145        let ptr = data as *const T as *const u8;
146
147        #[cfg(target_arch = "x86_64")]
148        {
149            unsafe {
150                match locality {
151                    Locality::High => {
152                        // Prefetch into L1 cache
153                        std::arch::asm!(
154                            "prefetcht0 [{}]",
155                            in(reg) ptr,
156                            options(readonly, nostack)
157                        );
158                    }
159                    Locality::Medium => {
160                        // Prefetch into L2 cache
161                        std::arch::asm!(
162                            "prefetcht1 [{}]",
163                            in(reg) ptr,
164                            options(readonly, nostack)
165                        );
166                    }
167                    Locality::Low => {
168                        // Prefetch into L3 cache
169                        std::arch::asm!(
170                            "prefetcht2 [{}]",
171                            in(reg) ptr,
172                            options(readonly, nostack)
173                        );
174                    }
175                    Locality::None => {
176                        // Non-temporal prefetch
177                        std::arch::asm!(
178                            "prefetchnta [{}]",
179                            in(reg) ptr,
180                            options(readonly, nostack)
181                        );
182                    }
183                }
184            }
185        }
186        #[cfg(target_arch = "aarch64")]
187        {
188            unsafe {
189                match locality {
190                    Locality::High => {
191                        std::arch::asm!(
192                            "prfm pldl1keep, [{}]",
193                            in(reg) ptr,
194                            options(readonly, nostack)
195                        );
196                    }
197                    Locality::Medium => {
198                        std::arch::asm!(
199                            "prfm pldl2keep, [{}]",
200                            in(reg) ptr,
201                            options(readonly, nostack)
202                        );
203                    }
204                    Locality::Low => {
205                        std::arch::asm!(
206                            "prfm pldl3keep, [{}]",
207                            in(reg) ptr,
208                            options(readonly, nostack)
209                        );
210                    }
211                    Locality::None => {
212                        std::arch::asm!(
213                            "prfm pldl1strm, [{}]",
214                            in(reg) ptr,
215                            options(readonly, nostack)
216                        );
217                    }
218                }
219            }
220        }
221        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
222        {
223            std::hint::black_box(data);
224        }
225    }
226
227    /// Memory fence for synchronization
228    #[inline(always)]
229    pub fn memory_fence() {
230        #[cfg(target_arch = "x86_64")]
231        {
232            unsafe {
233                std::arch::asm!("mfence", options(nostack));
234            }
235        }
236        #[cfg(target_arch = "aarch64")]
237        {
238            unsafe {
239                std::arch::asm!("dmb sy", options(nostack));
240            }
241        }
242        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
243        {
244            std::sync::atomic::fence(std::sync::atomic::Ordering::SeqCst);
245        }
246    }
247
248    /// Cache line flush for explicit cache management
249    #[inline(always)]
250    pub fn flush_cache_line<T>(data: &T) {
251        let ptr = data as *const T as *const u8;
252
253        // Note: Cache line flushing is arch-specific and may not be portable
254        // For now, use a memory barrier as a fallback
255        #[cfg(target_arch = "x86_64")]
256        {
            // clflush would actually evict the line, but it requires extra
            // feature gating; for simplicity only a full memory fence is emitted here.
259            unsafe {
260                std::arch::asm!("mfence", options(nostack, nomem));
261            }
262        }
263        #[cfg(target_arch = "aarch64")]
264        {
265            unsafe {
266                // ARMv8 data cache clean and invalidate
267                std::arch::asm!(
268                    "dc civac, {}",
269                    in(reg) ptr,
270                    options(nostack)
271                );
272            }
273        }
274        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
275        {
276            // No specific flush available, just prevent optimization
277            std::hint::black_box(data);
278        }
279    }
280
281    /// Optimized memory copy with cache awareness
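    ///
    /// # Example
    ///
    /// Illustrative sketch; both slices must have the same length or the call panics:
    ///
    /// ```ignore
    /// use scirs2_core::performance_optimization::PerformanceHints;
    ///
    /// let src = vec![1.0f64; 256];
    /// let mut dst = vec![0.0f64; 256];
    /// PerformanceHints::cache_aware_copy(&src, &mut dst);
    /// assert_eq!(src, dst);
    /// ```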
282    #[inline]
283    pub fn cache_aware_copy<T: Copy>(src: &[T], dst: &mut [T]) {
284        assert_eq!(src.len(), dst.len());
285
286        if std::mem::size_of_val(src) > 64 * 1024 {
287            // Large copy: use non-temporal stores to avoid cache pollution
288            #[cfg(target_arch = "x86_64")]
289            {
290                unsafe {
291                    let src_ptr = src.as_ptr() as *const u8;
292                    let dst_ptr = dst.as_mut_ptr() as *mut u8;
293                    let len = std::mem::size_of_val(src);
294
                    // Bulk byte copy; a truly non-temporal path would require
                    // explicit non-temporal store intrinsics.
                    std::ptr::copy_nonoverlapping(src_ptr, dst_ptr, len);

                    // Follow with a store fence.
                    std::arch::asm!("sfence", options(nostack));
300                }
301                return;
302            }
303        }
304
305        // Regular copy for smaller data or unsupported architectures
306        dst.copy_from_slice(src);
307    }
308
309    /// Optimized memory set with cache awareness
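    ///
    /// # Example
    ///
    /// Illustrative sketch:
    ///
    /// ```ignore
    /// use scirs2_core::performance_optimization::PerformanceHints;
    ///
    /// let mut buf = vec![0.0f64; 256];
    /// PerformanceHints::cache_aware_memset(&mut buf, 5.0);
    /// assert!(buf.iter().all(|&x| x == 5.0));
    /// ```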
310    #[inline]
311    pub fn cache_aware_memset<T: Copy>(dst: &mut [T], value: T) {
312        if std::mem::size_of_val(dst) > 32 * 1024 {
313            // Large memset: use vectorized operations where possible
314            #[cfg(all(feature = "simd", target_arch = "x86_64"))]
315            {
316                // For large arrays, try to use SIMD if T is appropriate
317                if std::mem::size_of::<T>() == 8 {
                    // 64-bit elements: unroll by two so the compiler can vectorize (e.g. with SSE2)
319                    let chunks = dst.len() / 2;
320                    for i in 0..chunks {
321                        dst[i * 2] = value;
322                        dst[i * 2 + 1] = value;
323                    }
324                    // Handle remainder
325                    for item in dst.iter_mut().skip(chunks * 2) {
326                        *item = value;
327                    }
328                    return;
329                }
330            }
331        }
332
333        // Regular fill for smaller data or unsupported cases
334        dst.fill(value);
335    }
336}
337
338/// Performance metrics for adaptive learning
339#[allow(dead_code)]
340#[derive(Debug, Clone)]
341pub struct PerformanceMetrics {
342    /// Average execution times for different operation types
343    pub operation_times: std::collections::HashMap<String, f64>,
344    /// Success rate for different optimization strategies
345    pub strategy_success_rates: std::collections::HashMap<OptimizationStrategy, f64>,
346    /// Memory bandwidth utilization
347    pub memorybandwidth_utilization: f64,
348    /// Cache hit rates
349    pub cache_hit_rate: f64,
350    /// Parallel efficiency measurements
351    pub parallel_efficiency: f64,
352}
353
354impl Default for PerformanceMetrics {
355    fn default() -> Self {
356        Self {
357            operation_times: std::collections::HashMap::new(),
358            strategy_success_rates: std::collections::HashMap::new(),
359            memorybandwidth_utilization: 0.0,
360            cache_hit_rate: 0.0,
361            parallel_efficiency: 0.0,
362        }
363    }
364}
365
366/// Optimization strategies available
367#[allow(dead_code)]
368#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
369pub enum OptimizationStrategy {
370    Scalar,
371    Simd,
372    Parallel,
373    Gpu,
374    Hybrid,
375    CacheOptimized,
376    MemoryBound,
377    ComputeBound,
378    /// Modern architecture-specific optimizations (Zen4, Golden Cove, Apple Silicon)
379    ModernArchOptimized,
380    /// Vector-optimized for advanced SIMD (AVX-512, NEON)
381    VectorOptimized,
382    /// Energy-efficient optimization for mobile/edge devices
383    EnergyEfficient,
384    /// High-throughput optimization for server workloads
385    HighThroughput,
386}
387
388/// Strategy selector for choosing the best optimization approach
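///
/// # Example
///
/// Illustrative sketch of the select/feedback loop (scores are illustrative):
///
/// ```ignore
/// use scirs2_core::performance_optimization::StrategySelector;
///
/// let mut selector = StrategySelector::default();
///
/// // Pick a strategy for a 100k-element, compute-bound operation.
/// let strategy = selector.select_strategy(100_000, false);
///
/// // After measuring, report a normalized performance score in [0, 1].
/// selector.update_weights(strategy, 0.8);
/// ```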
389#[allow(dead_code)]
390#[derive(Debug, Clone)]
391pub struct StrategySelector {
392    /// Current preferred strategy
393    #[allow(dead_code)]
394    preferred_strategy: OptimizationStrategy,
395    /// Strategy weights based on past performance
396    strategy_weights: std::collections::HashMap<OptimizationStrategy, f64>,
397    /// Learning rate for weight updates
398    learningrate: f64,
399    /// Exploration rate for trying different strategies
400    exploration_rate: f64,
401}
402
403impl Default for StrategySelector {
404    fn default() -> Self {
405        let mut strategy_weights = std::collections::HashMap::new();
406        strategy_weights.insert(OptimizationStrategy::Scalar, 1.0);
407        strategy_weights.insert(OptimizationStrategy::Simd, 1.0);
408        strategy_weights.insert(OptimizationStrategy::Parallel, 1.0);
409        strategy_weights.insert(OptimizationStrategy::Gpu, 1.0);
410        strategy_weights.insert(OptimizationStrategy::Hybrid, 1.0);
411        strategy_weights.insert(OptimizationStrategy::CacheOptimized, 1.0);
412        strategy_weights.insert(OptimizationStrategy::MemoryBound, 1.0);
413        strategy_weights.insert(OptimizationStrategy::ComputeBound, 1.0);
414        strategy_weights.insert(OptimizationStrategy::ModernArchOptimized, 1.5); // Higher initial weight
415        strategy_weights.insert(OptimizationStrategy::VectorOptimized, 1.3);
416        strategy_weights.insert(OptimizationStrategy::EnergyEfficient, 1.0);
417        strategy_weights.insert(OptimizationStrategy::HighThroughput, 1.2);
418
419        Self {
420            preferred_strategy: OptimizationStrategy::ModernArchOptimized,
421            strategy_weights,
422            learningrate: 0.1,
423            exploration_rate: 0.1,
424        }
425    }
426}
427
428impl StrategySelector {
429    /// Select the best strategy for given operation characteristics
430    pub fn select_strategy(
431        &self,
432        operation_size: usize,
433        is_memory_bound: bool,
434    ) -> OptimizationStrategy {
435        // Use epsilon-greedy exploration
436        use std::collections::hash_map::DefaultHasher;
437        use std::hash::{Hash, Hasher};
438
439        let mut hasher = DefaultHasher::new();
440        operation_size.hash(&mut hasher);
441        let rand_val = (hasher.finish() % 100) as f64 / 100.0;
442
443        if rand_val < self.exploration_rate {
            // Explore: pick a pseudo-random strategy (deterministic in the operation size), including modern ones
445            let strategies = [
446                OptimizationStrategy::Scalar,
447                OptimizationStrategy::Simd,
448                OptimizationStrategy::Parallel,
449                OptimizationStrategy::Gpu,
450                OptimizationStrategy::ModernArchOptimized,
451                OptimizationStrategy::VectorOptimized,
452                OptimizationStrategy::EnergyEfficient,
453                OptimizationStrategy::HighThroughput,
454            ];
455            strategies[operation_size % strategies.len()]
456        } else {
457            // Exploit: choose the best strategy based on characteristics and architecture
458            if is_memory_bound {
                // For memory-bound operations, prioritize cache-friendly strategies
460                if is_apple_silicon() || is_neoverse_or_newer() {
461                    OptimizationStrategy::ModernArchOptimized
462                } else {
463                    OptimizationStrategy::MemoryBound
464                }
465            } else if operation_size > 1_000_000 {
466                // Very large operations - use high-throughput strategies
467                OptimizationStrategy::HighThroughput
468            } else if operation_size > 100_000 {
469                // Large operations - check for modern architectures
470                if is_zen4_or_newer() || is_intel_golden_cove_or_newer() {
471                    OptimizationStrategy::VectorOptimized
472                } else {
473                    OptimizationStrategy::Parallel
474                }
475            } else if operation_size > 1_000 {
476                // Medium operations - use modern SIMD if available
477                if is_zen4_or_newer() || is_apple_silicon() {
478                    OptimizationStrategy::ModernArchOptimized
479                } else {
480                    OptimizationStrategy::Simd
481                }
482            } else {
483                // Small operations - consider energy efficiency
484                if cfg!(target_os = "android") || cfg!(target_os = "ios") {
485                    OptimizationStrategy::EnergyEfficient
486                } else {
487                    OptimizationStrategy::Scalar
488                }
489            }
490        }
491    }
492
493    /// Update strategy weights based on performance feedback
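    ///
    /// The update is an exponential moving average:
    /// `w_new = w_old * (1 - learning_rate) + score * learning_rate`.
    /// For example, with `w_old = 1.0`, a learning rate of `0.1` and a score of
    /// `0.5`, the new weight is `1.0 * 0.9 + 0.5 * 0.1 = 0.95`.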
494    pub fn update_weights(&mut self, strategy: OptimizationStrategy, performancescore: f64) {
495        if let Some(weight) = self.strategy_weights.get_mut(&strategy) {
496            *weight = *weight * (1.0 - self.learningrate) + performancescore * self.learningrate;
497        }
498    }
499
500    /// Detect if running on ARM Neoverse or newer server architectures
501    #[allow(dead_code)]
502    fn is_neoverse_or_newer() -> bool {
503        crate::performance_optimization::is_neoverse_or_newer()
504    }
505
506    /// Detect if running on AMD Zen4 or newer architectures
507    #[allow(dead_code)]
508    fn is_zen4_or_newer() -> bool {
509        crate::performance_optimization::is_zen4_or_newer()
510    }
511
512    /// Detect if running on Intel Golden Cove (12th gen) or newer
513    #[allow(dead_code)]
514    fn is_intel_golden_cove_or_newer() -> bool {
515        crate::performance_optimization::is_intel_golden_cove_or_newer()
516    }
517}
518
519/// Detect if running on AMD Zen4 or newer architectures
520#[allow(dead_code)]
521fn is_zen4_or_newer() -> bool {
522    #[cfg(target_arch = "x86_64")]
523    {
        // Heuristic: AVX-512F + AVX-512VL (also matches recent Intel AVX-512 parts)
525        is_x86_feature_detected!("avx512f") && is_x86_feature_detected!("avx512vl")
526    }
527    #[cfg(not(target_arch = "x86_64"))]
528    {
529        false
530    }
531}
532
533/// Detect if running on Intel Golden Cove (12th gen) or newer
534#[allow(dead_code)]
535fn is_intel_golden_cove_or_newer() -> bool {
536    #[cfg(target_arch = "x86_64")]
537    {
        // Heuristic: AVX2 + FMA + BMI2 (these predate Golden Cove, so this also matches Haswell and newer)
539        is_x86_feature_detected!("avx2")
540            && is_x86_feature_detected!("fma")
541            && is_x86_feature_detected!("bmi2")
542    }
543    #[cfg(not(target_arch = "x86_64"))]
544    {
545        false
546    }
547}
548
549/// Detect if running on Apple Silicon (M1/M2/M3)
550#[allow(dead_code)]
551fn is_apple_silicon() -> bool {
552    #[cfg(target_arch = "aarch64")]
553    {
554        // Apple Silicon specific detection
555        cfg!(target_vendor = "apple")
556    }
557    #[cfg(not(target_arch = "aarch64"))]
558    {
559        false
560    }
561}
562
563/// Detect if running on ARM Neoverse or newer server architectures
564#[allow(dead_code)]
565fn is_neoverse_or_newer() -> bool {
566    #[cfg(target_arch = "aarch64")]
567    {
        // Heuristic: baseline ARMv8 features; this does not uniquely identify Neoverse cores
569        std::arch::is_aarch64_feature_detected!("asimd")
570            && std::arch::is_aarch64_feature_detected!("crc")
571            && std::arch::is_aarch64_feature_detected!("fp")
572    }
573    #[cfg(not(target_arch = "aarch64"))]
574    {
575        false
576    }
577}
578
579/// Adaptive optimization based on runtime characteristics
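///
/// # Example
///
/// Illustrative sketch; the `should_use_*` helpers return `false` whenever the
/// corresponding cargo feature (`parallel`, `simd`, `gpu`) is disabled:
///
/// ```ignore
/// use scirs2_core::performance_optimization::AdaptiveOptimizer;
///
/// let optimizer = AdaptiveOptimizer::new();
///
/// // Small inputs stay on the scalar path.
/// assert!(!optimizer.should_use_parallel(100));
///
/// // Chunk size is derived from the detected cache-line size.
/// assert!(optimizer.optimal_chunk_size::<f64>() > 0);
/// ```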
580pub struct AdaptiveOptimizer {
581    /// Threshold for switching to parallel execution
582    parallel_threshold: AtomicUsize,
583    /// Threshold for using SIMD operations
584    simd_threshold: AtomicUsize,
585    /// Threshold for using GPU acceleration
586    #[allow(dead_code)]
587    gpu_threshold: AtomicUsize,
588    /// Cache line size for the current architecture
589    cache_line_size: usize,
590    /// Performance metrics for adaptive learning
591    performance_metrics: std::sync::RwLock<PerformanceMetrics>,
592    /// Optimization strategy selector
593    strategy_selector: std::sync::RwLock<StrategySelector>,
594}
595
596impl AdaptiveOptimizer {
597    /// Create a new adaptive optimizer
598    pub fn new() -> Self {
599        Self {
600            parallel_threshold: AtomicUsize::new(10_000),
601            simd_threshold: AtomicUsize::new(1_000),
602            gpu_threshold: AtomicUsize::new(100_000),
603            cache_line_size: Self::detect_cache_line_size(),
604            performance_metrics: std::sync::RwLock::new(PerformanceMetrics::default()),
605            strategy_selector: std::sync::RwLock::new(StrategySelector::default()),
606        }
607    }
608
609    /// Detect the cache line size for the current architecture
610    fn detect_cache_line_size() -> usize {
611        #[cfg(target_arch = "x86_64")]
612        {
613            // All modern x86_64 architectures use 64-byte cache lines
614            64
615        }
616        #[cfg(target_arch = "aarch64")]
617        {
            // Conservative upper bound: Apple Silicon uses 128-byte lines; most other ARM64 cores use 64 bytes
619            128
620        }
621        #[cfg(target_arch = "riscv64")]
622        {
623            64 // RISC-V 64-bit
624        }
625        #[cfg(not(any(
626            target_arch = "x86_64",
627            target_arch = "aarch64",
628            target_arch = "riscv64"
629        )))]
630        {
631            64 // Default fallback
632        }
633    }
634
635    /// Check if parallel execution should be used for given size
636    #[inline]
637    #[allow(unused_variables)]
638    pub fn should_use_parallel(&self, size: usize) -> bool {
639        #[cfg(feature = "parallel")]
640        {
641            size >= self.parallel_threshold.load(Ordering::Relaxed)
642        }
643        #[cfg(not(feature = "parallel"))]
644        {
645            false
646        }
647    }
648
649    /// Check if SIMD should be used for given size
650    #[inline]
651    #[allow(unused_variables)]
652    pub fn should_use_simd(&self, size: usize) -> bool {
653        #[cfg(feature = "simd")]
654        {
655            size >= self.simd_threshold.load(Ordering::Relaxed)
656        }
657        #[cfg(not(feature = "simd"))]
658        {
659            false
660        }
661    }
662
663    /// Update thresholds based on performance measurements
664    pub fn update_from_measurement(&mut self, operation: &str, size: usize, durationns: u64) {
665        // Simple heuristic: adjust thresholds based on operation efficiency
666        let ops_per_ns = size as f64 / durationns as f64;
667
668        if operation.contains("parallel") && ops_per_ns < 0.1 {
669            // Parallel overhead too high, increase threshold
670            self.parallel_threshold
671                .fetch_add(size / 10, Ordering::Relaxed);
672        } else if operation.contains("simd") && ops_per_ns < 1.0 {
673            // SIMD not efficient enough, increase threshold
674            self.simd_threshold.fetch_add(size / 10, Ordering::Relaxed);
675        }
676    }
677
678    /// Get optimal chunk size for cache-friendly operations
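    ///
    /// For example, with a 64-byte cache line and `T = f64` (8 bytes), this
    /// returns `64 / 8 * 16 = 128` elements per chunk.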
679    #[inline]
680    pub fn optimal_chunk_size<T>(&self) -> usize {
681        // Calculate chunk size based on cache line size and element size
682        let element_size = std::mem::size_of::<T>();
683        let elements_per_cache_line = self.cache_line_size / element_size.max(1);
684
685        // Use multiple cache lines for better performance
686        elements_per_cache_line * 16
687    }
688
689    /// Check if GPU acceleration should be used for given size
690    #[inline]
691    #[allow(unused_variables)]
692    pub fn should_use_gpu(&self, size: usize) -> bool {
693        #[cfg(feature = "gpu")]
694        {
695            size >= self.gpu_threshold.load(Ordering::Relaxed)
696        }
697        #[cfg(not(feature = "gpu"))]
698        {
699            false
700        }
701    }
702
703    /// Select the optimal strategy for a given operation
704    pub fn select_for_operation(&self, operationname: &str, size: usize) -> OptimizationStrategy {
705        // Determine if operation is memory-bound based on operation name
706        let memory_bound = operationname.contains("copy")
707            || operationname.contains("memset")
708            || operationname.contains("transpose");
709
710        if let Ok(selector) = self.strategy_selector.read() {
711            selector.select_strategy(size, memory_bound)
712        } else {
713            // Fallback selection
714            if self.should_use_gpu(size) {
715                OptimizationStrategy::Gpu
716            } else if self.should_use_parallel(size) {
717                OptimizationStrategy::Parallel
718            } else if self.should_use_simd(size) {
719                OptimizationStrategy::Simd
720            } else {
721                OptimizationStrategy::Scalar
722            }
723        }
724    }
725
726    /// Record performance measurement and update adaptive parameters
727    pub fn record_performance(
728        &mut self,
729        operation: &str,
730        size: usize,
731        strategy: OptimizationStrategy,
732        duration_ns: u64,
733    ) {
734        // Calculate performance score (higher is better)
735        let ops_per_ns = size as f64 / duration_ns as f64;
        let performance_score = ops_per_ns.min(10.0) / 10.0; // Normalize to the range [0, 1]
737
738        // Update strategy weights
739        if let Ok(mut selector) = self.strategy_selector.write() {
740            selector.update_weights(strategy, performance_score);
741        }
742
743        // Update performance metrics
744        if let Ok(mut metrics) = self.performance_metrics.write() {
745            let avg_time = metrics
746                .operation_times
747                .entry(operation.to_string())
748                .or_insert(0.0);
749            *avg_time = (*avg_time * 0.9) + (duration_ns as f64 * 0.1); // Exponential moving average
750
751            metrics
752                .strategy_success_rates
753                .insert(strategy, performance_score);
754        }
755
756        // Implement adaptive threshold updates based on performance
757        self.update_thresholds(operation, size, duration_ns);
758    }
759
760    /// Get performance metrics for analysis
761    pub fn get_performance_metrics(&self) -> Option<PerformanceMetrics> {
762        self.performance_metrics.read().ok().map(|m| m.clone())
763    }
764
765    /// Analyze operation characteristics to suggest optimizations
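    ///
    /// # Example
    ///
    /// Illustrative sketch:
    ///
    /// ```ignore
    /// use scirs2_core::performance_optimization::AdaptiveOptimizer;
    ///
    /// let optimizer = AdaptiveOptimizer::new();
    /// let advice = optimizer.analyze_operation("matrix_multiply", 1_000_000);
    ///
    /// println!("strategy: {:?}", advice.recommended_strategy);
    /// if let Some(chunk) = advice.optimal_chunk_size {
    ///     println!("chunk size: {chunk}");
    /// }
    /// ```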
766    pub fn analyze_operation(&self, operation_name: &str, inputsize: usize) -> OptimizationAdvice {
767        let strategy = self.select_optimal_strategy(operation_name, inputsize);
768        let chunk_size = if strategy == OptimizationStrategy::Parallel {
769            Some(self.optimal_chunk_size::<f64>())
770        } else {
771            None
772        };
773
774        let prefetch_distance = if inputsize > 10_000 {
775            Some(self.cache_line_size * 8) // Prefetch 8 cache lines ahead
776        } else {
777            None
778        };
779
780        OptimizationAdvice {
781            recommended_strategy: strategy,
782            optimal_chunk_size: chunk_size,
783            prefetch_distance,
784            memory_allocation_hint: if inputsize > 1_000_000 {
785                Some("Consider using memory-mapped files for large outputs".to_string())
786            } else {
787                None
788            },
789        }
790    }
791
792    /// Detect if running on AMD Zen4 or newer architectures
793    #[allow(dead_code)]
794    fn is_zen4_or_newer() -> bool {
795        crate::performance_optimization::is_zen4_or_newer()
796    }
797
798    /// Detect if running on Intel Golden Cove (12th gen) or newer
799    #[allow(dead_code)]
800    fn is_intel_golden_cove_or_newer() -> bool {
801        crate::performance_optimization::is_intel_golden_cove_or_newer()
802    }
803
804    /// Select optimal strategy based on operation name and input size
805    pub fn select_optimal_strategy(
806        &self,
807        _operation_name: &str,
808        input_size: usize,
809    ) -> OptimizationStrategy {
810        // Check GPU threshold first (if available)
811        if input_size >= self.gpu_threshold.load(Ordering::Relaxed) && self.has_gpu_support() {
812            return OptimizationStrategy::Gpu;
813        }
814
815        // Check parallel threshold
816        if input_size >= self.parallel_threshold.load(Ordering::Relaxed) {
817            return OptimizationStrategy::Parallel;
818        }
819
820        // Check SIMD threshold
821        if input_size >= self.simd_threshold.load(Ordering::Relaxed) && self.has_simd_support() {
822            return OptimizationStrategy::Simd;
823        }
824
825        // Default to scalar
826        OptimizationStrategy::Scalar
827    }
828
829    /// Check if GPU support is available
830    pub fn has_gpu_support(&self) -> bool {
831        // For now, return false since GPU support is not implemented
832        false
833    }
834
    /// Check if SIMD support is available
836    pub fn has_simd_support(&self) -> bool {
837        // Check if SIMD instructions are available on this platform
838        #[cfg(target_arch = "x86_64")]
839        {
840            std::arch::is_x86_feature_detected!("avx2")
841                || std::arch::is_x86_feature_detected!("sse4.1")
842        }
843        #[cfg(target_arch = "aarch64")]
844        {
845            std::arch::is_aarch64_feature_detected!("neon")
846        }
847        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
848        {
849            false
850        }
851    }
852
853    /// Update thresholds adaptively based on performance measurements
854    fn update_thresholds(&self, operation: &str, size: usize, duration_ns: u64) {
855        // Calculate operation efficiency (operations per nanosecond)
856        let ops_per_ns = size as f64 / duration_ns as f64;
857
858        // Get current strategy
859        let current_strategy = self.select_optimal_strategy(operation, size);
860
861        // Define efficiency targets for each strategy
862        const PARALLEL_MIN_EFFICIENCY: f64 = 0.5; // Minimum ops/ns for parallel to be worthwhile
863        const SIMD_MIN_EFFICIENCY: f64 = 2.0; // Minimum ops/ns for SIMD to be worthwhile
864        const GPU_MIN_EFFICIENCY: f64 = 10.0; // Minimum ops/ns for GPU to be worthwhile
865
866        match current_strategy {
867            OptimizationStrategy::Parallel => {
868                if ops_per_ns < PARALLEL_MIN_EFFICIENCY {
869                    // Parallel overhead is too high, increase threshold
870                    let new_threshold = (size as f64 * 1.2) as usize;
871                    self.parallel_threshold
872                        .store(new_threshold, Ordering::Relaxed);
873                } else if ops_per_ns > PARALLEL_MIN_EFFICIENCY * 2.0 {
874                    // Parallel is very efficient, could lower threshold
875                    let current = self.parallel_threshold.load(Ordering::Relaxed);
876                    let new_threshold = (current as f64 * 0.9).max(1000.0) as usize;
877                    self.parallel_threshold
878                        .store(new_threshold, Ordering::Relaxed);
879                }
880            }
881            OptimizationStrategy::Simd => {
882                if ops_per_ns < SIMD_MIN_EFFICIENCY {
883                    // SIMD not efficient enough, increase threshold
884                    let new_threshold = (size as f64 * 1.1) as usize;
885                    self.simd_threshold.store(new_threshold, Ordering::Relaxed);
886                } else if ops_per_ns > SIMD_MIN_EFFICIENCY * 2.0 {
887                    // SIMD is very efficient, could lower threshold
888                    let current = self.simd_threshold.load(Ordering::Relaxed);
889                    let new_threshold = (current as f64 * 0.95).max(100.0) as usize;
890                    self.simd_threshold.store(new_threshold, Ordering::Relaxed);
891                }
892            }
893            OptimizationStrategy::Gpu => {
894                if ops_per_ns < GPU_MIN_EFFICIENCY {
895                    // GPU overhead is too high, increase threshold
896                    let new_threshold = (size as f64 * 1.5) as usize;
897                    self.gpu_threshold.store(new_threshold, Ordering::Relaxed);
898                } else if ops_per_ns > GPU_MIN_EFFICIENCY * 2.0 {
899                    // GPU is very efficient, could lower threshold
900                    let current = self.gpu_threshold.load(Ordering::Relaxed);
901                    let new_threshold = (current as f64 * 0.8).max(10000.0) as usize;
902                    self.gpu_threshold.store(new_threshold, Ordering::Relaxed);
903                }
904            }
905            _ => {
906                // For scalar operations, check if we should enable optimizations
907                if size > 1000 && ops_per_ns > SIMD_MIN_EFFICIENCY {
908                    // Could benefit from SIMD
909                    let current = self.simd_threshold.load(Ordering::Relaxed);
910                    let new_threshold = size.min(current);
911                    self.simd_threshold.store(new_threshold, Ordering::Relaxed);
912                }
913                if size > 10000 && ops_per_ns > PARALLEL_MIN_EFFICIENCY {
914                    // Could benefit from parallelization
915                    let current = self.parallel_threshold.load(Ordering::Relaxed);
916                    let new_threshold = size.min(current);
917                    self.parallel_threshold
918                        .store(new_threshold, Ordering::Relaxed);
919                }
920            }
921        }
922
923        // Update performance metrics with the new threshold values
924        if let Ok(mut metrics) = self.performance_metrics.write() {
925            // Store current threshold values in metrics for analysis
926            metrics.operation_times.insert(
927                format!("{}_threshold_parallel", operation),
928                self.parallel_threshold.load(Ordering::Relaxed) as f64,
929            );
930            metrics.operation_times.insert(
931                format!("{}_threshold_simd", operation),
932                self.simd_threshold.load(Ordering::Relaxed) as f64,
933            );
934            metrics.operation_times.insert(
935                format!("{}_threshold_gpu", operation),
936                self.gpu_threshold.load(Ordering::Relaxed) as f64,
937            );
938        }
939    }
940}
941
942/// Optimization advice generated by the adaptive optimizer
943#[allow(dead_code)]
944#[derive(Debug, Clone)]
945pub struct OptimizationAdvice {
946    /// Recommended optimization strategy
947    pub recommended_strategy: OptimizationStrategy,
948    /// Optimal chunk size for parallel processing
949    pub optimal_chunk_size: Option<usize>,
950    /// Prefetch distance for memory access
951    pub prefetch_distance: Option<usize>,
952    /// Memory allocation hints
953    pub memory_allocation_hint: Option<String>,
954}
955
956impl Default for AdaptiveOptimizer {
957    fn default() -> Self {
958        Self::new()
959    }
960}
961
962/// Fast path optimizations for common operations
963pub mod fast_paths {
964    use super::*;
965
966    /// Optimized array addition for f64
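    ///
    /// # Example
    ///
    /// Illustrative sketch; all three slices must have the same length:
    ///
    /// ```ignore
    /// use scirs2_core::performance_optimization::fast_paths;
    ///
    /// let a = vec![1.0f64; 32];
    /// let b = vec![2.0f64; 32];
    /// let mut out = vec![0.0f64; 32];
    ///
    /// fast_paths::add_f64_arrays(&a, &b, &mut out).unwrap();
    /// assert!(out.iter().all(|&x| x == 3.0));
    /// ```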
967    #[inline]
968    #[allow(unused_variables)]
969    pub fn add_f64_arrays(a: &[f64], b: &[f64], result: &mut [f64]) -> Result<(), &'static str> {
970        if a.len() != b.len() || a.len() != result.len() {
971            return Err("Array lengths must match");
972        }
973
974        let len = a.len();
975        let optimizer = AdaptiveOptimizer::new();
976
977        #[cfg(feature = "simd")]
978        if optimizer.should_use_simd(len) {
979            // Use SIMD operations for f64 addition
980            use crate::simd_ops::SimdUnifiedOps;
981            use ::ndarray::ArrayView1;
982
983            // Process in SIMD-width chunks
984            let simd_chunks = len / 4; // Process 4 f64s at a time
985
986            for i in 0..simd_chunks {
987                let start = i * 4;
988                let end = start + 4;
989
990                if end <= len {
991                    let a_view = ArrayView1::from(&a[start..end]);
992                    let b_view = ArrayView1::from(&b[start..end]);
993
994                    // Use SIMD addition
995                    let simd_result = f64::simd_add(&a_view, &b_view);
996                    result[start..end]
997                        .copy_from_slice(simd_result.as_slice().expect("Operation failed"));
998                }
999            }
1000
1001            // Handle remaining elements with scalar operations
1002            for i in (simd_chunks * 4)..len {
                result[i] = a[i] + b[i];
1004            }
1005            return Ok(());
1006        }
1007
1008        #[cfg(feature = "parallel")]
1009        if optimizer.should_use_parallel(len) {
1010            use crate::parallel_ops::*;
1011            result
1012                .par_chunks_mut(optimizer.optimal_chunk_size::<f64>())
1013                .zip(a.par_chunks(optimizer.optimal_chunk_size::<f64>()))
1014                .zip(b.par_chunks(optimizer.optimal_chunk_size::<f64>()))
1015                .for_each(|((r_chunk, a_chunk), b_chunk)| {
1016                    for i in 0..r_chunk.len() {
                        r_chunk[i] = a_chunk[i] + b_chunk[i];
1018                    }
1019                });
1020            return Ok(());
1021        }
1022
1023        // Scalar fallback with loop unrolling
1024        let chunks = len / 8;
1025
1026        for i in 0..chunks {
1027            let idx = i * 8;
1028            result[idx] = a[idx] + b[idx];
1029            result[idx + 1] = a[idx + 1] + b[idx + 1];
1030            result[idx + 2] = a[idx + 2] + b[idx + 2];
1031            result[idx + 3] = a[idx + 3] + b[idx + 3];
1032            result[idx + 4] = a[idx + 4] + b[idx + 4];
1033            result[idx + 5] = a[idx + 5] + b[idx + 5];
1034            result[idx + 6] = a[idx + 6] + b[idx + 6];
1035            result[idx + 7] = a[idx + 7] + b[idx + 7];
1036        }
1037
1038        for i in (chunks * 8)..len {
            result[i] = a[i] + b[i];
1040        }
1041
1042        Ok(())
1043    }
1044
1045    /// Optimized matrix multiplication kernel
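    ///
    /// # Example
    ///
    /// Illustrative sketch; matrices are row-major, `a` is `m x k`, `b` is
    /// `k x n`, and `c` is `m x n`:
    ///
    /// ```ignore
    /// use scirs2_core::performance_optimization::fast_paths;
    ///
    /// // [1 2]   [5 6]   [19 22]
    /// // [3 4] * [7 8] = [43 50]
    /// let a = vec![1.0, 2.0, 3.0, 4.0];
    /// let b = vec![5.0, 6.0, 7.0, 8.0];
    /// let mut c = vec![0.0; 4];
    ///
    /// fast_paths::matmul_kernel(&a, &b, &mut c, 2, 2, 2).unwrap();
    /// assert_eq!(c, vec![19.0, 22.0, 43.0, 50.0]);
    /// ```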
1046    #[inline]
1047    pub fn matmul_kernel(
1048        a: &[f64],
1049        b: &[f64],
1050        c: &mut [f64],
1051        m: usize,
1052        k: usize,
1053        n: usize,
1054    ) -> Result<(), &'static str> {
1055        if a.len() != m * k || b.len() != k * n || c.len() != m * n {
1056            return Err("Invalid matrix dimensions");
1057        }
1058
1059        // Tile sizes for cache optimization
1060        const TILE_M: usize = 64;
1061        const TILE_N: usize = 64;
1062        const TILE_K: usize = 64;
1063
1064        // Clear result matrix
1065        c.fill(0.0);
1066
1067        #[cfg(feature = "parallel")]
1068        {
1069            let optimizer = AdaptiveOptimizer::new();
1070            if optimizer.should_use_parallel(m * n) {
1071                use crate::parallel_ops::*;
1072
1073                // Use synchronization for parallel matrix multiplication
1074                use std::sync::Mutex;
1075                let c_mutex = Mutex::new(c);
1076
1077                // Parallel tiled implementation using row-wise parallelization
1078                (0..m).into_par_iter().step_by(TILE_M).for_each(|i0| {
1079                    let i_max = (i0 + TILE_M).min(m);
1080                    let mut local_updates = Vec::new();
1081
1082                    for j0 in (0..n).step_by(TILE_N) {
1083                        for k0 in (0..k).step_by(TILE_K) {
1084                            let j_max = (j0 + TILE_N).min(n);
1085                            let k_max = (k0 + TILE_K).min(k);
1086
1087                            for i in i0..i_max {
1088                                for j in j0..j_max {
1089                                    let mut sum = 0.0;
1090                                    for k_idx in k0..k_max {
1091                                        sum += a[i * k + k_idx] * b[k_idx * n + j];
1092                                    }
1093                                    local_updates.push((i, j, sum));
1094                                }
1095                            }
1096                        }
1097                    }
1098
1099                    // Apply all local updates at once
1100                    if let Ok(mut c_guard) = c_mutex.lock() {
1101                        for (i, j, sum) in local_updates {
1102                            c_guard[i * n + j] += sum;
1103                        }
1104                    }
1105                });
1106                return Ok(());
1107            }
1108        }
1109
1110        // Serial tiled implementation
1111        for i0 in (0..m).step_by(TILE_M) {
1112            for j0 in (0..n).step_by(TILE_N) {
1113                for k0 in (0..k).step_by(TILE_K) {
1114                    let i_max = (i0 + TILE_M).min(m);
1115                    let j_max = (j0 + TILE_N).min(n);
1116                    let k_max = (k0 + TILE_K).min(k);
1117
1118                    for i in i0..i_max {
1119                        for j in j0..j_max {
1120                            let mut sum = c[i * n + j];
1121                            for k_idx in k0..k_max {
1122                                sum += a[i * k + k_idx] * b[k_idx * n + j];
1123                            }
1124                            c[i * n + j] = sum;
1125                        }
1126                    }
1127                }
1128            }
1129        }
1130
1131        Ok(())
1132    }
1133}
1134
1135/// Memory access pattern optimizer
1136#[allow(dead_code)]
1137pub struct MemoryAccessOptimizer {
1138    /// Stride detection for array access
1139    stride_detector: StrideDetector,
1140}
1141
1142#[derive(Default)]
1143#[allow(dead_code)]
1144struct StrideDetector {
1145    last_address: Option<usize>,
1146    detected_stride: Option<isize>,
1147    confidence: f32,
1148}
1149
1150impl MemoryAccessOptimizer {
1151    pub fn new() -> Self {
1152        Self {
1153            stride_detector: StrideDetector::default(),
1154        }
1155    }
1156
1157    /// Analyze memory access pattern and suggest optimizations
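    ///
    /// # Example
    ///
    /// Illustrative sketch using synthetic addresses:
    ///
    /// ```ignore
    /// use scirs2_core::performance_optimization::{AccessPattern, MemoryAccessOptimizer};
    ///
    /// let mut optimizer = MemoryAccessOptimizer::new();
    ///
    /// // Addresses spaced one `f64` apart look like sequential access.
    /// let addresses: Vec<*const f64> = (0..10)
    ///     .map(|i| (i * std::mem::size_of::<f64>()) as *const f64)
    ///     .collect();
    /// assert_eq!(
    ///     optimizer.analyze_access_pattern(&addresses),
    ///     AccessPattern::Sequential
    /// );
    /// ```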
1158    pub fn analyze_access_pattern<T>(&mut self, addresses: &[*const T]) -> AccessPattern {
        // Need at least two addresses to compute a stride; this also guards the
        // `strides[0]` access below against an empty stride list.
        if addresses.len() < 2 {
            return AccessPattern::Unknown;
        }
1162
1163        // Simple stride detection
1164        let mut strides = Vec::new();
1165        for window in addresses.windows(2) {
1166            let stride = (window[1] as isize) - (window[0] as isize);
            strides.push(stride / std::mem::size_of::<T>().max(1) as isize);
1168        }
1169
1170        // Check if all strides are equal (sequential access)
1171        if strides.windows(2).all(|w| w[0] == w[1]) {
1172            match strides[0] {
1173                1 => AccessPattern::Sequential,
1174                -1 => AccessPattern::ReverseSequential,
1175                s if s > 1 => AccessPattern::Strided(s as usize),
1176                _ => AccessPattern::Random,
1177            }
1178        } else {
1179            AccessPattern::Random
1180        }
1181    }
1182}
1183
1184#[allow(dead_code)]
1185#[derive(Debug, Clone, Copy, PartialEq)]
1186pub enum AccessPattern {
1187    Sequential,
1188    ReverseSequential,
1189    Strided(usize),
1190    Random,
1191    Unknown,
1192}
1193
1194impl Default for MemoryAccessOptimizer {
1195    fn default() -> Self {
1196        Self::new()
1197    }
1198}
1199
1200/// Re-export the benchmarking framework for performance analysis
1201pub use crate::performance::benchmarking;
1202
/// Cache-aware algorithms that adapt to cache characteristics and system topology.
///
/// Re-exported from `crate::performance::cache_optimization`.
pub use crate::performance::cache_optimization as cache_aware_algorithms;
1209
1210/// Re-export the advanced AI-driven optimization module
1211pub use crate::performance::advanced_optimization;
1212
1213/* Tests removed due to compilation issues with --all-features
1214#[cfg(test)]
1215mod tests {
1216    use super::*;
1217    use std::time::Duration;
1218
1219    #[cfg(feature = "benchmarking")]
1220    use crate::benchmarking;
1221
1222    #[test]
1223    fn test_adaptive_optimizer() {
1224        let optimizer = AdaptiveOptimizer::new();
1225
1226        // Test threshold detection
1227        assert!(!optimizer.should_use_parallel(100));
1228
1229        // Only test parallel execution if the feature is enabled
1230        #[cfg(feature = "parallel")]
1231        assert!(optimizer.should_use_parallel(100_000));
1232
1233        // Test chunk size calculation
1234        let chunk_size = optimizer.optimal_chunk_size::<f64>();
1235        assert!(chunk_size > 0);
1236        assert_eq!(chunk_size % 16, 0); // Should be multiple of 16
1237    }
1238
1239    #[test]
1240    fn test_fast_path_addition() {
1241        let a = vec![1.0; 32];
1242        let b = vec![2.0; 32];
1243        let mut result = vec![0.0; 32];
1244
1245        fast_paths::add_f64_arrays(&a, &b, &mut result).expect("Operation failed");
1246
1247        for val in result {
1248            assert_eq!(val, 3.0);
1249        }
1250    }
1251
1252    #[test]
1253    fn test_memory_access_pattern() {
1254        let mut optimizer = MemoryAccessOptimizer::new();
1255
1256        // Sequential access
1257        let addresses: Vec<*const f64> = (0..10)
1258            .map(|i| (i * std::mem::size_of::<f64>()) as *const f64)
1259            .collect();
1260        assert_eq!(
1261            optimizer.analyze_access_pattern(&addresses),
1262            AccessPattern::Sequential
1263        );
1264
1265        // Strided access
1266        let addresses: Vec<*const f64> = (0..10)
1267            .map(|i| (i * 3 * std::mem::size_of::<f64>()) as *const f64)
1268            .collect();
1269        assert_eq!(
1270            optimizer.analyze_access_pattern(&addresses),
1271            AccessPattern::Strided(3)
1272        );
1273    }
1274
1275    #[test]
1276    fn test_performance_hints() {
1277        // Test that hints don't crash and return correct values
1278        assert!(PerformanceHints::likely(true));
1279        assert!(!PerformanceHints::likely(false));
1280        assert!(PerformanceHints::unlikely(true));
1281        assert!(!PerformanceHints::unlikely(false));
1282
1283        // Test prefetch operations (should not crash)
1284        let data = [1.0f64; 100];
1285        PerformanceHints::prefetch_read(&data[0]);
1286
1287        let mut data_mut = [0.0f64; 100];
1288        PerformanceHints::prefetch_write(&mut data_mut[0]);
1289
1290        // Test locality-based prefetch
1291        PerformanceHints::prefetch_with_locality(&data[0], Locality::High);
1292        PerformanceHints::prefetch_with_locality(&data[0], Locality::Medium);
1293        PerformanceHints::prefetch_with_locality(&data[0], Locality::Low);
1294        PerformanceHints::prefetch_with_locality(&data[0], Locality::None);
1295    }
1296
1297    #[test]
1298    fn test_cache_operations() {
1299        let data = [1.0f64; 8];
1300
1301        // Test cache flush (should not crash)
1302        PerformanceHints::flush_cache_line(&data[0]);
1303
1304        // Test memory fence (should not crash)
1305        PerformanceHints::memory_fence();
1306
1307        // Test cache-aware copy
1308        let src = vec![1.0f64; 64];
1309        let mut dst = vec![0.0f64; 64];
1310        PerformanceHints::cache_aware_copy(&src, &mut dst);
1311        assert_eq!(src, dst);
1312
1313        // Test cache-aware memset
1314        let mut data = vec![0.0f64; 64];
1315        PerformanceHints::cache_aware_memset(&mut data, 5.0);
1316        assert!(data.iter().all(|&x| x == 5.0));
1317    }
1318
1319    #[test]
1320    fn test_locality_enum() {
1321        // Test that Locality enum works correctly
1322        let localities = [
1323            Locality::High,
1324            Locality::Medium,
1325            Locality::Low,
1326            Locality::None,
1327        ];
1328
1329        for locality in &localities {
1330            // Test that we can use locality in prefetch
1331            let data = 42i32;
1332            PerformanceHints::prefetch_with_locality(&data, *locality);
1333        }
1334
1335        // Test enum properties
1336        assert_eq!(Locality::High, Locality::High);
1337        assert_ne!(Locality::High, Locality::Low);
1338
1339        // Test Debug formatting
1340        assert!(format!("{:?}", Locality::High).contains("High"));
1341    }
1342
1343    #[test]
1344    fn test_strategy_selector() {
1345        let mut selector = StrategySelector::default();
1346
1347        // Test strategy selection
1348        let strategy = selector.select_strategy(1000, false);
1349        assert!(matches!(
1350            strategy,
1351            OptimizationStrategy::Simd
1352                | OptimizationStrategy::Scalar
1353                | OptimizationStrategy::Parallel
1354                | OptimizationStrategy::Gpu
1355        ));
1356
1357        // Test weight updates
1358        selector.update_weights(OptimizationStrategy::Simd, 0.8);
1359        selector.update_weights(OptimizationStrategy::Parallel, 0.9);
1360
1361        // Weights should be updated
1362        assert!(selector.strategy_weights[&OptimizationStrategy::Simd] != 1.0);
1363        assert!(selector.strategy_weights[&OptimizationStrategy::Parallel] != 1.0);
1364    }
1365
1366    #[test]
1367    fn test_adaptive_optimizer_enhanced() {
1368        let mut optimizer = AdaptiveOptimizer::new();
1369
1370        // Test GPU threshold
1371        assert!(!optimizer.should_use_gpu(1000));
1372
1373        // Test strategy selection
1374        let strategy = optimizer.select_optimal_strategy("matrix_multiply", 50_000);
1375        assert!(matches!(
1376            strategy,
1377            OptimizationStrategy::Parallel
1378                | OptimizationStrategy::Simd
1379                | OptimizationStrategy::Scalar
1380                | OptimizationStrategy::Gpu
1381                | OptimizationStrategy::Hybrid
1382                | OptimizationStrategy::CacheOptimized
1383                | OptimizationStrategy::MemoryBound
1384                | OptimizationStrategy::ComputeBound
1385                | OptimizationStrategy::ModernArchOptimized
1386                | OptimizationStrategy::VectorOptimized
1387                | OptimizationStrategy::EnergyEfficient
1388                | OptimizationStrategy::HighThroughput
1389        ));
1390
1391        // Test performance recording
1392        optimizer.record_performance("test_op", 1000, OptimizationStrategy::Simd, 1_000_000);
1393
1394        // Test optimization advice
1395        let advice = optimizer.analyze_operation("matrix_multiply", 10_000);
1396        assert!(matches!(
1397            advice.recommended_strategy,
1398            OptimizationStrategy::Parallel
1399                | OptimizationStrategy::Simd
1400                | OptimizationStrategy::Scalar
1401                | OptimizationStrategy::Gpu
1402                | OptimizationStrategy::Hybrid
1403                | OptimizationStrategy::CacheOptimized
1404                | OptimizationStrategy::MemoryBound
1405                | OptimizationStrategy::ComputeBound
1406                | OptimizationStrategy::ModernArchOptimized
1407                | OptimizationStrategy::VectorOptimized
1408                | OptimizationStrategy::EnergyEfficient
1409                | OptimizationStrategy::HighThroughput
1410        ));
1411
1412        // Test metrics retrieval
1413        let metrics = optimizer.get_performance_metrics();
1414        assert!(metrics.is_some());
1415    }
1416
1417    #[test]
1418    fn test_optimization_strategy_enum() {
1419        // Test that all strategies can be created and compared
1420        let strategies = [
1421            OptimizationStrategy::Scalar,
1422            OptimizationStrategy::Simd,
1423            OptimizationStrategy::Parallel,
1424            OptimizationStrategy::Gpu,
1425            OptimizationStrategy::Hybrid,
1426            OptimizationStrategy::CacheOptimized,
1427            OptimizationStrategy::MemoryBound,
1428            OptimizationStrategy::ComputeBound,
1429        ];
1430
1431        for strategy in &strategies {
1432            // Test Debug formatting
1433            assert!(!format!("{strategy:?}").is_empty());
1434
1435            // Test equality
1436            assert_eq!(*strategy, *strategy);
1437        }
1438    }
1439
1440    #[test]
1441    fn test_performance_metrics() {
1442        let mut metrics = PerformanceMetrics::default();
1443
1444        // Test that we can add operation times
1445        metrics
1446            .operation_times
1447            .insert("test_op".to_string(), 1000.0);
1448        assert_eq!(metrics.operation_times["test_op"], 1000.0);
1449
1450        // Test strategy success rates
1451        metrics
1452            .strategy_success_rates
1453            .insert(OptimizationStrategy::Simd, 0.85);
1454        assert_eq!(
1455            metrics.strategy_success_rates[&OptimizationStrategy::Simd],
1456            0.85
1457        );
1458
1459        // Test other metrics
1460        metrics.memorybandwidth_utilization = 0.75;
1461        metrics.cache_hit_rate = 0.90;
1462        metrics.parallel_efficiency = 0.80;
1463
1464        assert_eq!(metrics.memorybandwidth_utilization, 0.75);
1465        assert_eq!(metrics.cache_hit_rate, 0.90);
1466        assert_eq!(metrics.parallel_efficiency, 0.80);
1467    }
1468
    #[test]
    fn test_optimization_advice() {
        let advice = OptimizationAdvice {
            recommended_strategy: OptimizationStrategy::Parallel,
            optimal_chunk_size: Some(1024),
            prefetch_distance: Some(64),
            memory_allocation_hint: Some("Use memory mapping".to_string()),
        };

        assert_eq!(advice.recommended_strategy, OptimizationStrategy::Parallel);
        assert_eq!(advice.optimal_chunk_size, Some(1024));
        assert_eq!(advice.prefetch_distance, Some(64));
        assert!(advice.memory_allocation_hint.is_some());

        // Test Debug formatting
        assert!(!format!("{advice:?}").is_empty());
    }

    #[test]
    fn test_benchmarking_config() {
        let config = benchmarking::BenchmarkConfig::default();

        assert_eq!(config.warmup_iterations, 5);
        assert_eq!(config.measurement_iterations, 20);
        assert!(!config.sample_sizes.is_empty());
        assert!(!config.strategies.is_empty());

        // Test preset configurations
        let array_config = benchmarking::presets::array_operations();
        assert_eq!(array_config.warmup_iterations, 3);
        assert_eq!(array_config.measurement_iterations, 10);

        let matrix_config = benchmarking::presets::matrix_operations();
        assert_eq!(matrix_config.warmup_iterations, 5);
        assert_eq!(matrix_config.measurement_iterations, 15);

        let memory_config = benchmarking::presets::memory_intensive();
        assert_eq!(memory_config.warmup_iterations, 2);
        assert_eq!(memory_config.measurement_iterations, 8);
    }

    #[test]
    fn test_benchmark_measurement() {
        let measurement = benchmarking::BenchmarkMeasurement {
            duration: Duration::from_millis(5),
            strategy: OptimizationStrategy::Simd,
            input_size: 1000,
            throughput: 200_000.0,
            memory_usage: 8000,
            custom_metrics: std::collections::HashMap::new(),
        };

        assert_eq!(measurement.strategy, OptimizationStrategy::Simd);
        assert_eq!(measurement.input_size, 1000);
        assert_eq!(measurement.throughput, 200_000.0);
        assert_eq!(measurement.memory_usage, 8000);
    }

    #[test]
    fn test_benchmark_runner() {
        let config = benchmarking::BenchmarkConfig {
            warmup_iterations: 1,
            measurement_iterations: 2,
            min_duration: Duration::from_millis(1),
            max_duration: Duration::from_secs(1),
            sample_sizes: vec![10, 100],
            strategies: vec![OptimizationStrategy::Scalar, OptimizationStrategy::Simd]
                .into_iter()
                .collect(),
        };

        let runner = benchmarking::BenchmarkRunner::new(config);

        // Test a simple operation
        let results = runner.benchmark_operation("test_add", |data, _strategy| {
            let result: Vec<f64> = data.iter().map(|x| *x + 1.0).collect();
            (Duration::from_millis(1), result)
        });

        assert!(!results.measurements.is_empty());
    }

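    // Illustrative sketch of consuming `BenchmarkResults`: rank strategies by their
    // `efficiency_score` and surface the generated recommendations. Only fields that the
    // surrounding tests construct or assert on are read; the `println!` reporting is a
    // placeholder for whatever output a caller actually wants. Throughput is reported in
    // elements per second, matching the values used in the tests (e.g. 1000 elements in
    // 5 ms = 200_000.0).
    #[allow(dead_code)]
    fn report_benchmark_results(results: &benchmarking::BenchmarkResults) {
        let best = results.strategy_summary.iter().max_by(|a, b| {
            a.1.efficiency_score
                .partial_cmp(&b.1.efficiency_score)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        if let Some((strategy, perf)) = best {
            println!(
                "{}: best strategy {:?} ({:.0} elem/s, score {:.2})",
                results.operation_name, strategy, perf.avg_throughput, perf.efficiency_score
            );
        }
        for note in &results.recommendations {
            println!("recommendation: {note}");
        }
    }
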
    #[test]
    fn test_strategy_performance() {
        let performance = benchmarking::StrategyPerformance {
            avg_throughput: 150_000.0,
            throughput_stddev: 5_000.0,
            avg_memory_usage: 8000.0,
            optimal_size: 10_000,
            efficiency_score: 0.85,
        };

        assert_eq!(performance.avg_throughput, 150_000.0);
        assert_eq!(performance.throughput_stddev, 5_000.0);
        assert_eq!(performance.optimal_size, 10_000);
        assert_eq!(performance.efficiency_score, 0.85);
    }

    #[test]
    fn test_scalability_analysis() {
        let mut parallel_efficiency = std::collections::HashMap::new();
        parallel_efficiency.insert(1000, 0.8);
        parallel_efficiency.insert(10000, 0.9);

        let memory_scaling = benchmarking::MemoryScaling {
            linear_coefficient: 8.0,
            constant_coefficient: 1024.0,
            r_squared: 0.95,
        };

        let bottleneck = benchmarking::PerformanceBottleneck {
            bottleneck_type: benchmarking::BottleneckType::MemoryBandwidth,
            size_range: (10000, 10000),
            impact: 0.3,
            mitigation: "Use memory prefetching".to_string(),
        };

        let analysis = benchmarking::ScalabilityAnalysis {
            parallel_efficiency,
            memory_scaling,
            bottlenecks: vec![bottleneck],
        };

        assert_eq!(analysis.parallel_efficiency[&1000], 0.8);
        assert_eq!(analysis.memory_scaling.linear_coefficient, 8.0);
        assert_eq!(analysis.bottlenecks.len(), 1);
        assert_eq!(
            analysis.bottlenecks[0].bottleneck_type,
            benchmarking::BottleneckType::MemoryBandwidth
        );
    }

    #[test]
    fn test_memory_scaling() {
        let scaling = benchmarking::MemoryScaling {
            linear_coefficient: 8.0,
            constant_coefficient: 512.0,
            r_squared: 0.99,
        };

        assert_eq!(scaling.linear_coefficient, 8.0);
        assert_eq!(scaling.constant_coefficient, 512.0);
        assert_eq!(scaling.r_squared, 0.99);
    }

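    // Illustrative note on `MemoryScaling`: the fields describe a straight-line fit,
    // memory ≈ linear_coefficient * n + constant_coefficient, with `r_squared` as the
    // goodness of fit. Interpreting the units as bytes per element plus a constant
    // overhead is an assumption that matches the 8.0 / 512.0 values used above.
    #[allow(dead_code)]
    fn predicted_memory(scaling: &benchmarking::MemoryScaling, n_elements: usize) -> f64 {
        scaling.linear_coefficient * n_elements as f64 + scaling.constant_coefficient
    }
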
    #[test]
    fn test_performance_bottleneck() {
        let bottleneck = benchmarking::PerformanceBottleneck {
            bottleneck_type: benchmarking::BottleneckType::SynchronizationOverhead,
            size_range: (1000, 5000),
            impact: 0.6,
            mitigation: "Reduce thread contention".to_string(),
        };

        assert_eq!(
            bottleneck.bottleneck_type,
            benchmarking::BottleneckType::SynchronizationOverhead
        );
        assert_eq!(bottleneck.size_range, (1000, 5000));
        assert_eq!(bottleneck.impact, 0.6);
        assert_eq!(bottleneck.mitigation, "Reduce thread contention");
    }

    #[test]
    fn test_bottleneck_type_enum() {
        let bottleneck_types = [
            benchmarking::BottleneckType::MemoryBandwidth,
            benchmarking::BottleneckType::CacheLatency,
            benchmarking::BottleneckType::ComputeBound,
            benchmarking::BottleneckType::SynchronizationOverhead,
            benchmarking::BottleneckType::AlgorithmicComplexity,
        ];

        for bottleneck_type in &bottleneck_types {
            // Test Debug formatting
            assert!(!format!("{bottleneck_type:?}").is_empty());

            // Test equality
            assert_eq!(*bottleneck_type, *bottleneck_type);
        }

        // Test inequality
        assert_ne!(
            benchmarking::BottleneckType::MemoryBandwidth,
            benchmarking::BottleneckType::CacheLatency
        );
    }

    #[test]
    fn test_benchmark_results() {
        let measurement = benchmarking::BenchmarkMeasurement {
            strategy: OptimizationStrategy::Parallel,
            input_size: 1000,
            duration: Duration::from_millis(10),
            throughput: 100_000.0,
            memory_usage: 8000,
            custom_metrics: std::collections::HashMap::new(),
        };

        let mut strategy_summary = std::collections::HashMap::new();
        strategy_summary.insert(
            OptimizationStrategy::Parallel,
            benchmarking::StrategyPerformance {
                avg_throughput: 100_000.0,
                throughput_stddev: 1_000.0,
                avg_memory_usage: 8000.0,
                optimal_size: 1000,
                efficiency_score: 0.9,
            },
        );

        let scalability_analysis = benchmarking::ScalabilityAnalysis {
            parallel_efficiency: std::collections::HashMap::new(),
            memory_scaling: benchmarking::MemoryScaling {
                linear_coefficient: 8.0,
                constant_coefficient: 0.0,
                r_squared: 1.0,
            },
            bottlenecks: Vec::new(),
        };

        let results = benchmarking::BenchmarkResults {
            operation_name: "test_operation".to_string(),
            measurements: vec![measurement],
            strategy_summary,
            scalability_analysis,
            recommendations: vec!["Use parallel strategy".to_string()],
            total_duration: Duration::from_millis(100),
        };

        assert_eq!(results.operation_name, "test_operation");
        assert_eq!(results.measurements.len(), 1);
        assert_eq!(results.strategy_summary.len(), 1);
        assert_eq!(results.recommendations.len(), 1);
        assert_eq!(results.total_duration, Duration::from_millis(100));
    }

    #[test]
    fn test_modern_architecture_detection() {
        // Detection results depend on the host hardware, so this test only verifies
        // that the detection functions run without panicking. The leading underscores
        // keep the unused results from triggering warnings.
        let _zen4_detected = is_zen4_or_newer();
        let _golden_cove_detected = is_intel_golden_cove_or_newer();
        let _apple_silicon_detected = is_apple_silicon();
        let _neoverse_detected = is_neoverse_or_newer();
    }

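    // Illustrative sketch combining the detection helpers above into a single check. The
    // mapping onto ModernArchOptimized is an assumption made for the example, not the
    // module's actual selection logic.
    #[allow(dead_code)]
    fn prefer_modern_arch_strategy() -> OptimizationStrategy {
        let modern_cpu = is_zen4_or_newer()
            || is_intel_golden_cove_or_newer()
            || is_apple_silicon()
            || is_neoverse_or_newer();
        if modern_cpu {
            OptimizationStrategy::ModernArchOptimized
        } else {
            OptimizationStrategy::Scalar
        }
    }
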
    #[test]
    fn test_enhanced_strategy_selector() {
        let selector = StrategySelector::default();

        // Test that new strategies are included in default weights
        assert!(selector
            .strategy_weights
            .contains_key(&OptimizationStrategy::ModernArchOptimized));
        assert!(selector
            .strategy_weights
            .contains_key(&OptimizationStrategy::VectorOptimized));
        assert!(selector
            .strategy_weights
            .contains_key(&OptimizationStrategy::EnergyEfficient));
        assert!(selector
            .strategy_weights
            .contains_key(&OptimizationStrategy::HighThroughput));

        // Test that ModernArchOptimized has a higher initial weight than Scalar
        let modern_weight = selector
            .strategy_weights
            .get(&OptimizationStrategy::ModernArchOptimized)
            .expect("default weights should include ModernArchOptimized");
        let scalar_weight = selector
            .strategy_weights
            .get(&OptimizationStrategy::Scalar)
            .expect("default weights should include Scalar");
        assert!(modern_weight > scalar_weight);
    }

    #[test]
    fn test_enhanced_strategy_selection() {
        let selector = StrategySelector::default();

        // Test small operation strategy selection
        let small_strategy = selector.select_strategy(100, false);
        assert!(matches!(
            small_strategy,
            OptimizationStrategy::Scalar
                | OptimizationStrategy::EnergyEfficient
                | OptimizationStrategy::ModernArchOptimized
        ));

        // Test large operation strategy selection
        let large_strategy = selector.select_strategy(1_000_000, false);
        assert!(matches!(
            large_strategy,
            OptimizationStrategy::HighThroughput
                | OptimizationStrategy::VectorOptimized
                | OptimizationStrategy::Parallel
        ));

        // Test memory-bound operation strategy selection
        let memory_bound_strategy = selector.select_strategy(10_000, true);
        assert!(matches!(
            memory_bound_strategy,
            OptimizationStrategy::MemoryBound | OptimizationStrategy::ModernArchOptimized
        ));
    }

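    // Illustrative sketch of the select/execute/update feedback loop implied by the
    // selector tests: pick a strategy, run the work, score the run, and feed the score
    // back via `update_weights`. The kernel, the timing, and the scoring formula are
    // placeholders, and the size argument is assumed to be a `usize` element count.
    #[allow(dead_code)]
    fn sum_with_feedback(selector: &mut StrategySelector, data: &[f64], memory_bound: bool) -> f64 {
        let strategy = selector.select_strategy(data.len(), memory_bound);
        let start = std::time::Instant::now();
        // Placeholder workload; a real caller would dispatch on `strategy` here.
        let sum: f64 = data.iter().sum();
        let elapsed = start.elapsed();
        // Crude score in (0, 1]: faster runs score closer to 1. Purely illustrative.
        let score = 1.0 / (1.0 + elapsed.as_secs_f64());
        selector.update_weights(strategy, score);
        sum
    }
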
    #[test]
    #[cfg(feature = "benchmarking")]
    fn test_advanced_benchmark_config() {
        let config = benchmarking::presets::advanced_comprehensive();

        // Verify comprehensive strategy coverage
        assert!(config
            .strategies
            .contains(&OptimizationStrategy::ModernArchOptimized));
        assert!(config
            .strategies
            .contains(&OptimizationStrategy::VectorOptimized));
        assert!(config
            .strategies
            .contains(&OptimizationStrategy::EnergyEfficient));
        assert!(config
            .strategies
            .contains(&OptimizationStrategy::HighThroughput));

        // Verify comprehensive size coverage
        assert!(config.sample_sizes.len() >= 10);
        assert!(config.sample_sizes.contains(&100));
        assert!(config.sample_sizes.contains(&5_000_000));

        // Verify thorough measurement configuration
        assert!(config.measurement_iterations >= 25);
        assert!(config.warmup_iterations >= 10);
    }

    #[test]
    #[cfg(feature = "benchmarking")]
    fn test_modern_architecture_benchmark_config() {
        let config = benchmarking::presets::modern_architectures();

        // Verify focus on modern strategies
        assert_eq!(config.strategies.len(), 4);
        assert!(config
            .strategies
            .contains(&OptimizationStrategy::ModernArchOptimized));
        assert!(config
            .strategies
            .contains(&OptimizationStrategy::VectorOptimized));
        assert!(config
            .strategies
            .contains(&OptimizationStrategy::HighThroughput));
        assert!(config
            .strategies
            .contains(&OptimizationStrategy::EnergyEfficient));

        // Should not contain basic strategies for focused testing
        assert!(!config.strategies.contains(&OptimizationStrategy::Scalar));
    }

    #[test]
    fn test_enhanced_cache_line_detection() {
        let optimizer = AdaptiveOptimizer::new();
        let cache_line_size = optimizer.cache_line_size;

        // Cache line size should be reasonable (typically 64 or 128 bytes)
        assert!(cache_line_size == 64 || cache_line_size == 128);

        // Should be power of 2
        assert_eq!(cache_line_size & (cache_line_size - 1), 0);
    }

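    // Illustrative sketch of using the detected cache line size for blocking or padding
    // decisions: how many f64 elements fit in one line. The element type and the cast
    // (which keeps the sketch independent of the field's exact integer type) are choices
    // made for the example only.
    #[allow(dead_code)]
    fn f64_elements_per_cache_line(optimizer: &AdaptiveOptimizer) -> usize {
        optimizer.cache_line_size as usize / std::mem::size_of::<f64>()
    }
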
    #[test]
    fn test_strategy_weight_updates() {
        let mut selector = StrategySelector::default();
        let initial_weight = *selector
            .strategy_weights
            .get(&OptimizationStrategy::ModernArchOptimized)
            .expect("default weights should include ModernArchOptimized");

        // Update with good performance score
        selector.update_weights(OptimizationStrategy::ModernArchOptimized, 0.9);
        let updated_weight = *selector
            .strategy_weights
            .get(&OptimizationStrategy::ModernArchOptimized)
            .expect("weight should still exist after the update");

        // Weight should have been adjusted based on learning
        assert_ne!(initial_weight, updated_weight);
    }
}
*/