scirs2_io/pipeline/advanced_optimization/config.rs

//! Configuration types and structures for advanced pipeline optimization
//!
//! This module contains all the configuration enums, structs, and result types
//! used throughout the advanced optimization system.

use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::time::Duration;
/// Optimized pipeline configuration with advanced settings
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizedPipelineConfig {
    pub thread_count: usize,
    pub chunk_size: usize,
    pub memory_strategy: MemoryStrategy,
    pub cache_config: CacheConfiguration,
    pub simd_optimization: bool,
    pub gpu_acceleration: bool,
    pub prefetch_strategy: PrefetchStrategy,
    pub compression_level: u8,
    pub io_buffer_size: usize,
    pub batch_processing: BatchProcessingMode,
}
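
// Example (illustrative only): constructing a config by hand. The struct has
// no `Default` impl in this module, so every field is set explicitly; the
// values below are plausible starting points, not tuned defaults of this crate.
#[allow(dead_code)]
fn example_config() -> OptimizedPipelineConfig {
    OptimizedPipelineConfig {
        thread_count: num_cpus::get(),
        chunk_size: 64 * 1024,
        memory_strategy: MemoryStrategy::Standard,
        cache_config: CacheConfiguration::default(),
        simd_optimization: true,
        gpu_acceleration: false,
        prefetch_strategy: PrefetchStrategy::Sequential { distance: 64 },
        compression_level: 6,
        io_buffer_size: 64 * 1024,
        batch_processing: BatchProcessingMode::Disabled,
    }
}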

/// Memory allocation and management strategy
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MemoryStrategy {
    /// Standard heap allocation
    Standard,
    /// Memory pool allocation for reduced fragmentation
    MemoryPool { pool_size: usize },
    /// Memory mapping for large datasets
    MemoryMapped { chunk_size: usize },
    /// Streaming processing for very large datasets
    Streaming { buffer_size: usize },
    /// Hybrid approach combining multiple strategies
    Hybrid {
        small_data_threshold: usize,
        memory_pool_size: usize,
        streaming_threshold: usize,
    },
}
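
// Sketch (hypothetical thresholds): mapping an input size onto a
// `MemoryStrategy`, mirroring the size cutoffs that the `Hybrid` variant
// carries. The constants below are illustrative, not values from this crate.
#[allow(dead_code)]
fn strategy_for(data_size: usize) -> MemoryStrategy {
    const SMALL: usize = 16 * 1024 * 1024; // 16 MiB: plain allocation suffices
    const HUGE: usize = 4 * 1024 * 1024 * 1024; // 4 GiB: stream rather than map
    if data_size < SMALL {
        MemoryStrategy::Standard
    } else if data_size < HUGE {
        MemoryStrategy::MemoryMapped {
            chunk_size: 64 * 1024 * 1024,
        }
    } else {
        MemoryStrategy::Streaming {
            buffer_size: 8 * 1024 * 1024,
        }
    }
}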

/// Cache configuration for optimal data locality
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheConfiguration {
    pub l1_cache_size: usize,
    pub l2_cache_size: usize,
    pub prefetch_distance: usize,
    pub cache_line_size: usize,
    pub temporal_locality_weight: f64,
    pub spatial_locality_weight: f64,
    pub replacement_policy: CacheReplacementPolicy,
}

impl Default for CacheConfiguration {
    fn default() -> Self {
        Self {
            l1_cache_size: 32 * 1024,  // 32 KiB
            l2_cache_size: 256 * 1024, // 256 KiB
            prefetch_distance: 64,
            cache_line_size: 64,
            temporal_locality_weight: 0.7,
            spatial_locality_weight: 0.3,
            replacement_policy: CacheReplacementPolicy::LRU,
        }
    }
}
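
// Sketch (assumed semantics): the two locality weights default to 0.7/0.3 and
// sum to 1.0, so they can blend per-kind hit rates into a single cache score.
#[allow(dead_code)]
fn locality_score(cfg: &CacheConfiguration, temporal_hits: f64, spatial_hits: f64) -> f64 {
    cfg.temporal_locality_weight * temporal_hits + cfg.spatial_locality_weight * spatial_hits
}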

/// Cache replacement policy
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CacheReplacementPolicy {
    /// Least Recently Used
    LRU,
    /// Least Frequently Used
    LFU,
    /// Adaptive Replacement Cache
    ARC,
    /// CLOCK (second-chance approximation of LRU)
    CLOCK,
}

/// Data prefetch strategy for reducing memory latency
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PrefetchStrategy {
    /// No prefetching
    None,
    /// Prefetch a fixed distance ahead of the current access
    Sequential { distance: usize },
    /// Learn the prefetch distance from a window of recent accesses
    Adaptive { learning_window: usize },
    /// Match recurring access patterns of a given length
    Pattern { pattern_length: usize },
}
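
// Sketch (hypothetical): resolving a `PrefetchStrategy` into the next index to
// prefetch, given the current access position. `Adaptive` and `Pattern` need
// runtime access history, so this sketch only resolves the static variants.
#[allow(dead_code)]
fn next_prefetch_index(strategy: &PrefetchStrategy, current: usize) -> Option<usize> {
    match strategy {
        PrefetchStrategy::None => None,
        PrefetchStrategy::Sequential { distance } => Some(current + distance),
        // History-dependent variants would consult recorded access patterns here.
        PrefetchStrategy::Adaptive { .. } | PrefetchStrategy::Pattern { .. } => None,
    }
}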

/// Batch processing mode configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BatchProcessingMode {
    Disabled,
    Fixed {
        batch_size: usize,
    },
    Dynamic {
        min_batch_size: usize,
        max_batch_size: usize,
        latency_target: Duration,
    },
    Adaptive {
        target_throughput: f64,
        adjustment_factor: f64,
    },
}
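
// Sketch (assumed control rule): one step of `Dynamic` batch sizing. If the
// last batch beat the latency target the batch grows, otherwise it shrinks,
// clamped to the configured bounds; the doubling/halving factors are
// illustrative, not this crate's tuner.
#[allow(dead_code)]
fn adjust_batch_size(mode: &BatchProcessingMode, current: usize, last_latency: Duration) -> usize {
    match mode {
        BatchProcessingMode::Dynamic {
            min_batch_size,
            max_batch_size,
            latency_target,
        } => {
            let proposed = if last_latency <= *latency_target {
                current.saturating_mul(2) // Under target: try a larger batch
            } else {
                current / 2 // Over target: back off
            };
            proposed.clamp(*min_batch_size, *max_batch_size)
        }
        _ => current,
    }
}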

/// Individual execution record for performance tracking
#[derive(Debug, Clone)]
pub struct ExecutionRecord {
    pub timestamp: DateTime<Utc>,
    pub pipeline_id: String,
    pub config: OptimizedPipelineConfig,
    pub metrics: PipelinePerformanceMetrics,
}

/// Pipeline performance metrics for optimization feedback
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelinePerformanceMetrics {
    pub throughput: f64,
    pub latency: Duration,
    pub peak_memory_usage: usize,
    pub avg_memory_usage: usize,
    pub cpu_utilization: f64,
    pub gpu_utilization: f64,
    pub io_wait_time: Duration,
    pub cache_hit_ratio: f64,
    pub data_size: usize,
    pub error_rate: f64,
    pub power_consumption: f64,
}

impl Default for PipelinePerformanceMetrics {
    fn default() -> Self {
        Self {
            throughput: 0.0,
            latency: Duration::from_millis(100),
            peak_memory_usage: 0,
            avg_memory_usage: 0,
            cpu_utilization: 0.0,
            gpu_utilization: 0.0,
            io_wait_time: Duration::ZERO,
            cache_hit_ratio: 0.0,
            data_size: 0,
            error_rate: 0.0,
            power_consumption: 0.0,
        }
    }
}
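
// Sketch (hypothetical weighting): collapsing the metrics into one scalar for
// comparisons such as regression checks. The formula is illustrative; the
// `RegressionDetector` below tracks raw throughput instead.
#[allow(dead_code)]
fn performance_score(m: &PipelinePerformanceMetrics) -> f64 {
    let latency_s = m.latency.as_secs_f64().max(1e-9);
    // Higher throughput and cache hits help; latency and errors penalize.
    m.throughput * (1.0 - m.error_rate) * m.cache_hit_ratio.max(0.1) / latency_s
}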

/// System resource metrics
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    pub cpu_usage: f64,
    pub memory_usage: MemoryUsage,
    pub io_utilization: f64,
    pub network_bandwidth_usage: f64,
    pub cache_performance: CachePerformance,
    pub numa_topology: NumaTopology,
}

impl Default for SystemMetrics {
    fn default() -> Self {
        Self {
            cpu_usage: 0.5,
            memory_usage: MemoryUsage {
                total: 8 * 1024 * 1024 * 1024,     // 8 GiB
                available: 4 * 1024 * 1024 * 1024, // 4 GiB
                used: 4 * 1024 * 1024 * 1024,      // 4 GiB
                utilization: 0.5,
            },
            io_utilization: 0.3,
            network_bandwidth_usage: 0.2,
            cache_performance: CachePerformance {
                l1_hit_rate: 0.95,
                l2_hit_rate: 0.85,
                l3_hit_rate: 0.75,
                tlb_hit_rate: 0.99,
            },
            numa_topology: NumaTopology::default(),
        }
    }
}

/// System memory usage, in bytes
#[derive(Debug, Clone)]
pub struct MemoryUsage {
    pub total: u64,
    pub available: u64,
    pub used: u64,
    pub utilization: f64,
}

/// Cache hit rates per level, as fractions in [0, 1]
#[derive(Debug, Clone)]
pub struct CachePerformance {
    pub l1_hit_rate: f64,
    pub l2_hit_rate: f64,
    pub l3_hit_rate: f64,
    pub tlb_hit_rate: f64,
}

/// NUMA topology of the host system
#[derive(Debug, Clone)]
pub struct NumaTopology {
    pub nodes: Vec<NumaNode>,
    pub preferred_node: usize,
}

impl Default for NumaTopology {
    fn default() -> Self {
        Self {
            nodes: vec![NumaNode {
                id: 0,
                memory_size: 8 * 1024 * 1024 * 1024, // 8 GiB
                cpu_cores: vec![0, 1, 2, 3],
            }],
            preferred_node: 0,
        }
    }
}

/// A single NUMA node with its local memory and CPU cores
#[derive(Debug, Clone)]
pub struct NumaNode {
    pub id: usize,
    pub memory_size: u64,
    pub cpu_cores: Vec<usize>,
}
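
// Sketch (assumed policy): sizing a worker pool to the cores of the preferred
// NUMA node so the threads share that node's local memory. Falls back to one
// thread if the topology is empty.
#[allow(dead_code)]
fn threads_on_preferred_node(topo: &NumaTopology) -> usize {
    topo.nodes
        .get(topo.preferred_node)
        .map(|node| node.cpu_cores.len())
        .unwrap_or(1)
        .max(1)
}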

/// Auto-tuning parameters for optimization
#[derive(Debug, Clone)]
pub struct AutoTuningParameters {
    pub thread_count: usize,
    pub chunk_size: usize,
    pub simd_enabled: bool,
    pub gpu_enabled: bool,
    pub prefetch_strategy: PrefetchStrategy,
    pub compression_level: u8,
    pub io_buffer_size: usize,
    pub batch_processing: BatchProcessingMode,
}

impl Default for AutoTuningParameters {
    fn default() -> Self {
        Self {
            thread_count: num_cpus::get(),
            chunk_size: 1024,
            simd_enabled: true,
            gpu_enabled: false,
            prefetch_strategy: PrefetchStrategy::Sequential { distance: 64 },
            compression_level: 6,
            io_buffer_size: 64 * 1024, // 64 KiB
            batch_processing: BatchProcessingMode::Disabled,
        }
    }
}
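
// Sketch (assumed mapping): projecting tuned parameters onto a full pipeline
// config. Fields the tuner does not cover (memory strategy, cache layout)
// fall back to defaults here; the real optimizer may choose them separately.
#[allow(dead_code)]
impl AutoTuningParameters {
    fn to_pipeline_config(&self) -> OptimizedPipelineConfig {
        OptimizedPipelineConfig {
            thread_count: self.thread_count,
            chunk_size: self.chunk_size,
            memory_strategy: MemoryStrategy::Standard,
            cache_config: CacheConfiguration::default(),
            simd_optimization: self.simd_enabled,
            gpu_acceleration: self.gpu_enabled,
            prefetch_strategy: self.prefetch_strategy.clone(),
            compression_level: self.compression_level,
            io_buffer_size: self.io_buffer_size,
            batch_processing: self.batch_processing.clone(),
        }
    }
}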

/// Performance regression detector based on a moving average compared against
/// an exponentially smoothed baseline
#[derive(Debug)]
pub struct RegressionDetector {
    recent_metrics: VecDeque<f64>,
    baseline_performance: f64,
    detection_window: usize,
    regression_threshold: f64,
}

impl Default for RegressionDetector {
    fn default() -> Self {
        Self::new()
    }
}

impl RegressionDetector {
    pub fn new() -> Self {
        Self {
            recent_metrics: VecDeque::with_capacity(50),
            baseline_performance: 0.0,
            detection_window: 20,
            regression_threshold: 0.1, // 10% performance drop
        }
    }

    pub fn check_regression(&mut self, metrics: &PipelinePerformanceMetrics) {
        let performance_score = metrics.throughput;

        self.recent_metrics.push_back(performance_score);
        if self.recent_metrics.len() > self.detection_window {
            self.recent_metrics.pop_front();
        }

        // Update the baseline once the window is full
        if self.recent_metrics.len() >= self.detection_window {
            let avg_recent: f64 =
                self.recent_metrics.iter().sum::<f64>() / self.recent_metrics.len() as f64;

            if self.baseline_performance == 0.0 {
                self.baseline_performance = avg_recent;
            } else {
                // Flag a regression when the moving average falls more than
                // `regression_threshold` below the baseline
                let relative_change =
                    (avg_recent - self.baseline_performance) / self.baseline_performance;
                if relative_change < -self.regression_threshold {
                    eprintln!(
                        "Performance regression detected: {:.2}% decrease from baseline",
                        -relative_change * 100.0
                    );
                }

                // Update the baseline with an exponential moving average
                self.baseline_performance = 0.9 * self.baseline_performance + 0.1 * avg_recent;
            }
        }
    }
}
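
// Example: twenty stable samples fill the detection window and set the
// baseline; a later sustained drop of more than 10% would then be reported
// via the warning above. The test reads private fields, which is fine from a
// child module of this file.
#[cfg(test)]
mod regression_detector_example {
    use super::*;

    #[test]
    fn baseline_forms_after_window() {
        let mut detector = RegressionDetector::new();
        let metrics = PipelinePerformanceMetrics {
            throughput: 1000.0,
            ..Default::default()
        };
        for _ in 0..20 {
            detector.check_regression(&metrics);
        }
        // The 20th sample completes the window and sets the baseline.
        assert!((detector.baseline_performance - 1000.0).abs() < f64::EPSILON);
    }
}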

/// Quantum-inspired optimization configuration
#[derive(Debug, Clone)]
pub struct QuantumOptimizationConfig {
    pub num_qubits: usize,
    pub annealing_steps: usize,
    pub temperature_schedule: Vec<f64>,
    pub tunneling_probability: f64,
}

impl Default for QuantumOptimizationConfig {
    fn default() -> Self {
        Self {
            num_qubits: 10,
            annealing_steps: 1000,
            // Exponential cooling from 10.0 down to ~0.07 over 1000 steps
            temperature_schedule: (0..1000)
                .map(|i| 10.0 * (-5.0 * i as f64 / 1000.0).exp())
                .collect(),
            tunneling_probability: 0.1,
        }
    }
}
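
// Sketch (standard simulated-annealing rule, not this crate's solver): a
// worse candidate is accepted with probability exp(-delta / temperature),
// with the configured tunneling probability as a floor so the search can
// still escape local minima late in the schedule.
#[allow(dead_code)]
fn accept_candidate(
    cfg: &QuantumOptimizationConfig,
    step: usize,
    delta_energy: f64,
    rand01: f64, // Caller-supplied uniform sample in [0, 1)
) -> bool {
    if delta_energy <= 0.0 {
        return true; // Always accept improvements.
    }
    let temperature = cfg.temperature_schedule.get(step).copied().unwrap_or(1e-3);
    let p = (-delta_energy / temperature).exp();
    rand01 < p.max(cfg.tunneling_probability)
}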

/// Neuromorphic optimization configuration
#[derive(Debug, Clone)]
pub struct NeuromorphicConfig {
    pub num_neurons: usize,
    pub num_outputs: usize,
    pub memory_capacity: usize,
    pub learning_rate: f64,
    pub adaptation_rate: f64,
}

impl Default for NeuromorphicConfig {
    fn default() -> Self {
        Self {
            num_neurons: 1000,
            num_outputs: 100,
            memory_capacity: 10000,
            learning_rate: 0.01,
            adaptation_rate: 0.001,
        }
    }
}

/// Consciousness-inspired optimization configuration
#[derive(Debug, Clone)]
pub struct ConsciousnessConfig {
    pub awareness_level: f64,
    pub attention_focus: f64,
    pub metacognitive_strength: f64,
    pub intentionality_weight: f64,
    pub max_cycles: usize,
    pub convergence_threshold: f64,
}

impl Default for ConsciousnessConfig {
    fn default() -> Self {
        Self {
            awareness_level: 0.5,
            attention_focus: 0.7,
            metacognitive_strength: 0.6,
            intentionality_weight: 0.8,
            max_cycles: 100,
            convergence_threshold: 1e-6,
        }
    }
}
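
// Sketch (generic fixed-point loop, assumed semantics): how `max_cycles` and
// `convergence_threshold` would typically bound an optimization loop. The
// `step` closure is a stand-in for one consciousness-inspired update.
#[allow(dead_code)]
fn run_until_converged<F: FnMut(f64) -> f64>(cfg: &ConsciousnessConfig, mut step: F) -> f64 {
    let mut value = 0.0;
    for _ in 0..cfg.max_cycles {
        let next = step(value);
        if (next - value).abs() < cfg.convergence_threshold {
            return next; // Converged within tolerance
        }
        value = next;
    }
    value // Cycle budget exhausted
}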