use scirs2_core::Rng;

#[cfg(feature = "distributed")]
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

use crate::error::{Result, TransformError};
use crate::utils::ProcessingStrategy;

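/// Hardware resources detected at runtime and used to guide optimization decisions.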
#[derive(Debug, Clone)]
#[cfg_attr(feature = "distributed", derive(Serialize, Deserialize))]
pub struct SystemResources {
    /// Available system memory in MB.
    pub memory_mb: usize,
    /// Number of logical CPU cores.
    pub cpu_cores: usize,
    /// Whether GPU acceleration is available (compiled with the `gpu` feature).
    pub has_gpu: bool,
    /// Whether SIMD kernels are available (compiled with the `simd` feature).
    pub has_simd: bool,
    /// L3 cache size in KB.
    pub l3_cache_kb: usize,
}

impl SystemResources {
    pub fn detect() -> Self {
        SystemResources {
            memory_mb: Self::detect_memory_mb(),
            cpu_cores: num_cpus::get(),
            has_gpu: Self::detect_gpu(),
            has_simd: Self::detect_simd(),
            l3_cache_kb: Self::detect_l3_cache_kb(),
        }
    }

    fn detect_memory_mb() -> usize {
        #[cfg(target_os = "linux")]
        {
            if let Ok(meminfo) = std::fs::read_to_string("/proc/meminfo") {
                for line in meminfo.lines() {
                    if line.starts_with("MemAvailable:") {
                        if let Some(kb_str) = line.split_whitespace().nth(1) {
                            if let Ok(kb) = kb_str.parse::<usize>() {
                                return kb / 1024;
                            }
                        }
                    }
                }
            }
        }

        // Fallback when detection is unavailable: assume 8 GB.
        8 * 1024
    }

    fn detect_gpu() -> bool {
        #[cfg(feature = "gpu")]
        {
            true
        }
        #[cfg(not(feature = "gpu"))]
        {
            false
        }
    }

    fn detect_simd() -> bool {
        #[cfg(feature = "simd")]
        {
            true
        }
        #[cfg(not(feature = "simd"))]
        {
            false
        }
    }

    fn detect_l3_cache_kb() -> usize {
        // Default assumption: 8 MB of L3 cache.
        8 * 1024
    }

    pub fn safe_memory_mb(&self) -> usize {
        // Leave 20% headroom over the detected available memory.
        (self.memory_mb as f64 * 0.8) as usize
    }

    pub fn optimal_chunk_size(&self, elementsize: usize) -> usize {
        // Target roughly half of the L3 cache per chunk, with a sensible floor.
        let target_bytes = (self.l3_cache_kb * 1024) / 2;
        (target_bytes / elementsize).max(1000)
    }
}

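/// Summary statistics of an input matrix used to choose an appropriate configuration.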
#[derive(Debug, Clone)]
#[cfg_attr(feature = "distributed", derive(Serialize, Deserialize))]
pub struct DataCharacteristics {
    /// Number of rows (samples).
    pub n_samples: usize,
    /// Number of columns (features).
    pub nfeatures: usize,
    /// Fraction of entries that are exactly zero.
    pub sparsity: f64,
    /// Range (max - min) of the finite values; 0.0 if none are finite.
    pub data_range: f64,
    /// Estimated fraction of outliers, from an IQR rule applied to a sample of values.
    pub outlier_ratio: f64,
    /// Whether any non-finite (NaN or infinite) values were found.
    pub has_missing: bool,
    /// Estimated in-memory size of the data in MB.
    pub memory_footprint_mb: f64,
    /// Size of one element in bytes.
    pub elementsize: usize,
}

impl DataCharacteristics {
    pub fn analyze(data: &scirs2_core::ndarray::ArrayView2<f64>) -> Result<Self> {
        let (n_samples, nfeatures) = data.dim();

        if n_samples == 0 || nfeatures == 0 {
            return Err(TransformError::InvalidInput("Empty data".to_string()));
        }

        let zeros = data.iter().filter(|&&x| x == 0.0).count();
        let sparsity = zeros as f64 / data.len() as f64;

        let mut min_val = f64::INFINITY;
        let mut max_val = f64::NEG_INFINITY;
        let mut finite_count = 0;
        let mut missing_count = 0;

        for &val in data.iter() {
            if val.is_finite() {
                min_val = min_val.min(val);
                max_val = max_val.max(val);
                finite_count += 1;
            } else {
                missing_count += 1;
            }
        }

        let data_range = if finite_count > 0 {
            max_val - min_val
        } else {
            0.0
        };
        let has_missing = missing_count > 0;

        // Estimate the outlier ratio with the IQR rule on a bounded sample of values.
        let outlier_ratio = if n_samples > 10 {
            let mut sample_values: Vec<f64> = data
                .iter()
                .filter(|&&x| x.is_finite())
                .take(1000)
                .copied()
                .collect();

            if sample_values.len() >= 4 {
                sample_values
                    .sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
                let n = sample_values.len();
                let q1 = sample_values[n / 4];
                let q3 = sample_values[3 * n / 4];
                let iqr = q3 - q1;

                if iqr > 0.0 {
                    let lower_bound = q1 - 1.5 * iqr;
                    let upper_bound = q3 + 1.5 * iqr;
                    let outliers = sample_values
                        .iter()
                        .filter(|&&x| x < lower_bound || x > upper_bound)
                        .count();
                    outliers as f64 / sample_values.len() as f64
                } else {
                    0.0
                }
            } else {
                0.0
            }
        } else {
            0.0
        };

        let memory_footprint_mb =
            (n_samples * nfeatures * std::mem::size_of::<f64>()) as f64 / (1024.0 * 1024.0);

        Ok(DataCharacteristics {
            n_samples,
            nfeatures,
            sparsity,
            data_range,
            outlier_ratio,
            has_missing,
            memory_footprint_mb,
            elementsize: std::mem::size_of::<f64>(),
        })
    }

    pub fn is_large_dataset(&self) -> bool {
        self.n_samples > 100_000 || self.nfeatures > 10_000 || self.memory_footprint_mb > 1000.0
    }

    pub fn is_wide_dataset(&self) -> bool {
        self.nfeatures > self.n_samples
    }

    pub fn is_sparse(&self) -> bool {
        self.sparsity > 0.5
    }

    pub fn has_outliers(&self) -> bool {
        self.outlier_ratio > 0.05
    }
}

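/// Concrete tuning decisions for a single transformation run.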
#[derive(Debug, Clone)]
#[cfg_attr(feature = "distributed", derive(Serialize, Deserialize))]
pub struct OptimizationConfig {
    /// Selected processing strategy.
    pub processing_strategy: ProcessingStrategy,
    /// Memory budget in MB.
    pub memory_limit_mb: usize,
    /// Whether to use robust (outlier-resistant) statistics.
    pub use_robust: bool,
    /// Whether to parallelize across threads.
    pub use_parallel: bool,
    /// Whether to use SIMD kernels.
    pub use_simd: bool,
    /// Whether to offload work to the GPU.
    pub use_gpu: bool,
    /// Chunk size (in elements) for chunked processing.
    pub chunk_size: usize,
    /// Number of worker threads.
    pub num_threads: usize,
    /// Additional algorithm-specific parameters.
    pub algorithm_params: HashMap<String, f64>,
}

impl OptimizationConfig {
    pub fn for_standardization(datachars: &DataCharacteristics, system: &SystemResources) -> Self {
        let use_robust = datachars.has_outliers();
        let use_parallel = datachars.n_samples > 10_000 && system.cpu_cores > 1;
        let use_simd = system.has_simd && datachars.nfeatures > 100;
        let use_gpu = system.has_gpu && datachars.memory_footprint_mb > 100.0;

        // Fall back to out-of-core processing when the data exceeds the safe memory budget.
        let processing_strategy = if datachars.memory_footprint_mb > system.safe_memory_mb() as f64
        {
            ProcessingStrategy::OutOfCore {
                chunk_size: system.optimal_chunk_size(datachars.elementsize),
            }
        } else if use_parallel {
            ProcessingStrategy::Parallel
        } else if use_simd {
            ProcessingStrategy::Simd
        } else {
            ProcessingStrategy::Standard
        };

        OptimizationConfig {
            processing_strategy,
            memory_limit_mb: system.safe_memory_mb(),
            use_robust,
            use_parallel,
            use_simd,
            use_gpu,
            chunk_size: system.optimal_chunk_size(datachars.elementsize),
            num_threads: if use_parallel { system.cpu_cores } else { 1 },
            algorithm_params: HashMap::new(),
        }
    }

    pub fn for_pca(
        datachars: &DataCharacteristics,
        system: &SystemResources,
        n_components: usize,
    ) -> Self {
        let use_randomized = datachars.is_large_dataset();
        let use_parallel = datachars.n_samples > 1_000 && system.cpu_cores > 1;
        let use_gpu = system.has_gpu && datachars.memory_footprint_mb > 500.0;

        // Wide datasets (more features than samples) need a larger working-memory multiplier.
        let memory_multiplier = if datachars.nfeatures > datachars.n_samples {
            3.0
        } else {
            2.0
        };
        let estimated_memory = datachars.memory_footprint_mb * memory_multiplier;

        let processing_strategy = if estimated_memory > system.safe_memory_mb() as f64 {
            ProcessingStrategy::OutOfCore {
                chunk_size: (system.safe_memory_mb() * 1024 * 1024)
                    / (datachars.nfeatures * datachars.elementsize),
            }
        } else if use_parallel {
            ProcessingStrategy::Parallel
        } else {
            ProcessingStrategy::Standard
        };

        let mut algorithm_params = HashMap::new();
        algorithm_params.insert(
            "use_randomized".to_string(),
            if use_randomized { 1.0 } else { 0.0 },
        );
        algorithm_params.insert("n_components".to_string(), n_components as f64);

        OptimizationConfig {
            processing_strategy,
            memory_limit_mb: system.safe_memory_mb(),
            use_robust: false,
            use_parallel,
            use_simd: system.has_simd,
            use_gpu,
            chunk_size: system.optimal_chunk_size(datachars.elementsize),
            num_threads: if use_parallel { system.cpu_cores } else { 1 },
            algorithm_params,
        }
    }

    pub fn for_polynomial_features(
        datachars: &DataCharacteristics,
        system: &SystemResources,
        degree: usize,
    ) -> Result<Self> {
        let estimated_output_features =
            Self::estimate_polynomial_features(datachars.nfeatures, degree)?;
        let estimated_memory = datachars.n_samples as f64
            * estimated_output_features as f64
            * datachars.elementsize as f64
            / (1024.0 * 1024.0);

        if estimated_memory > system.memory_mb as f64 * 0.9 {
            return Err(TransformError::MemoryError(format!(
                "Polynomial features would require {estimated_memory:.1} MB, but only {} MB available",
                system.memory_mb
            )));
        }

        let use_parallel = datachars.n_samples > 1_000 && system.cpu_cores > 1;
        let use_simd = system.has_simd && estimated_output_features > 100;

        let processing_strategy = if estimated_memory > system.safe_memory_mb() as f64 {
            ProcessingStrategy::OutOfCore {
                chunk_size: (system.safe_memory_mb() * 1024 * 1024)
                    / (estimated_output_features * datachars.elementsize),
            }
        } else if use_parallel {
            ProcessingStrategy::Parallel
        } else if use_simd {
            ProcessingStrategy::Simd
        } else {
            ProcessingStrategy::Standard
        };

        let mut algorithm_params = HashMap::new();
        algorithm_params.insert("degree".to_string(), degree as f64);
        algorithm_params.insert(
            "estimated_output_features".to_string(),
            estimated_output_features as f64,
        );

        Ok(OptimizationConfig {
            processing_strategy,
            memory_limit_mb: system.safe_memory_mb(),
            use_robust: false,
            use_parallel,
            use_simd,
            use_gpu: false,
            chunk_size: system.optimal_chunk_size(datachars.elementsize),
            num_threads: if use_parallel { system.cpu_cores } else { 1 },
            algorithm_params,
        })
    }

    fn estimate_polynomial_features(nfeatures: usize, degree: usize) -> Result<usize> {
        if degree == 0 {
            return Err(TransformError::InvalidInput(
                "Degree must be at least 1".to_string(),
            ));
        }

        // Count combinations with repetition, C(nfeatures + d - 1, d) terms per degree d,
        // plus the constant term.
        let mut total_features = 1;
        for d in 1..=degree {
            let mut coeff = 1;
            for i in 0..d {
                coeff = coeff * (nfeatures + d - 1 - i) / (i + 1);

                if coeff > 1_000_000 {
                    return Err(TransformError::ComputationError(
                        "Too many polynomial features would be generated".to_string(),
                    ));
                }
            }
            total_features += coeff;
        }

        Ok(total_features)
    }

    pub fn estimated_execution_time(&self, datachars: &DataCharacteristics) -> std::time::Duration {
        use std::time::Duration;

        let base_ops = datachars.n_samples as u64 * datachars.nfeatures as u64;

        // Rough throughput assumptions (operations per second) per strategy.
        let ops_per_second = match self.processing_strategy {
            ProcessingStrategy::Parallel => 1_000_000_000 * self.num_threads as u64,
            ProcessingStrategy::Simd => 2_000_000_000,
            ProcessingStrategy::OutOfCore { .. } => 100_000_000,
            ProcessingStrategy::Standard => 500_000_000,
        };

        let time_ns = (base_ops * 1_000_000_000) / ops_per_second;
        Duration::from_nanos(time_ns.max(1000))
    }
}

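/// Automatic performance tuner: detects system resources once and produces an
/// [`OptimizationConfig`] for a requested transformation, optionally recording
/// past runs for later inspection.
///
/// Illustrative usage sketch (marked `ignore`: it assumes a 2-D `data` array in
/// scope and a caller that returns `Result`):
///
/// ```ignore
/// let tuner = AutoTuner::new();
/// let chars = DataCharacteristics::analyze(&data.view())?;
/// let config = tuner.optimize_for_transformation("standardization", &chars, &HashMap::new())?;
/// ```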
pub struct AutoTuner {
    system: SystemResources,
    performance_history: HashMap<String, Vec<PerformanceRecord>>,
}

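/// A single recorded run of a transformation (currently retained for future use;
/// all fields are `#[allow(dead_code)]`).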
#[derive(Debug, Clone)]
struct PerformanceRecord {
    #[allow(dead_code)]
    config_hash: String,
    #[allow(dead_code)]
    execution_time: std::time::Duration,
    #[allow(dead_code)]
    memory_used_mb: f64,
    #[allow(dead_code)]
    success: bool,
    #[allow(dead_code)]
    data_characteristics: DataCharacteristics,
}

impl Default for AutoTuner {
    fn default() -> Self {
        Self::new()
    }
}

impl AutoTuner {
    pub fn new() -> Self {
        AutoTuner {
            system: SystemResources::detect(),
            performance_history: HashMap::new(),
        }
    }

    pub fn optimize_for_transformation(
        &self,
        transformation: &str,
        datachars: &DataCharacteristics,
        params: &HashMap<String, f64>,
    ) -> Result<OptimizationConfig> {
        match transformation {
            "standardization" => Ok(OptimizationConfig::for_standardization(
                datachars,
                &self.system,
            )),
            "pca" => {
                let n_components = params.get("n_components").copied().unwrap_or(5.0);
                Ok(OptimizationConfig::for_pca(
                    datachars,
                    &self.system,
                    n_components as usize,
                ))
            }
            "polynomial" => {
                let degree = params.get("degree").copied().unwrap_or(2.0);
                OptimizationConfig::for_polynomial_features(
                    datachars,
                    &self.system,
                    degree as usize,
                )
            }
            _ => {
                // Generic fallback for transformations without a dedicated profile.
                Ok(OptimizationConfig {
                    processing_strategy: if datachars.is_large_dataset() {
                        ProcessingStrategy::Parallel
                    } else {
                        ProcessingStrategy::Standard
                    },
                    memory_limit_mb: self.system.safe_memory_mb(),
                    use_robust: datachars.has_outliers(),
                    use_parallel: datachars.n_samples > 10_000,
                    use_simd: self.system.has_simd,
                    use_gpu: self.system.has_gpu && datachars.memory_footprint_mb > 100.0,
                    chunk_size: self.system.optimal_chunk_size(datachars.elementsize),
                    num_threads: self.system.cpu_cores,
                    algorithm_params: HashMap::new(),
                })
            }
        }
    }

    pub fn record_performance(
        &mut self,
        transformation: &str,
        config: &OptimizationConfig,
        execution_time: std::time::Duration,
        memory_used_mb: f64,
        success: bool,
        datachars: DataCharacteristics,
    ) {
        let config_hash = format!("{config:?}");
        let record = PerformanceRecord {
            config_hash,
            execution_time,
            memory_used_mb,
            success,
            data_characteristics: datachars,
        };

        self.performance_history
            .entry(transformation.to_string())
            .or_default()
            .push(record);

        // Keep only the most recent 100 records per transformation.
        let records = self.performance_history.get_mut(transformation).unwrap();
        if records.len() > 100 {
            records.remove(0);
        }
    }

    pub fn system_resources(&self) -> &SystemResources {
        &self.system
    }

    pub fn generate_report(&self, datachars: &DataCharacteristics) -> OptimizationReport {
        let recommendations = vec![
            self.get_recommendation_for_transformation("standardization", datachars),
            self.get_recommendation_for_transformation("pca", datachars),
            self.get_recommendation_for_transformation("polynomial", datachars),
        ];

        OptimizationReport {
            system_info: self.system.clone(),
            data_info: datachars.clone(),
            recommendations,
            estimated_total_memory_mb: datachars.memory_footprint_mb * 2.0,
        }
    }

    fn get_recommendation_for_transformation(
        &self,
        transformation: &str,
        datachars: &DataCharacteristics,
    ) -> TransformationRecommendation {
        let config = self
            .optimize_for_transformation(transformation, datachars, &HashMap::new())
            .unwrap_or_else(|_| OptimizationConfig {
                processing_strategy: ProcessingStrategy::Standard,
                memory_limit_mb: self.system.safe_memory_mb(),
                use_robust: false,
                use_parallel: false,
                use_simd: false,
                use_gpu: false,
                chunk_size: 1000,
                num_threads: 1,
                algorithm_params: HashMap::new(),
            });

        let estimated_time = config.estimated_execution_time(datachars);

        TransformationRecommendation {
            transformation: transformation.to_string(),
            config,
            estimated_time,
            confidence: 0.8,
            reason: format!(
                "Optimized for {} samples, {} features",
                datachars.n_samples, datachars.nfeatures
            ),
        }
    }
}

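/// Report combining the detected system resources, the analyzed data
/// characteristics, and per-transformation recommendations.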
#[derive(Debug, Clone)]
pub struct OptimizationReport {
    pub system_info: SystemResources,
    pub data_info: DataCharacteristics,
    pub recommendations: Vec<TransformationRecommendation>,
    pub estimated_total_memory_mb: f64,
}

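/// Recommended configuration for a single transformation, with an estimated
/// runtime, a confidence score, and a short justification.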
#[derive(Debug, Clone)]
pub struct TransformationRecommendation {
    pub transformation: String,
    pub config: OptimizationConfig,
    pub estimated_time: std::time::Duration,
    pub confidence: f64,
    pub reason: String,
}

impl OptimizationReport {
    pub fn print_report(&self) {
        println!("=== Optimization Report ===");
        println!("System Resources:");
        println!("  Memory: {} MB", self.system_info.memory_mb);
        println!("  CPU Cores: {}", self.system_info.cpu_cores);
        println!("  GPU Available: {}", self.system_info.has_gpu);
        println!("  SIMD Available: {}", self.system_info.has_simd);
        println!();

        println!("Data Characteristics:");
        println!("  Samples: {}", self.data_info.n_samples);
        println!("  Features: {}", self.data_info.nfeatures);
        println!(
            "  Memory Footprint: {:.1} MB",
            self.data_info.memory_footprint_mb
        );
        println!("  Sparsity: {:.1}%", self.data_info.sparsity * 100.0);
        println!("  Has Outliers: {}", self.data_info.has_outliers());
        println!();

        println!("Recommendations:");
        for rec in &self.recommendations {
            println!("  {}:", rec.transformation);
            println!("    Strategy: {:?}", rec.config.processing_strategy);
            println!(
                "    Estimated Time: {:.2}s",
                rec.estimated_time.as_secs_f64()
            );
            println!("    Use Parallel: {}", rec.config.use_parallel);
            println!("    Use SIMD: {}", rec.config.use_simd);
            println!("    Use GPU: {}", rec.config.use_gpu);
            println!("    Reason: {}", rec.reason);
            println!();
        }
    }
}

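/// Experimental optimizer that layers live system monitoring, a configuration
/// predictor, and an adaptive parameter tuner on top of the static heuristics above.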
pub struct AdvancedConfigOptimizer {
    performance_history: HashMap<String, Vec<PerformanceMetric>>,
    system_monitor: SystemMonitor,
    config_predictor: ConfigurationPredictor,
    adaptive_tuner: AdaptiveParameterTuner,
}

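/// Measured outcome of a single run: timing, memory use, cache behavior, CPU
/// utilization, and an overall quality score.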
#[derive(Debug, Clone)]
pub struct PerformanceMetric {
    #[allow(dead_code)]
    config_hash: u64,
    execution_time_us: u64,
    memory_usage_bytes: usize,
    cache_hit_rate: f64,
    cpu_utilization: f64,
    quality_score: f64,
    #[allow(dead_code)]
    timestamp: std::time::Instant,
}

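/// Snapshot of current system load (CPU, memory, cache, I/O wait, temperature)
/// used to validate and adjust a proposed configuration.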
pub struct SystemMonitor {
    cpu_load: f64,
    available_memory_bytes: usize,
    cache_miss_rate: f64,
    io_wait_percent: f64,
    cpu_temperature_celsius: f64,
}

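/// Lightweight predictor that derives an initial configuration guess from a
/// textual state representation.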
pub struct ConfigurationPredictor {
    #[allow(dead_code)]
    feature_weights: HashMap<String, f64>,
    #[allow(dead_code)]
    learning_rate: f64,
    confidence_threshold: f64,
    sample_count: usize,
}

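/// Epsilon-greedy tuner that keeps a per-(state, action) Q-table and either
/// perturbs (explores) or reuses (exploits) configuration parameters.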
pub struct AdaptiveParameterTuner {
    q_table: HashMap<(String, String), f64>,
    exploration_rate: f64,
    learning_rate: f64,
    #[allow(dead_code)]
    discount_factor: f64,
    current_state: String,
}

impl Default for AdvancedConfigOptimizer {
    fn default() -> Self {
        Self::new()
    }
}

impl AdvancedConfigOptimizer {
    pub fn new() -> Self {
        AdvancedConfigOptimizer {
            performance_history: HashMap::new(),
            system_monitor: SystemMonitor::new(),
            config_predictor: ConfigurationPredictor::new(),
            adaptive_tuner: AdaptiveParameterTuner::new(),
        }
    }

    pub fn advanced_optimize_config(
        &mut self,
        datachars: &DataCharacteristics,
        transformation_type: &str,
        user_params: &HashMap<String, f64>,
    ) -> Result<OptimizationConfig> {
        // Refresh system metrics, predict a starting configuration, tune it, then
        // validate it against current system conditions.
        self.system_monitor.update_metrics()?;

        let current_state = self.generate_state_representation(datachars, &self.system_monitor);

        let predicted_config = self.config_predictor.predict_optimal_config(
            &current_state,
            transformation_type,
            user_params,
        )?;

        let tuned_config = self.adaptive_tuner.tune_parameters(
            predicted_config,
            &current_state,
            transformation_type,
        )?;

        let validated_config =
            self.validate_and_adjust_config(tuned_config, &self.system_monitor)?;

        Ok(validated_config)
    }

    pub fn learn_from_performance(
        &mut self,
        config: &OptimizationConfig,
        performance: PerformanceMetric,
        transformation_type: &str,
    ) -> Result<()> {
        let config_hash = self.compute_config_hash(config);

        self.performance_history
            .entry(transformation_type.to_string())
            .or_default()
            .push(performance.clone());

        self.config_predictor.update_from_feedback(&performance)?;

        let reward = self.compute_reward_signal(&performance);
        self.adaptive_tuner.update_q_values(config_hash, reward)?;

        // Periodically retrain the predictor and decay exploration.
        if self.config_predictor.sample_count.is_multiple_of(100) {
            self.retrain_models()?;
        }

        Ok(())
    }

    fn generate_state_representation(
        &self,
        datachars: &DataCharacteristics,
        system_monitor: &SystemMonitor,
    ) -> String {
        format!(
            "samples:{}_features:{}_memory:{:.2}_cpu:{:.2}_sparsity:{:.3}",
            datachars.n_samples,
            datachars.nfeatures,
            datachars.memory_footprint_mb,
            system_monitor.cpu_load,
            datachars.sparsity,
        )
    }

    fn compute_config_hash(&self, config: &OptimizationConfig) -> u64 {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let mut hasher = DefaultHasher::new();
        config.memory_limit_mb.hash(&mut hasher);
        config.use_parallel.hash(&mut hasher);
        config.use_simd.hash(&mut hasher);
        config.use_gpu.hash(&mut hasher);
        config.chunk_size.hash(&mut hasher);
        config.num_threads.hash(&mut hasher);

        hasher.finish()
    }

    fn compute_reward_signal(&self, performance: &PerformanceMetric) -> f64 {
        // Map each component to roughly [0, 1], then combine with fixed weights.
        let time_score = 1.0 / (1.0 + performance.execution_time_us as f64 / 1_000_000.0);
        let memory_score = 1.0 / (1.0 + performance.memory_usage_bytes as f64 / 1_000_000_000.0);
        let cache_score = performance.cache_hit_rate;
        let cpu_score = 1.0 - performance.cpu_utilization.min(1.0);
        let quality_score = performance.quality_score;

        0.3 * time_score
            + 0.2 * memory_score
            + 0.2 * cache_score
            + 0.1 * cpu_score
            + 0.2 * quality_score
    }

    fn validate_and_adjust_config(
        &self,
        mut config: OptimizationConfig,
        system_monitor: &SystemMonitor,
    ) -> Result<OptimizationConfig> {
        // Cap the memory limit at 80% of currently available memory.
        let available_mb = system_monitor.available_memory_bytes / (1024 * 1024);
        config.memory_limit_mb = config.memory_limit_mb.min(available_mb * 80 / 100);

        // Back off on threads when the CPU is already busy.
        if system_monitor.cpu_load > 0.8 {
            config.num_threads = (config.num_threads / 2).max(1);
        }

        // Avoid the GPU when the CPU is running hot.
        if system_monitor.cpu_temperature_celsius > 85.0 {
            config.use_gpu = false;
        }

        // Shrink chunks when cache misses are high.
        if system_monitor.cache_miss_rate > 0.1 {
            config.chunk_size = (config.chunk_size as f64 * 0.8) as usize;
        }

        Ok(config)
    }

    fn retrain_models(&mut self) -> Result<()> {
        self.config_predictor
            .retrain_with_history(&self.performance_history)?;

        self.adaptive_tuner.decay_exploration_rate();

        Ok(())
    }
}

impl Default for SystemMonitor {
    fn default() -> Self {
        Self::new()
    }
}

impl SystemMonitor {
    pub fn new() -> Self {
        SystemMonitor {
            cpu_load: 0.0,
            available_memory_bytes: 0,
            cache_miss_rate: 0.0,
            io_wait_percent: 0.0,
            cpu_temperature_celsius: 50.0,
        }
    }

    pub fn update_metrics(&mut self) -> Result<()> {
        self.cpu_load = self.read_cpu_load()?;
        self.available_memory_bytes = self.read_available_memory()?;
        self.cache_miss_rate = self.read_cache_miss_rate()?;
        self.io_wait_percent = self.read_io_wait()?;
        self.cpu_temperature_celsius = self.read_cpu_temperature()?;

        Ok(())
    }

    // The readers below currently return fixed placeholder values.
    fn read_cpu_load(&self) -> Result<f64> {
        Ok(0.5)
    }

    fn read_available_memory(&self) -> Result<usize> {
        Ok(8 * 1024 * 1024 * 1024)
    }

    fn read_cache_miss_rate(&self) -> Result<f64> {
        Ok(0.05)
    }

    fn read_io_wait(&self) -> Result<f64> {
        Ok(0.02)
    }

    fn read_cpu_temperature(&self) -> Result<f64> {
        Ok(55.0)
    }
}

impl Default for ConfigurationPredictor {
    fn default() -> Self {
        Self::new()
    }
}

impl ConfigurationPredictor {
    pub fn new() -> Self {
        let mut feature_weights = HashMap::new();
        feature_weights.insert("n_samples".to_string(), 0.3);
        feature_weights.insert("nfeatures".to_string(), 0.25);
        feature_weights.insert("memory_footprint".to_string(), 0.2);
        feature_weights.insert("sparsity".to_string(), 0.15);
        feature_weights.insert("cpu_load".to_string(), 0.1);

        ConfigurationPredictor {
            feature_weights,
            learning_rate: 0.01,
            confidence_threshold: 0.8,
            sample_count: 0,
        }
    }

    pub fn predict_optimal_config(
        &self,
        state: &str,
        _transformation_type: &str,
        _user_params: &HashMap<String, f64>,
    ) -> Result<OptimizationConfig> {
        let features = self.extract_features(state)?;

        let predicted_memory_limit = self.predict_memory_limit(&features);
        let predicted_parallelism = self.predict_parallelism(&features);
        let predicted_simd_usage = self.predict_simd_usage(&features);

        let strategy = if predicted_memory_limit < 1000 {
            ProcessingStrategy::OutOfCore { chunk_size: 1024 }
        } else if predicted_parallelism {
            ProcessingStrategy::Parallel
        } else if predicted_simd_usage {
            ProcessingStrategy::Simd
        } else {
            ProcessingStrategy::Standard
        };

        Ok(OptimizationConfig {
            processing_strategy: strategy,
            memory_limit_mb: predicted_memory_limit,
            use_robust: false,
            use_parallel: predicted_parallelism,
            use_simd: predicted_simd_usage,
            use_gpu: *features.get("memory").unwrap_or(&0.0) > 100.0,
            chunk_size: if predicted_memory_limit < 1000 {
                512
            } else {
                2048
            },
            num_threads: if predicted_parallelism { 4 } else { 1 },
            algorithm_params: HashMap::new(),
        })
    }

    // Parse a state string of the form "key:value_key:value_..." into a feature map.
    fn extract_features(&self, state: &str) -> Result<HashMap<String, f64>> {
        let mut features = HashMap::new();

        for part in state.split('_') {
            if let Some((key, value)) = part.split_once(':') {
                if let Ok(val) = value.parse::<f64>() {
                    features.insert(key.to_string(), val);
                }
            }
        }

        Ok(features)
    }

    fn predict_memory_limit(&self, features: &HashMap<String, f64>) -> usize {
        // The state string stores the memory footprint (in MB) under the "memory" key.
        let memory_footprint = features.get("memory").unwrap_or(&100.0);
        (memory_footprint * 1.5) as usize
    }

    fn predict_parallelism(&self, features: &HashMap<String, f64>) -> bool {
        let samples = features.get("samples").unwrap_or(&1000.0);
        let cpu_load = features.get("cpu").unwrap_or(&0.5);
        samples > &5000.0 && cpu_load < &0.7
    }

    fn predict_simd_usage(&self, features: &HashMap<String, f64>) -> bool {
        let features_count = features.get("features").unwrap_or(&10.0);
        features_count > &50.0
    }

    pub fn update_from_feedback(&mut self, _performance: &PerformanceMetric) -> Result<()> {
        self.sample_count += 1;
        Ok(())
    }

    pub fn retrain_with_history(
        &mut self,
        _history: &HashMap<String, Vec<PerformanceMetric>>,
    ) -> Result<()> {
        self.confidence_threshold = (self.confidence_threshold + 0.01).min(0.95);
        Ok(())
    }
}

impl Default for AdaptiveParameterTuner {
    fn default() -> Self {
        Self::new()
    }
}

impl AdaptiveParameterTuner {
    pub fn new() -> Self {
        AdaptiveParameterTuner {
            q_table: HashMap::new(),
            exploration_rate: 0.1,
            learning_rate: 0.1,
            discount_factor: 0.9,
            current_state: String::new(),
        }
    }

    pub fn tune_parameters(
        &mut self,
        mut config: OptimizationConfig,
        state: &str,
        _transformation_type: &str,
    ) -> Result<OptimizationConfig> {
        self.current_state = state.to_string();

        // Epsilon-greedy: occasionally explore perturbed parameters, otherwise exploit.
        if scirs2_core::random::rng().gen_range(0.0..1.0) < self.exploration_rate {
            config = self.explore_parameters(config)?;
        } else {
            config = self.exploit_best_parameters(config, state)?;
        }

        Ok(config)
    }

    fn explore_parameters(&self, mut config: OptimizationConfig) -> Result<OptimizationConfig> {
        let mut rng = scirs2_core::random::rng();

        // Randomly perturb the memory limit, parallelism flag, and chunk size.
        let memory_factor = rng.gen_range(0.8..1.2);
        config.memory_limit_mb = (config.memory_limit_mb as f64 * memory_factor) as usize;

        if rng.gen_range(0.0..1.0) < 0.3 {
            config.use_parallel = !config.use_parallel;
        }

        let chunk_factor = rng.gen_range(0.5..1.5);
        config.chunk_size = (config.chunk_size as f64 * chunk_factor) as usize;

        Ok(config)
    }

    fn exploit_best_parameters(
        &self,
        config: OptimizationConfig,
        state: &str,
    ) -> Result<OptimizationConfig> {
        // The best known action is looked up but not yet applied to the configuration.
        let _best_action = self.find_best_action(state);

        Ok(config)
    }

    fn find_best_action(&self, state: &str) -> String {
        let mut best_action = "default".to_string();
        let mut best_value = f64::NEG_INFINITY;

        for ((s, action), &value) in &self.q_table {
            if s == state && value > best_value {
                best_value = value;
                best_action = action.clone();
            }
        }

        best_action
    }

    pub fn update_q_values(&mut self, _config_hash: u64, reward: f64) -> Result<()> {
        // A single placeholder action is used for now; the config hash is not yet folded in.
        let state_action = (self.current_state.clone(), "current_action".to_string());

        let old_value = self.q_table.get(&state_action).copied().unwrap_or(0.0);
        let new_value = old_value + self.learning_rate * (reward - old_value);

        self.q_table.insert(state_action, new_value);

        Ok(())
    }

    pub fn decay_exploration_rate(&mut self) {
        self.exploration_rate = (self.exploration_rate * 0.995).max(0.01);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array2;

    #[test]
    fn test_system_resources_detection() {
        let resources = SystemResources::detect();
        assert!(resources.cpu_cores > 0);
        assert!(resources.memory_mb > 0);
        assert!(resources.safe_memory_mb() < resources.memory_mb);
    }

    #[test]
    fn test_data_characteristics_analysis() {
        let data =
            Array2::from_shape_vec((100, 10), (0..1000).map(|x| x as f64).collect()).unwrap();
        let chars = DataCharacteristics::analyze(&data.view()).unwrap();

        assert_eq!(chars.n_samples, 100);
        assert_eq!(chars.nfeatures, 10);
        assert!(chars.memory_footprint_mb > 0.0);
        assert!(!chars.is_large_dataset());
    }

    #[test]
    fn test_optimization_config_for_standardization() {
        let data = Array2::ones((1000, 50));
        let chars = DataCharacteristics::analyze(&data.view()).unwrap();
        let system = SystemResources::detect();

        let config = OptimizationConfig::for_standardization(&chars, &system);
        assert!(config.memory_limit_mb > 0);
    }

    #[test]
    fn test_optimization_config_for_pca() {
        let data = Array2::ones((500, 20));
        let chars = DataCharacteristics::analyze(&data.view()).unwrap();
        let system = SystemResources::detect();

        let config = OptimizationConfig::for_pca(&chars, &system, 10);
        assert_eq!(config.algorithm_params.get("n_components"), Some(&10.0));
    }

    #[test]
    fn test_polynomial_features_estimation() {
        let result = OptimizationConfig::estimate_polynomial_features(5, 2);
        assert!(result.is_ok());

        let result = OptimizationConfig::estimate_polynomial_features(100, 10);
        assert!(result.is_err());
    }

    #[test]
    fn test_auto_tuner() {
        let tuner = AutoTuner::new();
        let data = Array2::ones((100, 10));
        let chars = DataCharacteristics::analyze(&data.view()).unwrap();

        let config = tuner
            .optimize_for_transformation("standardization", &chars, &HashMap::new())
            .unwrap();
        assert!(config.memory_limit_mb > 0);

        let report = tuner.generate_report(&chars);
        assert!(!report.recommendations.is_empty());
    }

    #[test]
    fn test_large_dataset_detection() {
        let mut chars = DataCharacteristics {
            n_samples: 200_000,
            nfeatures: 1000,
            sparsity: 0.1,
            data_range: 100.0,
            outlier_ratio: 0.02,
            has_missing: false,
            memory_footprint_mb: 1500.0,
            elementsize: 8,
        };

        assert!(chars.is_large_dataset());

        chars.n_samples = 1000;
        chars.memory_footprint_mb = 10.0;
        assert!(!chars.is_large_dataset());
    }
}