quantrs2_anneal/scientific_performance_optimization/
algorithm.rs

1//! Algorithm optimization types for scientific performance optimization.
2//!
3//! This module contains problem decomposition, result caching,
4//! approximation engines, and streaming processors.
5
6use std::collections::{HashMap, VecDeque};
7use std::time::{Duration, Instant};
8
9use crate::applications::{
10    drug_discovery::DrugDiscoveryProblem, materials_science::MaterialsOptimizationProblem,
11    protein_folding::ProteinFoldingProblem,
12};
13use crate::ising::{IsingModel, QuboModel};
14
15use super::config::{
16    AlgorithmOptimizationConfig, ApproximationConfig, ApproximationStrategy, CachingConfig,
17    DecompositionStrategy, StreamingConfig,
18};
19use super::memory::CacheStatistics;
20
/// Algorithm optimizer for improving computational efficiency.
///
/// Bundles the four optimization subsystems — problem decomposition,
/// result caching, approximation, and streaming — behind one configuration.
pub struct AlgorithmOptimizer {
    /// Configuration governing the optimizer's behavior.
    pub config: AlgorithmOptimizationConfig,
    /// Splits large problems into smaller subproblems.
    pub decomposer: ProblemDecomposer,
    /// Memoization cache for previously computed results.
    pub result_cache: ResultCache,
    /// Selects and tracks approximation strategies.
    pub approximation_engine: ApproximationEngine,
    /// Windowed processor for continuous data streams.
    pub streaming_processor: StreamingProcessor,
}
34
35impl AlgorithmOptimizer {
36    /// Create a new algorithm optimizer
37    #[must_use]
38    pub fn new(config: AlgorithmOptimizationConfig) -> Self {
39        Self {
40            config,
41            decomposer: ProblemDecomposer::new(),
42            result_cache: ResultCache::new(),
43            approximation_engine: ApproximationEngine::new(),
44            streaming_processor: StreamingProcessor::new(),
45        }
46    }
47}
48
/// Problem decomposer for hierarchical problem solving.
#[derive(Debug)]
pub struct ProblemDecomposer {
    /// Strategy controlling how many subproblems a decomposition yields.
    pub strategy: DecompositionStrategy,
    /// Registry of all created subproblems, keyed by subproblem id.
    pub subproblems: HashMap<String, Subproblem>,
    /// Running counters for decompositions and subproblems.
    pub statistics: DecompositionStatistics,
}
59
60impl ProblemDecomposer {
61    /// Create a new problem decomposer
62    #[must_use]
63    pub fn new() -> Self {
64        Self {
65            strategy: DecompositionStrategy::Adaptive,
66            subproblems: HashMap::new(),
67            statistics: DecompositionStatistics::default(),
68        }
69    }
70
71    /// Decompose a problem into subproblems
72    pub fn decompose(&mut self, problem_id: &str, problem_data: ProblemData) -> Vec<String> {
73        let subproblem_ids = self.create_subproblems(problem_id, &problem_data);
74        self.statistics.decompositions += 1;
75        subproblem_ids
76    }
77
78    /// Create subproblems based on strategy
79    fn create_subproblems(&mut self, parent_id: &str, _problem_data: &ProblemData) -> Vec<String> {
80        let mut subproblem_ids = Vec::new();
81
82        // Create subproblems based on strategy
83        let num_subproblems = match self.strategy {
84            DecompositionStrategy::Uniform => 4,
85            DecompositionStrategy::Adaptive => 4,
86            DecompositionStrategy::GraphBased => 4,
87            DecompositionStrategy::Hierarchical => 2,
88        };
89
90        for i in 0..num_subproblems {
91            let id = format!("{parent_id}_sub_{i}");
92            let subproblem = Subproblem {
93                id: id.clone(),
94                parent_id: Some(parent_id.to_string()),
95                problem_data: ProblemData::Generic(Vec::new()),
96                status: SubproblemStatus::Pending,
97                dependencies: Vec::new(),
98            };
99            self.subproblems.insert(id.clone(), subproblem);
100            subproblem_ids.push(id);
101        }
102
103        subproblem_ids
104    }
105
106    /// Get subproblem status
107    pub fn get_status(&self, subproblem_id: &str) -> Option<&SubproblemStatus> {
108        self.subproblems.get(subproblem_id).map(|s| &s.status)
109    }
110
111    /// Update subproblem status
112    pub fn update_status(&mut self, subproblem_id: &str, status: SubproblemStatus) {
113        if let Some(subproblem) = self.subproblems.get_mut(subproblem_id) {
114            subproblem.status = status;
115        }
116    }
117}
118
119impl Default for ProblemDecomposer {
120    fn default() -> Self {
121        Self::new()
122    }
123}
124
/// A single unit of work produced by decomposing a larger problem.
#[derive(Debug)]
pub struct Subproblem {
    /// Unique subproblem identifier (derived from the parent id).
    pub id: String,
    /// Id of the problem this was decomposed from, if any.
    pub parent_id: Option<String>,
    /// The subproblem's payload.
    pub problem_data: ProblemData,
    /// Current solution status.
    pub status: SubproblemStatus,
    /// Ids of subproblems that must complete before this one.
    pub dependencies: Vec<String>,
}
139
/// Supported problem payload types.
#[derive(Debug)]
pub enum ProblemData {
    /// Ising model.
    Ising(IsingModel),
    /// QUBO model.
    QUBO(QuboModel),
    /// Protein folding problem.
    ProteinFolding(ProteinFoldingProblem),
    /// Materials science optimization problem.
    MaterialsScience(MaterialsOptimizationProblem),
    /// Drug discovery problem.
    DrugDiscovery(DrugDiscoveryProblem),
    /// Opaque serialized problem data.
    Generic(Vec<u8>),
}
156
/// Lifecycle status of a subproblem.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SubproblemStatus {
    /// Not yet started.
    Pending,
    /// Currently being solved.
    InProgress,
    /// Solved successfully.
    Completed,
    /// Solving failed.
    Failed,
    /// Cancelled before completion.
    Cancelled,
}
171
/// LRU-style result cache for memoization.
#[derive(Debug)]
pub struct ResultCache {
    /// Cache configuration (size limit, etc.).
    pub config: CachingConfig,
    /// Cached results keyed by problem/result key.
    pub cache: HashMap<String, CachedResult>,
    /// Access order, most-recently-used at the front.
    pub access_order: VecDeque<String>,
    /// Hit/miss statistics.
    pub statistics: CacheStatistics,
}
184
185impl ResultCache {
186    /// Create a new result cache
187    #[must_use]
188    pub fn new() -> Self {
189        Self {
190            config: CachingConfig::default(),
191            cache: HashMap::new(),
192            access_order: VecDeque::new(),
193            statistics: CacheStatistics::default(),
194        }
195    }
196
197    /// Get cached result
198    pub fn get(&mut self, key: &str) -> Option<&CachedResult> {
199        if self.cache.contains_key(key) {
200            self.statistics.record_hit();
201            // Move to front
202            self.access_order.retain(|k| k != key);
203            self.access_order.push_front(key.to_string());
204
205            // Update access count
206            if let Some(result) = self.cache.get_mut(key) {
207                result.access_count += 1;
208            }
209
210            self.cache.get(key)
211        } else {
212            self.statistics.record_miss();
213            None
214        }
215    }
216
217    /// Cache a result
218    pub fn put(&mut self, key: String, result_data: Vec<u8>, quality_score: f64) {
219        // Evict if necessary
220        while self.cache.len() >= self.config.cache_size_limit {
221            if let Some(lru_key) = self.access_order.pop_back() {
222                self.cache.remove(&lru_key);
223            }
224        }
225
226        let cached = CachedResult {
227            result_data,
228            timestamp: Instant::now(),
229            access_count: 1,
230            quality_score,
231        };
232
233        self.cache.insert(key.clone(), cached);
234        self.access_order.push_front(key);
235    }
236
237    /// Check if key exists
238    #[must_use]
239    pub fn contains(&self, key: &str) -> bool {
240        self.cache.contains_key(key)
241    }
242
243    /// Clear the cache
244    pub fn clear(&mut self) {
245        self.cache.clear();
246        self.access_order.clear();
247    }
248}
249
250impl Default for ResultCache {
251    fn default() -> Self {
252        Self::new()
253    }
254}
255
/// A single cached result and its bookkeeping metadata.
#[derive(Debug, Clone)]
pub struct CachedResult {
    /// Serialized result payload.
    pub result_data: Vec<u8>,
    /// When the result was cached.
    pub timestamp: Instant,
    /// Number of times this entry has been accessed (starts at 1 on insert).
    pub access_count: u64,
    /// Quality score supplied at insertion time.
    pub quality_score: f64,
}
268
/// Approximation engine for fast approximate solutions.
#[derive(Debug)]
pub struct ApproximationEngine {
    /// Engine configuration.
    pub config: ApproximationConfig,
    /// Strategies registered with this engine.
    pub strategies: Vec<ApproximationStrategy>,
    /// Rolling performance statistics per strategy.
    pub strategy_performance: HashMap<ApproximationStrategy, StrategyPerformance>,
}
279
280impl ApproximationEngine {
281    /// Create a new approximation engine
282    #[must_use]
283    pub fn new() -> Self {
284        Self {
285            config: ApproximationConfig::default(),
286            strategies: vec![
287                ApproximationStrategy::Sampling,
288                ApproximationStrategy::Clustering,
289                ApproximationStrategy::DimensionalityReduction,
290            ],
291            strategy_performance: HashMap::new(),
292        }
293    }
294
295    /// Select best strategy based on problem characteristics
296    #[must_use]
297    pub fn select_strategy(&self) -> ApproximationStrategy {
298        // Find strategy with best average quality
299        self.strategy_performance
300            .iter()
301            .max_by(|a, b| {
302                a.1.average_quality
303                    .partial_cmp(&b.1.average_quality)
304                    .unwrap_or(std::cmp::Ordering::Equal)
305            })
306            .map(|(s, _)| s.clone())
307            .unwrap_or(ApproximationStrategy::Sampling)
308    }
309
310    /// Record strategy performance
311    pub fn record_performance(
312        &mut self,
313        strategy: ApproximationStrategy,
314        quality: f64,
315        speedup: f64,
316        success: bool,
317    ) {
318        let perf = self
319            .strategy_performance
320            .entry(strategy.clone())
321            .or_insert_with(|| StrategyPerformance::new(strategy));
322
323        perf.usage_count += 1;
324        if success {
325            // Update rolling averages
326            let n = perf.usage_count as f64;
327            perf.average_quality = (perf.average_quality * (n - 1.0) + quality) / n;
328            perf.average_speedup = (perf.average_speedup * (n - 1.0) + speedup) / n;
329            perf.success_rate = (perf.success_rate * (n - 1.0) + 1.0) / n;
330        } else {
331            perf.success_rate =
332                (perf.success_rate * (perf.usage_count - 1) as f64) / perf.usage_count as f64;
333        }
334    }
335}
336
337impl Default for ApproximationEngine {
338    fn default() -> Self {
339        Self::new()
340    }
341}
342
/// Rolling performance statistics for one approximation strategy.
#[derive(Debug, Clone)]
pub struct StrategyPerformance {
    /// The strategy being tracked.
    pub strategy: ApproximationStrategy,
    /// Fraction of recorded runs that succeeded (0.0–1.0).
    pub success_rate: f64,
    /// Rolling average solution quality over successful runs.
    pub average_quality: f64,
    /// Rolling average speedup over successful runs.
    pub average_speedup: f64,
    /// Total number of recorded runs.
    pub usage_count: u64,
}
357
358impl StrategyPerformance {
359    /// Create new strategy performance tracker
360    #[must_use]
361    pub fn new(strategy: ApproximationStrategy) -> Self {
362        Self {
363            strategy,
364            success_rate: 0.0,
365            average_quality: 0.0,
366            average_speedup: 0.0,
367            usage_count: 0,
368        }
369    }
370}
371
/// Streaming processor for continuous, windowed data processing.
#[derive(Debug)]
pub struct StreamingProcessor {
    /// Streaming configuration (window size, etc.).
    pub config: StreamingConfig,
    /// Open processing windows, newest last.
    pub windows: Vec<ProcessingWindow>,
    /// Counters for processed elements and created windows.
    pub statistics: StreamingStatistics,
}
382
383impl StreamingProcessor {
384    /// Create a new streaming processor
385    #[must_use]
386    pub fn new() -> Self {
387        Self {
388            config: StreamingConfig::default(),
389            windows: Vec::new(),
390            statistics: StreamingStatistics::default(),
391        }
392    }
393
394    /// Add element to stream
395    pub fn add_element(&mut self, data: Vec<u8>, metadata: HashMap<String, String>) {
396        let element = StreamElement {
397            data,
398            timestamp: Instant::now(),
399            metadata,
400        };
401
402        // Add to current window or create new window
403        if let Some(window) = self.windows.last_mut() {
404            if window.data.len() < self.config.window_size {
405                window.data.push_back(element);
406                return;
407            }
408        }
409
410        // Create new window
411        let mut new_window = ProcessingWindow {
412            id: format!("window_{}", self.windows.len()),
413            data: VecDeque::new(),
414            start_time: Instant::now(),
415            duration: Duration::from_secs(60),
416        };
417        new_window.data.push_back(element);
418        self.windows.push(new_window);
419        self.statistics.windows_created += 1;
420    }
421
422    /// Process current windows
423    pub fn process(&mut self) -> Vec<StreamElement> {
424        let mut processed = Vec::new();
425
426        // Process completed windows
427        let now = Instant::now();
428        for window in &mut self.windows {
429            if now.duration_since(window.start_time) >= window.duration {
430                while let Some(element) = window.data.pop_front() {
431                    processed.push(element);
432                    self.statistics.elements_processed += 1;
433                }
434            }
435        }
436
437        // Remove empty windows
438        self.windows.retain(|w| !w.data.is_empty());
439
440        processed
441    }
442}
443
444impl Default for StreamingProcessor {
445    fn default() -> Self {
446        Self::new()
447    }
448}
449
/// One bounded window of stream elements awaiting processing.
#[derive(Debug)]
pub struct ProcessingWindow {
    /// Window identifier (e.g. `"window_3"`).
    pub id: String,
    /// Elements accumulated in this window, in arrival order.
    pub data: VecDeque<StreamElement>,
    /// When the window was opened.
    pub start_time: Instant,
    /// How long the window stays open before it becomes processable.
    pub duration: Duration,
}
462
/// A single element of a data stream.
#[derive(Debug, Clone)]
pub struct StreamElement {
    /// Raw element payload.
    pub data: Vec<u8>,
    /// Arrival timestamp.
    pub timestamp: Instant,
    /// Free-form key/value metadata attached by the producer.
    pub metadata: HashMap<String, String>,
}
473
/// Counters describing decomposition activity.
#[derive(Debug, Clone, Default)]
pub struct DecompositionStatistics {
    /// Total number of decompositions performed.
    pub decompositions: u64,
    /// Total subproblems created across all decompositions.
    pub subproblems_created: u64,
    /// Total subproblems marked as solved.
    pub subproblems_solved: u64,
    /// Average subproblem size.
    // NOTE(review): not updated anywhere in this module — confirm whether a
    // caller maintains it or it is still unimplemented.
    pub avg_subproblem_size: f64,
}
486
/// Counters describing streaming activity.
#[derive(Debug, Clone, Default)]
pub struct StreamingStatistics {
    /// Total elements drained from completed windows.
    pub elements_processed: u64,
    /// Total windows opened.
    pub windows_created: u64,
    /// Average window size.
    // NOTE(review): not updated anywhere in this module — confirm intent.
    pub avg_window_size: f64,
    /// Processing rate.
    // NOTE(review): not updated anywhere in this module — confirm intent.
    pub processing_rate: f64,
}