// quantrs2_anneal/scientific_performance_optimization/algorithm.rs

//! Algorithm-level performance optimizations: problem decomposition, result
//! caching, approximation strategy selection, and streaming processing.

use std::collections::{HashMap, VecDeque};
use std::time::{Duration, Instant};

use crate::applications::{
    drug_discovery::DrugDiscoveryProblem, materials_science::MaterialsOptimizationProblem,
    protein_folding::ProteinFoldingProblem,
};
use crate::ising::{IsingModel, QuboModel};

use super::config::{
    AlgorithmOptimizationConfig, ApproximationConfig, ApproximationStrategy, CachingConfig,
    DecompositionStrategy, StreamingConfig,
};
use super::memory::CacheStatistics;
/// Top-level coordinator for algorithm-level performance optimization.
pub struct AlgorithmOptimizer {
    /// Optimization configuration.
    pub config: AlgorithmOptimizationConfig,
    /// Problem decomposition engine.
    pub decomposer: ProblemDecomposer,
    /// Cache of previously computed results.
    pub result_cache: ResultCache,
    /// Approximation strategy selection engine.
    pub approximation_engine: ApproximationEngine,
    /// Streaming data processor.
    pub streaming_processor: StreamingProcessor,
}

impl AlgorithmOptimizer {
    /// Creates an optimizer with the given configuration and freshly
    /// initialized sub-components.
    #[must_use]
    pub fn new(config: AlgorithmOptimizationConfig) -> Self {
        Self {
            config,
            decomposer: ProblemDecomposer::new(),
            result_cache: ResultCache::new(),
            approximation_engine: ApproximationEngine::new(),
            streaming_processor: StreamingProcessor::new(),
        }
    }
}
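// A minimal construction sketch (test-only). It assumes that
// `AlgorithmOptimizationConfig` implements `Default`; that type lives in the
// config module and its derives are not visible in this file.
#[cfg(test)]
mod optimizer_construction_sketch {
    use super::*;

    #[test]
    fn new_optimizer_starts_with_empty_components() {
        let optimizer = AlgorithmOptimizer::new(AlgorithmOptimizationConfig::default());
        assert!(optimizer.decomposer.subproblems.is_empty());
        assert!(optimizer.result_cache.cache.is_empty());
        assert!(optimizer.streaming_processor.windows.is_empty());
    }
}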
/// Splits large problems into smaller subproblems according to the configured
/// decomposition strategy.
#[derive(Debug)]
pub struct ProblemDecomposer {
    pub strategy: DecompositionStrategy,
    pub subproblems: HashMap<String, Subproblem>,
    pub statistics: DecompositionStatistics,
}

impl ProblemDecomposer {
    #[must_use]
    pub fn new() -> Self {
        Self {
            strategy: DecompositionStrategy::Adaptive,
            subproblems: HashMap::new(),
            statistics: DecompositionStatistics::default(),
        }
    }

    /// Decomposes a problem and returns the ids of the created subproblems.
    pub fn decompose(&mut self, problem_id: &str, problem_data: ProblemData) -> Vec<String> {
        let subproblem_ids = self.create_subproblems(problem_id, &problem_data);
        self.statistics.decompositions += 1;
        self.statistics.subproblems_created += subproblem_ids.len() as u64;
        subproblem_ids
    }

    fn create_subproblems(&mut self, parent_id: &str, _problem_data: &ProblemData) -> Vec<String> {
        let mut subproblem_ids = Vec::new();

        // Number of subproblems to create for each decomposition strategy.
        let num_subproblems = match self.strategy {
            DecompositionStrategy::Uniform => 4,
            DecompositionStrategy::Adaptive => 4,
            DecompositionStrategy::GraphBased => 4,
            DecompositionStrategy::Hierarchical => 2,
        };

        for i in 0..num_subproblems {
            let id = format!("{parent_id}_sub_{i}");
            let subproblem = Subproblem {
                id: id.clone(),
                parent_id: Some(parent_id.to_string()),
                problem_data: ProblemData::Generic(Vec::new()),
                status: SubproblemStatus::Pending,
                dependencies: Vec::new(),
            };
            self.subproblems.insert(id.clone(), subproblem);
            subproblem_ids.push(id);
        }

        subproblem_ids
    }

    /// Returns the status of a subproblem, if it exists.
    pub fn get_status(&self, subproblem_id: &str) -> Option<&SubproblemStatus> {
        self.subproblems.get(subproblem_id).map(|s| &s.status)
    }

    /// Updates the status of a subproblem, if it exists.
    pub fn update_status(&mut self, subproblem_id: &str, status: SubproblemStatus) {
        if let Some(subproblem) = self.subproblems.get_mut(subproblem_id) {
            subproblem.status = status;
        }
    }
}

impl Default for ProblemDecomposer {
    fn default() -> Self {
        Self::new()
    }
}
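// A test-only sketch of the decomposition flow. It relies only on the defaults
// above: the `Adaptive` strategy creates four pending subproblems.
#[cfg(test)]
mod problem_decomposer_sketch {
    use super::*;

    #[test]
    fn adaptive_strategy_creates_four_pending_subproblems() {
        let mut decomposer = ProblemDecomposer::new();
        let ids = decomposer.decompose("demo", ProblemData::Generic(Vec::new()));

        assert_eq!(ids.len(), 4);
        assert_eq!(ids[0], "demo_sub_0");
        assert_eq!(
            decomposer.get_status(&ids[0]),
            Some(&SubproblemStatus::Pending)
        );

        decomposer.update_status(&ids[0], SubproblemStatus::Completed);
        assert_eq!(
            decomposer.get_status(&ids[0]),
            Some(&SubproblemStatus::Completed)
        );
    }
}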
/// A unit of work produced by decomposing a larger problem.
#[derive(Debug)]
pub struct Subproblem {
    pub id: String,
    pub parent_id: Option<String>,
    pub problem_data: ProblemData,
    pub status: SubproblemStatus,
    pub dependencies: Vec<String>,
}

/// Problem payloads the optimizer can operate on.
#[derive(Debug)]
pub enum ProblemData {
    Ising(IsingModel),
    QUBO(QuboModel),
    ProteinFolding(ProteinFoldingProblem),
    MaterialsScience(MaterialsOptimizationProblem),
    DrugDiscovery(DrugDiscoveryProblem),
    Generic(Vec<u8>),
}

/// Lifecycle state of a subproblem.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SubproblemStatus {
    Pending,
    InProgress,
    Completed,
    Failed,
    Cancelled,
}
/// LRU-style cache for previously computed results.
#[derive(Debug)]
pub struct ResultCache {
    pub config: CachingConfig,
    pub cache: HashMap<String, CachedResult>,
    pub access_order: VecDeque<String>,
    pub statistics: CacheStatistics,
}

impl ResultCache {
    #[must_use]
    pub fn new() -> Self {
        Self {
            config: CachingConfig::default(),
            cache: HashMap::new(),
            access_order: VecDeque::new(),
            statistics: CacheStatistics::default(),
        }
    }

    /// Looks up a cached result, recording hit/miss statistics and refreshing
    /// the LRU order on a hit.
    pub fn get(&mut self, key: &str) -> Option<&CachedResult> {
        if self.cache.contains_key(key) {
            self.statistics.record_hit();
            // Move the key to the front of the access order.
            self.access_order.retain(|k| k != key);
            self.access_order.push_front(key.to_string());

            if let Some(result) = self.cache.get_mut(key) {
                result.access_count += 1;
            }

            self.cache.get(key)
        } else {
            self.statistics.record_miss();
            None
        }
    }

    /// Inserts a result, evicting least-recently-used entries while the cache
    /// is at its size limit.
    pub fn put(&mut self, key: String, result_data: Vec<u8>, quality_score: f64) {
        while self.cache.len() >= self.config.cache_size_limit {
            if let Some(lru_key) = self.access_order.pop_back() {
                self.cache.remove(&lru_key);
            } else {
                // Access order is empty; nothing left to evict.
                break;
            }
        }

        let cached = CachedResult {
            result_data,
            timestamp: Instant::now(),
            access_count: 1,
            quality_score,
        };

        self.cache.insert(key.clone(), cached);
        self.access_order.push_front(key);
    }

    #[must_use]
    pub fn contains(&self, key: &str) -> bool {
        self.cache.contains_key(key)
    }

    /// Removes all cached entries.
    pub fn clear(&mut self) {
        self.cache.clear();
        self.access_order.clear();
    }
}

impl Default for ResultCache {
    fn default() -> Self {
        Self::new()
    }
}
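// A test-only usage sketch for the result cache. It assumes the default
// `CachingConfig::cache_size_limit` admits at least one entry; otherwise the
// eviction loop above removes nothing and the insert still proceeds.
#[cfg(test)]
mod result_cache_sketch {
    use super::*;

    #[test]
    fn put_then_get_returns_the_stored_result() {
        let mut cache = ResultCache::new();
        assert!(cache.get("missing").is_none());

        cache.put("anneal_run_1".to_string(), vec![1, 2, 3], 0.9);
        assert!(cache.contains("anneal_run_1"));

        let cached = cache.get("anneal_run_1").expect("entry was just inserted");
        assert_eq!(cached.result_data, vec![1, 2, 3]);
        // One access at insertion time plus the `get` above.
        assert_eq!(cached.access_count, 2);
    }
}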
#[derive(Debug, Clone)]
pub struct CachedResult {
    pub result_data: Vec<u8>,
    pub timestamp: Instant,
    pub access_count: u64,
    pub quality_score: f64,
}
/// Selects and tracks approximation strategies based on recorded performance.
#[derive(Debug)]
pub struct ApproximationEngine {
    pub config: ApproximationConfig,
    pub strategies: Vec<ApproximationStrategy>,
    pub strategy_performance: HashMap<ApproximationStrategy, StrategyPerformance>,
}

impl ApproximationEngine {
    #[must_use]
    pub fn new() -> Self {
        Self {
            config: ApproximationConfig::default(),
            strategies: vec![
                ApproximationStrategy::Sampling,
                ApproximationStrategy::Clustering,
                ApproximationStrategy::DimensionalityReduction,
            ],
            strategy_performance: HashMap::new(),
        }
    }

    /// Returns the strategy with the highest recorded average quality, falling
    /// back to `Sampling` when no performance has been recorded yet.
    #[must_use]
    pub fn select_strategy(&self) -> ApproximationStrategy {
        self.strategy_performance
            .iter()
            .max_by(|a, b| {
                a.1.average_quality
                    .partial_cmp(&b.1.average_quality)
                    .unwrap_or(std::cmp::Ordering::Equal)
            })
            .map(|(s, _)| s.clone())
            .unwrap_or(ApproximationStrategy::Sampling)
    }

    /// Records the outcome of one use of a strategy and updates its running
    /// statistics.
    pub fn record_performance(
        &mut self,
        strategy: ApproximationStrategy,
        quality: f64,
        speedup: f64,
        success: bool,
    ) {
        let perf = self
            .strategy_performance
            .entry(strategy.clone())
            .or_insert_with(|| StrategyPerformance::new(strategy));

        perf.usage_count += 1;
        if success {
            let n = perf.usage_count as f64;
            perf.average_quality = (perf.average_quality * (n - 1.0) + quality) / n;
            perf.average_speedup = (perf.average_speedup * (n - 1.0) + speedup) / n;
            perf.success_rate = (perf.success_rate * (n - 1.0) + 1.0) / n;
        } else {
            perf.success_rate =
                (perf.success_rate * (perf.usage_count - 1) as f64) / perf.usage_count as f64;
        }
    }
}

impl Default for ApproximationEngine {
    fn default() -> Self {
        Self::new()
    }
}
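// A test-only sketch of strategy selection: with no recorded history the
// engine falls back to `Sampling`; after a higher-quality run is recorded for
// `Clustering`, that strategy is preferred.
#[cfg(test)]
mod approximation_engine_sketch {
    use super::*;

    #[test]
    fn selects_strategy_with_highest_average_quality() {
        let mut engine = ApproximationEngine::new();
        assert_eq!(engine.select_strategy(), ApproximationStrategy::Sampling);

        engine.record_performance(ApproximationStrategy::Sampling, 0.5, 2.0, true);
        engine.record_performance(ApproximationStrategy::Clustering, 0.9, 1.5, true);

        assert_eq!(engine.select_strategy(), ApproximationStrategy::Clustering);
    }
}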
#[derive(Debug, Clone)]
pub struct StrategyPerformance {
    pub strategy: ApproximationStrategy,
    pub success_rate: f64,
    pub average_quality: f64,
    pub average_speedup: f64,
    pub usage_count: u64,
}

impl StrategyPerformance {
    #[must_use]
    pub fn new(strategy: ApproximationStrategy) -> Self {
        Self {
            strategy,
            success_rate: 0.0,
            average_quality: 0.0,
            average_speedup: 0.0,
            usage_count: 0,
        }
    }
}
/// Processes streaming data in fixed-size, time-bounded windows.
#[derive(Debug)]
pub struct StreamingProcessor {
    pub config: StreamingConfig,
    pub windows: Vec<ProcessingWindow>,
    pub statistics: StreamingStatistics,
}

impl StreamingProcessor {
    #[must_use]
    pub fn new() -> Self {
        Self {
            config: StreamingConfig::default(),
            windows: Vec::new(),
            statistics: StreamingStatistics::default(),
        }
    }

    /// Adds an element to the current window, opening a new window when the
    /// current one is full or no window exists yet.
    pub fn add_element(&mut self, data: Vec<u8>, metadata: HashMap<String, String>) {
        let element = StreamElement {
            data,
            timestamp: Instant::now(),
            metadata,
        };

        if let Some(window) = self.windows.last_mut() {
            if window.data.len() < self.config.window_size {
                window.data.push_back(element);
                return;
            }
        }

        let mut new_window = ProcessingWindow {
            id: format!("window_{}", self.windows.len()),
            data: VecDeque::new(),
            start_time: Instant::now(),
            duration: Duration::from_secs(60),
        };
        new_window.data.push_back(element);
        self.windows.push(new_window);
        self.statistics.windows_created += 1;
    }

    /// Drains all windows whose duration has elapsed and returns their
    /// elements; emptied windows are dropped.
    pub fn process(&mut self) -> Vec<StreamElement> {
        let mut processed = Vec::new();

        let now = Instant::now();
        for window in &mut self.windows {
            if now.duration_since(window.start_time) >= window.duration {
                while let Some(element) = window.data.pop_front() {
                    processed.push(element);
                    self.statistics.elements_processed += 1;
                }
            }
        }

        self.windows.retain(|w| !w.data.is_empty());

        processed
    }
}

impl Default for StreamingProcessor {
    fn default() -> Self {
        Self::new()
    }
}
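// A test-only sketch of the streaming path: the first element opens a window,
// and `process` emits nothing until that window's 60-second duration elapses.
#[cfg(test)]
mod streaming_processor_sketch {
    use super::*;

    #[test]
    fn first_element_opens_a_window() {
        let mut processor = StreamingProcessor::new();
        processor.add_element(vec![42], HashMap::new());

        assert_eq!(processor.windows.len(), 1);
        assert_eq!(processor.statistics.windows_created, 1);

        // The freshly opened window has not expired, so nothing is emitted yet.
        assert!(processor.process().is_empty());
        assert_eq!(processor.windows.len(), 1);
    }
}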
#[derive(Debug)]
pub struct ProcessingWindow {
    pub id: String,
    pub data: VecDeque<StreamElement>,
    pub start_time: Instant,
    pub duration: Duration,
}

#[derive(Debug, Clone)]
pub struct StreamElement {
    pub data: Vec<u8>,
    pub timestamp: Instant,
    pub metadata: HashMap<String, String>,
}

#[derive(Debug, Clone, Default)]
pub struct DecompositionStatistics {
    pub decompositions: u64,
    pub subproblems_created: u64,
    pub subproblems_solved: u64,
    pub avg_subproblem_size: f64,
}

#[derive(Debug, Clone, Default)]
pub struct StreamingStatistics {
    pub elements_processed: u64,
    pub windows_created: u64,
    pub avg_window_size: f64,
    pub processing_rate: f64,
}