scirs2_sparse/realtime_performance_monitor/
history.rs1use super::metrics::{AggregatedMetrics, PerformanceSample, ProcessorType};
7use std::collections::{HashMap, VecDeque};
8
/// Rolling history of performance samples with global and per-processor
/// aggregation, plus stored baseline values used for degradation detection.
#[derive(Debug)]
pub struct PerformanceHistory {
    /// Global ring buffer of the most recent samples (capped at `max_samples`).
    pub samples: VecDeque<PerformanceSample>,
    /// Running aggregates updated with every sample added.
    pub aggregated_metrics: AggregatedMetrics,
    /// Baseline metric values keyed as "<processor_type>:<processor_id>_<metric>".
    pub performance_baselines: HashMap<String, f64>,
    // Maximum number of samples retained in the global `samples` buffer.
    max_samples: usize,
    // Per-processor histories keyed as "<processor_type>:<processor_id>".
    processor_metrics: HashMap<String, ProcessorMetrics>,
}
18
/// Per-processor sample history and running aggregates; stored in
/// `PerformanceHistory::processor_metrics` keyed by "<type>:<id>".
#[derive(Debug)]
struct ProcessorMetrics {
    // Ring buffer of this processor's recent samples (capped at max_samples / 4).
    samples: VecDeque<PerformanceSample>,
    // Running aggregates over this processor's samples.
    aggregated: AggregatedMetrics,
    // Set once a baseline has been recorded (after 10 samples).
    baseline_established: bool,
    // Timestamp of the most recently added sample for this processor.
    last_update: u64,
}
27
28impl PerformanceHistory {
29 pub fn new(max_samples: usize) -> Self {
31 Self {
32 samples: VecDeque::with_capacity(max_samples),
33 aggregated_metrics: AggregatedMetrics::new(),
34 performance_baselines: HashMap::new(),
35 max_samples,
36 processor_metrics: HashMap::new(),
37 }
38 }
39
40 pub fn add_sample(&mut self, sample: PerformanceSample) {
42 if self.samples.len() >= self.max_samples {
44 self.samples.pop_front();
45 }
46 self.samples.push_back(sample.clone());
47
48 self.aggregated_metrics.update_with_sample(&sample);
50
51 let processor_key = format!("{}:{}", sample.processor_type, sample.processor_id);
53 let should_establish_baseline = {
54 let processor_metrics = self
55 .processor_metrics
56 .entry(processor_key.clone())
57 .or_insert_with(|| ProcessorMetrics {
58 samples: VecDeque::with_capacity(self.max_samples / 4),
59 aggregated: AggregatedMetrics::new(),
60 baseline_established: false,
61 last_update: sample.timestamp,
62 });
63
64 if processor_metrics.samples.len() >= self.max_samples / 4 {
65 processor_metrics.samples.pop_front();
66 }
67 processor_metrics.samples.push_back(sample.clone());
68 processor_metrics.aggregated.update_with_sample(&sample);
69 processor_metrics.last_update = sample.timestamp;
70
71 !processor_metrics.baseline_established && processor_metrics.samples.len() >= 10
73 };
74
75 if should_establish_baseline {
77 self.establish_baseline(&processor_key);
78 if let Some(processor_metrics) = self.processor_metrics.get_mut(&processor_key) {
79 processor_metrics.baseline_established = true;
80 }
81 }
82 }
83
84 pub fn get_processor_samples(
86 &self,
87 processor_type: ProcessorType,
88 processor_id: &str,
89 ) -> Vec<&PerformanceSample> {
90 let key = format!("{}:{}", processor_type, processor_id);
91 self.processor_metrics
92 .get(&key)
93 .map(|metrics| metrics.samples.iter().collect())
94 .unwrap_or_default()
95 }
96
97 pub fn get_processor_metrics(
99 &self,
100 processor_type: ProcessorType,
101 processor_id: &str,
102 ) -> Option<&AggregatedMetrics> {
103 let key = format!("{}:{}", processor_type, processor_id);
104 self.processor_metrics
105 .get(&key)
106 .map(|metrics| &metrics.aggregated)
107 }
108
109 pub fn get_recent_samples(&self, count: usize) -> Vec<&PerformanceSample> {
111 self.samples
112 .iter()
113 .rev()
114 .take(count)
115 .collect::<Vec<_>>()
116 .into_iter()
117 .rev()
118 .collect()
119 }
120
121 pub fn get_samples_in_range(&self, start_time: u64, end_time: u64) -> Vec<&PerformanceSample> {
123 self.samples
124 .iter()
125 .filter(|sample| sample.timestamp >= start_time && sample.timestamp <= end_time)
126 .collect()
127 }
128
129 pub fn get_samples_by_type(&self, processor_type: ProcessorType) -> Vec<&PerformanceSample> {
131 self.samples
132 .iter()
133 .filter(|sample| sample.processor_type == processor_type)
134 .collect()
135 }
136
137 fn establish_baseline(&mut self, processor_key: &str) {
139 if let Some(metrics) = self.processor_metrics.get(processor_key) {
140 let baseline_execution_time = metrics.aggregated.avg_execution_time;
141 let baseline_throughput = metrics.aggregated.avg_throughput;
142 let baseline_efficiency = metrics.aggregated.efficiency_score;
143
144 self.performance_baselines.insert(
145 format!("{}_execution_time", processor_key),
146 baseline_execution_time,
147 );
148 self.performance_baselines
149 .insert(format!("{}_throughput", processor_key), baseline_throughput);
150 self.performance_baselines
151 .insert(format!("{}_efficiency", processor_key), baseline_efficiency);
152 }
153 }
154
155 pub fn get_baseline(
157 &self,
158 processor_type: ProcessorType,
159 processor_id: &str,
160 metric: &str,
161 ) -> Option<f64> {
162 let key = format!("{}:{}_{}", processor_type, processor_id, metric);
163 self.performance_baselines.get(&key).copied()
164 }
165
166 pub fn check_performance_degradation(
168 &self,
169 processor_type: ProcessorType,
170 processor_id: &str,
171 threshold: f64,
172 ) -> Vec<String> {
173 let mut degradations = Vec::new();
174 let processor_key = format!("{}:{}", processor_type, processor_id);
175
176 if let Some(metrics) = self.processor_metrics.get(&processor_key) {
177 if let Some(baseline) =
179 self.get_baseline(processor_type, processor_id, "execution_time")
180 {
181 let current = metrics.aggregated.avg_execution_time;
182 if current > baseline * (1.0 + threshold) {
183 degradations.push(format!(
184 "Execution time increased by {:.1}%",
185 ((current - baseline) / baseline) * 100.0
186 ));
187 }
188 }
189
190 if let Some(baseline) = self.get_baseline(processor_type, processor_id, "throughput") {
192 let current = metrics.aggregated.avg_throughput;
193 if current < baseline * (1.0 - threshold) {
194 degradations.push(format!(
195 "Throughput decreased by {:.1}%",
196 ((baseline - current) / baseline) * 100.0
197 ));
198 }
199 }
200
201 if let Some(baseline) = self.get_baseline(processor_type, processor_id, "efficiency") {
203 let current = metrics.aggregated.efficiency_score;
204 if current < baseline * (1.0 - threshold) {
205 degradations.push(format!(
206 "Efficiency decreased by {:.1}%",
207 ((baseline - current) / baseline) * 100.0
208 ));
209 }
210 }
211 }
212
213 degradations
214 }
215
216 pub fn get_performance_trend(
218 &self,
219 processor_type: ProcessorType,
220 processor_id: &str,
221 metric: &str,
222 ) -> PerformanceTrend {
223 let samples = self.get_processor_samples(processor_type, processor_id);
224
225 if samples.len() < 3 {
226 return PerformanceTrend::Insufficient;
227 }
228
229 let values: Vec<f64> = samples
230 .iter()
231 .filter_map(|sample| sample.get_metric(metric))
232 .collect();
233
234 if values.len() < 3 {
235 return PerformanceTrend::Insufficient;
236 }
237
238 let n = values.len() as f64;
240 let x_values: Vec<f64> = (0..values.len()).map(|i| i as f64).collect();
241
242 let sum_x: f64 = x_values.iter().sum();
243 let sum_y: f64 = values.iter().sum();
244 let sum_xy: f64 = x_values.iter().zip(&values).map(|(x, y)| x * y).sum();
245 let sum_x2: f64 = x_values.iter().map(|x| x * x).sum();
246
247 let slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x * sum_x);
248
249 if slope > 0.01 {
250 PerformanceTrend::Improving
251 } else if slope < -0.01 {
252 PerformanceTrend::Degrading
253 } else {
254 PerformanceTrend::Stable
255 }
256 }
257
258 pub fn get_processor_summary(&self) -> Vec<ProcessorSummary> {
260 self.processor_metrics
261 .iter()
262 .map(|(key, metrics)| {
263 let parts: Vec<&str> = key.split(':').collect();
264 let processor_type = match parts[0] {
265 "QuantumInspired" => ProcessorType::QuantumInspired,
266 "NeuralAdaptive" => ProcessorType::NeuralAdaptive,
267 "QuantumNeuralHybrid" => ProcessorType::QuantumNeuralHybrid,
268 "MemoryCompression" => ProcessorType::MemoryCompression,
269 _ => ProcessorType::QuantumInspired, };
271 let processor_id = parts.get(1).unwrap_or(&"unknown").to_string();
272
273 ProcessorSummary {
274 processor_type,
275 processor_id,
276 sample_count: metrics.samples.len(),
277 avg_execution_time: metrics.aggregated.avg_execution_time,
278 avg_throughput: metrics.aggregated.avg_throughput,
279 efficiency_score: metrics.aggregated.efficiency_score,
280 last_update: metrics.last_update,
281 baseline_established: metrics.baseline_established,
282 }
283 })
284 .collect()
285 }
286
287 pub fn cleanup_old_samples(&mut self, retention_time_ms: u64) {
289 let current_time = std::time::SystemTime::now()
290 .duration_since(std::time::UNIX_EPOCH)
291 .unwrap_or_default()
292 .as_millis() as u64;
293
294 let cutoff_time = current_time.saturating_sub(retention_time_ms);
295
296 while let Some(sample) = self.samples.front() {
298 if sample.timestamp < cutoff_time {
299 self.samples.pop_front();
300 } else {
301 break;
302 }
303 }
304
305 for metrics in self.processor_metrics.values_mut() {
307 while let Some(sample) = metrics.samples.front() {
308 if sample.timestamp < cutoff_time {
309 metrics.samples.pop_front();
310 } else {
311 break;
312 }
313 }
314 }
315 }
316
317 pub fn memory_usage(&self) -> usize {
319 let sample_size = std::mem::size_of::<PerformanceSample>();
320 self.samples.len() * sample_size
321 + self
322 .processor_metrics
323 .values()
324 .map(|metrics| metrics.samples.len() * sample_size)
325 .sum::<usize>()
326 }
327
328 pub fn clear(&mut self) {
330 self.samples.clear();
331 self.aggregated_metrics.reset();
332 self.performance_baselines.clear();
333 self.processor_metrics.clear();
334 }
335}
336
/// Direction of a metric's linear trend over a processor's recent samples,
/// as classified by `PerformanceHistory::get_performance_trend`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PerformanceTrend {
    /// Regression slope > 0.01 (metric value rising over time).
    Improving,
    /// Regression slope within [-0.01, 0.01].
    Stable,
    /// Regression slope < -0.01 (metric value falling over time).
    Degrading,
    /// Fewer than 3 usable data points — no trend can be computed.
    Insufficient,
}
345
346impl std::fmt::Display for PerformanceTrend {
347 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
348 match self {
349 PerformanceTrend::Improving => write!(f, "Improving"),
350 PerformanceTrend::Stable => write!(f, "Stable"),
351 PerformanceTrend::Degrading => write!(f, "Degrading"),
352 PerformanceTrend::Insufficient => write!(f, "Insufficient Data"),
353 }
354 }
355}
356
/// Snapshot of one processor's accumulated statistics, produced by
/// `PerformanceHistory::get_processor_summary`.
#[derive(Debug, Clone)]
pub struct ProcessorSummary {
    pub processor_type: ProcessorType,
    pub processor_id: String,
    /// Number of samples currently retained for this processor.
    pub sample_count: usize,
    pub avg_execution_time: f64,
    pub avg_throughput: f64,
    pub efficiency_score: f64,
    /// Timestamp of the most recent sample from this processor.
    pub last_update: u64,
    /// Whether a performance baseline has been recorded (after 10 samples).
    pub baseline_established: bool,
}
369
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_performance_history_creation() {
        let history = PerformanceHistory::new(1000);
        assert_eq!(history.samples.len(), 0);
        assert_eq!(history.max_samples, 1000);
    }

    #[test]
    fn test_add_sample() {
        let mut history = PerformanceHistory::new(10);

        let sample =
            PerformanceSample::new(ProcessorType::QuantumInspired, "test-processor".to_string())
                .with_execution_time(100.0);

        history.add_sample(sample);
        assert_eq!(history.samples.len(), 1);
        assert_eq!(history.aggregated_metrics.sample_count, 1);
    }

    #[test]
    fn test_sample_capacity_limit() {
        let mut history = PerformanceHistory::new(3);

        for i in 0..5 {
            let sample = PerformanceSample::new(ProcessorType::QuantumInspired, "test".to_string())
                .with_execution_time(i as f64 * 10.0);
            history.add_sample(sample);
        }

        // Only the 3 most recent samples (20, 30, 40 ms) survive eviction.
        assert_eq!(history.samples.len(), 3);
        assert_eq!(history.samples[0].execution_time_ms, 20.0);
        assert_eq!(history.samples[1].execution_time_ms, 30.0);
        assert_eq!(history.samples[2].execution_time_ms, 40.0);
    }

    #[test]
    fn test_processor_specific_samples() {
        let mut history = PerformanceHistory::new(100);

        let sample1 =
            PerformanceSample::new(ProcessorType::QuantumInspired, "processor1".to_string())
                .with_execution_time(100.0);

        let sample2 =
            PerformanceSample::new(ProcessorType::NeuralAdaptive, "processor2".to_string())
                .with_execution_time(200.0);

        history.add_sample(sample1);
        history.add_sample(sample2);

        let quantum_samples =
            history.get_processor_samples(ProcessorType::QuantumInspired, "processor1");
        assert_eq!(quantum_samples.len(), 1);
        assert_eq!(quantum_samples[0].execution_time_ms, 100.0);

        let neural_samples =
            history.get_processor_samples(ProcessorType::NeuralAdaptive, "processor2");
        assert_eq!(neural_samples.len(), 1);
        assert_eq!(neural_samples[0].execution_time_ms, 200.0);
    }

    #[test]
    fn test_recent_samples() {
        let mut history = PerformanceHistory::new(100);

        for i in 0..10 {
            let sample = PerformanceSample::new(ProcessorType::QuantumInspired, "test".to_string())
                .with_execution_time(i as f64);
            history.add_sample(sample);
        }

        // Most recent 3, still in chronological (oldest-first) order.
        let recent = history.get_recent_samples(3);
        assert_eq!(recent.len(), 3);
        assert_eq!(recent[0].execution_time_ms, 7.0);
        assert_eq!(recent[1].execution_time_ms, 8.0);
        assert_eq!(recent[2].execution_time_ms, 9.0);
    }

    #[test]
    fn test_samples_by_type() {
        let mut history = PerformanceHistory::new(100);

        for i in 0..5 {
            let quantum_sample =
                PerformanceSample::new(ProcessorType::QuantumInspired, "quantum".to_string())
                    .with_execution_time(i as f64);

            let neural_sample =
                PerformanceSample::new(ProcessorType::NeuralAdaptive, "neural".to_string())
                    .with_execution_time(i as f64 + 100.0);

            history.add_sample(quantum_sample);
            history.add_sample(neural_sample);
        }

        let quantum_samples = history.get_samples_by_type(ProcessorType::QuantumInspired);
        let neural_samples = history.get_samples_by_type(ProcessorType::NeuralAdaptive);

        assert_eq!(quantum_samples.len(), 5);
        assert_eq!(neural_samples.len(), 5);
    }

    #[test]
    fn test_baseline_establishment() {
        let mut history = PerformanceHistory::new(100);

        // 15 samples > the 10-sample threshold, so a baseline is recorded.
        for i in 0..15 {
            let sample = PerformanceSample::new(ProcessorType::QuantumInspired, "test".to_string())
                .with_execution_time(100.0 + i as f64);
            history.add_sample(sample);
        }

        let baseline =
            history.get_baseline(ProcessorType::QuantumInspired, "test", "execution_time");
        assert!(baseline.is_some());
        assert!(baseline.unwrap() > 100.0);
    }

    #[test]
    fn test_performance_degradation_detection() {
        let mut history = PerformanceHistory::new(100);

        // Establish a healthy baseline first.
        for _ in 0..15 {
            let sample = PerformanceSample::new(ProcessorType::QuantumInspired, "test".to_string())
                .with_execution_time(100.0)
                .with_throughput(1000.0);
            history.add_sample(sample);
        }

        // Then feed clearly degraded samples (2x slower, half the throughput).
        for _ in 0..5 {
            let sample = PerformanceSample::new(ProcessorType::QuantumInspired, "test".to_string())
                .with_execution_time(200.0)
                .with_throughput(500.0);
            history.add_sample(sample);
        }

        let degradations =
            history.check_performance_degradation(ProcessorType::QuantumInspired, "test", 0.1);

        assert!(!degradations.is_empty());
    }

    #[test]
    fn test_processor_summary() {
        let mut history = PerformanceHistory::new(100);

        let sample =
            PerformanceSample::new(ProcessorType::QuantumInspired, "test-processor".to_string())
                .with_execution_time(100.0)
                .with_throughput(500.0);

        history.add_sample(sample);

        let summaries = history.get_processor_summary();
        assert_eq!(summaries.len(), 1);
        assert_eq!(summaries[0].processor_type, ProcessorType::QuantumInspired);
        assert_eq!(summaries[0].processor_id, "test-processor");
        assert_eq!(summaries[0].sample_count, 1);
    }

    #[test]
    fn test_memory_usage_calculation() {
        let mut history = PerformanceHistory::new(100);

        for _ in 0..10 {
            let sample =
                PerformanceSample::new(ProcessorType::QuantumInspired, "test".to_string());
            history.add_sample(sample);
        }

        let memory_usage = history.memory_usage();
        assert!(memory_usage > 0);
    }

    #[test]
    fn test_clear_history() {
        let mut history = PerformanceHistory::new(100);

        let sample = PerformanceSample::new(ProcessorType::QuantumInspired, "test".to_string());
        history.add_sample(sample);

        assert_eq!(history.samples.len(), 1);

        history.clear();
        assert_eq!(history.samples.len(), 0);
        assert_eq!(history.aggregated_metrics.sample_count, 0);
    }
}