quantrs2-anneal 0.1.3

Quantum annealing support for the QuantRS2 framework
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
//! Performance monitoring and alerting for tests

use std::collections::HashMap;
use std::time::{Duration, SystemTime};

/// Performance monitoring for tests
/// Aggregates test performance data: rolling metrics, named benchmark
/// baselines, time-series trends, and threshold-based alerting.
///
/// Fields are public, but mutations should go through the methods on this
/// type so derived values (averages, trend classifications) stay consistent.
pub struct TestPerformanceMonitor {
    /// Rolling performance metrics (average, distribution, rates).
    pub metrics: TestPerformanceMetrics,
    /// Benchmark comparisons keyed by benchmark name.
    pub benchmarks: HashMap<String, BenchmarkComparison>,
    /// Time-stamped metric histories plus the latest trend analysis.
    pub trends: PerformanceTrends,
    /// Rule-based alerting evaluated against `metrics`.
    pub alert_system: PerformanceAlertSystem,
}

impl TestPerformanceMonitor {
    /// Maximum number of samples retained in each rolling history.
    const HISTORY_CAP: usize = 1000;

    /// Creates a monitor with empty metrics, no benchmarks, no trends and
    /// no alert rules.
    #[must_use]
    pub fn new() -> Self {
        Self {
            metrics: TestPerformanceMetrics::default(),
            benchmarks: HashMap::new(),
            trends: PerformanceTrends::default(),
            alert_system: PerformanceAlertSystem::new(),
        }
    }

    /// Drops the oldest entries of `history` until it holds at most
    /// [`Self::HISTORY_CAP`] items.
    fn cap_history<T>(history: &mut Vec<T>) {
        if history.len() > Self::HISTORY_CAP {
            let excess = history.len() - Self::HISTORY_CAP;
            history.drain(0..excess);
        }
    }

    /// Records one test execution time, updating the rolling distribution,
    /// the time-series trend, and the running average.
    ///
    /// Both histories are capped at [`Self::HISTORY_CAP`] entries (oldest
    /// dropped first). The cap is applied *before* averaging so the average
    /// always reflects exactly the retained window (the original averaged
    /// first, then trimmed).
    pub fn record_execution_time(&mut self, duration: Duration) {
        self.metrics.execution_time_distribution.push(duration);
        self.trends
            .execution_time_trend
            .push((SystemTime::now(), duration));

        Self::cap_history(&mut self.metrics.execution_time_distribution);
        Self::cap_history(&mut self.trends.execution_time_trend);

        // The distribution is non-empty here (we just pushed), so the
        // division is safe; no emptiness check needed.
        let sum: Duration = self.metrics.execution_time_distribution.iter().sum();
        self.metrics.avg_execution_time =
            sum / self.metrics.execution_time_distribution.len() as u32;
    }

    /// Folds one pass/fail outcome into the running success rate and appends
    /// the new rate to the success-rate trend.
    ///
    /// Bug fix: the prior observation count is now derived from the
    /// success-rate trend itself. The original used
    /// `execution_time_distribution.len()`, which counts a *different*
    /// metric recorded by a different method, so the rate was wrong whenever
    /// the two were not updated in lockstep.
    pub fn update_success_rate(&mut self, success: bool) {
        // Outcomes folded in so far. NOTE(review): once the trend is capped
        // at HISTORY_CAP this undercounts very long histories, effectively
        // turning the mean into a windowed one — acceptable for a rolling
        // monitor, but confirm against callers' expectations.
        let prior = self.trends.success_rate_trend.len() as f64;
        let outcome = if success { 1.0 } else { 0.0 };
        // Incremental mean; avoids the lossy f64 -> usize round-trip the
        // original used to reconstruct the success count.
        self.metrics.success_rate =
            (self.metrics.success_rate * prior + outcome) / (prior + 1.0);

        self.trends
            .success_rate_trend
            .push((SystemTime::now(), self.metrics.success_rate));

        Self::cap_history(&mut self.trends.success_rate_trend);
    }

    /// Registers a benchmark baseline under `name`, snapshotting the current
    /// metrics. The delta starts zeroed; comparison math is filled in
    /// elsewhere.
    pub fn set_benchmark(&mut self, name: String, baseline: PerformanceBaseline) {
        let comparison = BenchmarkComparison {
            baseline,
            current: self.metrics.clone(),
            delta: PerformanceDelta {
                execution_time_change: 0.0,
                success_rate_change: 0.0,
                resource_usage_change: 0.0,
                overall_change: 0.0,
            },
            timestamp: SystemTime::now(),
        };
        self.benchmarks.insert(name, comparison);
    }

    /// Returns the current rolling metrics.
    #[must_use]
    pub const fn get_metrics(&self) -> &TestPerformanceMetrics {
        &self.metrics
    }

    /// Returns the recorded time-series trends.
    #[must_use]
    pub const fn get_trends(&self) -> &PerformanceTrends {
        &self.trends
    }

    /// Classifies the execution-time trend by comparing the mean of the
    /// older half of the last `WINDOW` samples against the newer half.
    ///
    /// A relative change below -10% is `Improving`, above +10% `Degrading`,
    /// otherwise `Stable`. Bug fix: when the older-half mean is zero the
    /// original divided by zero, producing NaN that silently classified as
    /// `Stable`; that case is now reported explicitly as `Unknown`.
    pub fn analyze_trends(&mut self) {
        const WINDOW: usize = 10;
        const HALF: usize = WINDOW / 2;

        let trend = &self.trends.execution_time_trend;
        if trend.len() < WINDOW {
            return;
        }

        // Last WINDOW samples, already in chronological order.
        let window = &trend[trend.len() - WINDOW..];
        let half_mean = |samples: &[(SystemTime, Duration)]| {
            samples.iter().map(|(_, d)| d.as_secs_f64()).sum::<f64>() / HALF as f64
        };
        let older_avg = half_mean(&window[..HALF]);
        let newer_avg = half_mean(&window[HALF..]);

        self.trends.trend_analysis.execution_time_direction = if older_avg == 0.0 {
            TrendDirection::Unknown
        } else {
            let change = (newer_avg - older_avg) / older_avg;
            if change < -0.1 {
                TrendDirection::Improving
            } else if change > 0.1 {
                TrendDirection::Degrading
            } else {
                TrendDirection::Stable
            }
        };
    }

    /// Evaluates all alert rules against the current metrics.
    pub fn check_alerts(&mut self) {
        self.alert_system.check_alerts(&self.metrics);
    }
}

/// `Default` mirrors [`TestPerformanceMonitor::new`] (clippy:
/// `new_without_default`).
impl Default for TestPerformanceMonitor {
    fn default() -> Self {
        Self::new()
    }
}

/// Test performance metrics
#[derive(Debug, Clone)]
pub struct TestPerformanceMetrics {
    /// Average execution time
    pub avg_execution_time: Duration,
    /// Execution time distribution
    pub execution_time_distribution: Vec<Duration>,
    /// Success rate
    pub success_rate: f64,
    /// Resource efficiency
    pub resource_efficiency: f64,
    /// Throughput rate
    pub throughput_rate: f64,
}

impl Default for TestPerformanceMetrics {
    /// Zero-valued metrics: no samples, zero duration, zero rates.
    fn default() -> Self {
        Self {
            execution_time_distribution: Vec::new(),
            avg_execution_time: Duration::ZERO,
            success_rate: 0.0,
            throughput_rate: 0.0,
            resource_efficiency: 0.0,
        }
    }
}

/// Benchmark comparison data
#[derive(Debug, Clone)]
pub struct BenchmarkComparison {
    /// Baseline performance
    pub baseline: PerformanceBaseline,
    /// Current performance
    pub current: TestPerformanceMetrics,
    /// Performance delta
    pub delta: PerformanceDelta,
    /// Comparison timestamp
    pub timestamp: SystemTime,
}

/// Performance baseline
#[derive(Debug, Clone)]
pub struct PerformanceBaseline {
    /// Baseline execution time
    pub execution_time: Duration,
    /// Baseline success rate
    pub success_rate: f64,
    /// Baseline resource usage
    pub resource_usage: f64,
    /// Baseline timestamp
    pub timestamp: SystemTime,
}

/// Performance delta comparison
#[derive(Debug, Clone)]
pub struct PerformanceDelta {
    /// Execution time change
    pub execution_time_change: f64,
    /// Success rate change
    pub success_rate_change: f64,
    /// Resource usage change
    pub resource_usage_change: f64,
    /// Overall performance change
    pub overall_change: f64,
}

/// Performance trends tracking
#[derive(Debug, Clone)]
pub struct PerformanceTrends {
    /// Execution time trend
    pub execution_time_trend: Vec<(SystemTime, Duration)>,
    /// Success rate trend
    pub success_rate_trend: Vec<(SystemTime, f64)>,
    /// Resource usage trend
    pub resource_usage_trend: Vec<(SystemTime, f64)>,
    /// Trend analysis
    pub trend_analysis: TrendAnalysis,
}

impl Default for PerformanceTrends {
    /// Empty histories and a neutral (all-`Stable`) trend analysis.
    fn default() -> Self {
        Self {
            trend_analysis: TrendAnalysis::default(),
            execution_time_trend: Vec::new(),
            success_rate_trend: Vec::new(),
            resource_usage_trend: Vec::new(),
        }
    }
}

/// Trend analysis results
#[derive(Debug, Clone)]
pub struct TrendAnalysis {
    /// Execution time trend direction
    pub execution_time_direction: TrendDirection,
    /// Success rate trend direction
    pub success_rate_direction: TrendDirection,
    /// Resource usage trend direction
    pub resource_usage_direction: TrendDirection,
    /// Trend confidence
    pub confidence: f64,
}

impl Default for TrendAnalysis {
    /// Neutral analysis: every direction `Stable`, zero confidence.
    fn default() -> Self {
        Self {
            confidence: 0.0,
            resource_usage_direction: TrendDirection::Stable,
            success_rate_direction: TrendDirection::Stable,
            execution_time_direction: TrendDirection::Stable,
        }
    }
}

/// Trend directions
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TrendDirection {
    Improving,
    Stable,
    Degrading,
    Volatile,
    Unknown,
}

/// Performance alert system
pub struct PerformanceAlertSystem {
    /// Alert rules
    pub alert_rules: Vec<AlertRule>,
    /// Active alerts
    pub active_alerts: HashMap<String, PerformanceAlert>,
    /// Alert history
    pub alert_history: Vec<PerformanceAlert>,
}

impl PerformanceAlertSystem {
    /// Maximum number of alerts retained in `alert_history`.
    const HISTORY_CAP: usize = 1000;

    /// Creates an alert system with no rules and no alerts.
    #[must_use]
    pub fn new() -> Self {
        Self {
            alert_rules: vec![],
            active_alerts: HashMap::new(),
            alert_history: vec![],
        }
    }

    /// Adds a rule to be evaluated on every [`Self::check_alerts`] call.
    pub fn add_rule(&mut self, rule: AlertRule) {
        self.alert_rules.push(rule);
    }

    /// Removes every rule whose name equals `rule_name`.
    pub fn remove_rule(&mut self, rule_name: &str) {
        self.alert_rules.retain(|r| r.name != rule_name);
    }

    /// Evaluates all rules against `metrics` and triggers an alert for each
    /// rule whose condition currently holds.
    ///
    /// `ThresholdExceeded` compares against the average execution time (in
    /// seconds); `ThresholdBelow` against the success rate.
    /// `PercentageChange` and `Custom` are not implemented and never fire.
    pub fn check_alerts(&mut self, metrics: &TestPerformanceMetrics) {
        // Collect names first: `trigger_alert` needs `&mut self`, which we
        // cannot take while iterating `self.alert_rules`.
        let triggered_rules: Vec<String> = self
            .alert_rules
            .iter()
            .filter_map(|rule| {
                let should_alert = match &rule.condition {
                    AlertCondition::ThresholdExceeded(threshold) => {
                        metrics.avg_execution_time.as_secs_f64() > *threshold
                    }
                    AlertCondition::ThresholdBelow(threshold) => {
                        metrics.success_rate < *threshold
                    }
                    // TODO: needs a stored baseline to compare against.
                    AlertCondition::PercentageChange(_percentage) => false,
                    // TODO: custom condition evaluation not implemented.
                    AlertCondition::Custom(_) => false,
                };

                should_alert.then(|| rule.name.clone())
            })
            .collect();

        for rule_name in triggered_rules {
            self.trigger_alert(&rule_name, metrics);
        }
    }

    /// Records a new active alert for `rule_name` (no-op if the rule does
    /// not exist).
    ///
    /// Bug fix: `metric_value` and `threshold_value` are now derived from
    /// the rule's condition instead of the avg-execution-time/`0.0`
    /// placeholders, so e.g. a `ThresholdBelow` (success-rate) alert reports
    /// the success rate it actually fired on.
    fn trigger_alert(&mut self, rule_name: &str, metrics: &TestPerformanceMetrics) {
        let rule = match self.alert_rules.iter().find(|r| r.name == rule_name) {
            Some(rule) => rule,
            None => return,
        };

        // Pair each condition with the metric it inspects.
        let (metric_value, threshold_value) = match &rule.condition {
            AlertCondition::ThresholdExceeded(t) => {
                (metrics.avg_execution_time.as_secs_f64(), *t)
            }
            AlertCondition::ThresholdBelow(t) => (metrics.success_rate, *t),
            AlertCondition::PercentageChange(t) => {
                (metrics.avg_execution_time.as_secs_f64(), *t)
            }
            // No numeric threshold exists for a custom condition.
            AlertCondition::Custom(_) => (metrics.avg_execution_time.as_secs_f64(), 0.0),
        };

        let alert = PerformanceAlert {
            // Id is unique per rule per second; two triggers of the same
            // rule within one second collide and overwrite in `active_alerts`.
            id: format!(
                "alert_{}_{}",
                rule_name,
                SystemTime::now()
                    .duration_since(SystemTime::UNIX_EPOCH)
                    .unwrap_or(Duration::ZERO)
                    .as_secs()
            ),
            rule_name: rule_name.to_string(),
            message: format!("Performance alert triggered for rule: {rule_name}"),
            severity: rule.severity.clone(),
            timestamp: SystemTime::now(),
            metric_value,
            threshold_value,
            status: AlertStatus::Active,
        };

        self.active_alerts.insert(alert.id.clone(), alert.clone());
        self.alert_history.push(alert);

        // Drop the oldest entries once the history exceeds the cap.
        if self.alert_history.len() > Self::HISTORY_CAP {
            let excess = self.alert_history.len() - Self::HISTORY_CAP;
            self.alert_history.drain(0..excess);
        }
    }

    /// Marks an active alert as acknowledged.
    ///
    /// # Errors
    /// Returns an error string if `alert_id` is not an active alert.
    pub fn acknowledge_alert(&mut self, alert_id: &str) -> Result<(), String> {
        let alert = self
            .active_alerts
            .get_mut(alert_id)
            .ok_or_else(|| format!("Alert {alert_id} not found"))?;
        alert.status = AlertStatus::Acknowledged;
        Ok(())
    }

    /// Removes an alert from the active set and marks its history entries
    /// as resolved.
    ///
    /// # Errors
    /// Returns an error string if `alert_id` is not an active alert.
    pub fn resolve_alert(&mut self, alert_id: &str) -> Result<(), String> {
        // Bug fix: the removed alert was bound to an unused variable; only
        // the removal itself (and its "not found" error) matters here.
        self.active_alerts
            .remove(alert_id)
            .ok_or_else(|| format!("Alert {alert_id} not found"))?;

        for hist_alert in &mut self.alert_history {
            if hist_alert.id == alert_id {
                hist_alert.status = AlertStatus::Resolved;
            }
        }
        Ok(())
    }

    /// Returns references to all currently active alerts (arbitrary order).
    #[must_use]
    pub fn get_active_alerts(&self) -> Vec<&PerformanceAlert> {
        self.active_alerts.values().collect()
    }

    /// Returns the full alert history, oldest first.
    #[must_use]
    pub fn get_alert_history(&self) -> &[PerformanceAlert] {
        &self.alert_history
    }

    /// Removes all active alerts and clears the history.
    pub fn clear_all_alerts(&mut self) {
        self.active_alerts.clear();
        self.alert_history.clear();
    }
}

/// `Default` mirrors [`PerformanceAlertSystem::new`] (clippy:
/// `new_without_default`).
impl Default for PerformanceAlertSystem {
    fn default() -> Self {
        Self::new()
    }
}

/// Alert rule definition
#[derive(Debug, Clone)]
pub struct AlertRule {
    /// Rule name
    pub name: String,
    /// Metric to monitor
    pub metric: String,
    /// Alert condition
    pub condition: AlertCondition,
    /// Alert severity
    pub severity: AlertSeverity,
    /// Alert actions
    pub actions: Vec<AlertAction>,
}

/// Alert conditions
#[derive(Debug, Clone)]
pub enum AlertCondition {
    /// Threshold exceeded
    ThresholdExceeded(f64),
    /// Threshold below
    ThresholdBelow(f64),
    /// Percentage change
    PercentageChange(f64),
    /// Custom condition
    Custom(String),
}

/// Alert severity levels
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AlertSeverity {
    Info,
    Warning,
    Error,
    Critical,
}

/// Alert actions
#[derive(Debug, Clone)]
pub enum AlertAction {
    /// Log alert
    Log,
    /// Send email
    Email(String),
    /// Execute script
    ExecuteScript(String),
    /// Custom action
    Custom(String),
}

/// Performance alert
#[derive(Debug, Clone)]
pub struct PerformanceAlert {
    /// Alert ID
    pub id: String,
    /// Alert rule name
    pub rule_name: String,
    /// Alert message
    pub message: String,
    /// Alert severity
    pub severity: AlertSeverity,
    /// Alert timestamp
    pub timestamp: SystemTime,
    /// Metric value
    pub metric_value: f64,
    /// Threshold value
    pub threshold_value: f64,
    /// Alert status
    pub status: AlertStatus,
}

/// Alert status
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AlertStatus {
    Active,
    Resolved,
    Acknowledged,
    Suppressed,
}