quantrs2_device/process_tomography/analysis/monitoring.rs

use scirs2_core::ndarray::{Array1, Array2, Array4};
use scirs2_core::Complex64;
use std::collections::HashMap;
use std::time::{Duration, SystemTime};

use super::super::results::*;
use crate::DeviceResult;

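// Process-tomography monitoring utilities: `ProcessAnomalyDetector` scores new
// `ProcessMetrics` against a rolling history, `ProcessDriftDetector` compares
// recent data with a fixed reference process, and `generate_monitoring_result`
// combines both checks into an alert level.
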
impl ProcessAnomalyDetector {
    pub const fn new(threshold: f64, algorithm: AnomalyDetectionAlgorithm) -> Self {
        Self {
            historical_data: Vec::new(),
            threshold,
            algorithm,
        }
    }

    pub fn add_process_metrics(&mut self, metrics: ProcessMetrics) {
        self.historical_data.push(metrics);

        // Keep a rolling window of at most 1000 recent samples.
        if self.historical_data.len() > 1000 {
            self.historical_data.remove(0);
        }
    }

    pub fn detect_anomaly(&self, current_metrics: &ProcessMetrics) -> DeviceResult<bool> {
        // Not enough history for a meaningful baseline.
        if self.historical_data.len() < 10 {
            return Ok(false);
        }

        match self.algorithm {
            AnomalyDetectionAlgorithm::StatisticalThreshold => {
                self.detect_statistical_anomaly(current_metrics)
            }
            AnomalyDetectionAlgorithm::IsolationForest => {
                self.detect_isolation_forest_anomaly(current_metrics)
            }
            AnomalyDetectionAlgorithm::OneClassSVM => {
                self.detect_one_class_svm_anomaly(current_metrics)
            }
            AnomalyDetectionAlgorithm::LocalOutlierFactor => {
                self.detect_lof_anomaly(current_metrics)
            }
        }
    }

    /// Flags an anomaly when the z-score of the current process fidelity or
    /// unitarity, relative to the historical distribution, exceeds the threshold.
    fn detect_statistical_anomaly(&self, current_metrics: &ProcessMetrics) -> DeviceResult<bool> {
        let historical_fidelities: Vec<f64> = self
            .historical_data
            .iter()
            .map(|m| m.process_fidelity)
            .collect();

        let historical_unitarities: Vec<f64> =
            self.historical_data.iter().map(|m| m.unitarity).collect();

        let mean_fidelity =
            historical_fidelities.iter().sum::<f64>() / historical_fidelities.len() as f64;
        let std_fidelity = {
            let variance = historical_fidelities
                .iter()
                .map(|&x| (x - mean_fidelity).powi(2))
                .sum::<f64>()
                / historical_fidelities.len() as f64;
            variance.sqrt()
        };

        let mean_unitarity =
            historical_unitarities.iter().sum::<f64>() / historical_unitarities.len() as f64;
        let std_unitarity = {
            let variance = historical_unitarities
                .iter()
                .map(|&x| (x - mean_unitarity).powi(2))
                .sum::<f64>()
                / historical_unitarities.len() as f64;
            variance.sqrt()
        };

        let fidelity_z_score = if std_fidelity > 1e-12 {
            (current_metrics.process_fidelity - mean_fidelity).abs() / std_fidelity
        } else {
            0.0
        };

        let unitarity_z_score = if std_unitarity > 1e-12 {
            (current_metrics.unitarity - mean_unitarity).abs() / std_unitarity
        } else {
            0.0
        };

        Ok(fidelity_z_score > self.threshold || unitarity_z_score > self.threshold)
    }

    fn detect_isolation_forest_anomaly(
        &self,
        current_metrics: &ProcessMetrics,
    ) -> DeviceResult<bool> {
        let features = self.extract_features(current_metrics);
        let anomaly_score = self.calculate_isolation_score(&features);
        Ok(anomaly_score > self.threshold)
    }

    fn detect_one_class_svm_anomaly(&self, current_metrics: &ProcessMetrics) -> DeviceResult<bool> {
        let features = self.extract_features(current_metrics);
        let distance_from_hyperplane = self.calculate_svm_distance(&features);
        Ok(distance_from_hyperplane < -self.threshold)
    }

    fn detect_lof_anomaly(&self, current_metrics: &ProcessMetrics) -> DeviceResult<bool> {
        let features = self.extract_features(current_metrics);
        let lof_score = self.calculate_lof_score(&features);
        Ok(lof_score > self.threshold)
    }

    /// Collects the scalar process metrics used as the feature vector for anomaly scoring.
    fn extract_features(&self, metrics: &ProcessMetrics) -> Vec<f64> {
        vec![
            metrics.process_fidelity,
            metrics.average_gate_fidelity,
            metrics.unitarity,
            metrics.entangling_power,
            metrics.non_unitality,
            metrics.channel_capacity,
            metrics.coherent_information,
            metrics.diamond_norm_distance,
        ]
    }

    /// Simplified isolation score: mean absolute deviation of each feature
    /// from its historical mean.
    fn calculate_isolation_score(&self, features: &[f64]) -> f64 {
        let mut total_distance = 0.0;

        for (i, &feature) in features.iter().enumerate() {
            let historical_values: Vec<f64> = self
                .historical_data
                .iter()
                .map(|m| self.extract_features(m)[i])
                .collect();

            if !historical_values.is_empty() {
                let mean = historical_values.iter().sum::<f64>() / historical_values.len() as f64;
                total_distance += (feature - mean).abs();
            }
        }

        total_distance / features.len() as f64
    }

    /// Simplified stand-in for a one-class SVM decision value: a fixed
    /// 1/(i + 1)-weighted sum of the features, offset by 0.5.
    fn calculate_svm_distance(&self, features: &[f64]) -> f64 {
        let mut weighted_sum = 0.0;

        for (i, &feature) in features.iter().enumerate() {
            let weight = 1.0 / (i + 1) as f64;
            weighted_sum += weight * feature;
        }

        weighted_sum - 0.5
    }

    /// Simplified local-outlier-factor score based on the k-nearest-neighbour
    /// distance to the historical feature vectors.
    fn calculate_lof_score(&self, features: &[f64]) -> f64 {
        let k = 5.min(self.historical_data.len());
        if k == 0 {
            return 1.0;
        }

        let mut distances: Vec<f64> = self
            .historical_data
            .iter()
            .map(|historical_metrics| {
                let historical_features = self.extract_features(historical_metrics);
                self.euclidean_distance(features, &historical_features)
            })
            .collect();

        distances.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

        // Local reachability density estimated from the k-th nearest distance.
        let k_distance = distances[k.min(distances.len()) - 1];
        let lrd = if k_distance > 1e-12 {
            k as f64 / k_distance
        } else {
            f64::INFINITY
        };

        1.0 / lrd.max(1e-12)
    }

    fn euclidean_distance(&self, features1: &[f64], features2: &[f64]) -> f64 {
        features1
            .iter()
            .zip(features2.iter())
            .map(|(a, b)| (a - b).powi(2))
            .sum::<f64>()
            .sqrt()
    }
}

impl ProcessDriftDetector {
    pub const fn new(
        reference_metrics: ProcessMetrics,
        sensitivity: f64,
        method: DriftDetectionMethod,
    ) -> Self {
        Self {
            reference_metrics,
            sensitivity,
            method,
        }
    }

    pub fn detect_drift(&self, historical_data: &[ProcessMetrics]) -> DeviceResult<bool> {
        if historical_data.is_empty() {
            return Ok(false);
        }

        match self.method {
            DriftDetectionMethod::StatisticalTest => self.detect_statistical_drift(historical_data),
            DriftDetectionMethod::ChangePointDetection => {
                self.detect_change_point_drift(historical_data)
            }
            DriftDetectionMethod::KLDivergence => self.detect_kl_divergence_drift(historical_data),
            DriftDetectionMethod::WassersteinDistance => {
                self.detect_wasserstein_drift(historical_data)
            }
        }
    }

    /// Compares the mean of the 20 most recent process fidelities against the
    /// reference fidelity, scaled by the sensitivity.
    fn detect_statistical_drift(&self, historical_data: &[ProcessMetrics]) -> DeviceResult<bool> {
        let recent_fidelities: Vec<f64> = historical_data
            .iter()
            .rev()
            .take(20)
            .map(|m| m.process_fidelity)
            .collect();

        if recent_fidelities.is_empty() {
            return Ok(false);
        }

        let recent_mean = recent_fidelities.iter().sum::<f64>() / recent_fidelities.len() as f64;
        let reference_fidelity = self.reference_metrics.process_fidelity;

        let difference = (recent_mean - reference_fidelity).abs();
        let threshold = self.sensitivity * reference_fidelity;

        Ok(difference > threshold)
    }

    /// CUSUM-style change-point detection on the process fidelity relative to
    /// the reference value.
    fn detect_change_point_drift(&self, historical_data: &[ProcessMetrics]) -> DeviceResult<bool> {
        if historical_data.len() < 10 {
            return Ok(false);
        }

        let fidelities: Vec<f64> = historical_data.iter().map(|m| m.process_fidelity).collect();

        let reference_mean = self.reference_metrics.process_fidelity;
        let mut cusum_pos = 0.0;
        let mut cusum_neg = 0.0;
        let threshold = self.sensitivity * 0.1;

        // Accumulate positive and negative deviations; either sum crossing the
        // threshold indicates a sustained shift away from the reference.
        for &fidelity in &fidelities {
            let deviation = fidelity - reference_mean;
            cusum_pos = (cusum_pos + deviation).max(0.0);
            cusum_neg = (cusum_neg - deviation).max(0.0);

            if cusum_pos > threshold || cusum_neg > threshold {
                return Ok(true);
            }
        }

        Ok(false)
    }

    /// Drift detection via KL divergence between the reference fidelity
    /// (treated as a single point mass) and a histogram of recent fidelities.
    fn detect_kl_divergence_drift(&self, historical_data: &[ProcessMetrics]) -> DeviceResult<bool> {
        let recent_data: Vec<f64> = historical_data
            .iter()
            .rev()
            .take(50)
            .map(|m| m.process_fidelity)
            .collect();

        if recent_data.len() < 10 {
            return Ok(false);
        }

        let reference_hist = self.create_histogram(&[self.reference_metrics.process_fidelity]);
        let recent_hist = self.create_histogram(&recent_data);

        let kl_divergence = self.calculate_kl_divergence(&reference_hist, &recent_hist);

        Ok(kl_divergence > self.sensitivity)
    }

    /// Simplified Wasserstein-style drift check: the absolute difference
    /// between the reference fidelity and the mean of recent fidelities.
    fn detect_wasserstein_drift(&self, historical_data: &[ProcessMetrics]) -> DeviceResult<bool> {
        let recent_data: Vec<f64> = historical_data
            .iter()
            .rev()
            .take(50)
            .map(|m| m.process_fidelity)
            .collect();

        if recent_data.is_empty() {
            return Ok(false);
        }

        let reference_value = self.reference_metrics.process_fidelity;
        let recent_mean = recent_data.iter().sum::<f64>() / recent_data.len() as f64;

        let wasserstein_distance = (reference_value - recent_mean).abs();

        Ok(wasserstein_distance > self.sensitivity * 0.1)
    }

    /// Builds a 10-bin histogram over the data range, normalized to a
    /// probability distribution.
    fn create_histogram(&self, data: &[f64]) -> Vec<f64> {
        let num_bins = 10;
        let mut histogram = vec![0.0; num_bins];

        if data.is_empty() {
            return histogram;
        }

        let min_val = data.iter().copied().fold(f64::INFINITY, f64::min);
        let max_val = data.iter().copied().fold(f64::NEG_INFINITY, f64::max);

        // Degenerate case: all values identical.
        if (max_val - min_val).abs() < 1e-12 {
            histogram[0] = 1.0;
            return histogram;
        }

        let bin_width = (max_val - min_val) / num_bins as f64;

        for &value in data {
            let bin_idx = ((value - min_val) / bin_width).floor() as usize;
            let bin_idx = bin_idx.min(num_bins - 1);
            histogram[bin_idx] += 1.0;
        }

        // Normalize counts to probabilities.
        let total = histogram.iter().sum::<f64>();
        if total > 1e-12 {
            for bin in &mut histogram {
                *bin /= total;
            }
        }

        histogram
    }

    fn calculate_kl_divergence(&self, p: &[f64], q: &[f64]) -> f64 {
        let mut kl_div = 0.0;
        // Clamp probabilities away from zero to keep the logarithm finite.
        let epsilon = 1e-12;

        for (p_i, q_i) in p.iter().zip(q.iter()) {
            let p_safe = p_i.max(epsilon);
            let q_safe = q_i.max(epsilon);
            kl_div += p_safe * (p_safe / q_safe).ln();
        }

        kl_div
    }
}

pub fn generate_monitoring_result(
    current_metrics: ProcessMetrics,
    anomaly_detector: &ProcessAnomalyDetector,
    drift_detector: &ProcessDriftDetector,
    experimental_conditions: ExperimentalConditions,
) -> DeviceResult<ProcessMonitoringResult> {
    let is_anomalous = anomaly_detector.detect_anomaly(&current_metrics)?;
    let anomaly_score = if is_anomalous { 0.8 } else { 0.1 };

    let has_drift = drift_detector.detect_drift(&anomaly_detector.historical_data)?;
    let drift_indicator = if has_drift { 0.7 } else { 0.05 };

    // Map the coarse anomaly and drift scores onto an alert level.
    let alert_level = if anomaly_score > 0.7 || drift_indicator > 0.6 {
        AlertLevel::Critical
    } else if anomaly_score > 0.4 || drift_indicator > 0.3 {
        AlertLevel::Warning
    } else {
        AlertLevel::Normal
    };

    Ok(ProcessMonitoringResult {
        current_metrics,
        experimental_conditions,
        anomaly_score,
        drift_indicator,
        alert_level,
    })
}
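
// Illustrative wiring (a sketch, not part of the API surface): assumes the
// caller already has `ProcessMetrics` and `ExperimentalConditions` values from
// the tomography pipeline, that `ProcessMetrics` implements `Clone`, and that
// the threshold/sensitivity values shown are placeholders.
//
//     let mut anomalies = ProcessAnomalyDetector::new(
//         3.0,
//         AnomalyDetectionAlgorithm::StatisticalThreshold,
//     );
//     let drift = ProcessDriftDetector::new(
//         reference_metrics,
//         0.05,
//         DriftDetectionMethod::StatisticalTest,
//     );
//     anomalies.add_process_metrics(latest_metrics.clone());
//     let report = generate_monitoring_result(latest_metrics, &anomalies, &drift, conditions)?;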