//! Adaptive error mitigation driven by a lightweight neural error-rate predictor.

use crate::error::{QuantRS2Error, QuantRS2Result};
use crate::gate::GateOp;
use crate::qubit::QubitId;
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::{thread_rng, Rng};
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

/// A small feed-forward network (one hidden ReLU layer, sigmoid output)
/// that predicts circuit error rates from extracted circuit features.
#[derive(Debug, Clone)]
pub struct NeuralErrorPredictor {
    /// Weights of the input-to-hidden layer (hidden_size x input_size).
    input_weights: Array2<f64>,
    /// Weights of the hidden-to-output layer (1 x hidden_size).
    hidden_weights: Array2<f64>,
    /// Bias terms of the hidden layer.
    input_bias: Array1<f64>,
    /// Bias term of the output neuron.
    hidden_bias: Array1<f64>,
    /// Step size for gradient-descent updates.
    learning_rate: f64,
    /// Shared record of recent training examples (capped at 1000 entries).
    training_history: Arc<RwLock<Vec<TrainingExample>>>,
}

/// A single observation used to train the error predictor.
#[derive(Debug, Clone)]
pub struct TrainingExample {
    /// Feature vector the prediction was made from.
    pub features: Vec<f64>,
    /// Error rate that was actually observed.
    pub error_rate: f64,
    /// When the observation was recorded.
    pub timestamp: std::time::Instant,
}

/// Features extracted from a quantum circuit for error-rate prediction.
#[derive(Debug, Clone)]
pub struct CircuitFeatures {
    /// Circuit depth (longest gate chain on any qubit).
    pub depth: usize,
    /// Number of single-qubit gates.
    pub single_qubit_gates: usize,
    /// Number of two-qubit gates.
    pub two_qubit_gates: usize,
    /// Average number of qubits touched per gate.
    pub connectivity: f64,
    /// Estimated average gate fidelity.
    pub average_gate_fidelity: f64,
    /// Number of measurement operations.
    pub measurement_count: usize,
    /// Number of qubits in the circuit.
    pub width: usize,
    /// Rough estimate of generated entanglement.
    pub entanglement_entropy: f64,
}

impl NeuralErrorPredictor {
    /// Creates a predictor with Xavier-initialized weights and zero biases.
    pub fn new(input_size: usize, hidden_size: usize, learning_rate: f64) -> Self {
        let mut rng = thread_rng();

        // Xavier (Glorot) uniform bounds for each layer.
        let xavier_input = (6.0 / (input_size + hidden_size) as f64).sqrt();
        let xavier_hidden = (6.0 / (hidden_size + 1) as f64).sqrt();

        let input_weights = Array2::from_shape_fn((hidden_size, input_size), |_| {
            rng.gen_range(-xavier_input..xavier_input)
        });

        let hidden_weights = Array2::from_shape_fn((1, hidden_size), |_| {
            rng.gen_range(-xavier_hidden..xavier_hidden)
        });

        let input_bias = Array1::zeros(hidden_size);
        let hidden_bias = Array1::zeros(1);

        Self {
            input_weights,
            hidden_weights,
            input_bias,
            hidden_bias,
            learning_rate,
            training_history: Arc::new(RwLock::new(Vec::new())),
        }
    }

    /// Predicts the error rate for the given feature vector.
    ///
    /// Returns a value in [0, 1], or an error if the feature length does not
    /// match the network's input size.
    pub fn predict(&self, features: &[f64]) -> QuantRS2Result<f64> {
        if features.len() != self.input_weights.ncols() {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Expected {} features, got {}",
                self.input_weights.ncols(),
                features.len()
            )));
        }

        let input = Array1::from_vec(features.to_vec());

        // Hidden layer: affine transform followed by ReLU activation.
        let hidden_pre = self.input_weights.dot(&input) + &self.input_bias;
        let hidden = hidden_pre.mapv(|x| x.max(0.0));

        // Output layer: affine transform followed by a sigmoid, so the
        // prediction behaves like a probability.
        let output_pre = self.hidden_weights.dot(&hidden) + &self.hidden_bias;
        let output = 1.0 / (1.0 + (-output_pre[0]).exp());

        Ok(output.clamp(0.0, 1.0))
    }

    /// Trains the network on one example with a single gradient-descent step
    /// and records the example in the training history.
    pub fn train(&mut self, features: &[f64], observed_error_rate: f64) -> QuantRS2Result<()> {
        if features.len() != self.input_weights.ncols() {
            return Err(QuantRS2Error::InvalidInput(
                "Feature size mismatch".to_string(),
            ));
        }

        {
            // Record the example, keeping only the most recent 1000 entries.
            let mut history = self
                .training_history
                .write()
                .unwrap_or_else(|e| e.into_inner());
            history.push(TrainingExample {
                features: features.to_vec(),
                error_rate: observed_error_rate,
                timestamp: std::time::Instant::now(),
            });

            let len = history.len();
            if len > 1000 {
                history.drain(0..len - 1000);
            }
        }

        let input = Array1::from_vec(features.to_vec());

        // Forward pass (same computation as `predict`).
        let hidden_pre = self.input_weights.dot(&input) + &self.input_bias;
        let hidden = hidden_pre.mapv(|x| x.max(0.0));

        let output_pre = self.hidden_weights.dot(&hidden) + &self.hidden_bias;
        let predicted = 1.0 / (1.0 + (-output_pre[0]).exp());

        // Backward pass: squared-error loss through the sigmoid output.
        let output_error = predicted - observed_error_rate;
        let output_delta = output_error * predicted * (1.0 - predicted);

        // Backpropagate into the hidden layer; the ReLU passes gradient only
        // where its pre-activation was positive.
        let hidden_error = output_delta * self.hidden_weights.row(0).to_owned();
        let relu_grad = hidden_pre.mapv(|x| if x > 0.0 { 1.0 } else { 0.0 });
        let hidden_delta = hidden_error * relu_grad;

        // Gradient-descent update for the output layer.
        for i in 0..self.hidden_weights.ncols() {
            self.hidden_weights[[0, i]] -= self.learning_rate * output_delta * hidden[i];
        }
        self.hidden_bias[0] -= self.learning_rate * output_delta;

        // Gradient-descent update for the hidden layer.
        for i in 0..self.input_weights.nrows() {
            for j in 0..self.input_weights.ncols() {
                self.input_weights[[i, j]] -= self.learning_rate * hidden_delta[i] * input[j];
            }
            self.input_bias[i] -= self.learning_rate * hidden_delta[i];
        }

        Ok(())
    }

    /// Returns a snapshot of the recorded training examples.
    pub fn get_training_history(&self) -> Vec<TrainingExample> {
        self.training_history
            .read()
            .unwrap_or_else(|e| e.into_inner())
            .clone()
    }

    /// Estimates prediction accuracy as one minus the mean absolute error
    /// over the recorded training history (0.0 if the history is empty).
    pub fn calculate_accuracy(&self) -> f64 {
        let history = self
            .training_history
            .read()
            .unwrap_or_else(|e| e.into_inner());
        if history.is_empty() {
            return 0.0;
        }

        let mut total_error = 0.0;
        for example in history.iter() {
            if let Ok(predicted) = self.predict(&example.features) {
                total_error += (predicted - example.error_rate).abs();
            }
        }

        1.0 - (total_error / history.len() as f64)
    }
}

impl CircuitFeatures {
    /// Extracts circuit-level features from a gate sequence.
    pub fn extract_from_circuit(gates: &[Box<dyn GateOp>], num_qubits: usize) -> Self {
        let mut single_qubit_gates = 0;
        let mut two_qubit_gates = 0;
        let mut measurement_count = 0;
        let mut max_depth = 0;
        let mut qubit_depths: HashMap<QubitId, usize> = HashMap::new();

        for gate in gates {
            let qubits = gate.qubits();

            match qubits.len() {
                1 => single_qubit_gates += 1,
                2 => two_qubit_gates += 1,
                _ => {}
            }

            // A gate's depth is one more than the deepest qubit it touches.
            let current_depth = qubits
                .iter()
                .map(|q| *qubit_depths.get(q).unwrap_or(&0))
                .max()
                .unwrap_or(0)
                + 1;

            for qubit in qubits {
                qubit_depths.insert(qubit, current_depth);
            }

            max_depth = max_depth.max(current_depth);

            if gate.name().to_lowercase().contains("measure") {
                measurement_count += 1;
            }
        }

        // Average number of qubits touched per counted gate.
        let total_gates = single_qubit_gates + two_qubit_gates;
        let connectivity = if total_gates > 0 {
            (single_qubit_gates + 2 * two_qubit_gates) as f64 / total_gates as f64
        } else {
            0.0
        };

        // Rough proxy for generated entanglement: two-qubit gates per qubit,
        // capped at the number of qubits.
        let entanglement_entropy = if num_qubits > 0 {
            (two_qubit_gates as f64 / num_qubits as f64).min(num_qubits as f64)
        } else {
            0.0
        };

        // Heuristic fidelity estimate: start at 0.99 and subtract 0.5% per
        // two-qubit gate, floored at 0.90 below.
        let average_gate_fidelity = (two_qubit_gates as f64).mul_add(-0.005, 0.99);

        Self {
            depth: max_depth,
            single_qubit_gates,
            two_qubit_gates,
            connectivity,
            average_gate_fidelity: average_gate_fidelity.max(0.90),
            measurement_count,
            width: num_qubits,
            entanglement_entropy,
        }
    }

    /// Flattens the features into the 8-element vector consumed by
    /// `NeuralErrorPredictor::predict`.
    pub fn to_vector(&self) -> Vec<f64> {
        vec![
            self.depth as f64,
            self.single_qubit_gates as f64,
            self.two_qubit_gates as f64,
            self.connectivity,
            self.average_gate_fidelity,
            self.measurement_count as f64,
            self.width as f64,
            self.entanglement_entropy,
        ]
    }
}

/// Adaptive error mitigation that uses the neural predictor to choose shot
/// counts and mitigation strength, and learns from observed results.
pub struct AdaptiveErrorMitigation {
    /// Neural network used to predict circuit error rates.
    predictor: NeuralErrorPredictor,
    /// Baseline multiplier applied to the mitigation strength.
    mitigation_strength: f64,
    /// Minimum number of shots to recommend.
    min_shots: usize,
    /// Shared running statistics about the mitigation process.
    metrics: Arc<RwLock<MitigationMetrics>>,
}

/// Running statistics collected while mitigating circuits.
#[derive(Debug, Clone)]
pub struct MitigationMetrics {
    pub total_circuits: usize,
    pub average_improvement: f64,
    pub prediction_accuracy: f64,
    pub adaptive_adjustments: usize,
}

impl AdaptiveErrorMitigation {
    /// Creates a mitigator with an 8-input, 16-hidden-unit predictor,
    /// a learning rate of 0.01, and a minimum of 1024 shots.
    pub fn new() -> Self {
        Self {
            predictor: NeuralErrorPredictor::new(8, 16, 0.01),
            mitigation_strength: 1.0,
            min_shots: 1024,
            metrics: Arc::new(RwLock::new(MitigationMetrics {
                total_circuits: 0,
                average_improvement: 0.0,
                prediction_accuracy: 0.0,
                adaptive_adjustments: 0,
            })),
        }
    }

    /// Recommends a shot count and mitigation strength for a circuit.
    ///
    /// Both scale with the predicted error rate: shots as
    /// `min_shots * (1 + 10 * error)` and strength as
    /// `mitigation_strength * (1 + 2 * error)`.
    pub fn recommend_mitigation(&self, features: &CircuitFeatures) -> QuantRS2Result<(usize, f64)> {
        let predicted_error = self.predictor.predict(&features.to_vector())?;

        let recommended_shots =
            (self.min_shots as f64 * predicted_error.mul_add(10.0, 1.0)) as usize;

        let strength = self.mitigation_strength * predicted_error.mul_add(2.0, 1.0);

        Ok((recommended_shots, strength))
    }

    /// Feeds an observed error rate back into the predictor and updates the
    /// running metrics.
    pub fn update_from_results(
        &mut self,
        features: &CircuitFeatures,
        observed_error: f64,
    ) -> QuantRS2Result<()> {
        self.predictor
            .train(&features.to_vector(), observed_error)?;

        {
            let mut metrics = self.metrics.write().unwrap_or_else(|e| e.into_inner());
            metrics.total_circuits += 1;
            metrics.prediction_accuracy = self.predictor.calculate_accuracy();
        }

        Ok(())
    }

    /// Returns a snapshot of the current mitigation metrics.
    pub fn get_metrics(&self) -> MitigationMetrics {
        self.metrics
            .read()
            .unwrap_or_else(|e| e.into_inner())
            .clone()
    }
}

impl Default for AdaptiveErrorMitigation {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_neural_predictor_creation() {
        let predictor = NeuralErrorPredictor::new(8, 16, 0.01);
        assert_eq!(predictor.input_weights.ncols(), 8);
        assert_eq!(predictor.input_weights.nrows(), 16);
    }

    #[test]
    fn test_prediction() {
        let predictor = NeuralErrorPredictor::new(8, 16, 0.01);
        let features = vec![10.0, 5.0, 3.0, 1.5, 0.99, 2.0, 4.0, 1.2];

        let result = predictor.predict(&features);
        assert!(result.is_ok());

        let error_rate = result.expect("Failed to predict error rate");
        assert!((0.0..=1.0).contains(&error_rate));
    }
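
    // The predictor should reject feature vectors whose length does not
    // match its input size (exercises the length check at the top of
    // `predict`). Test name is new; the behavior is defined above.
    #[test]
    fn test_prediction_rejects_wrong_feature_count() {
        let predictor = NeuralErrorPredictor::new(8, 16, 0.01);
        let too_short = vec![1.0, 2.0, 3.0];

        assert!(predictor.predict(&too_short).is_err());
    }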

    #[test]
    fn test_training() {
        let mut predictor = NeuralErrorPredictor::new(8, 16, 0.01);
        let features = vec![10.0, 5.0, 3.0, 1.5, 0.99, 2.0, 4.0, 1.2];

        for _ in 0..100 {
            let result = predictor.train(&features, 0.05);
            assert!(result.is_ok());
        }

        let history = predictor.get_training_history();
        assert_eq!(history.len(), 100);
    }
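
    // The training history is capped at 1000 entries by the `drain` in
    // `train`, so older examples should be dropped once the cap is exceeded.
    // Test name is new; the cap itself comes from the code above.
    #[test]
    fn test_training_history_is_capped() {
        let mut predictor = NeuralErrorPredictor::new(8, 16, 0.01);
        let features = vec![10.0, 5.0, 3.0, 1.5, 0.99, 2.0, 4.0, 1.2];

        for _ in 0..1100 {
            predictor
                .train(&features, 0.05)
                .expect("training should succeed");
        }

        assert_eq!(predictor.get_training_history().len(), 1000);
    }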

    #[test]
    fn test_adaptive_mitigation() {
        let mitigation = AdaptiveErrorMitigation::new();

        let features = CircuitFeatures {
            depth: 10,
            single_qubit_gates: 20,
            two_qubit_gates: 8,
            connectivity: 1.4,
            average_gate_fidelity: 0.99,
            measurement_count: 4,
            width: 4,
            entanglement_entropy: 2.0,
        };

        let result = mitigation.recommend_mitigation(&features);
        assert!(result.is_ok());

        let (shots, strength) = result.expect("Failed to recommend mitigation");
        assert!(shots >= 1024);
        assert!(strength > 0.0);
    }
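
    // `CircuitFeatures::to_vector` must produce the 8-element input expected
    // by the predictor inside `AdaptiveErrorMitigation`, and feeding observed
    // results back should update the metrics. Test name is new; the behavior
    // follows directly from `update_from_results` above.
    #[test]
    fn test_feedback_updates_metrics() {
        let mut mitigation = AdaptiveErrorMitigation::new();

        let features = CircuitFeatures {
            depth: 10,
            single_qubit_gates: 20,
            two_qubit_gates: 8,
            connectivity: 1.4,
            average_gate_fidelity: 0.99,
            measurement_count: 4,
            width: 4,
            entanglement_entropy: 2.0,
        };

        assert_eq!(features.to_vector().len(), 8);

        mitigation
            .update_from_results(&features, 0.03)
            .expect("feedback should succeed");

        assert_eq!(mitigation.get_metrics().total_circuits, 1);
    }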
}