//! Noise characterization and error mitigation utilities: hardware noise
//! models, randomized benchmarking, zero-noise extrapolation, probabilistic
//! error cancellation, dynamical decoupling, and cross-entropy benchmarking.

use crate::{
    error::{QuantRS2Error, QuantRS2Result},
    gate::GateOp,
    qubit::QubitId,
};
use scirs2_core::ndarray::{Array1, Array2, Array3};
use scirs2_core::random::prelude::*;
use scirs2_core::Complex64;
use std::collections::HashMap;

/// Noise model parameters describing the dominant error channels of a device.
#[derive(Debug, Clone)]
pub struct NoiseModel {
    /// Single-qubit depolarizing error probability per gate.
    pub single_qubit_depolarizing: f64,
    /// Two-qubit depolarizing error probability per gate.
    pub two_qubit_depolarizing: f64,
    /// T1 relaxation time, in the same time units as `gate_duration`.
    pub t1_relaxation: f64,
    /// T2 dephasing time, in the same time units as `gate_duration`.
    pub t2_dephasing: f64,
    /// Readout (measurement) error probability.
    pub readout_error: f64,
    /// Typical gate duration.
    pub gate_duration: f64,
}

impl Default for NoiseModel {
    fn default() -> Self {
        Self {
            single_qubit_depolarizing: 0.001,
            two_qubit_depolarizing: 0.01,
            t1_relaxation: 50.0,
            t2_dephasing: 70.0,
            readout_error: 0.02,
            gate_duration: 0.1,
        }
    }
}

impl NoiseModel {
    /// Create a noise model from error rates and coherence times.
    pub fn new(
        single_qubit_depolarizing: f64,
        two_qubit_depolarizing: f64,
        t1: f64,
        t2: f64,
        readout_error: f64,
    ) -> Self {
        Self {
            single_qubit_depolarizing,
            two_qubit_depolarizing,
            t1_relaxation: t1,
            t2_dephasing: t2,
            readout_error,
            gate_duration: 0.1,
        }
    }

    /// Estimated single-qubit gate fidelity: depolarizing fidelity reduced by
    /// coherence decay over one gate duration.
    pub fn single_qubit_fidelity(&self) -> f64 {
        let depolarizing_fidelity = 1.0 - self.single_qubit_depolarizing;
        let coherence_decay = (-self.gate_duration / self.t1_relaxation).exp()
            * (-self.gate_duration / self.t2_dephasing).exp();
        depolarizing_fidelity * coherence_decay
    }

    /// Estimated two-qubit gate fidelity; two-qubit gates are assumed to take
    /// twice the single-gate duration.
    pub fn two_qubit_fidelity(&self) -> f64 {
        let depolarizing_fidelity = 1.0 - self.two_qubit_depolarizing;
        let coherence_decay = (-2.0 * self.gate_duration / self.t1_relaxation).exp()
            * (-2.0 * self.gate_duration / self.t2_dephasing).exp();
        depolarizing_fidelity * coherence_decay
    }
}

/// Randomized benchmarking (RB) protocol for estimating average gate fidelity
/// from the decay of survival probability with Clifford sequence length.
pub struct RandomizedBenchmarking {
    /// Number of qubits being benchmarked.
    pub num_qubits: usize,
    /// Clifford sequence lengths to sample.
    pub sequence_lengths: Vec<usize>,
    /// Number of random sequences per length.
    pub num_sequences: usize,
    /// Random number generator reserved for Clifford sequence generation.
    rng: ThreadRng,
}

impl RandomizedBenchmarking {
    /// Create a benchmarking run with sequence lengths 1, 6, 11, ... up to
    /// `max_sequence_length`.
    pub fn new(num_qubits: usize, max_sequence_length: usize, num_sequences: usize) -> Self {
        let sequence_lengths: Vec<usize> = (1..=max_sequence_length).step_by(5).collect();
        Self {
            num_qubits,
            sequence_lengths,
            num_sequences,
            rng: thread_rng(),
        }
    }

    /// Run the benchmarking protocol. `apply_circuit` executes a gate sequence
    /// and returns the resulting state vector.
    pub fn run<F>(&mut self, mut apply_circuit: F) -> QuantRS2Result<RandomizedBenchmarkingResult>
    where
        F: FnMut(&[Box<dyn GateOp>]) -> Vec<Complex64>,
    {
        let mut survival_probabilities = HashMap::new();

        // Clone the lengths so the loop does not hold a borrow of `self`.
        let lengths = self.sequence_lengths.clone();

        for &length in &lengths {
            let mut total_survival = 0.0;

            for _ in 0..self.num_sequences {
                // Generate a random Clifford sequence of the given length.
                let sequence = self.generate_clifford_sequence(length);

                // Execute the sequence and record the survival probability.
                let final_state = apply_circuit(&sequence);
                let survival = self.measure_survival_probability(&final_state);

                total_survival += survival;
            }

            let avg_survival = total_survival / (self.num_sequences as f64);
            survival_probabilities.insert(length, avg_survival);
        }

        // Fit P(m) = A * p^m + B to extract the decay rate p.
        let (decay_rate, asymptote) = self.fit_exponential_decay(&survival_probabilities)?;

        // Average gate fidelity from the RB decay rate: F = 1 - (1 - p)(d - 1)/d with d = 2.
        let avg_gate_fidelity = 1.0 - (1.0 - decay_rate) / 2.0;

        Ok(RandomizedBenchmarkingResult {
            survival_probabilities,
            decay_rate,
            asymptote,
            avg_gate_fidelity,
            num_qubits: self.num_qubits,
        })
    }

    /// Generate a random Clifford sequence of the given length.
    ///
    /// Placeholder: currently returns an empty sequence, so the benchmarked
    /// circuit is effectively the identity until Clifford sampling is wired in.
    fn generate_clifford_sequence(&self, length: usize) -> Vec<Box<dyn GateOp>> {
        vec![]
    }

    /// Survival probability: the population remaining in the |0...0> state.
    fn measure_survival_probability(&self, state: &[Complex64]) -> f64 {
        state[0].norm_sqr()
    }

    /// Fit the survival data to an exponential decay P(m) = A * p^m + B and
    /// return (p, B) as (decay_rate, asymptote).
    fn fit_exponential_decay(&self, data: &HashMap<usize, f64>) -> QuantRS2Result<(f64, f64)> {
        if data.len() < 3 {
            return Err(QuantRS2Error::InvalidInput(
                "Insufficient data points for fitting".to_string(),
            ));
        }

        // Use the smallest observed survival probability as the asymptote estimate.
        let asymptote = *data
            .values()
            .min_by(|a, b| a.partial_cmp(b).unwrap())
            .unwrap();

        // Linear regression of ln(P(m) - B) against m to recover ln(p).
        let mut sum_x = 0.0;
        let mut sum_y = 0.0;
        let mut sum_xy = 0.0;
        let mut sum_xx = 0.0;
        let mut count = 0;

        for (&length, &survival) in data.iter() {
            if survival > asymptote {
                let x = length as f64;
                let y = (survival - asymptote).ln();

                sum_x += x;
                sum_y += y;
                sum_xy += x * y;
                sum_xx += x * x;
                count += 1;
            }
        }

        if count < 2 {
            return Err(QuantRS2Error::InvalidInput(
                "Insufficient valid data points".to_string(),
            ));
        }

        let n = count as f64;
        #[allow(clippy::suspicious_operation_groupings)]
        let slope = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x);
        let decay_rate = slope.exp();

        Ok((decay_rate, asymptote))
    }
}

/// Results of a randomized benchmarking run.
#[derive(Debug, Clone)]
pub struct RandomizedBenchmarkingResult {
    /// Average survival probability for each sequence length.
    pub survival_probabilities: HashMap<usize, f64>,
    /// Fitted exponential decay rate p.
    pub decay_rate: f64,
    /// Fitted asymptote of the decay curve.
    pub asymptote: f64,
    /// Estimated average gate fidelity.
    pub avg_gate_fidelity: f64,
    /// Number of qubits benchmarked.
    pub num_qubits: usize,
}

/// Zero-noise extrapolation (ZNE): run the circuit at amplified noise levels
/// and extrapolate the expectation value back to the zero-noise limit.
pub struct ZeroNoiseExtrapolation {
    /// Noise scaling factors at which the circuit is executed.
    pub scaling_factors: Vec<f64>,
    /// Extrapolation method used to estimate the zero-noise value.
    pub extrapolation_method: ExtrapolationMethod,
}

/// Extrapolation methods supported by [`ZeroNoiseExtrapolation`].
#[derive(Debug, Clone, Copy)]
pub enum ExtrapolationMethod {
    /// Linear fit extrapolated to zero noise.
    Linear,
    /// Polynomial fit of the given degree.
    Polynomial(usize),
    /// Exponential fit y = a * exp(b * x), evaluated at x = 0.
    Exponential,
}

impl Default for ZeroNoiseExtrapolation {
    fn default() -> Self {
        Self {
            scaling_factors: vec![1.0, 2.0, 3.0],
            extrapolation_method: ExtrapolationMethod::Linear,
        }
    }
}

impl ZeroNoiseExtrapolation {
    /// Create a ZNE instance with the given scaling factors and method.
    pub fn new(scaling_factors: Vec<f64>, extrapolation_method: ExtrapolationMethod) -> Self {
        Self {
            scaling_factors,
            extrapolation_method,
        }
    }

    /// Run error mitigation. `circuit_executor` takes a noise scaling factor
    /// and returns the measured expectation value at that noise level.
    pub fn mitigate<F>(&self, circuit_executor: F) -> QuantRS2Result<f64>
    where
        F: Fn(f64) -> f64,
    {
        // Execute the circuit at each noise scaling factor.
        let mut noisy_results = Vec::new();
        for &scale in &self.scaling_factors {
            let result = circuit_executor(scale);
            noisy_results.push((scale, result));
        }

        // Extrapolate the collected results to the zero-noise limit.
        let mitigated_result = match self.extrapolation_method {
            ExtrapolationMethod::Linear => self.linear_extrapolation(&noisy_results)?,
            ExtrapolationMethod::Polynomial(degree) => {
                self.polynomial_extrapolation(&noisy_results, degree)?
            }
            ExtrapolationMethod::Exponential => self.exponential_extrapolation(&noisy_results)?,
        };

        Ok(mitigated_result)
    }

    /// Least-squares linear fit y = a + b * x, returning the intercept a,
    /// i.e. the extrapolated value at zero noise (x = 0).
    fn linear_extrapolation(&self, data: &[(f64, f64)]) -> QuantRS2Result<f64> {
        if data.len() < 2 {
            return Err(QuantRS2Error::InvalidInput(
                "Need at least 2 data points for linear extrapolation".to_string(),
            ));
        }

        let n = data.len() as f64;
        let sum_x: f64 = data.iter().map(|(x, _)| x).sum();
        let sum_y: f64 = data.iter().map(|(_, y)| y).sum();
        let sum_xy: f64 = data.iter().map(|(x, y)| x * y).sum();
        let sum_xx: f64 = data.iter().map(|(x, _)| x * x).sum();

        #[allow(clippy::suspicious_operation_groupings)]
        let b = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x);
        let a = (sum_y - b * sum_x) / n;

        // The intercept is the zero-noise estimate.
        Ok(a)
    }

    /// Least-squares polynomial fit of the given degree via the normal
    /// equations; the constant coefficient is the value at zero noise.
    fn polynomial_extrapolation(&self, data: &[(f64, f64)], degree: usize) -> QuantRS2Result<f64> {
        if data.len() < degree + 1 {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Need at least {} data points for degree {} polynomial",
                degree + 1,
                degree
            )));
        }

        // Build the Vandermonde design matrix A and the target vector b.
        let n = data.len();
        let mut a_matrix = Array2::zeros((n, degree + 1));
        let mut b_vector = Array1::zeros(n);

        for (i, &(x, y)) in data.iter().enumerate() {
            b_vector[i] = y;
            for j in 0..=degree {
                a_matrix[[i, j]] = x.powi(j as i32);
            }
        }

        // Solve the normal equations (A^T A) c = A^T b for the coefficients.
        let at = a_matrix.t();
        let ata = at.dot(&a_matrix);
        let atb = at.dot(&b_vector);

        let coeffs = self.solve_normal_equations(&ata, &atb)?;

        // coeffs[0] is the constant term, i.e. the extrapolated value at x = 0.
        Ok(coeffs[0])
    }

    /// Solve (A^T A) c = A^T b by Gaussian elimination with partial pivoting.
    fn solve_normal_equations(
        &self,
        ata: &Array2<f64>,
        atb: &Array1<f64>,
    ) -> QuantRS2Result<Array1<f64>> {
        let n = ata.nrows();
        let mut augmented = Array2::zeros((n, n + 1));

        // Build the augmented matrix [A^T A | A^T b].
        for i in 0..n {
            for j in 0..n {
                augmented[[i, j]] = ata[[i, j]];
            }
            augmented[[i, n]] = atb[i];
        }

        // Forward elimination with partial pivoting.
        for k in 0..n {
            // Find the pivot row with the largest absolute value in column k.
            let mut max_idx = k;
            let mut max_val = augmented[[k, k]].abs();
            for i in (k + 1)..n {
                let val = augmented[[i, k]].abs();
                if val > max_val {
                    max_val = val;
                    max_idx = i;
                }
            }

            // Swap the pivot row into place.
            if max_idx != k {
                for j in 0..=n {
                    let tmp = augmented[[k, j]];
                    augmented[[k, j]] = augmented[[max_idx, j]];
                    augmented[[max_idx, j]] = tmp;
                }
            }

            if augmented[[k, k]].abs() < 1e-10 {
                return Err(QuantRS2Error::ComputationError(
                    "Singular matrix in polynomial fitting".to_string(),
                ));
            }

            // Eliminate entries below the pivot.
            for i in (k + 1)..n {
                let factor = augmented[[i, k]] / augmented[[k, k]];
                for j in k..=n {
                    augmented[[i, j]] -= factor * augmented[[k, j]];
                }
            }
        }

        // Back substitution.
        let mut coeffs = Array1::zeros(n);
        for i in (0..n).rev() {
            let mut sum = augmented[[i, n]];
            for j in (i + 1)..n {
                sum -= augmented[[i, j]] * coeffs[j];
            }
            coeffs[i] = sum / augmented[[i, i]];
        }

        Ok(coeffs)
    }

    /// Fit y = a * exp(b * x) by linear regression on ln(y), returning a,
    /// the extrapolated value at zero noise.
    fn exponential_extrapolation(&self, data: &[(f64, f64)]) -> QuantRS2Result<f64> {
        // Only strictly positive values can be log-transformed.
        let log_data: Vec<(f64, f64)> = data
            .iter()
            .filter(|(_, y)| *y > 0.0)
            .map(|(x, y)| (*x, y.ln()))
            .collect();

        if log_data.len() < 2 {
            return Err(QuantRS2Error::InvalidInput(
                "Insufficient positive data for exponential fit".to_string(),
            ));
        }

        let n = log_data.len() as f64;
        let sum_x: f64 = log_data.iter().map(|(x, _)| x).sum();
        let sum_y: f64 = log_data.iter().map(|(_, y)| y).sum();
        let sum_xy: f64 = log_data.iter().map(|(x, y)| x * y).sum();
        let sum_xx: f64 = log_data.iter().map(|(x, _)| x * x).sum();

        #[allow(clippy::suspicious_operation_groupings)]
        let b = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x);
        let log_a = (sum_y - b * sum_x) / n;
        let a = log_a.exp();

        Ok(a)
    }
}

/// Probabilistic error cancellation (PEC): represent the inverse noise channel
/// as a quasi-probability combination of implementable operations.
pub struct ProbabilisticErrorCancellation {
    /// Noise model used to derive the quasi-probability weights.
    pub noise_model: NoiseModel,
    /// Number of samples to draw from the quasi-probability decomposition.
    pub num_samples: usize,
}

impl ProbabilisticErrorCancellation {
    /// Create a PEC instance for the given noise model and sample budget.
    pub fn new(noise_model: NoiseModel, num_samples: usize) -> Self {
        Self {
            noise_model,
            num_samples,
        }
    }

    /// Mitigate errors by rescaling the ideal expectation value with the
    /// quasi-probability weight derived from the noise model. This simplified
    /// version applies only the positive (identity) term of the decomposition.
    pub fn mitigate<F>(&self, ideal_executor: F) -> QuantRS2Result<f64>
    where
        F: Fn(&[Box<dyn GateOp>]) -> f64,
    {
        let fidelity = self.noise_model.single_qubit_fidelity();
        let noise_strength = 1.0 - fidelity;

        // Quasi-probability weights for the identity and noise terms.
        let ideal_weight = 1.0 / (1.0 - noise_strength);
        let _noise_weight = -noise_strength / (1.0 - noise_strength);

        let ideal_result = ideal_executor(&[]);

        // Simplified mitigation: only the ideal term is sampled and rescaled.
        let mitigated = ideal_weight * ideal_result;

        Ok(mitigated)
    }
}

/// Dynamical decoupling (DD): insert pulse sequences during idle periods to
/// refocus low-frequency dephasing noise.
pub struct DynamicalDecoupling {
    /// The pulse sequence family to generate.
    pub sequence_type: DDSequenceType,
    /// Number of pulses (ignored by fixed-length sequences: SpinEcho, XY4, XY8).
    pub num_pulses: usize,
}

/// Supported dynamical decoupling sequence families.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DDSequenceType {
    /// Single refocusing pulse (Hahn spin echo).
    SpinEcho,
    /// Carr-Purcell: equally spaced X pulses.
    CarrPurcell,
    /// Carr-Purcell-Meiboom-Gill: equally spaced pulses alternating X and Y.
    CPMG,
    /// XY4: four pulses in the pattern X-Y-X-Y.
    XY4,
    /// XY8: eight pulses in the pattern X-Y-X-Y-Y-X-Y-X.
    XY8,
    /// Uhrig dynamical decoupling with sin^2-spaced pulse times.
    UDD,
}

impl DynamicalDecoupling {
    /// Create a decoupling sequence generator.
    pub fn new(sequence_type: DDSequenceType, num_pulses: usize) -> Self {
        Self {
            sequence_type,
            num_pulses,
        }
    }

    /// Generate the pulse schedule for an idle window of length `idle_time`.
    /// Returns (time, pulse) pairs with times measured from the start of the
    /// idle period.
    pub fn generate_sequence(&self, idle_time: f64) -> Vec<(f64, DDPulse)> {
        match self.sequence_type {
            DDSequenceType::SpinEcho => {
                // Single refocusing pulse at the midpoint.
                vec![(idle_time / 2.0, DDPulse::PauliX)]
            }
            DDSequenceType::CarrPurcell => {
                let mut sequence = Vec::new();
                for i in 1..=self.num_pulses {
                    let time = (i as f64) * idle_time / (self.num_pulses as f64 + 1.0);
                    sequence.push((time, DDPulse::PauliX));
                }
                sequence
            }
            DDSequenceType::CPMG => {
                let mut sequence = Vec::new();
                for i in 1..=self.num_pulses {
                    let time = (i as f64) * idle_time / (self.num_pulses as f64 + 1.0);
                    let pulse = if i % 2 == 1 {
                        DDPulse::PauliX
                    } else {
                        DDPulse::PauliY
                    };
                    sequence.push((time, pulse));
                }
                sequence
            }
            DDSequenceType::XY4 => {
                let pulses = [
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                ];
                let mut sequence = Vec::new();
                for i in 1..=4 {
                    let time = (i as f64) * idle_time / 5.0;
                    sequence.push((time, pulses[i - 1]));
                }
                sequence
            }
            DDSequenceType::XY8 => {
                let pulses = [
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                    DDPulse::PauliY,
                    DDPulse::PauliX,
                    DDPulse::PauliY,
                    DDPulse::PauliX,
                ];
                let mut sequence = Vec::new();
                for i in 1..=8 {
                    let time = (i as f64) * idle_time / 9.0;
                    sequence.push((time, pulses[i - 1]));
                }
                sequence
            }
            DDSequenceType::UDD => {
                // Uhrig spacing: t_k = T * sin^2(pi * k / (2 * (N + 1))).
                let mut sequence = Vec::new();
                for k in 1..=self.num_pulses {
                    let angle =
                        std::f64::consts::PI * (k as f64) / (2.0 * (self.num_pulses as f64 + 1.0));
                    let time = idle_time * angle.sin().powi(2);
                    sequence.push((time, DDPulse::PauliX));
                }
                sequence
            }
        }
    }

    /// Heuristic estimate of the coherence improvement from applying this
    /// decoupling sequence, given the bare T2 and the duration of each pulse.
    pub fn coherence_improvement_factor(&self, t2: f64, pulse_duration: f64) -> f64 {
        let n = self.num_pulses as f64;
        let pulse_error = pulse_duration / t2;

        // Scales roughly as N^2, degraded by accumulated pulse error.
        (n * n) / (1.0 + n * pulse_error)
    }
}

/// Pauli pulses used in dynamical decoupling sequences.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DDPulse {
    PauliX,
    PauliY,
    PauliZ,
}

/// Cross-entropy benchmarking (XEB) for estimating circuit fidelity by
/// comparing measured bitstrings with ideal output probabilities.
pub struct CrossEntropyBenchmarking {
    /// Number of qubits in the benchmarked circuits.
    pub num_qubits: usize,
    /// Depth of the random circuits.
    pub circuit_depth: usize,
    /// Number of random circuits to sample.
    pub num_circuits: usize,
}

impl CrossEntropyBenchmarking {
    /// Create an XEB run configuration.
    pub fn new(num_qubits: usize, circuit_depth: usize, num_circuits: usize) -> Self {
        Self {
            num_qubits,
            circuit_depth,
            num_circuits,
        }
    }

    /// Run XEB. `quantum_executor` returns measured bitstrings (as indices) for
    /// a given circuit id, and `classical_simulator` returns the ideal output
    /// probabilities for the same circuit.
    pub fn run<F, G>(
        &self,
        quantum_executor: F,
        classical_simulator: G,
    ) -> QuantRS2Result<CrossEntropyResult>
    where
        F: Fn(usize) -> Vec<usize>,
        G: Fn(usize) -> Vec<f64>,
    {
        let mut total_cross_entropy = 0.0;

        for circuit_id in 0..self.num_circuits {
            // Measured bitstrings from hardware (or a noisy simulator).
            let measurements = quantum_executor(circuit_id);

            // Ideal output probabilities from a classical simulation.
            let ideal_probs = classical_simulator(circuit_id);

            let cross_entropy = self.compute_cross_entropy(&measurements, &ideal_probs);
            total_cross_entropy += cross_entropy;
        }

        let avg_cross_entropy = total_cross_entropy / (self.num_circuits as f64);

        // Simplified linear XEB estimator: scale the average sampled probability
        // by the Hilbert-space dimension 2^n.
        let fidelity = (1 << self.num_qubits) as f64 * avg_cross_entropy;

        Ok(CrossEntropyResult {
            cross_entropy: avg_cross_entropy,
            fidelity,
            num_qubits: self.num_qubits,
            circuit_depth: self.circuit_depth,
        })
    }

    /// Average ideal probability of the measured bitstrings.
    fn compute_cross_entropy(&self, measurements: &[usize], ideal_probs: &[f64]) -> f64 {
        let num_measurements = measurements.len() as f64;
        let mut cross_entropy = 0.0;

        for &bitstring in measurements {
            if bitstring < ideal_probs.len() {
                cross_entropy += ideal_probs[bitstring];
            }
        }

        cross_entropy / num_measurements
    }
}

/// Results of a cross-entropy benchmarking run.
#[derive(Debug, Clone)]
pub struct CrossEntropyResult {
    /// Average cross-entropy over the sampled circuits.
    pub cross_entropy: f64,
    /// Estimated circuit fidelity.
    pub fidelity: f64,
    /// Number of qubits benchmarked.
    pub num_qubits: usize,
    /// Depth of the benchmarked circuits.
    pub circuit_depth: usize,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_noise_model() {
        let noise = NoiseModel::default();
        let fidelity = noise.single_qubit_fidelity();
        assert!(fidelity > 0.9 && fidelity < 1.0);

        let two_qubit_fidelity = noise.two_qubit_fidelity();
        assert!(two_qubit_fidelity > 0.8 && two_qubit_fidelity < 1.0);
        assert!(two_qubit_fidelity < fidelity);
    }
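
    // Illustrative check with arbitrary example parameters: under the fidelity
    // model above, longer T1/T2 coherence times should yield higher gate
    // fidelities for the same depolarizing and readout error rates.
    #[test]
    fn test_noise_model_coherence_times() {
        let long_coherence = NoiseModel::new(0.001, 0.01, 100.0, 140.0, 0.02);
        let short_coherence = NoiseModel::new(0.001, 0.01, 10.0, 14.0, 0.02);

        assert!(
            long_coherence.single_qubit_fidelity() > short_coherence.single_qubit_fidelity()
        );
        assert!(long_coherence.two_qubit_fidelity() > short_coherence.two_qubit_fidelity());
    }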

    #[test]
    fn test_zne_linear_extrapolation() {
        let zne = ZeroNoiseExtrapolation::default();

        let executor = |scale: f64| 1.0 - 0.1 * scale;

        let mitigated = zne.mitigate(executor).unwrap();

        assert!((mitigated - 1.0).abs() < 0.05);
        println!("ZNE mitigated result: {}", mitigated);
    }

    #[test]
    fn test_dynamical_decoupling_sequences() {
        let dd_spin_echo = DynamicalDecoupling::new(DDSequenceType::SpinEcho, 1);
        let sequence = dd_spin_echo.generate_sequence(1.0);
        assert_eq!(sequence.len(), 1);
        assert_eq!(sequence[0].1, DDPulse::PauliX);

        let dd_xy4 = DynamicalDecoupling::new(DDSequenceType::XY4, 4);
        let sequence = dd_xy4.generate_sequence(1.0);
        assert_eq!(sequence.len(), 4);

        let dd_udd = DynamicalDecoupling::new(DDSequenceType::UDD, 5);
        let sequence = dd_udd.generate_sequence(1.0);
        assert_eq!(sequence.len(), 5);
    }
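
    // Additional illustrative checks with arbitrary pulse counts: CPMG should
    // alternate X and Y pulses starting with X, XY8 should always emit eight
    // pulses, and pulse times should increase monotonically in both cases.
    #[test]
    fn test_dd_pulse_ordering() {
        let dd_cpmg = DynamicalDecoupling::new(DDSequenceType::CPMG, 4);
        let sequence = dd_cpmg.generate_sequence(1.0);
        assert_eq!(sequence.len(), 4);
        assert_eq!(sequence[0].1, DDPulse::PauliX);
        assert_eq!(sequence[1].1, DDPulse::PauliY);
        assert!(sequence.windows(2).all(|w| w[0].0 < w[1].0));

        let dd_xy8 = DynamicalDecoupling::new(DDSequenceType::XY8, 8);
        let sequence = dd_xy8.generate_sequence(1.0);
        assert_eq!(sequence.len(), 8);
        assert!(sequence.windows(2).all(|w| w[0].0 < w[1].0));
    }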

    #[test]
    fn test_coherence_improvement() {
        let dd = DynamicalDecoupling::new(DDSequenceType::CPMG, 10);
        let improvement = dd.coherence_improvement_factor(100.0, 0.1);
        assert!(improvement > 1.0);
        println!("DD coherence improvement: {}x", improvement);
    }
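
    // Sketch of the RB decay fit on synthetic data: survival probabilities are
    // generated from an assumed decay 0.5 * 0.9^m + 0.5 (arbitrary example
    // values), so the fitted decay rate should land in (0, 1) and the asymptote
    // near 0.5.
    #[test]
    fn test_rb_exponential_decay_fit() {
        use std::collections::HashMap;

        let rb = RandomizedBenchmarking::new(1, 20, 10);
        assert_eq!(rb.sequence_lengths, vec![1, 6, 11, 16]);

        let mut data = HashMap::new();
        for &m in &[1usize, 6, 11, 16, 21] {
            data.insert(m, 0.5 * 0.9f64.powi(m as i32) + 0.5);
        }

        let (decay_rate, asymptote) = rb.fit_exponential_decay(&data).unwrap();
        assert!(decay_rate > 0.0 && decay_rate < 1.0);
        assert!(asymptote > 0.5 && asymptote < 0.6);
    }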

    #[test]
    fn test_cross_entropy_benchmarking() {
        let xeb = CrossEntropyBenchmarking::new(5, 10, 100);

        let quantum_exec = |_circuit_id: usize| vec![0, 1, 2, 3, 4];

        // Approximately uniform ideal distribution over 2^5 = 32 outcomes.
        let classical_sim = |_circuit_id: usize| vec![0.0312; 32];

        let result = xeb.run(quantum_exec, classical_sim).unwrap();

        assert!(result.cross_entropy > 0.0);
        assert!(result.fidelity > 0.0);
        println!("XEB fidelity: {}", result.fidelity);
    }

    #[test]
    fn test_zne_polynomial_extrapolation() {
        let zne = ZeroNoiseExtrapolation::new(
            vec![1.0, 2.0, 3.0, 4.0],
            ExtrapolationMethod::Polynomial(2),
        );

        let executor = |scale: f64| 1.0 - 0.05 * scale - 0.01 * scale * scale;

        let mitigated = zne.mitigate(executor).unwrap();

        assert!((mitigated - 1.0).abs() < 0.1);
        println!("ZNE polynomial mitigated result: {}", mitigated);
    }
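
    // Error-path sketch: a degree-3 polynomial fit needs at least four data
    // points, so mitigation with only three scaling factors should be rejected.
    #[test]
    fn test_zne_polynomial_insufficient_points() {
        let zne = ZeroNoiseExtrapolation::new(
            vec![1.0, 2.0, 3.0],
            ExtrapolationMethod::Polynomial(3),
        );

        let executor = |scale: f64| 1.0 - 0.05 * scale;

        assert!(zne.mitigate(executor).is_err());
    }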

    #[test]
    fn test_zne_exponential_extrapolation() {
        let zne =
            ZeroNoiseExtrapolation::new(vec![1.0, 2.0, 3.0], ExtrapolationMethod::Exponential);

        let executor = |scale: f64| 0.9 * (-0.1 * scale).exp();

        let mitigated = zne.mitigate(executor).unwrap();

        assert!((mitigated - 0.9).abs() < 0.05);
        println!("ZNE exponential mitigated result: {}", mitigated);
    }
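
    // Minimal sketch of probabilistic error cancellation under the default noise
    // model: a constant ideal expectation value of 1.0 is rescaled by the
    // identity quasi-probability weight, so the mitigated value should sit
    // slightly above 1.
    #[test]
    fn test_probabilistic_error_cancellation() {
        use crate::gate::GateOp;

        let pec = ProbabilisticErrorCancellation::new(NoiseModel::default(), 100);

        let mitigated = pec
            .mitigate(|_gates: &[Box<dyn GateOp>]| 1.0)
            .unwrap();
        assert!(mitigated > 1.0 && mitigated < 1.1);
    }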
}