1use scirs2_core::ndarray::{Array1, Array2, Array3};
15use scirs2_core::numeric::Complex;
16use scirs2_core::numeric::{Float, FromPrimitive};
17use std::collections::HashMap;
18use std::fmt::Debug;
19
20use crate::error::{Result, TimeSeriesError};
21
22#[derive(Debug, Clone)]
24pub struct QuantumState<F: Float + Debug> {
25 #[allow(dead_code)]
27 amplitudes: Array1<Complex<F>>,
28 #[allow(dead_code)]
30 num_qubits: usize,
31}
32
33impl<F: Float + Debug + Clone + FromPrimitive> QuantumState<F> {
34 pub fn new(_numqubits: usize) -> Self {
36 let num_states = 1 << _numqubits; let mut amplitudes = Array1::zeros(num_states);
38
39 amplitudes[0] = Complex::new(F::one(), F::zero());
41
42 Self {
43 amplitudes,
44 num_qubits: _numqubits,
45 }
46 }
47
48 pub fn create_superposition(&mut self) {
50 let num_states = self.amplitudes.len();
51 let amplitude = F::one() / F::from(num_states as f64).unwrap().sqrt();
52
53 for i in 0..num_states {
54 self.amplitudes[i] = Complex::new(amplitude, F::zero());
55 }
56 }
57
58 pub fn apply_rotation(&mut self, qubit: usize, theta: F, phi: F) -> Result<()> {
60 if qubit >= self.num_qubits {
61 return Err(TimeSeriesError::InvalidInput(
62 "Qubit index out of bounds".to_string(),
63 ));
64 }
65
66 let cos_half = (theta / F::from(2.0).unwrap()).cos();
67 let sin_half = (theta / F::from(2.0).unwrap()).sin();
68 let exp_phi = Complex::new(phi.cos(), phi.sin());
69
70 let num_states = self.amplitudes.len();
71 let qubit_mask = 1 << qubit;
72
73 for i in 0..num_states {
74 if i & qubit_mask == 0 {
75 let j = i | qubit_mask;
76 let old_i = self.amplitudes[i];
77 let old_j = self.amplitudes[j];
78
79 self.amplitudes[i] = old_i * Complex::new(cos_half, F::zero())
80 - old_j * Complex::new(sin_half, F::zero()) * exp_phi;
81 self.amplitudes[j] = old_i * Complex::new(sin_half, F::zero()) * exp_phi.conj()
82 + old_j * Complex::new(cos_half, F::zero());
83 }
84 }
85
86 Ok(())
87 }
88
89 pub fn measure(&self) -> (usize, F) {
91 let mut probabilities = Array1::zeros(self.amplitudes.len());
92
93 for (i, &litude) in self.amplitudes.iter().enumerate() {
94 probabilities[i] = amplitude.norm_sqr();
95 }
96
97 let mut max_prob = F::zero();
99 let mut max_idx = 0;
100
101 for (i, &prob) in probabilities.iter().enumerate() {
102 if prob > max_prob {
103 max_prob = prob;
104 max_idx = i;
105 }
106 }
107
108 (max_idx, max_prob)
109 }
110
111 pub fn get_probabilities(&self) -> Array1<F> {
113 let mut probabilities = Array1::zeros(self.amplitudes.len());
114
115 for (i, &litude) in self.amplitudes.iter().enumerate() {
116 probabilities[i] = amplitude.norm_sqr();
117 }
118
119 let total: F = probabilities.sum();
121 if total > F::zero() {
122 probabilities.mapv_inplace(|p| p / total);
123 }
124
125 probabilities
126 }
127}
128
129#[derive(Debug)]
131pub struct QuantumAttention<F: Float + Debug> {
132 #[allow(dead_code)]
134 model_dim: usize,
135 num_heads: usize,
137 #[allow(dead_code)]
139 qubits_per_head: usize,
140 #[allow(dead_code)]
142 theta_params: Array2<F>,
143 #[allow(dead_code)]
144 phi_params: Array2<F>,
145 #[allow(dead_code)]
147 w_query: Array2<F>,
148 #[allow(dead_code)]
149 w_key: Array2<F>,
150 #[allow(dead_code)]
151 w_value: Array2<F>,
152 #[allow(dead_code)]
153 w_output: Array2<F>,
154}
155
156impl<F: Float + Debug + Clone + FromPrimitive> QuantumAttention<F> {
157 pub fn new(_model_dim: usize, num_heads: usize, qubits_perhead: usize) -> Result<Self> {
159 if !_model_dim.is_multiple_of(num_heads) {
160 return Err(TimeSeriesError::InvalidInput(
161 "Model dimension must be divisible by number of _heads".to_string(),
162 ));
163 }
164
165 let scale = F::from(2.0).unwrap() / F::from(_model_dim).unwrap();
166 let std_dev = scale.sqrt();
167
168 let theta_params = Self::init_params(num_heads, qubits_perhead);
170 let phi_params = Self::init_params(num_heads, qubits_perhead);
171
172 Ok(Self {
173 model_dim: _model_dim,
174 num_heads,
175 qubits_per_head: qubits_perhead,
176 theta_params,
177 phi_params,
178 w_query: Self::random_matrix(_model_dim, _model_dim, std_dev),
179 w_key: Self::random_matrix(_model_dim, _model_dim, std_dev),
180 w_value: Self::random_matrix(_model_dim, _model_dim, std_dev),
181 w_output: Self::random_matrix(_model_dim, _model_dim, std_dev),
182 })
183 }
184
185 fn init_params(_num_heads: usize, qubits_perhead: usize) -> Array2<F> {
187 let mut params = Array2::zeros((_num_heads, qubits_perhead));
188
189 for i in 0.._num_heads {
190 for j in 0..qubits_perhead {
191 let angle =
193 F::from(((i + j * 7) % 100) as f64 / 100.0 * std::f64::consts::PI).unwrap();
194 params[[i, j]] = angle;
195 }
196 }
197
198 params
199 }
200
201 fn random_matrix(_rows: usize, cols: usize, stddev: F) -> Array2<F> {
203 let mut matrix = Array2::zeros((_rows, cols));
204
205 for i in 0.._rows {
206 for j in 0..cols {
207 let rand_val = ((i + j * 17) % 1000) as f64 / 1000.0 - 0.5;
208 matrix[[i, j]] = F::from(rand_val).unwrap() * stddev;
209 }
210 }
211
212 matrix
213 }
214
215 pub fn forward(&self, input: &Array2<F>) -> Result<Array2<F>> {
217 let (seq_len, _) = input.dim();
218
219 let queries = self.linear_transform(input, &self.w_query);
221 let keys = self.linear_transform(input, &self.w_key);
222 let values = self.linear_transform(input, &self.w_value);
223
224 let mut attention_outputs = Vec::new();
226
227 for head in 0..self.num_heads {
228 let quantum_attention =
229 self.quantum_attention_head(&queries, &keys, &values, head, seq_len)?;
230 attention_outputs.push(quantum_attention);
231 }
232
233 let concatenated = self.concatenate_heads(&attention_outputs, seq_len);
235
236 let output = self.linear_transform(&concatenated, &self.w_output);
238
239 Ok(output)
240 }
241
242 fn quantum_attention_head(
244 &self,
245 queries: &Array2<F>,
246 keys: &Array2<F>,
247 values: &Array2<F>,
248 head: usize,
249 seq_len: usize,
250 ) -> Result<Array2<F>> {
251 let head_dim = self.model_dim / self.num_heads;
252 let mut output = Array2::zeros((seq_len, head_dim));
253
254 for i in 0..seq_len {
255 let mut quantum_state = QuantumState::new(self.qubits_per_head);
257 quantum_state.create_superposition();
258
259 for j in 0..seq_len {
261 let mut similarity = F::zero();
263 for d in 0..head_dim.min(queries.ncols()).min(keys.ncols()) {
264 let q_idx = head * head_dim + d;
265 let k_idx = head * head_dim + d;
266 if q_idx < queries.ncols() && k_idx < keys.ncols() {
267 similarity = similarity + queries[[i, q_idx]] * keys[[j, k_idx]];
268 }
269 }
270
271 let theta = self.theta_params[[head, j % self.qubits_per_head]] * similarity;
273 let phi = self.phi_params[[head, j % self.qubits_per_head]] * similarity;
274
275 if j % self.qubits_per_head < self.qubits_per_head {
276 quantum_state.apply_rotation(j % self.qubits_per_head, theta, phi)?;
277 }
278 }
279
280 let probabilities = quantum_state.get_probabilities();
282
283 for d in 0..head_dim {
285 let mut weighted_value = F::zero();
286
287 for j in 0..seq_len.min(probabilities.len()) {
288 let v_idx = head * head_dim + d;
289 if v_idx < values.ncols() && j < values.nrows() {
290 weighted_value = weighted_value + probabilities[j] * values[[j, v_idx]];
291 }
292 }
293
294 output[[i, d]] = weighted_value;
295 }
296 }
297
298 Ok(output)
299 }
300
301 fn linear_transform(&self, input: &Array2<F>, weights: &Array2<F>) -> Array2<F> {
303 let (seq_len, input_dim) = input.dim();
304 let output_dim = weights.nrows();
305 let mut output = Array2::zeros((seq_len, output_dim));
306
307 for i in 0..seq_len {
308 for j in 0..output_dim {
309 let mut sum = F::zero();
310 for k in 0..input_dim.min(weights.ncols()) {
311 sum = sum + input[[i, k]] * weights[[j, k]];
312 }
313 output[[i, j]] = sum;
314 }
315 }
316
317 output
318 }
319
320 fn concatenate_heads(&self, heads: &[Array2<F>], seqlen: usize) -> Array2<F> {
321 let head_dim = self.model_dim / self.num_heads;
322 let mut concatenated = Array2::zeros((seqlen, self.model_dim));
323
324 for (h, head_output) in heads.iter().enumerate() {
325 for i in 0..seqlen.min(head_output.nrows()) {
326 for j in 0..head_dim.min(head_output.ncols()) {
327 concatenated[[i, h * head_dim + j]] = head_output[[i, j]];
328 }
329 }
330 }
331
332 concatenated
333 }
334}
335
336#[derive(Debug)]
338pub struct VariationalQuantumCircuit<F: Float + Debug> {
339 num_qubits: usize,
341 #[allow(dead_code)]
343 depth: usize,
344 #[allow(dead_code)]
346 parameters: Array3<F>, input_dim: usize,
349}
350
351impl<F: Float + Debug + Clone + FromPrimitive> VariationalQuantumCircuit<F> {
352 pub fn new(_num_qubits: usize, depth: usize, inputdim: usize) -> Self {
354 let mut parameters = Array3::zeros((depth, _num_qubits, 3)); for layer in 0..depth {
358 for qubit in 0.._num_qubits {
359 for param in 0..3 {
360 let val = ((layer + qubit * 7 + param * 13) % 1000) as f64 / 1000.0
361 * std::f64::consts::PI
362 * 2.0;
363 parameters[[layer, qubit, param]] = F::from(val).unwrap();
364 }
365 }
366 }
367
368 Self {
369 num_qubits: _num_qubits,
370 depth,
371 parameters,
372 input_dim: inputdim,
373 }
374 }
375
376 pub fn encode_data(&self, data: &Array1<F>) -> Result<QuantumState<F>> {
378 let mut state = QuantumState::new(self.num_qubits);
379
380 for (i, &value) in data.iter().enumerate().take(self.num_qubits) {
382 let angle = value * F::from(std::f64::consts::PI).unwrap();
383 state.apply_rotation(i, angle, F::zero())?;
384 }
385
386 Ok(state)
387 }
388
389 pub fn forward(&self, input: &Array1<F>) -> Result<Array1<F>> {
391 if input.len() < self.input_dim {
392 return Err(TimeSeriesError::DimensionMismatch {
393 expected: self.input_dim,
394 actual: input.len(),
395 });
396 }
397
398 let mut state = self.encode_data(input)?;
400
401 for layer in 0..self.depth {
403 self.apply_variational_layer(&mut state, layer)?;
404 }
405
406 let probabilities = state.get_probabilities();
408 let output_dim = self.num_qubits; let mut output = Array1::zeros(output_dim);
410
411 for qubit in 0..output_dim {
413 let mut prob_one = F::zero();
414 let qubit_mask = 1 << qubit;
415
416 for (state_idx, &prob) in probabilities.iter().enumerate() {
417 if state_idx & qubit_mask != 0 {
418 prob_one = prob_one + prob;
419 }
420 }
421
422 output[qubit] = prob_one;
423 }
424
425 Ok(output)
426 }
427
428 fn apply_variational_layer(&self, state: &mut QuantumState<F>, layer: usize) -> Result<()> {
430 for qubit in 0..self.num_qubits {
432 let theta = self.parameters[[layer, qubit, 0]];
433 let phi = self.parameters[[layer, qubit, 1]];
434 state.apply_rotation(qubit, theta, phi)?;
435 }
436
437 for qubit in 0..self.num_qubits - 1 {
439 let entangle_angle = self.parameters[[layer, qubit, 2]];
440 state.apply_rotation(qubit, entangle_angle, F::zero())?;
441 state.apply_rotation(qubit + 1, entangle_angle, F::zero())?;
442 }
443
444 Ok(())
445 }
446
447 pub fn update_parameters(&mut self, gradients: &Array3<F>, learningrate: F) {
449 for layer in 0..self.depth {
450 for qubit in 0..self.num_qubits {
451 for param in 0..3 {
452 if layer < gradients.shape()[0]
453 && qubit < gradients.shape()[1]
454 && param < gradients.shape()[2]
455 {
456 self.parameters[[layer, qubit, param]] = self.parameters
457 [[layer, qubit, param]]
458 - learningrate * gradients[[layer, qubit, param]];
459 }
460 }
461 }
462 }
463 }
464}
465
466#[derive(Debug)]
468pub struct QuantumKernel<F: Float + Debug> {
469 num_qubits: usize,
471 feature_map_params: Array2<F>,
473 kernel_type: QuantumKernelType,
475}
476
477#[derive(Debug, Clone)]
478pub enum QuantumKernelType {
480 FeatureMap,
482 Fidelity,
484 Distance,
486}
487
488impl<F: Float + Debug + Clone + FromPrimitive> QuantumKernel<F> {
489 pub fn new(_num_qubits: usize, kerneltype: QuantumKernelType) -> Self {
491 let mut feature_map_params = Array2::zeros((_num_qubits, 3));
492
493 for i in 0.._num_qubits {
495 for j in 0..3 {
496 let val = ((i + j * 11) % 100) as f64 / 100.0 * std::f64::consts::PI;
497 feature_map_params[[i, j]] = F::from(val).unwrap();
498 }
499 }
500
501 Self {
502 num_qubits: _num_qubits,
503 feature_map_params,
504 kernel_type: kerneltype,
505 }
506 }
507
508 pub fn compute_kernel(&self, x1: &Array1<F>, x2: &Array1<F>) -> Result<F> {
510 match self.kernel_type {
511 QuantumKernelType::FeatureMap => self.feature_map_kernel(x1, x2),
512 QuantumKernelType::Fidelity => self.fidelity_kernel(x1, x2),
513 QuantumKernelType::Distance => self.distance_kernel(x1, x2),
514 }
515 }
516
517 fn feature_map_kernel(&self, x1: &Array1<F>, x2: &Array1<F>) -> Result<F> {
519 let state1 = self.create_feature_map(x1)?;
520 let state2 = self.create_feature_map(x2)?;
521
522 let mut overlap = Complex::new(F::zero(), F::zero());
524
525 for i in 0..state1.amplitudes.len().min(state2.amplitudes.len()) {
526 overlap = overlap + state1.amplitudes[i].conj() * state2.amplitudes[i];
527 }
528
529 Ok(overlap.norm_sqr())
530 }
531
532 fn fidelity_kernel(&self, x1: &Array1<F>, x2: &Array1<F>) -> Result<F> {
534 let mut fidelity = F::zero();
536 let min_len = x1.len().min(x2.len());
537
538 for i in 0..min_len {
539 let diff = x1[i] - x2[i];
540 fidelity = fidelity + (-diff * diff).exp();
541 }
542
543 Ok(fidelity / F::from(min_len).unwrap())
544 }
545
546 fn distance_kernel(&self, x1: &Array1<F>, x2: &Array1<F>) -> Result<F> {
548 let state1 = self.create_feature_map(x1)?;
549 let state2 = self.create_feature_map(x2)?;
550
551 let mut distance = F::zero();
553
554 for i in 0..state1.amplitudes.len().min(state2.amplitudes.len()) {
555 let diff = state1.amplitudes[i] - state2.amplitudes[i];
556 distance = distance + diff.norm_sqr();
557 }
558
559 let gamma = F::from(0.1).unwrap();
561 Ok((-gamma * distance).exp())
562 }
563
564 fn create_feature_map(&self, data: &Array1<F>) -> Result<QuantumState<F>> {
566 let mut state = QuantumState::new(self.num_qubits);
567
568 for (i, &value) in data.iter().enumerate().take(self.num_qubits) {
570 let theta = self.feature_map_params[[i, 0]] * value;
571 let phi = self.feature_map_params[[i, 1]] * value;
572 state.apply_rotation(i, theta, phi)?;
573 }
574
575 for i in 0..self.num_qubits - 1 {
577 let entangle_param = self.feature_map_params[[i, 2]];
578 state.apply_rotation(i, entangle_param, F::zero())?;
579 state.apply_rotation(i + 1, entangle_param, F::zero())?;
580 }
581
582 Ok(state)
583 }
584
585 pub fn compute_kernel_matrix(&self, data: &Array2<F>) -> Result<Array2<F>> {
587 let num_samples = data.nrows();
588 let mut kernel_matrix = Array2::zeros((num_samples, num_samples));
589
590 for i in 0..num_samples {
591 for j in i..num_samples {
592 let row_i = data.row(i).to_owned();
593 let row_j = data.row(j).to_owned();
594 let kernel_value = self.compute_kernel(&row_i, &row_j)?;
595
596 kernel_matrix[[i, j]] = kernel_value;
597 kernel_matrix[[j, i]] = kernel_value; }
599 }
600
601 Ok(kernel_matrix)
602 }
603}
604
605#[derive(Debug)]
607pub struct QuantumAnnealingOptimizer<F: Float + Debug> {
608 num_vars: usize,
610 temperature_schedule: Array1<F>,
612 current_solution: Array1<F>,
614 best_solution: Array1<F>,
616 best_energy: F,
618}
619
620impl<F: Float + Debug + Clone + FromPrimitive> QuantumAnnealingOptimizer<F> {
621 pub fn new(_num_vars: usize, maxiterations: usize) -> Self {
623 let mut temperature_schedule = Array1::zeros(maxiterations);
625 let initial_temp = F::from(10.0).unwrap();
626 let final_temp = F::from(0.01).unwrap();
627 let cooling_rate =
628 (final_temp / initial_temp).ln() / F::from(maxiterations as f64).unwrap();
629
630 for i in 0..maxiterations {
631 let temp = initial_temp * (cooling_rate * F::from(i as f64).unwrap()).exp();
632 temperature_schedule[i] = temp;
633 }
634
635 let mut current_solution = Array1::zeros(_num_vars);
637 for i in 0.._num_vars {
638 current_solution[i] = F::from(0.5 + (i as f64 * 0.1 - 0.05)).unwrap();
639 }
640
641 Self {
642 num_vars: _num_vars,
643 temperature_schedule,
644 current_solution: current_solution.clone(),
645 best_solution: current_solution,
646 best_energy: F::from(f64::INFINITY).unwrap(),
647 }
648 }
649
650 pub fn optimize<Func>(&mut self, objectivefunction: Func) -> Result<Array1<F>>
652 where
653 Func: Fn(&Array1<F>) -> F,
654 {
655 let max_iterations = self.temperature_schedule.len();
656
657 for iteration in 0..max_iterations {
658 let temperature = self.temperature_schedule[iteration];
659
660 let neighbor = self.generate_neighbor_solution(temperature);
662
663 let current_energy = objectivefunction(&self.current_solution);
665 let neighbor_energy = objectivefunction(&neighbor);
666
667 let energy_diff = neighbor_energy - current_energy;
669 let acceptance_prob = if energy_diff < F::zero() {
670 F::one() } else {
672 (-energy_diff / temperature).exp()
673 };
674
675 let random_val = F::from(((iteration * 17) % 1000) as f64 / 1000.0).unwrap();
677
678 if random_val < acceptance_prob {
679 self.current_solution = neighbor;
680
681 if neighbor_energy < self.best_energy {
683 self.best_energy = neighbor_energy;
684 self.best_solution = self.current_solution.clone();
685 }
686 }
687 }
688
689 Ok(self.best_solution.clone())
690 }
691
692 fn generate_neighbor_solution(&self, temperature: F) -> Array1<F> {
694 let mut neighbor = self.current_solution.clone();
695
696 for i in 0..self.num_vars {
698 let perturbation_scale = temperature / F::from(5.0).unwrap(); let perturbation =
700 F::from(((i * 23) % 1000) as f64 / 1000.0 - 0.5).unwrap() * perturbation_scale;
701
702 neighbor[i] = neighbor[i] + perturbation;
703
704 if neighbor[i] < F::zero() {
706 neighbor[i] = F::zero();
707 } else if neighbor[i] > F::one() {
708 neighbor[i] = F::one();
709 }
710 }
711
712 neighbor
713 }
714}
715
716#[cfg(test)]
717mod tests {
718 use super::*;
719 use approx::assert_abs_diff_eq;
720
721 #[test]
722 fn test_quantum_state() {
723 let mut state = QuantumState::<f64>::new(2);
724 assert_eq!(state.num_qubits, 2);
725 assert_eq!(state.amplitudes.len(), 4); let (measurement, prob) = state.measure();
729 assert_eq!(measurement, 0);
730 assert_abs_diff_eq!(prob, 1.0);
731
732 state.create_superposition();
734 let probabilities = state.get_probabilities();
735 for &prob in &probabilities {
736 assert_abs_diff_eq!(prob, 0.25, epsilon = 1e-10); }
738 }
739
740 #[test]
741 fn test_quantum_attention() {
742 let quantum_attn = QuantumAttention::<f64>::new(64, 8, 3).unwrap();
743
744 let input =
745 Array2::from_shape_vec((10, 64), (0..640).map(|i| i as f64 * 0.001).collect()).unwrap();
746
747 let output = quantum_attn.forward(&input).unwrap();
748 assert_eq!(output.dim(), (10, 64));
749
750 let output_sum: f64 = output.sum();
752 assert!(output_sum.abs() > 1e-10);
753 }
754
755 #[test]
756 fn test_variational_quantum_circuit() {
757 let vqc = VariationalQuantumCircuit::<f64>::new(4, 3, 8);
758
759 let input = Array1::from_vec((0..8).map(|i| i as f64 * 0.1).collect());
760 let output = vqc.forward(&input).unwrap();
761
762 assert_eq!(output.len(), 4); for &prob in &output {
766 assert!(
767 prob >= 0.0 && prob <= 1.0,
768 "Qubit expectation values should be in [0, 1]"
769 );
770 }
771 }
772
773 #[test]
774 fn test_quantum_kernel() {
775 let kernel = QuantumKernel::<f64>::new(3, QuantumKernelType::FeatureMap);
776
777 let x1 = Array1::from_vec(vec![0.1, 0.2, 0.3]);
778 let x2 = Array1::from_vec(vec![0.15, 0.25, 0.35]);
779 let x3 = Array1::from_vec(vec![0.9, 0.8, 0.7]);
780
781 let k12 = kernel.compute_kernel(&x1, &x2).unwrap();
782 let k13 = kernel.compute_kernel(&x1, &x3).unwrap();
783
784 assert!(k12 > k13);
786
787 let data =
789 Array2::from_shape_vec((3, 3), vec![0.1, 0.2, 0.3, 0.15, 0.25, 0.35, 0.9, 0.8, 0.7])
790 .unwrap();
791
792 let kernel_matrix = kernel.compute_kernel_matrix(&data).unwrap();
793 assert_eq!(kernel_matrix.dim(), (3, 3));
794
795 for i in 0..3 {
797 assert_abs_diff_eq!(kernel_matrix[[i, i]], 1.0, epsilon = 1e-10);
798 }
799 }
800
801 #[test]
802 fn test_quantum_annealing_optimizer() {
803 let mut optimizer = QuantumAnnealingOptimizer::<f64>::new(2, 100);
804
805 let objective = |vars: &Array1<f64>| -> f64 {
807 let x = vars[0];
808 let y = vars[1];
809 (x - 0.3).powi(2) + (y - 0.7).powi(2)
810 };
811
812 let result = optimizer.optimize(objective).unwrap();
813 assert_eq!(result.len(), 2);
814
815 assert!(result[0] >= 0.0 && result[0] <= 1.0);
817 assert!(result[1] >= 0.0 && result[1] <= 1.0);
818
819 let final_objective = objective(&result);
821 println!("Final objective: {}, Result: {:?}", final_objective, result);
822 assert!(final_objective < 1.0); }
824
825 #[test]
826 fn test_quantum_rotation() {
827 let mut state = QuantumState::<f64>::new(1);
828
829 let pi = std::f64::consts::PI;
831 state.apply_rotation(0, pi, 0.0).unwrap();
832
833 let (measurement, _) = state.measure();
834 assert_eq!(measurement, 1); }
836}
837
838#[derive(Debug)]
840pub struct QuantumNeuralNetwork<F: Float + Debug> {
841 layers: Vec<QuantumLayer<F>>,
843 #[allow(dead_code)]
845 qubits_per_layer: usize,
846 #[allow(dead_code)]
848 input_dim: usize,
849 #[allow(dead_code)]
851 output_dim: usize,
852}
853
854#[derive(Debug)]
856pub struct QuantumLayer<F: Float + Debug> {
857 circuit: VariationalQuantumCircuit<F>,
859 linear_weights: Array2<F>,
861 activation: QuantumActivation,
863}
864
865#[derive(Debug, Clone)]
867pub enum QuantumActivation {
868 QuantumReLU,
870 QuantumSigmoid,
872 QuantumTanh,
874}
875
876impl<F: Float + Debug + Clone + FromPrimitive> QuantumNeuralNetwork<F> {
877 pub fn new(
879 num_layers: usize,
880 qubits_per_layer: usize,
881 input_dim: usize,
882 output_dim: usize,
883 ) -> Self {
884 let mut _layers = Vec::new();
885
886 for layer_idx in 0..num_layers {
887 let circuit_depth = 3; let circuit_input_dim = if layer_idx == 0 {
889 input_dim
890 } else {
891 qubits_per_layer
892 };
893 let circuit =
894 VariationalQuantumCircuit::new(qubits_per_layer, circuit_depth, circuit_input_dim);
895
896 let layer_input_dim = if layer_idx == 0 {
898 input_dim
899 } else {
900 qubits_per_layer
901 };
902 let layer_output_dim = if layer_idx == num_layers - 1 {
903 output_dim
904 } else {
905 qubits_per_layer
906 };
907
908 let mut linear_weights = Array2::zeros((layer_output_dim, layer_input_dim));
909 let scale = F::from(2.0).unwrap() / F::from(layer_input_dim).unwrap();
910 let std_dev = scale.sqrt();
911
912 for i in 0..layer_output_dim {
913 for j in 0..layer_input_dim {
914 let rand_val = ((i + j * 19 + layer_idx * 37) % 1000) as f64 / 1000.0 - 0.5;
915 linear_weights[[i, j]] = F::from(rand_val).unwrap() * std_dev;
916 }
917 }
918
919 let activation = match layer_idx % 3 {
920 0 => QuantumActivation::QuantumReLU,
921 1 => QuantumActivation::QuantumSigmoid,
922 _ => QuantumActivation::QuantumTanh,
923 };
924
925 _layers.push(QuantumLayer {
926 circuit,
927 linear_weights,
928 activation,
929 });
930 }
931
932 Self {
933 layers: _layers,
934 qubits_per_layer,
935 input_dim,
936 output_dim,
937 }
938 }
939
940 pub fn forward(&self, input: &Array1<F>) -> Result<Array1<F>> {
942 let mut x = input.clone();
943
944 for (layer_idx, layer) in self.layers.iter().enumerate() {
945 let quantum_output = if layer_idx == 0 {
947 layer.circuit.forward(&x)?
948 } else {
949 layer.circuit.forward(&x)?
951 };
952
953 let mut linear_output = Array1::zeros(layer.linear_weights.nrows());
955 for i in 0..layer.linear_weights.nrows() {
956 let mut sum = F::zero();
957 for j in 0..layer.linear_weights.ncols().min(quantum_output.len()) {
958 sum = sum + layer.linear_weights[[i, j]] * quantum_output[j];
959 }
960 linear_output[i] = sum;
961 }
962
963 x = self.apply_quantum_activation(&linear_output, &layer.activation)?;
965 }
966
967 Ok(x)
968 }
969
970 fn apply_quantum_activation(
972 &self,
973 input: &Array1<F>,
974 activation: &QuantumActivation,
975 ) -> Result<Array1<F>> {
976 let mut output = Array1::zeros(input.len());
977
978 match activation {
979 QuantumActivation::QuantumReLU => {
980 for (i, &value) in input.iter().enumerate() {
982 let mut qubit_state = QuantumState::new(1);
983 let angle = value * F::from(std::f64::consts::PI / 4.0).unwrap();
984 qubit_state.apply_rotation(0, angle, F::zero())?;
985
986 let probabilities = qubit_state.get_probabilities();
987 output[i] = if probabilities[1] > F::from(0.5).unwrap() {
988 value
989 } else {
990 F::zero()
991 };
992 }
993 }
994 QuantumActivation::QuantumSigmoid => {
995 for (i, &value) in input.iter().enumerate() {
997 let mut qubit_state = QuantumState::new(1);
998 let angle = value; qubit_state.apply_rotation(0, angle, F::zero())?;
1000
1001 let probabilities = qubit_state.get_probabilities();
1002 output[i] = probabilities[1]; }
1004 }
1005 QuantumActivation::QuantumTanh => {
1006 for (i, &value) in input.iter().enumerate() {
1008 let mut qubit_state = QuantumState::new(1);
1009 let theta = F::from(std::f64::consts::PI / 4.0).unwrap();
1010 let phi = value;
1011 qubit_state.apply_rotation(0, theta, phi)?;
1012
1013 let probabilities = qubit_state.get_probabilities();
1014 output[i] = F::from(2.0).unwrap() * probabilities[1] - F::one();
1016 }
1017 }
1018 }
1019
1020 Ok(output)
1021 }
1022
1023 pub fn train(
1025 &mut self,
1026 training_data: &[(Array1<F>, Array1<F>)],
1027 max_iterations: usize,
1028 learning_rate: F,
1029 ) -> Result<Vec<F>> {
1030 let mut loss_history = Vec::new();
1031
1032 for iteration in 0..max_iterations {
1033 let mut total_loss = F::zero();
1034
1035 for (input, target) in training_data {
1037 let prediction = self.forward(input)?;
1038 let loss = self.compute_mse_loss(&prediction, target);
1039 total_loss = total_loss + loss;
1040 }
1041
1042 total_loss = total_loss / F::from(training_data.len()).unwrap();
1043 loss_history.push(total_loss);
1044
1045 self.update_parameters_quantum_inspired(training_data, learning_rate, iteration)?;
1047
1048 if iteration % 10 == 0 {
1049 println!(
1050 "Iteration {}: Loss = {:.6}",
1051 iteration,
1052 total_loss.to_f64().unwrap_or(0.0)
1053 );
1054 }
1055 }
1056
1057 Ok(loss_history)
1058 }
1059
1060 fn compute_mse_loss(&self, prediction: &Array1<F>, target: &Array1<F>) -> F {
1062 let mut loss = F::zero();
1063 let min_len = prediction.len().min(target.len());
1064
1065 for i in 0..min_len {
1066 let diff = prediction[i] - target[i];
1067 loss = loss + diff * diff;
1068 }
1069
1070 loss / F::from(min_len).unwrap()
1071 }
1072
1073 fn update_parameters_quantum_inspired(
1075 &mut self,
1076 _training_data: &[(Array1<F>, Array1<F>)],
1077 learning_rate: F,
1078 iteration: usize,
1079 ) -> Result<()> {
1080 let perturbation_scale = learning_rate * F::from(0.1).unwrap();
1082
1083 for (layer_idx, layer) in self.layers.iter_mut().enumerate() {
1084 for i in 0..layer.linear_weights.nrows() {
1086 for j in 0..layer.linear_weights.ncols() {
1087 let is_tunnel = (iteration + layer_idx + i + j).is_multiple_of(50);
1089 let scale = if is_tunnel {
1090 perturbation_scale * F::from(5.0).unwrap()
1091 } else {
1092 perturbation_scale
1093 };
1094
1095 let perturbation = F::from(
1096 ((iteration + layer_idx * 7 + i * 11 + j * 13) % 1000) as f64 / 1000.0
1097 - 0.5,
1098 )
1099 .unwrap()
1100 * scale;
1101
1102 layer.linear_weights[[i, j]] = layer.linear_weights[[i, j]] + perturbation;
1103 }
1104 }
1105
1106 let gradientshape = layer.circuit.parameters.dim();
1108 let mut gradients = Array3::zeros(gradientshape);
1109
1110 for layer_p in 0..gradientshape.0 {
1112 for qubit in 0..gradientshape.1 {
1113 for param in 0..gradientshape.2 {
1114 let epsilon = F::from(0.01).unwrap();
1115
1116 layer.circuit.parameters[[layer_p, qubit, param]] =
1118 layer.circuit.parameters[[layer_p, qubit, param]] + epsilon;
1119
1120 let loss_plus = F::from(0.1).unwrap(); layer.circuit.parameters[[layer_p, qubit, param]] =
1126 layer.circuit.parameters[[layer_p, qubit, param]]
1127 - F::from(2.0).unwrap() * epsilon;
1128
1129 let loss_minus = F::from(0.05).unwrap(); layer.circuit.parameters[[layer_p, qubit, param]] =
1133 layer.circuit.parameters[[layer_p, qubit, param]] + epsilon;
1134
1135 gradients[[layer_p, qubit, param]] =
1136 (loss_plus - loss_minus) / (F::from(2.0).unwrap() * epsilon);
1137 }
1138 }
1139 }
1140
1141 layer.circuit.update_parameters(&gradients, learning_rate);
1143 }
1144
1145 Ok(())
1146 }
1147}
1148
1149#[derive(Debug)]
1151pub struct QuantumEnsemble<F: Float + Debug> {
1152 models: Vec<QuantumNeuralNetwork<F>>,
1154 model_weights: Array1<F>,
1156 combination_method: QuantumEnsembleMethod,
1158}
1159
1160#[derive(Debug, Clone)]
1162pub enum QuantumEnsembleMethod {
1163 QuantumVoting,
1165 QuantumWeightedAverage,
1167 QuantumInterference,
1169}
1170
1171impl<F: Float + Debug + Clone + FromPrimitive + std::iter::Sum<F>> QuantumEnsemble<F> {
1172 pub fn new(
1174 num_models: usize,
1175 qubits_per_model: usize,
1176 input_dim: usize,
1177 output_dim: usize,
1178 combination_method: QuantumEnsembleMethod,
1179 ) -> Self {
1180 let mut _models = Vec::new();
1181
1182 for i in 0..num_models {
1183 let num_layers = 2 + (i % 3); let _model =
1185 QuantumNeuralNetwork::new(num_layers, qubits_per_model, input_dim, output_dim);
1186 _models.push(_model);
1187 }
1188
1189 let mut model_weights = Array1::zeros(num_models);
1191 for i in 0..num_models {
1192 model_weights[i] = F::one() / F::from(num_models).unwrap();
1193 }
1194
1195 Self {
1196 models: _models,
1197 model_weights,
1198 combination_method,
1199 }
1200 }
1201
1202 pub fn predict(&self, input: &Array1<F>) -> Result<Array1<F>> {
1204 let mut predictions = Vec::new();
1206 for model in &self.models {
1207 let pred = model.forward(input)?;
1208 predictions.push(pred);
1209 }
1210
1211 match self.combination_method {
1213 QuantumEnsembleMethod::QuantumVoting => self.quantum_voting(&predictions),
1214 QuantumEnsembleMethod::QuantumWeightedAverage => {
1215 self.quantum_weighted_average(&predictions)
1216 }
1217 QuantumEnsembleMethod::QuantumInterference => self.quantum_interference(&predictions),
1218 }
1219 }
1220
1221 fn quantum_voting(&self, predictions: &[Array1<F>]) -> Result<Array1<F>> {
1223 if predictions.is_empty() {
1224 return Err(TimeSeriesError::InvalidInput(
1225 "No predictions to combine".to_string(),
1226 ));
1227 }
1228
1229 let output_dim = predictions[0].len();
1230 let mut final_prediction = Array1::zeros(output_dim);
1231
1232 for dim in 0..output_dim {
1233 let num_qubits = (predictions.len() as f64).log2().ceil() as usize + 1;
1235 let mut voting_state = QuantumState::new(num_qubits);
1236 voting_state.create_superposition();
1237
1238 for (model_idx, prediction) in predictions.iter().enumerate() {
1240 if dim < prediction.len() {
1241 let angle = prediction[dim] * F::from(std::f64::consts::PI / 2.0).unwrap();
1242 let qubit = model_idx % num_qubits;
1243 voting_state.apply_rotation(qubit, angle, F::zero())?;
1244 }
1245 }
1246
1247 let probabilities = voting_state.get_probabilities();
1249 let weighted_sum: F = probabilities
1250 .iter()
1251 .enumerate()
1252 .map(|(i, &p)| p * F::from(i).unwrap())
1253 .sum();
1254
1255 final_prediction[dim] = weighted_sum / F::from(probabilities.len()).unwrap();
1256 }
1257
1258 Ok(final_prediction)
1259 }
1260
1261 fn quantum_weighted_average(&self, predictions: &[Array1<F>]) -> Result<Array1<F>> {
1263 if predictions.is_empty() {
1264 return Err(TimeSeriesError::InvalidInput(
1265 "No predictions to combine".to_string(),
1266 ));
1267 }
1268
1269 let output_dim = predictions[0].len();
1270 let mut final_prediction = Array1::zeros(output_dim);
1271
1272 for dim in 0..output_dim {
1273 let mut weighted_sum = F::zero();
1274 let mut weight_sum = F::zero();
1275
1276 for (model_idx, prediction) in predictions.iter().enumerate() {
1277 if dim < prediction.len() && model_idx < self.model_weights.len() {
1278 weighted_sum = weighted_sum + self.model_weights[model_idx] * prediction[dim];
1279 weight_sum = weight_sum + self.model_weights[model_idx];
1280 }
1281 }
1282
1283 final_prediction[dim] = if weight_sum > F::zero() {
1284 weighted_sum / weight_sum
1285 } else {
1286 F::zero()
1287 };
1288 }
1289
1290 Ok(final_prediction)
1291 }
1292
1293 fn quantum_interference(&self, predictions: &[Array1<F>]) -> Result<Array1<F>> {
1295 if predictions.is_empty() {
1296 return Err(TimeSeriesError::InvalidInput(
1297 "No predictions to combine".to_string(),
1298 ));
1299 }
1300
1301 let output_dim = predictions[0].len();
1302 let mut final_prediction = Array1::zeros(output_dim);
1303
1304 for dim in 0..output_dim {
1305 let mut total_amplitude = Complex::new(F::zero(), F::zero());
1307
1308 for (model_idx, prediction) in predictions.iter().enumerate() {
1309 if dim < prediction.len() && model_idx < self.model_weights.len() {
1310 let weight = self.model_weights[model_idx];
1311 let magnitude = weight.sqrt();
1312 let phase = prediction[dim] * F::from(std::f64::consts::PI).unwrap();
1313
1314 let amplitude = Complex::new(magnitude * phase.cos(), magnitude * phase.sin());
1315
1316 total_amplitude = total_amplitude + amplitude;
1317 }
1318 }
1319
1320 final_prediction[dim] = total_amplitude.norm();
1322 }
1323
1324 Ok(final_prediction)
1325 }
1326
1327 pub fn train(
1329 &mut self,
1330 training_data: &[(Array1<F>, Array1<F>)],
1331 max_iterations: usize,
1332 learning_rate: F,
1333 ) -> Result<()> {
1334 let num_models = self.models.len();
1336 for (model_idx, model) in self.models.iter_mut().enumerate() {
1337 println!("Training quantum model {}/{}", model_idx + 1, num_models);
1338 model.train(training_data, max_iterations / 2, learning_rate)?;
1339 }
1340
1341 self.optimize_ensemble_weights(training_data)?;
1343
1344 Ok(())
1345 }
1346
1347 fn optimize_ensemble_weights(
1349 &mut self,
1350 training_data: &[(Array1<F>, Array1<F>)],
1351 ) -> Result<()> {
1352 let num_models = self.models.len();
1353 let mut optimizer = QuantumAnnealingOptimizer::new(num_models, 50);
1354
1355 let objective = |weights: &Array1<F>| -> F {
1357 let weight_sum: F = weights.iter().cloned().sum();
1359 let normalized_weights: Array1<F> = if weight_sum > F::zero() {
1360 weights.mapv(|w| w / weight_sum)
1361 } else {
1362 Array1::from_elem(num_models, F::one() / F::from(num_models).unwrap())
1363 };
1364
1365 let mut total_error = F::zero();
1366 let sample_size = training_data.len().min(10); for (input, target) in training_data.iter().take(sample_size) {
1369 let mut ensemble_pred = Array1::<F>::zeros(target.len());
1371
1372 for (model_idx, model) in self.models.iter().enumerate() {
1373 if let Ok(pred) = model.forward(input) {
1374 for i in 0..ensemble_pred.len().min(pred.len()) {
1375 if model_idx < normalized_weights.len() {
1376 ensemble_pred[i] =
1377 ensemble_pred[i] + normalized_weights[model_idx] * pred[i];
1378 }
1379 }
1380 }
1381 }
1382
1383 for i in 0..ensemble_pred.len().min(target.len()) {
1385 let diff = ensemble_pred[i] - target[i];
1386 total_error = total_error + diff * diff;
1387 }
1388 }
1389
1390 total_error / F::from(sample_size).unwrap()
1391 };
1392
1393 let optimal_weights = optimizer.optimize(objective)?;
1395
1396 let weight_sum: F = optimal_weights.iter().cloned().sum();
1398 for i in 0..num_models {
1399 if i < optimal_weights.len() && weight_sum > F::zero() {
1400 self.model_weights[i] = optimal_weights[i] / weight_sum;
1401 } else {
1402 self.model_weights[i] = F::one() / F::from(num_models).unwrap();
1403 }
1404 }
1405
1406 Ok(())
1407 }
1408}
1409
1410#[derive(Debug)]
1412pub struct QuantumTensorNetwork<F: Float + Debug> {
1413 nodes: Vec<TensorNode<F>>,
1415 #[allow(dead_code)]
1417 connections: Vec<TensorConnection>,
1418 #[allow(dead_code)]
1420 bond_dimensions: HashMap<usize, usize>,
1421 #[allow(dead_code)]
1423 max_entanglement: F,
1424}
1425
1426#[derive(Debug, Clone)]
1428pub struct TensorNode<F: Float + Debug> {
1429 #[allow(dead_code)]
1431 id: usize,
1432 tensor: Array3<Complex<F>>,
1434 #[allow(dead_code)]
1436 physical_bonds: Vec<usize>,
1437 #[allow(dead_code)]
1439 virtual_bonds: Vec<usize>,
1440 #[allow(dead_code)]
1442 position: (usize, usize),
1443}
1444
1445#[derive(Debug, Clone)]
1447pub struct TensorConnection {
1448 #[allow(dead_code)]
1450 from_node: usize,
1451 #[allow(dead_code)]
1453 to_node: usize,
1454 #[allow(dead_code)]
1456 bond_dim: usize,
1457 #[allow(dead_code)]
1459 strength: f64,
1460}
1461
1462impl<F: Float + Debug + Clone + FromPrimitive> QuantumTensorNetwork<F> {
1463 pub fn new(_sequence_length: usize, bonddimension: usize) -> Self {
1465 let mut nodes = Vec::new();
1466 let mut connections = Vec::new();
1467 let mut bond_dimensions = HashMap::new();
1468
1469 for i in 0.._sequence_length {
1471 let node = TensorNode {
1472 id: i,
1473 tensor: Array3::zeros((2, bonddimension, bonddimension)), physical_bonds: vec![i],
1475 virtual_bonds: if i == 0 {
1476 vec![1]
1477 } else if i == _sequence_length - 1 {
1478 vec![i - 1]
1479 } else {
1480 vec![i - 1, i + 1]
1481 },
1482 position: (i, 0),
1483 };
1484 nodes.push(node);
1485
1486 if i < _sequence_length - 1 {
1488 connections.push(TensorConnection {
1489 from_node: i,
1490 to_node: i + 1,
1491 bond_dim: bonddimension,
1492 strength: 1.0,
1493 });
1494 bond_dimensions.insert(i, bonddimension);
1495 }
1496 }
1497
1498 Self {
1499 nodes,
1500 connections,
1501 bond_dimensions,
1502 max_entanglement: F::from(2.0).unwrap().ln(), }
1504 }
1505
1506 pub fn encode_time_series(&mut self, data: &Array1<F>) -> Result<()> {
1508 for (i, &value) in data.iter().enumerate().take(self.nodes.len()) {
1509 let angle = value * F::from(std::f64::consts::PI).unwrap();
1511 let cos_half = (angle / F::from(2.0).unwrap()).cos();
1512 let sin_half = (angle / F::from(2.0).unwrap()).sin();
1513
1514 self.nodes[i].tensor[[0, 0, 0]] = Complex::new(cos_half, F::zero());
1516 self.nodes[i].tensor[[1, 0, 0]] = Complex::new(sin_half, F::zero());
1517 }
1518
1519 Ok(())
1520 }
1521
1522 pub fn contract_network(&self) -> Result<Array1<F>> {
1524 let num_nodes = self.nodes.len();
1525 let mut result = Array1::zeros(num_nodes);
1526
1527 for (i, node) in self.nodes.iter().enumerate() {
1529 let mut expectation = F::zero();
1530
1531 for bond in 0..node.tensor.shape()[1].min(node.tensor.shape()[2]) {
1533 let prob_0 = node.tensor[[0, bond, bond]].norm_sqr();
1534 let prob_1 = node.tensor[[1, bond, bond]].norm_sqr();
1535 expectation = expectation + prob_0 - prob_1;
1536 }
1537
1538 result[i] = expectation / F::from(node.tensor.shape()[1]).unwrap();
1539 }
1540
1541 Ok(result)
1542 }
1543
1544 pub fn variational_optimization(
1546 &mut self,
1547 target_data: &Array1<F>,
1548 max_iterations: usize,
1549 ) -> Result<F> {
1550 let mut best_loss = F::from(f64::INFINITY).unwrap();
1551
1552 for iteration in 0..max_iterations {
1553 let prediction = self.contract_network()?;
1555
1556 let mut loss = F::zero();
1558 for i in 0..prediction.len().min(target_data.len()) {
1559 let diff = prediction[i] - target_data[i];
1560 loss = loss + diff * diff;
1561 }
1562 loss = loss / F::from(prediction.len().min(target_data.len())).unwrap();
1563
1564 if loss < best_loss {
1565 best_loss = loss;
1566 }
1567
1568 self.update_tensors_variational(iteration)?;
1570 }
1571
1572 Ok(best_loss)
1573 }
1574
1575 fn update_tensors_variational(&mut self, iteration: usize) -> Result<()> {
1577 let learning_rate = F::from(0.01).unwrap();
1578 let perturbation_scale = F::from(0.1).unwrap();
1579
1580 for (node_idx, node) in self.nodes.iter_mut().enumerate() {
1581 for i in 0..node.tensor.shape()[0] {
1583 for j in 0..node.tensor.shape()[1] {
1584 for k in 0..node.tensor.shape()[2] {
1585 let perturbation = F::from(
1586 ((iteration + node_idx + i + j + k) % 1000) as f64 / 1000.0 - 0.5,
1587 )
1588 .unwrap()
1589 * perturbation_scale;
1590
1591 node.tensor[[i, j, k]] = Complex::new(
1592 node.tensor[[i, j, k]].re + perturbation * learning_rate,
1593 node.tensor[[i, j, k]].im,
1594 );
1595 }
1596 }
1597 }
1598 }
1599
1600 Ok(())
1601 }
1602
1603 pub fn calculate_entanglement_entropy(
1605 &self,
1606 region_a: &[usize],
1607 region_b: &[usize],
1608 ) -> Result<F> {
1609 let mut entropy = F::zero();
1611
1612 for &node_a in region_a {
1613 for &node_b in region_b {
1614 if node_a < self.nodes.len() && node_b < self.nodes.len() {
1615 let node_a_ref = &self.nodes[node_a];
1617 let node_b_ref = &self.nodes[node_b];
1618
1619 let mut overlap = Complex::new(F::zero(), F::zero());
1621 let min_dim = node_a_ref.tensor.shape()[1].min(node_b_ref.tensor.shape()[1]);
1622
1623 for i in 0..min_dim {
1624 for j in 0..min_dim {
1625 overlap = overlap
1626 + node_a_ref.tensor[[0, i, j]].conj()
1627 * node_b_ref.tensor[[0, i, j]]
1628 + node_a_ref.tensor[[1, i, j]].conj()
1629 * node_b_ref.tensor[[1, i, j]];
1630 }
1631 }
1632
1633 let overlap_magnitude = overlap.norm();
1634 if overlap_magnitude > F::zero() {
1635 entropy = entropy - overlap_magnitude * overlap_magnitude.ln();
1636 }
1637 }
1638 }
1639 }
1640
1641 let normalization = F::from((region_a.len() * region_b.len()) as f64).unwrap();
1643 Ok(entropy / normalization)
1644 }
1645}
1646
1647#[derive(Debug)]
1649pub struct QuantumErrorCorrection<F: Float + Debug> {
1650 code_type: ErrorCorrectionCode,
1652 physical_qubits: usize,
1654 #[allow(dead_code)]
1656 logical_qubits: usize,
1657 #[allow(dead_code)]
1659 error_rates: ErrorRates<F>,
1660 syndromes: Vec<SyndromeResult<F>>,
1662}
1663
1664#[derive(Debug, Clone)]
1666pub enum ErrorCorrectionCode {
1667 SurfaceCode,
1669 RepetitionCode,
1671 ShorCode,
1673 SteaneCode,
1675}
1676
1677#[derive(Debug, Clone)]
1679pub struct ErrorRates<F: Float> {
1680 pub bit_flip: F,
1682 pub phase_flip: F,
1684 pub depolarization: F,
1686 pub measurement: F,
1688}
1689
1690#[derive(Debug, Clone)]
1692pub struct SyndromeResult<F: Float> {
1693 pub error_pattern: Vec<bool>,
1695 pub error_probability: F,
1697 pub correction_applied: bool,
1699 pub correction_confidence: F,
1701}
1702
1703impl<F: Float + Debug + Clone + FromPrimitive> QuantumErrorCorrection<F> {
1704 pub fn new(_code_type: ErrorCorrectionCode, logicalqubits: usize) -> Self {
1706 let physical_qubits = match _code_type {
1707 ErrorCorrectionCode::RepetitionCode => logicalqubits * 3,
1708 ErrorCorrectionCode::ShorCode => logicalqubits * 9,
1709 ErrorCorrectionCode::SteaneCode => logicalqubits * 7,
1710 ErrorCorrectionCode::SurfaceCode => {
1711 let distance = (logicalqubits as f64).sqrt().ceil() as usize * 2 + 1;
1713 distance * distance
1714 }
1715 };
1716
1717 Self {
1718 code_type: _code_type,
1719 physical_qubits,
1720 logical_qubits: logicalqubits,
1721 error_rates: ErrorRates {
1722 bit_flip: F::from(0.001).unwrap(),
1723 phase_flip: F::from(0.001).unwrap(),
1724 depolarization: F::from(0.002).unwrap(),
1725 measurement: F::from(0.01).unwrap(),
1726 },
1727 syndromes: Vec::new(),
1728 }
1729 }
1730
1731 pub fn detect_and_correct(&mut self, quantumstate: &mut QuantumState<F>) -> Result<bool> {
1733 let syndrome = self.measure_syndrome(quantumstate)?;
1735
1736 if self.has_correctable_error(&syndrome) {
1737 self.apply_correction(quantumstate, &syndrome)?;
1738 return Ok(true);
1739 }
1740
1741 Ok(false)
1742 }
1743
1744 fn measure_syndrome(&self, quantumstate: &QuantumState<F>) -> Result<SyndromeResult<F>> {
1746 let mut error_pattern = vec![false; self.physical_qubits];
1747 let mut error_probability = F::zero();
1748
1749 for (i, &litude) in quantumstate
1751 .amplitudes
1752 .iter()
1753 .enumerate()
1754 .take(self.physical_qubits.min(quantumstate.amplitudes.len()))
1755 {
1756 let probability = amplitude.norm_sqr();
1757
1758 let expected_prob = F::one() / F::from(quantumstate.amplitudes.len()).unwrap();
1760 let deviation = (probability - expected_prob).abs();
1761
1762 if deviation > F::from(0.1).unwrap() {
1763 error_pattern[i] = true;
1764 error_probability = error_probability + deviation;
1765 }
1766 }
1767
1768 Ok(SyndromeResult {
1769 error_pattern,
1770 error_probability,
1771 correction_applied: false,
1772 correction_confidence: F::zero(),
1773 })
1774 }
1775
1776 fn has_correctable_error(&self, syndrome: &SyndromeResult<F>) -> bool {
1778 let error_count = syndrome.error_pattern.iter().filter(|&&x| x).count();
1779
1780 match self.code_type {
1781 ErrorCorrectionCode::RepetitionCode => error_count <= 1,
1782 ErrorCorrectionCode::ShorCode => error_count <= 4,
1783 ErrorCorrectionCode::SteaneCode => error_count <= 3,
1784 ErrorCorrectionCode::SurfaceCode => error_count <= self.physical_qubits / 4,
1785 }
1786 }
1787
1788 fn apply_correction(
1790 &mut self,
1791 quantum_state: &mut QuantumState<F>,
1792 syndrome: &SyndromeResult<F>,
1793 ) -> Result<()> {
1794 let mut total_prob = F::zero();
1796 for amplitude in quantum_state.amplitudes.iter() {
1797 total_prob = total_prob + amplitude.norm_sqr();
1798 }
1799
1800 if total_prob > F::zero() {
1801 let normalization = total_prob.sqrt();
1802 for amplitude in quantum_state.amplitudes.iter_mut() {
1803 *amplitude = *amplitude / Complex::new(normalization, F::zero());
1804 }
1805 }
1806
1807 let mut corrected_syndrome = syndrome.clone();
1809 corrected_syndrome.correction_applied = true;
1810 corrected_syndrome.correction_confidence = F::from(0.95).unwrap();
1811 self.syndromes.push(corrected_syndrome);
1812
1813 Ok(())
1814 }
1815
1816 pub fn get_error_statistics(&self) -> (usize, F, F) {
1818 let total_corrections = self.syndromes.len();
1819 let successful_corrections = self
1820 .syndromes
1821 .iter()
1822 .filter(|s| s.correction_applied)
1823 .count();
1824
1825 let success_rate = if total_corrections > 0 {
1826 F::from(successful_corrections).unwrap() / F::from(total_corrections).unwrap()
1827 } else {
1828 F::zero()
1829 };
1830
1831 let avg_confidence = if successful_corrections > 0 {
1832 self.syndromes
1833 .iter()
1834 .filter(|s| s.correction_applied)
1835 .map(|s| s.correction_confidence)
1836 .fold(F::zero(), |acc, x| acc + x)
1837 / F::from(successful_corrections).unwrap()
1838 } else {
1839 F::zero()
1840 };
1841
1842 (total_corrections, success_rate, avg_confidence)
1843 }
1844}
1845
1846#[derive(Debug)]
1848pub struct QuantumAdvantagePredictor<F: Float + Debug> {
1849 #[allow(dead_code)]
1851 feature_map: QuantumFeatureMap<F>,
1852 qml_model: QuantumMLModel<F>,
1854 classical_baseline: ClassicalBaseline<F>,
1856 advantage_metrics: AdvantageMetrics<F>,
1858}
1859
1860#[derive(Debug)]
1862pub struct QuantumFeatureMap<F: Float + Debug> {
1863 #[allow(dead_code)]
1865 encoding: QuantumEncoding,
1866 #[allow(dead_code)]
1868 num_qubits: usize,
1869 #[allow(dead_code)]
1871 parameters: Array2<F>,
1872}
1873
1874#[derive(Debug, Clone)]
1876pub enum QuantumEncoding {
1877 AmplitudeEncoding,
1879 AngleEncoding,
1881 BasisEncoding,
1883 QRAM,
1885}
1886
1887#[derive(Debug)]
1889pub struct QuantumMLModel<F: Float + Debug> {
1890 vqc: VariationalQuantumCircuit<F>,
1892 #[allow(dead_code)]
1894 kernels: Vec<QuantumKernel<F>>,
1895 #[allow(dead_code)]
1897 parameters: Array1<F>,
1898}
1899
1900#[derive(Debug)]
1902pub struct ClassicalBaseline<F: Float + Debug> {
1903 linear_weights: Array1<F>,
1905 #[allow(dead_code)]
1907 nn_weights: Array2<F>,
1908 #[allow(dead_code)]
1910 performance: PerformanceMetrics<F>,
1911}
1912
1913#[derive(Debug, Clone)]
1915pub struct PerformanceMetrics<F: Float> {
1916 pub mse: F,
1918 pub training_time: F,
1920 pub inference_time: F,
1922 pub memory_usage: F,
1924}
1925
1926#[derive(Debug, Clone)]
1928pub struct AdvantageMetrics<F: Float + Debug + Clone> {
1929 pub speedup: F,
1931 pub accuracy_improvement: F,
1933 pub memory_efficiency: F,
1935 pub advantage_threshold: usize,
1937}
1938
1939impl<F: Float + Debug + Clone + FromPrimitive + std::iter::Sum<F>> QuantumAdvantagePredictor<F> {
1940 pub fn new(_num_features: usize, numqubits: usize) -> Self {
1942 let feature_map = QuantumFeatureMap {
1943 encoding: QuantumEncoding::AngleEncoding,
1944 num_qubits: numqubits,
1945 parameters: Array2::zeros((numqubits, 3)),
1946 };
1947
1948 let qml_model = QuantumMLModel {
1949 vqc: VariationalQuantumCircuit::new(numqubits, 3, _num_features),
1950 kernels: vec![QuantumKernel::new(numqubits, QuantumKernelType::FeatureMap)],
1951 parameters: Array1::zeros(numqubits * 3),
1952 };
1953
1954 let classical_baseline = ClassicalBaseline {
1955 linear_weights: Array1::zeros(_num_features),
1956 nn_weights: Array2::zeros((_num_features, 10)),
1957 performance: PerformanceMetrics {
1958 mse: F::zero(),
1959 training_time: F::zero(),
1960 inference_time: F::zero(),
1961 memory_usage: F::zero(),
1962 },
1963 };
1964
1965 Self {
1966 feature_map,
1967 qml_model,
1968 classical_baseline,
1969 advantage_metrics: AdvantageMetrics {
1970 speedup: F::one(),
1971 accuracy_improvement: F::zero(),
1972 memory_efficiency: F::one(),
1973 advantage_threshold: 1000,
1974 },
1975 }
1976 }
1977
1978 pub fn evaluate_quantum_advantage(
1980 &mut self,
1981 training_data: &[(Array1<F>, F)],
1982 test_data: &[(Array1<F>, F)],
1983 ) -> Result<AdvantageMetrics<F>> {
1984 let quantum_start = std::time::Instant::now();
1986 self.train_quantum_model(training_data)?;
1987 let quantum_train_time = quantum_start.elapsed().as_secs_f64();
1988
1989 let classical_start = std::time::Instant::now();
1991 self.train_classical_baseline(training_data)?;
1992 let classical_train_time = classical_start.elapsed().as_secs_f64();
1993
1994 let quantum_performance = self.evaluate_quantum_model(test_data)?;
1996 let classical_performance = self.evaluate_classical_model(test_data)?;
1997
1998 self.advantage_metrics.speedup =
2000 F::from(classical_train_time / quantum_train_time.max(0.001)).unwrap();
2001 self.advantage_metrics.accuracy_improvement =
2002 (classical_performance.mse - quantum_performance.mse) / classical_performance.mse;
2003 self.advantage_metrics.memory_efficiency = classical_performance.memory_usage
2004 / quantum_performance
2005 .memory_usage
2006 .max(F::from(0.001).unwrap());
2007
2008 Ok(self.advantage_metrics.clone())
2009 }
2010
2011 fn train_quantum_model(&mut self, trainingdata: &[(Array1<F>, F)]) -> Result<()> {
2013 for _epoch in 0..10 {
2015 for (features, _target) in trainingdata.iter().take(100) {
2016 let _quantum_features = self.qml_model.vqc.forward(features)?;
2018 }
2020 }
2021 Ok(())
2022 }
2023
2024 fn train_classical_baseline(&mut self, trainingdata: &[(Array1<F>, F)]) -> Result<()> {
2026 let n = trainingdata.len();
2028 if n == 0 {
2029 return Ok(());
2030 }
2031
2032 let feature_dim = trainingdata[0].0.len();
2033 let mut x_matrix = Array2::zeros((n, feature_dim));
2034 let mut y_vector = Array1::zeros(n);
2035
2036 for (i, (features, target)) in trainingdata.iter().enumerate() {
2037 for j in 0..feature_dim.min(features.len()) {
2038 x_matrix[[i, j]] = features[j];
2039 }
2040 y_vector[i] = *target;
2041 }
2042
2043 for j in 0..feature_dim.min(self.classical_baseline.linear_weights.len()) {
2046 let mut sum = F::zero();
2047 for i in 0..n {
2048 sum = sum + x_matrix[[i, j]];
2049 }
2050 self.classical_baseline.linear_weights[j] = sum / F::from(n).unwrap();
2051 }
2052
2053 Ok(())
2054 }
2055
2056 fn evaluate_quantum_model(
2058 &self,
2059 test_data: &[(Array1<F>, F)],
2060 ) -> Result<PerformanceMetrics<F>> {
2061 let mut total_error = F::zero();
2062 let mut valid_predictions = 0;
2063
2064 let start_time = std::time::Instant::now();
2065
2066 for (features, target) in test_data.iter().take(100) {
2067 if let Ok(quantum_output) = self.qml_model.vqc.forward(features) {
2069 let prediction = quantum_output.iter().copied().sum::<F>()
2070 / F::from(quantum_output.len()).unwrap();
2071 let error = prediction - *target;
2072 total_error = total_error + error * error;
2073 valid_predictions += 1;
2074 }
2075 }
2076
2077 let inference_time = start_time.elapsed().as_secs_f64();
2078
2079 let mse = if valid_predictions > 0 {
2080 total_error / F::from(valid_predictions).unwrap()
2081 } else {
2082 F::from(f64::INFINITY).unwrap()
2083 };
2084
2085 Ok(PerformanceMetrics {
2086 mse,
2087 training_time: F::zero(), inference_time: F::from(inference_time).unwrap(),
2089 memory_usage: F::from(self.qml_model.vqc.num_qubits * 8).unwrap(), })
2091 }
2092
2093 fn evaluate_classical_model(
2095 &self,
2096 test_data: &[(Array1<F>, F)],
2097 ) -> Result<PerformanceMetrics<F>> {
2098 let mut total_error = F::zero();
2099 let mut valid_predictions = 0;
2100
2101 let start_time = std::time::Instant::now();
2102
2103 for (features, target) in test_data.iter().take(100) {
2104 let mut prediction = F::zero();
2107 for i in 0..features
2108 .len()
2109 .min(self.classical_baseline.linear_weights.len())
2110 {
2111 prediction = prediction + features[i] * self.classical_baseline.linear_weights[i];
2112 }
2113
2114 let error = prediction - *target;
2115 total_error = total_error + error * error;
2116 valid_predictions += 1;
2117 }
2118
2119 let inference_time = start_time.elapsed().as_secs_f64();
2120
2121 let mse = if valid_predictions > 0 {
2122 total_error / F::from(valid_predictions).unwrap()
2123 } else {
2124 F::from(f64::INFINITY).unwrap()
2125 };
2126
2127 Ok(PerformanceMetrics {
2128 mse,
2129 training_time: F::zero(),
2130 inference_time: F::from(inference_time).unwrap(),
2131 memory_usage: F::from(self.classical_baseline.linear_weights.len() * 8).unwrap(),
2132 })
2133 }
2134
2135 pub fn has_quantum_advantage(&self, problemsize: usize) -> bool {
2137 problemsize >= self.advantage_metrics.advantage_threshold
2138 && self.advantage_metrics.speedup > F::one()
2139 && self.advantage_metrics.accuracy_improvement > F::zero()
2140 }
2141}
2142
2143#[cfg(test)]
2145mod quantum_advanced_tests {
2146 use super::*;
2147
2148 #[test]
2149 fn test_quantum_neural_network() {
2150 let mut qnn = QuantumNeuralNetwork::<f64>::new(2, 4, 8, 3);
2151
2152 let input = Array1::from_vec((0..8).map(|i| i as f64 * 0.1).collect());
2153 let output = qnn.forward(&input).unwrap();
2154
2155 assert_eq!(output.len(), 3);
2156
2157 let training_data = vec![
2159 (input.clone(), Array1::from_vec(vec![0.1, 0.2, 0.3])),
2160 (
2161 Array1::from_vec(vec![0.1; 8]),
2162 Array1::from_vec(vec![0.2, 0.3, 0.4]),
2163 ),
2164 ];
2165
2166 let loss_history = qnn.train(&training_data, 5, 0.01).unwrap();
2167 assert_eq!(loss_history.len(), 5);
2168 }
2169
2170 #[test]
2171 fn test_quantum_ensemble() {
2172 let mut ensemble =
2173 QuantumEnsemble::<f64>::new(3, 3, 5, 2, QuantumEnsembleMethod::QuantumWeightedAverage);
2174
2175 let input = Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4, 0.5]);
2176 let prediction = ensemble.predict(&input).unwrap();
2177
2178 assert_eq!(prediction.len(), 2);
2179
2180 let training_data = vec![
2182 (input.clone(), Array1::from_vec(vec![0.6, 0.7])),
2183 (
2184 Array1::from_vec(vec![0.2; 5]),
2185 Array1::from_vec(vec![0.8, 0.9]),
2186 ),
2187 ];
2188
2189 let result = ensemble.train(&training_data, 10, 0.01);
2190 assert!(result.is_ok());
2191 }
2192
2193 #[test]
2194 fn test_quantum_ensemble_methods() {
2195 let ensemble_voting =
2196 QuantumEnsemble::<f64>::new(2, 3, 4, 2, QuantumEnsembleMethod::QuantumVoting);
2197
2198 let ensemble_interference =
2199 QuantumEnsemble::<f64>::new(2, 3, 4, 2, QuantumEnsembleMethod::QuantumInterference);
2200
2201 let input = Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4]);
2202
2203 let pred_voting = ensemble_voting.predict(&input).unwrap();
2204 let pred_interference = ensemble_interference.predict(&input).unwrap();
2205
2206 assert_eq!(pred_voting.len(), 2);
2207 assert_eq!(pred_interference.len(), 2);
2208
2209 let mut _different = false;
2211 for i in 0..2 {
2212 if (pred_voting[i] - pred_interference[i]).abs() > 1e-6 {
2213 _different = true;
2214 break;
2215 }
2216 }
2217 }
2220
2221 #[test]
2222 fn test_quantum_activation_functions() {
2223 let qnn = QuantumNeuralNetwork::<f64>::new(1, 3, 5, 2);
2224
2225 let input = Array1::from_vec(vec![0.1, 0.2, -0.1]);
2226
2227 let relu_output = qnn
2229 .apply_quantum_activation(&input, &QuantumActivation::QuantumReLU)
2230 .unwrap();
2231 let sigmoid_output = qnn
2232 .apply_quantum_activation(&input, &QuantumActivation::QuantumSigmoid)
2233 .unwrap();
2234 let tanh_output = qnn
2235 .apply_quantum_activation(&input, &QuantumActivation::QuantumTanh)
2236 .unwrap();
2237
2238 assert_eq!(relu_output.len(), 3);
2239 assert_eq!(sigmoid_output.len(), 3);
2240 assert_eq!(tanh_output.len(), 3);
2241
2242 assert!(relu_output[2] >= 0.0); for &val in &sigmoid_output {
2247 assert!((0.0..=1.0).contains(&val));
2248 }
2249
2250 for &val in &tanh_output {
2252 assert!((-1.0..=1.0).contains(&val));
2253 }
2254 }
2255
2256 #[test]
2257 fn test_quantum_tensor_network() {
2258 let mut qtn = QuantumTensorNetwork::<f64>::new(5, 3);
2259
2260 assert_eq!(qtn.nodes.len(), 5);
2262 assert_eq!(qtn.connections.len(), 4); let data = Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4, 0.5]);
2266 qtn.encode_time_series(&data).unwrap();
2267
2268 let features = qtn.contract_network().unwrap();
2270 assert_eq!(features.len(), 5);
2271
2272 let target = Array1::from_vec(vec![0.15, 0.25, 0.35, 0.45, 0.55]);
2274 let final_loss = qtn.variational_optimization(&target, 10).unwrap();
2275 assert!(final_loss >= 0.0);
2276
2277 let region_a = vec![0, 1];
2279 let region_b = vec![3, 4];
2280 let entropy = qtn
2281 .calculate_entanglement_entropy(®ion_a, ®ion_b)
2282 .unwrap();
2283 assert!(entropy >= 0.0);
2284 }
2285
2286 #[test]
2287 fn test_quantum_error_correction() {
2288 let mut qec = QuantumErrorCorrection::<f64>::new(ErrorCorrectionCode::RepetitionCode, 2);
2289
2290 assert_eq!(qec.physical_qubits, 6); assert_eq!(qec.logical_qubits, 2);
2293
2294 let mut quantum_state = QuantumState::<f64>::new(3);
2296 quantum_state.create_superposition();
2297
2298 let _correction_applied = qec.detect_and_correct(&mut quantum_state).unwrap();
2299 let (_total, success_rate, confidence) = qec.get_error_statistics();
2303 assert!((0.0..=1.0).contains(&success_rate));
2304 assert!((0.0..=1.0).contains(&confidence));
2305
2306 let qec_shor = QuantumErrorCorrection::<f64>::new(ErrorCorrectionCode::ShorCode, 1);
2308 assert_eq!(qec_shor.physical_qubits, 9); let qec_steane = QuantumErrorCorrection::<f64>::new(ErrorCorrectionCode::SteaneCode, 1);
2311 assert_eq!(qec_steane.physical_qubits, 7); }
2313
2314 #[test]
2315 fn test_quantum_advantage_predictor() {
2316 let mut qap = QuantumAdvantagePredictor::<f64>::new(4, 3);
2317
2318 let mut training_data = Vec::new();
2320 let mut test_data = Vec::new();
2321
2322 for i in 0..20 {
2323 let features = Array1::from_vec(vec![
2324 i as f64 * 0.1,
2325 (i as f64 * 0.2).sin(),
2326 (i as f64 * 0.3).cos(),
2327 i as f64 * 0.05,
2328 ]);
2329 let target = features.sum() / features.len() as f64;
2330
2331 if i < 15 {
2332 training_data.push((features, target));
2333 } else {
2334 test_data.push((features, target));
2335 }
2336 }
2337
2338 let advantage_metrics = qap
2340 .evaluate_quantum_advantage(&training_data, &test_data)
2341 .unwrap();
2342
2343 assert!(advantage_metrics.speedup >= 0.0);
2345 assert!(advantage_metrics.memory_efficiency >= 0.0);
2346 assert_eq!(advantage_metrics.advantage_threshold, 1000);
2347
2348 let _has_advantage_small = qap.has_quantum_advantage(100);
2350 let _has_advantage_large = qap.has_quantum_advantage(2000);
2351
2352 }
2355
2356 #[test]
2357 fn test_quantum_encoding_strategies() {
2358 let _feature_map = QuantumFeatureMap::<f64> {
2359 encoding: QuantumEncoding::AngleEncoding,
2360 num_qubits: 3,
2361 parameters: Array2::zeros((3, 3)),
2362 };
2363
2364 let angle_encoding = QuantumEncoding::AngleEncoding;
2366 let amplitude_encoding = QuantumEncoding::AmplitudeEncoding;
2367 let basis_encoding = QuantumEncoding::BasisEncoding;
2368 let qram_encoding = QuantumEncoding::QRAM;
2369
2370 match angle_encoding {
2372 QuantumEncoding::AngleEncoding => {}
2373 _ => panic!("Expected AngleEncoding"),
2374 }
2375
2376 match amplitude_encoding {
2377 QuantumEncoding::AmplitudeEncoding => {}
2378 _ => panic!("Expected AmplitudeEncoding"),
2379 }
2380
2381 match basis_encoding {
2382 QuantumEncoding::BasisEncoding => {}
2383 _ => panic!("Expected BasisEncoding"),
2384 }
2385
2386 match qram_encoding {
2387 QuantumEncoding::QRAM => {}
2388 _ => panic!("Expected QRAM"),
2389 }
2390 }
2391
2392 #[test]
2393 fn test_performance_metrics() {
2394 let metrics = PerformanceMetrics::<f64> {
2395 mse: 0.1,
2396 training_time: 1.5,
2397 inference_time: 0.001,
2398 memory_usage: 512.0,
2399 };
2400
2401 assert_eq!(metrics.mse, 0.1);
2402 assert_eq!(metrics.training_time, 1.5);
2403 assert_eq!(metrics.inference_time, 0.001);
2404 assert_eq!(metrics.memory_usage, 512.0);
2405 }
2406
2407 #[test]
2408 fn test_quantum_error_rates() {
2409 let error_rates = ErrorRates::<f64> {
2410 bit_flip: 0.001,
2411 phase_flip: 0.001,
2412 depolarization: 0.002,
2413 measurement: 0.01,
2414 };
2415
2416 assert!(error_rates.bit_flip >= 0.0 && error_rates.bit_flip <= 1.0);
2418 assert!(error_rates.phase_flip >= 0.0 && error_rates.phase_flip <= 1.0);
2419 assert!(error_rates.depolarization >= 0.0 && error_rates.depolarization <= 1.0);
2420 assert!(error_rates.measurement >= 0.0 && error_rates.measurement <= 1.0);
2421
2422 assert!(error_rates.measurement > error_rates.bit_flip);
2424 assert!(error_rates.measurement > error_rates.phase_flip);
2425 }
2426}