1use crate::error::{QuantRS2Error, QuantRS2Result};
9use crate::qml::{EncodingStrategy, EntanglementPattern, QMLConfig, QMLLayer};
10use scirs2_core::ndarray::{Array1, Array2};
11use scirs2_core::Complex64;
12use std::collections::HashMap;
13use std::sync::Arc;
/// Configuration for a quantum feature-map kernel.
#[derive(Debug, Clone)]
pub struct QuantumKernelConfig {
 /// Number of qubits; must equal the classical feature dimension of the data.
 pub num_qubits: usize,
 /// Feature-map circuit family used to embed classical data.
 pub feature_map: FeatureMapType,
 /// Number of feature-map repetitions (circuit depth).
 pub reps: usize,
 /// Entanglement pattern between qubits.
 /// NOTE(review): not currently read by `QuantumKernel` — confirm intended use.
 pub entanglement: EntanglementPattern,
 /// Multiplier applied to data values before they are used as rotation angles.
 pub parameter_scaling: f64,
}
28impl Default for QuantumKernelConfig {
29 fn default() -> Self {
30 Self {
31 num_qubits: 4,
32 feature_map: FeatureMapType::ZZFeatureMap,
33 reps: 2,
34 entanglement: EntanglementPattern::Full,
35 parameter_scaling: 2.0,
36 }
37 }
38}
/// Families of data-encoding (feature-map) circuits supported by the kernel.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FeatureMapType {
 /// Per-qubit RZ/RY rotations followed by pairwise ZZ entangling phases.
 ZZFeatureMap,
 /// Alternating RX/RZ single-qubit rotations, repeated `reps` times.
 PauliFeatureMap,
 /// Hadamards on all qubits followed by data-squared Z phases (IQP-style).
 IQPFeatureMap,
 /// Simple RY/RZ encoding using raw data values as angles.
 TrainableFeatureMap,
}
/// Quantum kernel evaluator: embeds classical samples as statevectors via a
/// feature map and computes fidelity-style kernel values between them.
pub struct QuantumKernel {
 // Circuit/encoding configuration.
 config: QuantumKernelConfig,
 // Gram matrix from the most recent `kernel_matrix` call, if any.
 kernel_cache: Option<Array2<f64>>,
 // Data used to build the cached Gram matrix, if any.
 training_data: Option<Array2<f64>>,
}
60impl QuantumKernel {
61 pub const fn new(config: QuantumKernelConfig) -> Self {
63 Self {
64 config,
65 kernel_cache: None,
66 training_data: None,
67 }
68 }
69 pub fn kernel(&self, x1: &[f64], x2: &[f64]) -> QuantRS2Result<f64> {
71 if x1.len() != self.config.num_qubits || x2.len() != self.config.num_qubits {
72 return Err(QuantRS2Error::InvalidInput(format!(
73 "Data dimension {} doesn't match num_qubits {}",
74 x1.len(),
75 self.config.num_qubits
76 )));
77 }
78 let state1 = self.encode_data(x1)?;
79 let state2 = self.encode_data(x2)?;
80 let inner: Complex64 = state1
81 .iter()
82 .zip(state2.iter())
83 .map(|(a, b)| a.conj() * b)
84 .sum();
85 Ok(inner.norm_sqr())
86 }
87 pub fn kernel_matrix(&mut self, data: &Array2<f64>) -> QuantRS2Result<Array2<f64>> {
89 let n_samples = data.nrows();
90 let mut kernel_matrix = Array2::zeros((n_samples, n_samples));
91 for i in 0..n_samples {
92 for j in i..n_samples {
93 let x_i = data.row(i).to_vec();
94 let x_j = data.row(j).to_vec();
95 let k_ij = self.kernel(&x_i, &x_j)?;
96 kernel_matrix[[i, j]] = k_ij;
97 kernel_matrix[[j, i]] = k_ij;
98 }
99 }
100 self.kernel_cache = Some(kernel_matrix.clone());
101 self.training_data = Some(data.clone());
102 Ok(kernel_matrix)
103 }
104 fn encode_data(&self, data: &[f64]) -> QuantRS2Result<Array1<Complex64>> {
106 let dim = 1 << self.config.num_qubits;
107 let mut state = Array1::zeros(dim);
108 state[0] = Complex64::new(1.0, 0.0);
109 match self.config.feature_map {
110 FeatureMapType::ZZFeatureMap => {
111 self.apply_zz_feature_map(&mut state, data)?;
112 }
113 FeatureMapType::PauliFeatureMap => {
114 self.apply_pauli_feature_map(&mut state, data)?;
115 }
116 FeatureMapType::IQPFeatureMap => {
117 self.apply_iqp_feature_map(&mut state, data)?;
118 }
119 FeatureMapType::TrainableFeatureMap => {
120 self.apply_trainable_feature_map(&mut state, data)?;
121 }
122 }
123 Ok(state)
124 }
125 fn apply_zz_feature_map(
126 &self,
127 state: &mut Array1<Complex64>,
128 data: &[f64],
129 ) -> QuantRS2Result<()> {
130 for _ in 0..self.config.reps {
131 for (i, &x) in data.iter().enumerate() {
132 let angle = self.config.parameter_scaling * x;
133 Self::apply_rz(state, i, angle);
134 Self::apply_ry(state, i, angle);
135 }
136 for i in 0..self.config.num_qubits - 1 {
137 let angle = self.config.parameter_scaling
138 * (std::f64::consts::PI - data[i])
139 * (std::f64::consts::PI - data[i + 1]);
140 Self::apply_rzz(state, i, i + 1, angle);
141 }
142 }
143 Ok(())
144 }
145 fn apply_pauli_feature_map(
146 &self,
147 state: &mut Array1<Complex64>,
148 data: &[f64],
149 ) -> QuantRS2Result<()> {
150 for _ in 0..self.config.reps {
151 for (i, &x) in data.iter().enumerate() {
152 let angle = self.config.parameter_scaling * x;
153 Self::apply_rx(state, i, angle);
154 Self::apply_rz(state, i, angle);
155 }
156 }
157 Ok(())
158 }
159 fn apply_iqp_feature_map(
160 &self,
161 state: &mut Array1<Complex64>,
162 data: &[f64],
163 ) -> QuantRS2Result<()> {
164 for i in 0..self.config.num_qubits {
165 Self::apply_hadamard(state, i);
166 }
167 for (i, &x) in data.iter().enumerate() {
168 let angle = self.config.parameter_scaling * x * x;
169 Self::apply_rz(state, i, angle);
170 }
171 Ok(())
172 }
173 fn apply_trainable_feature_map(
174 &self,
175 state: &mut Array1<Complex64>,
176 data: &[f64],
177 ) -> QuantRS2Result<()> {
178 for _ in 0..self.config.reps {
179 for (i, &x) in data.iter().enumerate() {
180 Self::apply_ry(state, i, x);
181 Self::apply_rz(state, i, x);
182 }
183 }
184 Ok(())
185 }
186 fn apply_rx(state: &mut Array1<Complex64>, qubit: usize, angle: f64) {
187 let cos = (angle / 2.0).cos();
188 let sin = (angle / 2.0).sin();
189 let dim = state.len();
190 let mask = 1 << qubit;
191 for i in 0..dim / 2 {
192 let idx0 = (i & !(mask >> 1)) | ((i & (mask >> 1)) << 1);
193 let idx1 = idx0 | mask;
194 if idx1 < dim {
195 let a = state[idx0];
196 let b = state[idx1];
197 state[idx0] = Complex64::new(cos, 0.0) * a + Complex64::new(0.0, -sin) * b;
198 state[idx1] = Complex64::new(0.0, -sin) * a + Complex64::new(cos, 0.0) * b;
199 }
200 }
201 }
202 fn apply_ry(state: &mut Array1<Complex64>, qubit: usize, angle: f64) {
203 let cos = (angle / 2.0).cos();
204 let sin = (angle / 2.0).sin();
205 let dim = state.len();
206 let mask = 1 << qubit;
207 for i in 0..dim / 2 {
208 let idx0 = (i & !(mask >> 1)) | ((i & (mask >> 1)) << 1);
209 let idx1 = idx0 | mask;
210 if idx1 < dim {
211 let a = state[idx0];
212 let b = state[idx1];
213 state[idx0] = Complex64::new(cos, 0.0) * a - Complex64::new(sin, 0.0) * b;
214 state[idx1] = Complex64::new(sin, 0.0) * a + Complex64::new(cos, 0.0) * b;
215 }
216 }
217 }
218 fn apply_rz(state: &mut Array1<Complex64>, qubit: usize, angle: f64) {
219 let dim = state.len();
220 let mask = 1 << qubit;
221 for i in 0..dim {
222 if i & mask != 0 {
223 state[i] *= Complex64::new(0.0, angle / 2.0).exp();
224 } else {
225 state[i] *= Complex64::new(0.0, -angle / 2.0).exp();
226 }
227 }
228 }
229 fn apply_rzz(state: &mut Array1<Complex64>, q1: usize, q2: usize, angle: f64) {
230 let dim = state.len();
231 let mask1 = 1 << q1;
232 let mask2 = 1 << q2;
233 for i in 0..dim {
234 let bit1 = (i & mask1) != 0;
235 let bit2 = (i & mask2) != 0;
236 let parity = if bit1 == bit2 { 1.0 } else { -1.0 };
237 state[i] *= Complex64::new(0.0, parity * angle / 2.0).exp();
238 }
239 }
240 fn apply_hadamard(state: &mut Array1<Complex64>, qubit: usize) {
241 let inv_sqrt2 = 1.0 / std::f64::consts::SQRT_2;
242 let dim = state.len();
243 let mask = 1 << qubit;
244 for i in 0..dim / 2 {
245 let idx0 = (i & !(mask >> 1)) | ((i & (mask >> 1)) << 1);
246 let idx1 = idx0 | mask;
247 if idx1 < dim {
248 let a = state[idx0];
249 let b = state[idx1];
250 state[idx0] = Complex64::new(inv_sqrt2, 0.0) * (a + b);
251 state[idx1] = Complex64::new(inv_sqrt2, 0.0) * (a - b);
252 }
253 }
254 }
255}
/// Support vector machine classifier built on a quantum kernel.
pub struct QuantumSVM {
 // Kernel used for all pairwise similarity evaluations.
 kernel: QuantumKernel,
 // Indices into the training data of samples with non-negligible alpha.
 support_vectors: Vec<usize>,
 // Dual coefficients, one per training sample.
 alphas: Vec<f64>,
 // Decision-function bias term.
 bias: f64,
 // Training labels, one per sample (sign convention: +/-1, per `predict`).
 labels: Vec<f64>,
 // Training samples retained for prediction; `None` until `fit` is called.
 training_data: Option<Array2<f64>>,
}
271impl QuantumSVM {
272 pub const fn new(kernel_config: QuantumKernelConfig) -> Self {
274 Self {
275 kernel: QuantumKernel::new(kernel_config),
276 support_vectors: Vec::new(),
277 alphas: Vec::new(),
278 bias: 0.0,
279 labels: Vec::new(),
280 training_data: None,
281 }
282 }
283 pub fn fit(&mut self, data: &Array2<f64>, labels: &[f64], c: f64) -> QuantRS2Result<()> {
285 let n_samples = data.nrows();
286 let kernel_matrix = self.kernel.kernel_matrix(data)?;
287 self.alphas = vec![0.0; n_samples];
288 self.labels = labels.to_vec();
289 self.training_data = Some(data.clone());
290 let learning_rate = 0.01;
291 let max_iter = 100;
292 for _ in 0..max_iter {
293 for i in 0..n_samples {
294 let mut grad = 1.0;
295 for j in 0..n_samples {
296 grad -= self.alphas[j] * labels[i] * labels[j] * kernel_matrix[[i, j]];
297 }
298 self.alphas[i] += learning_rate * grad;
299 self.alphas[i] = self.alphas[i].clamp(0.0, c);
300 }
301 }
302 let epsilon = 1e-6;
303 self.support_vectors = (0..n_samples)
304 .filter(|&i| self.alphas[i] > epsilon)
305 .collect();
306 if !self.support_vectors.is_empty() {
307 let sv = self.support_vectors[0];
308 let mut b = labels[sv];
309 for j in 0..n_samples {
310 b -= self.alphas[j] * labels[j] * kernel_matrix[[sv, j]];
311 }
312 self.bias = b;
313 }
314 Ok(())
315 }
316 pub fn predict(&self, x: &[f64]) -> QuantRS2Result<f64> {
318 let training_data = self
319 .training_data
320 .as_ref()
321 .ok_or_else(|| QuantRS2Error::RuntimeError("Model not trained".to_string()))?;
322 let mut decision = self.bias;
323 for &i in &self.support_vectors {
324 let x_i = training_data.row(i).to_vec();
325 let k = self.kernel.kernel(&x_i, x)?;
326 decision += self.alphas[i] * self.labels[i] * k;
327 }
328 Ok(if decision >= 0.0 { 1.0 } else { -1.0 })
329 }
330 pub fn predict_proba(&self, x: &[f64]) -> QuantRS2Result<f64> {
332 let training_data = self
333 .training_data
334 .as_ref()
335 .ok_or_else(|| QuantRS2Error::RuntimeError("Model not trained".to_string()))?;
336 let mut decision = self.bias;
337 for &i in &self.support_vectors {
338 let x_i = training_data.row(i).to_vec();
339 let k = self.kernel.kernel(&x_i, x)?;
340 decision += self.alphas[i] * self.labels[i] * k;
341 }
342 Ok(1.0 / (1.0 + (-decision).exp()))
343 }
344}
/// Hyper-parameters controlling quantum transfer learning.
#[derive(Debug, Clone)]
pub struct TransferLearningConfig {
    /// When true, pretrained parameters are not updated during fine-tuning.
    pub freeze_pretrained: bool,
    /// Number of fine-tuning epochs.
    pub fine_tune_epochs: usize,
    /// Learning rate used during fine-tuning.
    pub fine_tune_lr: f64,
    /// Layer index at which the pretrained circuit is split.
    pub split_layer: usize,
}
impl Default for TransferLearningConfig {
    /// Defaults: frozen pretrained block, 50 epochs, lr 0.01, split at layer 2.
    fn default() -> Self {
        TransferLearningConfig {
            split_layer: 2,
            fine_tune_lr: 0.01,
            fine_tune_epochs: 50,
            freeze_pretrained: true,
        }
    }
}
/// Quantum model that reuses pretrained circuit parameters and appends a new
/// trainable parameter block for fine-tuning.
pub struct QuantumTransferLearning {
 // Parameters copied from the pretrained model.
 pretrained_params: Vec<f64>,
 // Newly added trainable parameters (three per qubit, see `from_pretrained`).
 new_params: Vec<f64>,
 // Fine-tuning configuration.
 config: TransferLearningConfig,
 // Number of qubits in the circuit.
 num_qubits: usize,
}
378impl QuantumTransferLearning {
379 pub fn from_pretrained(
381 pretrained_params: Vec<f64>,
382 num_qubits: usize,
383 config: TransferLearningConfig,
384 ) -> Self {
385 let new_param_count = num_qubits * 3;
386 let new_params = vec![0.0; new_param_count];
387 Self {
388 pretrained_params,
389 new_params,
390 config,
391 num_qubits,
392 }
393 }
394 pub fn parameters(&self) -> Vec<f64> {
396 let mut params = self.pretrained_params.clone();
397 params.extend(self.new_params.clone());
398 params
399 }
400 pub fn trainable_parameters(&self) -> &[f64] {
402 if self.config.freeze_pretrained {
403 &self.new_params
404 } else {
405 &self.new_params
406 }
407 }
408 pub fn update_parameters(&mut self, new_values: &[f64]) -> QuantRS2Result<()> {
410 if new_values.len() != self.new_params.len() {
411 return Err(QuantRS2Error::InvalidInput(format!(
412 "Expected {} parameters, got {}",
413 self.new_params.len(),
414 new_values.len()
415 )));
416 }
417 self.new_params.copy_from_slice(new_values);
418 Ok(())
419 }
420 pub fn num_trainable(&self) -> usize {
422 if self.config.freeze_pretrained {
423 self.new_params.len()
424 } else {
425 self.pretrained_params.len() + self.new_params.len()
426 }
427 }
428}
/// How ensemble member predictions are combined.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VotingStrategy {
 /// Majority vote producing a hard 0.0/1.0 label.
 Hard,
 /// Unweighted average of raw predictions.
 Soft,
 /// Average weighted by each model's registered weight.
 Weighted,
}
/// Ensemble of quantum models, each represented by its parameter vector.
pub struct QuantumEnsemble {
 // Parameter vectors of the member models.
 models: Vec<Vec<f64>>,
 // Voting weight of each model (parallel to `models`).
 weights: Vec<f64>,
 // Strategy for combining member predictions.
 voting: VotingStrategy,
 // Number of qubits per member circuit.
 // NOTE(review): stored but not read by any method in this file.
 num_qubits: usize,
}
450impl QuantumEnsemble {
451 pub const fn new(num_qubits: usize, voting: VotingStrategy) -> Self {
453 Self {
454 models: Vec::new(),
455 weights: Vec::new(),
456 voting,
457 num_qubits,
458 }
459 }
460 pub fn add_model(&mut self, params: Vec<f64>, weight: f64) {
462 self.models.push(params);
463 self.weights.push(weight);
464 }
465 pub fn num_models(&self) -> usize {
467 self.models.len()
468 }
469 pub fn combine_predictions(&self, predictions: &[f64]) -> QuantRS2Result<f64> {
471 if predictions.len() != self.models.len() {
472 return Err(QuantRS2Error::InvalidInput(
473 "Predictions count doesn't match models".to_string(),
474 ));
475 }
476 match self.voting {
477 VotingStrategy::Hard => {
478 let sum: f64 = predictions.iter().sum();
479 Ok(if sum > 0.5 * predictions.len() as f64 {
480 1.0
481 } else {
482 0.0
483 })
484 }
485 VotingStrategy::Soft => {
486 let avg = predictions.iter().sum::<f64>() / predictions.len() as f64;
487 Ok(avg)
488 }
489 VotingStrategy::Weighted => {
490 let total_weight: f64 = self.weights.iter().sum();
491 let weighted_sum: f64 = predictions
492 .iter()
493 .zip(self.weights.iter())
494 .map(|(p, w)| p * w)
495 .sum();
496 Ok(weighted_sum / total_weight)
497 }
498 }
499 }
500 pub fn bagging_sample(data: &Array2<f64>, sample_size: usize, seed: u64) -> Array2<f64> {
502 use scirs2_core::random::prelude::*;
503 let mut rng = seeded_rng(seed);
504 let n_samples = data.nrows();
505 let n_features = data.ncols();
506 let mut sampled = Array2::zeros((sample_size, n_features));
507 for i in 0..sample_size {
508 let idx = rng.gen_range(0..n_samples);
509 sampled.row_mut(i).assign(&data.row(idx));
510 }
511 sampled
512 }
513}
/// Binary-classification metrics for QML model outputs.
///
/// Values are treated as binary with a 0.5 decision threshold on both
/// predictions and labels.
pub struct QMLMetrics;
impl QMLMetrics {
    /// Fraction of predictions within 0.5 of their label.
    /// Returns 0.0 on a length mismatch.
    pub fn accuracy(predictions: &[f64], labels: &[f64]) -> f64 {
        if predictions.len() != labels.len() {
            return 0.0;
        }
        let mut hits = 0usize;
        for (p, l) in predictions.iter().zip(labels) {
            if (p - l).abs() < 0.5 {
                hits += 1;
            }
        }
        hits as f64 / predictions.len() as f64
    }
    /// Precision = TP / (TP + FP); 0.0 when nothing was predicted positive.
    pub fn precision(predictions: &[f64], labels: &[f64]) -> f64 {
        let (tp, fp, _tn, _fn) = Self::confusion_counts(predictions, labels);
        let denom = tp + fp;
        if denom == 0 {
            0.0
        } else {
            tp as f64 / denom as f64
        }
    }
    /// Recall = TP / (TP + FN); 0.0 when there are no positive labels.
    pub fn recall(predictions: &[f64], labels: &[f64]) -> f64 {
        let (tp, _fp, _tn, fn_) = Self::confusion_counts(predictions, labels);
        let denom = tp + fn_;
        if denom == 0 {
            0.0
        } else {
            tp as f64 / denom as f64
        }
    }
    /// Harmonic mean of precision and recall; 0.0 when both are zero.
    pub fn f1_score(predictions: &[f64], labels: &[f64]) -> f64 {
        let p = Self::precision(predictions, labels);
        let r = Self::recall(predictions, labels);
        if p + r == 0.0 {
            0.0
        } else {
            2.0 * p * r / (p + r)
        }
    }
    /// Counts (tp, fp, tn, fn) at a 0.5 threshold over paired elements.
    fn confusion_counts(predictions: &[f64], labels: &[f64]) -> (usize, usize, usize, usize) {
        predictions.iter().zip(labels.iter()).fold(
            (0, 0, 0, 0),
            |(tp, fp, tn, fn_), (&p, &l)| match (p >= 0.5, l >= 0.5) {
                (true, true) => (tp + 1, fp, tn, fn_),
                (true, false) => (tp, fp + 1, tn, fn_),
                (false, false) => (tp, fp, tn + 1, fn_),
                (false, true) => (tp, fp, tn, fn_ + 1),
            },
        )
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Default config exposes the documented 4-qubit / 2-repetition setup.
    #[test]
    fn test_quantum_kernel_config_default() {
        let config = QuantumKernelConfig::default();
        assert_eq!(config.num_qubits, 4);
        assert_eq!(config.reps, 2);
    }
    // A freshly built kernel has no cached Gram matrix.
    #[test]
    fn test_quantum_kernel_creation() {
        let config = QuantumKernelConfig {
            num_qubits: 2,
            ..Default::default()
        };
        let kernel = QuantumKernel::new(config);
        assert!(kernel.kernel_cache.is_none());
    }
    // Smoke test: kernel evaluation succeeds and is non-negative.
    // NOTE(review): for identical inputs a fidelity kernel should be ~1.0;
    // consider tightening this assertion.
    #[test]
    fn test_quantum_kernel_value() {
        let config = QuantumKernelConfig {
            num_qubits: 2,
            reps: 1,
            ..Default::default()
        };
        let kernel = QuantumKernel::new(config);
        let x1 = vec![0.5, 0.3];
        let x2 = vec![0.5, 0.3];
        let k = kernel
            .kernel(&x1, &x2)
            .expect("Failed to compute kernel value");
        assert!(k >= 0.0, "Kernel value should be non-negative");
    }
    // An untrained SVM starts with no support vectors.
    #[test]
    fn test_quantum_svm_creation() {
        let config = QuantumKernelConfig {
            num_qubits: 2,
            ..Default::default()
        };
        let qsvm = QuantumSVM::new(config);
        assert!(qsvm.support_vectors.is_empty());
    }
    // from_pretrained keeps the pretrained block and allocates new params.
    #[test]
    fn test_transfer_learning_creation() {
        let pretrained = vec![0.1, 0.2, 0.3, 0.4];
        let config = TransferLearningConfig::default();
        let model = QuantumTransferLearning::from_pretrained(pretrained.clone(), 2, config);
        assert_eq!(model.pretrained_params.len(), 4);
        assert!(!model.new_params.is_empty());
    }
    // Models and weights are registered pairwise.
    #[test]
    fn test_ensemble_creation() {
        let mut ensemble = QuantumEnsemble::new(2, VotingStrategy::Soft);
        ensemble.add_model(vec![0.1, 0.2], 1.0);
        ensemble.add_model(vec![0.3, 0.4], 1.0);
        assert_eq!(ensemble.num_models(), 2);
    }
    // Hard voting thresholds the mean prediction at 0.5.
    #[test]
    fn test_ensemble_voting() {
        let mut ensemble = QuantumEnsemble::new(2, VotingStrategy::Hard);
        ensemble.add_model(vec![0.1], 1.0);
        ensemble.add_model(vec![0.2], 1.0);
        ensemble.add_model(vec![0.3], 1.0);
        // Mean 0.3 < 0.5 -> negative vote.
        let predictions = vec![0.2, 0.3, 0.4];
        let result = ensemble
            .combine_predictions(&predictions)
            .expect("Failed to combine predictions (hard voting low)");
        assert_eq!(result, 0.0);
        // Mean 0.7 > 0.5 -> positive vote.
        let predictions = vec![0.6, 0.7, 0.8];
        let result = ensemble
            .combine_predictions(&predictions)
            .expect("Failed to combine predictions (hard voting high)");
        assert_eq!(result, 1.0);
    }
    // 3 of 4 predictions match -> accuracy 0.75.
    #[test]
    fn test_metrics_accuracy() {
        let predictions = vec![1.0, 0.0, 1.0, 1.0];
        let labels = vec![1.0, 0.0, 0.0, 1.0];
        let acc = QMLMetrics::accuracy(&predictions, &labels);
        assert_eq!(acc, 0.75);
    }
    // TP=1, FP=1, FN=1 -> precision and recall both 0.5.
    #[test]
    fn test_metrics_precision_recall() {
        let predictions = vec![1.0, 1.0, 0.0, 0.0];
        let labels = vec![1.0, 0.0, 0.0, 1.0];
        let precision = QMLMetrics::precision(&predictions, &labels);
        let recall = QMLMetrics::recall(&predictions, &labels);
        assert_eq!(precision, 0.5);
        assert_eq!(recall, 0.5);
    }
    // Bootstrap sampling preserves the feature count and honors sample_size.
    #[test]
    fn test_bagging_sample() {
        let data = Array2::from_shape_vec((10, 3), (0..30).map(|x| x as f64).collect())
            .expect("Failed to create test array for bagging");
        let sample = QuantumEnsemble::bagging_sample(&data, 5, 42);
        assert_eq!(sample.nrows(), 5);
        assert_eq!(sample.ncols(), 3);
    }
}