use crate::error::{QuantRS2Error, QuantRS2Result};
use crate::qml::EntanglementPattern;
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::Complex64;

/// Configuration for a quantum kernel.
#[derive(Debug, Clone)]
pub struct QuantumKernelConfig {
    /// Number of qubits (must match the feature dimension).
    pub num_qubits: usize,
    /// Feature map used to encode classical data.
    pub feature_map: FeatureMapType,
    /// Number of repetitions of the feature map.
    pub reps: usize,
    /// Entanglement pattern for the feature map.
    pub entanglement: EntanglementPattern,
    /// Scaling factor applied to data values before use as rotation angles.
    pub parameter_scaling: f64,
}

impl Default for QuantumKernelConfig {
    fn default() -> Self {
        Self {
            num_qubits: 4,
            feature_map: FeatureMapType::ZZFeatureMap,
            reps: 2,
            entanglement: EntanglementPattern::Full,
            parameter_scaling: 2.0,
        }
    }
}

/// Supported data-encoding feature maps.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FeatureMapType {
    /// ZZ feature map with single-qubit rotations and ZZ entanglers.
    ZZFeatureMap,
    /// Pauli feature map with RX/RZ data rotations.
    PauliFeatureMap,
    /// IQP-style feature map with Hadamards and diagonal rotations.
    IQPFeatureMap,
    /// Feature map whose rotation angles are used as trainable parameters.
    TrainableFeatureMap,
}

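/// Fidelity-style quantum kernel: k(x1, x2) = |<phi(x1)|phi(x2)>|^2, where
/// phi(x) is the statevector produced by the configured feature map.
///
/// A minimal usage sketch (illustrative values, assuming the defaults above):
///
/// ```ignore
/// let kernel = QuantumKernel::new(QuantumKernelConfig::default());
/// let k = kernel.kernel(&[0.1, 0.2, 0.3, 0.4], &[0.4, 0.3, 0.2, 0.1]).unwrap();
/// assert!((0.0..=1.0).contains(&k));
/// ```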
pub struct QuantumKernel {
    /// Kernel configuration.
    config: QuantumKernelConfig,
    /// Cached kernel matrix from the most recent `kernel_matrix` call.
    kernel_cache: Option<Array2<f64>>,
    /// Training data associated with the cached kernel matrix.
    training_data: Option<Array2<f64>>,
}

impl QuantumKernel {
    /// Create a new kernel with the given configuration.
    pub fn new(config: QuantumKernelConfig) -> Self {
        Self {
            config,
            kernel_cache: None,
            training_data: None,
        }
    }

    /// Compute the kernel value k(x1, x2) = |<phi(x1)|phi(x2)>|^2 between two
    /// data vectors.
    pub fn kernel(&self, x1: &[f64], x2: &[f64]) -> QuantRS2Result<f64> {
        if x1.len() != self.config.num_qubits || x2.len() != self.config.num_qubits {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Data dimensions {} and {} don't match num_qubits {}",
                x1.len(),
                x2.len(),
                self.config.num_qubits
            )));
        }

        let state1 = self.encode_data(x1)?;
        let state2 = self.encode_data(x2)?;

        // Inner product <phi(x1)|phi(x2)> of the two encoded states.
        let inner: Complex64 = state1
            .iter()
            .zip(state2.iter())
            .map(|(a, b)| a.conj() * b)
            .sum();

        Ok(inner.norm_sqr())
    }

    /// Compute the full kernel (Gram) matrix for a dataset and cache it.
    pub fn kernel_matrix(&mut self, data: &Array2<f64>) -> QuantRS2Result<Array2<f64>> {
        let n_samples = data.nrows();
        let mut kernel_matrix = Array2::zeros((n_samples, n_samples));

        for i in 0..n_samples {
            for j in i..n_samples {
                let x_i = data.row(i).to_vec();
                let x_j = data.row(j).to_vec();

                // The kernel is symmetric, so fill both halves.
                let k_ij = self.kernel(&x_i, &x_j)?;
                kernel_matrix[[i, j]] = k_ij;
                kernel_matrix[[j, i]] = k_ij;
            }
        }

        self.kernel_cache = Some(kernel_matrix.clone());
        self.training_data = Some(data.clone());

        Ok(kernel_matrix)
    }

    /// Encode a classical data vector into a statevector using the configured
    /// feature map, starting from |0...0>.
    fn encode_data(&self, data: &[f64]) -> QuantRS2Result<Array1<Complex64>> {
        let dim = 1 << self.config.num_qubits;
        let mut state = Array1::zeros(dim);
        state[0] = Complex64::new(1.0, 0.0);

        match self.config.feature_map {
            FeatureMapType::ZZFeatureMap => {
                self.apply_zz_feature_map(&mut state, data)?;
            }
            FeatureMapType::PauliFeatureMap => {
                self.apply_pauli_feature_map(&mut state, data)?;
            }
            FeatureMapType::IQPFeatureMap => {
                self.apply_iqp_feature_map(&mut state, data)?;
            }
            FeatureMapType::TrainableFeatureMap => {
                self.apply_trainable_feature_map(&mut state, data)?;
            }
        }

        Ok(state)
    }

    /// ZZ-style feature map: single-qubit data rotations followed by
    /// nearest-neighbour ZZ entanglers, repeated `reps` times.
    fn apply_zz_feature_map(
        &self,
        state: &mut Array1<Complex64>,
        data: &[f64],
    ) -> QuantRS2Result<()> {
        for _ in 0..self.config.reps {
            // Single-qubit encoding rotations.
            for (i, &x) in data.iter().enumerate() {
                let angle = self.config.parameter_scaling * x;
                self.apply_rz(state, i, angle);
                self.apply_ry(state, i, angle);
            }

            // Nearest-neighbour ZZ entangling rotations.
            for i in 0..self.config.num_qubits - 1 {
                let angle = self.config.parameter_scaling
                    * (std::f64::consts::PI - data[i])
                    * (std::f64::consts::PI - data[i + 1]);
                self.apply_rzz(state, i, i + 1, angle);
            }
        }
        Ok(())
    }

    /// Pauli feature map: alternating RX and RZ data rotations, repeated
    /// `reps` times.
    fn apply_pauli_feature_map(
        &self,
        state: &mut Array1<Complex64>,
        data: &[f64],
    ) -> QuantRS2Result<()> {
        for _ in 0..self.config.reps {
            for (i, &x) in data.iter().enumerate() {
                let angle = self.config.parameter_scaling * x;
                self.apply_rx(state, i, angle);
                self.apply_rz(state, i, angle);
            }
        }
        Ok(())
    }

    /// IQP-style feature map: Hadamards on all qubits followed by diagonal
    /// RZ rotations with quadratic data angles.
    fn apply_iqp_feature_map(
        &self,
        state: &mut Array1<Complex64>,
        data: &[f64],
    ) -> QuantRS2Result<()> {
        for i in 0..self.config.num_qubits {
            self.apply_hadamard(state, i);
        }

        for (i, &x) in data.iter().enumerate() {
            let angle = self.config.parameter_scaling * x * x;
            self.apply_rz(state, i, angle);
        }

        Ok(())
    }

    /// Trainable feature map: RY/RZ rotations using the raw data values as
    /// angles, repeated `reps` times.
    fn apply_trainable_feature_map(
        &self,
        state: &mut Array1<Complex64>,
        data: &[f64],
    ) -> QuantRS2Result<()> {
        for _ in 0..self.config.reps {
            for (i, &x) in data.iter().enumerate() {
                self.apply_ry(state, i, x);
                self.apply_rz(state, i, x);
            }
        }
        Ok(())
    }

    /// Apply an RX(angle) rotation to `qubit`.
    fn apply_rx(&self, state: &mut Array1<Complex64>, qubit: usize, angle: f64) {
        let cos = (angle / 2.0).cos();
        let sin = (angle / 2.0).sin();
        let dim = state.len();
        let mask = 1 << qubit;

        for i in 0..dim / 2 {
            // Build the basis-state index with the target qubit cleared by
            // inserting a 0 bit at the qubit position; idx1 sets that bit.
            let idx0 = ((i & !(mask - 1)) << 1) | (i & (mask - 1));
            let idx1 = idx0 | mask;

            let a = state[idx0];
            let b = state[idx1];

            state[idx0] = Complex64::new(cos, 0.0) * a + Complex64::new(0.0, -sin) * b;
            state[idx1] = Complex64::new(0.0, -sin) * a + Complex64::new(cos, 0.0) * b;
        }
    }

    /// Apply an RY(angle) rotation to `qubit`.
    fn apply_ry(&self, state: &mut Array1<Complex64>, qubit: usize, angle: f64) {
        let cos = (angle / 2.0).cos();
        let sin = (angle / 2.0).sin();
        let dim = state.len();
        let mask = 1 << qubit;

        for i in 0..dim / 2 {
            let idx0 = ((i & !(mask - 1)) << 1) | (i & (mask - 1));
            let idx1 = idx0 | mask;

            let a = state[idx0];
            let b = state[idx1];

            state[idx0] = Complex64::new(cos, 0.0) * a - Complex64::new(sin, 0.0) * b;
            state[idx1] = Complex64::new(sin, 0.0) * a + Complex64::new(cos, 0.0) * b;
        }
    }

    /// Apply an RZ(angle) rotation to `qubit`: phase e^{-i angle/2} on |0> and
    /// e^{+i angle/2} on |1>.
    fn apply_rz(&self, state: &mut Array1<Complex64>, qubit: usize, angle: f64) {
        let dim = state.len();
        let mask = 1 << qubit;

        for i in 0..dim {
            if i & mask != 0 {
                state[i] *= Complex64::new(0.0, angle / 2.0).exp();
            } else {
                state[i] *= Complex64::new(0.0, -angle / 2.0).exp();
            }
        }
    }

    /// Apply a ZZ rotation between qubits `q1` and `q2`: a diagonal phase whose
    /// sign depends on the parity of the two qubit values.
    fn apply_rzz(&self, state: &mut Array1<Complex64>, q1: usize, q2: usize, angle: f64) {
        let dim = state.len();
        let mask1 = 1 << q1;
        let mask2 = 1 << q2;

        for i in 0..dim {
            let bit1 = (i & mask1) != 0;
            let bit2 = (i & mask2) != 0;
            let parity = if bit1 == bit2 { 1.0 } else { -1.0 };

            state[i] *= Complex64::new(0.0, parity * angle / 2.0).exp();
        }
    }

    /// Apply a Hadamard gate to `qubit`.
    fn apply_hadamard(&self, state: &mut Array1<Complex64>, qubit: usize) {
        let inv_sqrt2 = 1.0 / std::f64::consts::SQRT_2;
        let dim = state.len();
        let mask = 1 << qubit;

        for i in 0..dim / 2 {
            let idx0 = ((i & !(mask - 1)) << 1) | (i & (mask - 1));
            let idx1 = idx0 | mask;

            let a = state[idx0];
            let b = state[idx1];

            state[idx0] = Complex64::new(inv_sqrt2, 0.0) * (a + b);
            state[idx1] = Complex64::new(inv_sqrt2, 0.0) * (a - b);
        }
    }
}

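/// Quantum support vector machine: a kernel SVM whose Gram matrix is computed
/// with a `QuantumKernel`.
///
/// A minimal usage sketch (illustrative data, labels in {-1, +1}):
///
/// ```ignore
/// let mut qsvm = QuantumSVM::new(QuantumKernelConfig { num_qubits: 2, ..Default::default() });
/// let data = Array2::from_shape_vec((4, 2), vec![0.1, 0.1, 0.2, 0.1, 2.0, 2.1, 2.2, 1.9]).unwrap();
/// qsvm.fit(&data, &[-1.0, -1.0, 1.0, 1.0], 1.0).unwrap();
/// let label = qsvm.predict(&[0.15, 0.1]).unwrap();
/// ```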
pub struct QuantumSVM {
    /// Kernel used to compare samples.
    kernel: QuantumKernel,
    /// Indices of training samples with non-negligible dual coefficients.
    support_vectors: Vec<usize>,
    /// Dual coefficients (one per training sample).
    alphas: Vec<f64>,
    /// Bias term of the decision function.
    bias: f64,
    /// Training labels (expected in {-1, +1}).
    labels: Vec<f64>,
    /// Training data retained for prediction.
    training_data: Option<Array2<f64>>,
}

impl QuantumSVM {
    /// Create an untrained QSVM with the given kernel configuration.
    pub fn new(kernel_config: QuantumKernelConfig) -> Self {
        Self {
            kernel: QuantumKernel::new(kernel_config),
            support_vectors: Vec::new(),
            alphas: Vec::new(),
            bias: 0.0,
            labels: Vec::new(),
            training_data: None,
        }
    }

    /// Fit the QSVM on `data` with labels in {-1, +1} and box constraint `c`,
    /// using a simple projected gradient ascent on the dual problem.
    pub fn fit(&mut self, data: &Array2<f64>, labels: &[f64], c: f64) -> QuantRS2Result<()> {
        let n_samples = data.nrows();

        let kernel_matrix = self.kernel.kernel_matrix(data)?;

        self.alphas = vec![0.0; n_samples];
        self.labels = labels.to_vec();
        self.training_data = Some(data.clone());

        let learning_rate = 0.01;
        let max_iter = 100;

        for _ in 0..max_iter {
            for i in 0..n_samples {
                // Gradient of the dual objective: 1 - y_i * sum_j alpha_j y_j K_ij.
                let mut grad = 1.0;
                for j in 0..n_samples {
                    grad -= self.alphas[j] * labels[i] * labels[j] * kernel_matrix[[i, j]];
                }

                // Gradient step, projected onto the box [0, c].
                self.alphas[i] += learning_rate * grad;
                self.alphas[i] = self.alphas[i].clamp(0.0, c);
            }
        }

        // Samples with non-negligible alpha are the support vectors.
        let epsilon = 1e-6;
        self.support_vectors = (0..n_samples)
            .filter(|&i| self.alphas[i] > epsilon)
            .collect();

        // Estimate the bias from the first support vector.
        if !self.support_vectors.is_empty() {
            let sv = self.support_vectors[0];
            let mut b = labels[sv];
            for j in 0..n_samples {
                b -= self.alphas[j] * labels[j] * kernel_matrix[[sv, j]];
            }
            self.bias = b;
        }

        Ok(())
    }

    /// Predict the class label (+1.0 or -1.0) for a single sample.
    pub fn predict(&self, x: &[f64]) -> QuantRS2Result<f64> {
        let training_data = self
            .training_data
            .as_ref()
            .ok_or_else(|| QuantRS2Error::RuntimeError("Model not trained".to_string()))?;

        let mut decision = self.bias;

        for &i in &self.support_vectors {
            let x_i = training_data.row(i).to_vec();
            let k = self.kernel.kernel(&x_i, x)?;
            decision += self.alphas[i] * self.labels[i] * k;
        }

        Ok(if decision >= 0.0 { 1.0 } else { -1.0 })
    }

    /// Return a pseudo-probability for the positive class by passing the
    /// decision value through a sigmoid.
    pub fn predict_proba(&self, x: &[f64]) -> QuantRS2Result<f64> {
        let training_data = self
            .training_data
            .as_ref()
            .ok_or_else(|| QuantRS2Error::RuntimeError("Model not trained".to_string()))?;

        let mut decision = self.bias;
        for &i in &self.support_vectors {
            let x_i = training_data.row(i).to_vec();
            let k = self.kernel.kernel(&x_i, x)?;
            decision += self.alphas[i] * self.labels[i] * k;
        }

        Ok(1.0 / (1.0 + (-decision).exp()))
    }
}

/// Configuration for quantum transfer learning.
#[derive(Debug, Clone)]
pub struct TransferLearningConfig {
    /// Whether to freeze the pretrained parameters during fine-tuning.
    pub freeze_pretrained: bool,
    /// Number of fine-tuning epochs.
    pub fine_tune_epochs: usize,
    /// Learning rate used during fine-tuning.
    pub fine_tune_lr: f64,
    /// Layer index at which the pretrained circuit is split from the new head.
    pub split_layer: usize,
}

impl Default for TransferLearningConfig {
    fn default() -> Self {
        Self {
            freeze_pretrained: true,
            fine_tune_epochs: 50,
            fine_tune_lr: 0.01,
            split_layer: 2,
        }
    }
}

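/// Quantum transfer learning: reuse parameters from a pretrained circuit and
/// train only a small, newly added parameter head (three parameters per qubit).
///
/// A minimal usage sketch (illustrative parameter values):
///
/// ```ignore
/// let pretrained = vec![0.1, 0.2, 0.3, 0.4];
/// let mut model = QuantumTransferLearning::from_pretrained(
///     pretrained,
///     2,
///     TransferLearningConfig::default(),
/// );
/// model.update_parameters(&vec![0.5; model.num_trainable()]).unwrap();
/// ```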
pub struct QuantumTransferLearning {
    /// Parameters taken from the pretrained model.
    pretrained_params: Vec<f64>,
    /// Newly added parameters for the task-specific head.
    new_params: Vec<f64>,
    /// Transfer learning configuration.
    config: TransferLearningConfig,
    /// Number of qubits in the circuit.
    num_qubits: usize,
}

impl QuantumTransferLearning {
    /// Build a model from pretrained parameters, adding a new trainable head.
    pub fn from_pretrained(
        pretrained_params: Vec<f64>,
        num_qubits: usize,
        config: TransferLearningConfig,
    ) -> Self {
        // Three new rotation parameters per qubit for the task-specific head.
        let new_param_count = num_qubits * 3;
        let new_params = vec![0.0; new_param_count];

        Self {
            pretrained_params,
            new_params,
            config,
            num_qubits,
        }
    }

    /// All parameters (pretrained followed by new).
    pub fn parameters(&self) -> Vec<f64> {
        let mut params = self.pretrained_params.clone();
        params.extend_from_slice(&self.new_params);
        params
    }

    /// Slice of the trainable parameters.
    ///
    /// When the pretrained parameters are frozen, only the new head is
    /// trainable; when they are not, callers should combine this slice with
    /// `parameters()`, since the two parameter sets are stored separately.
    pub fn trainable_parameters(&self) -> &[f64] {
        if self.config.freeze_pretrained {
            &self.new_params
        } else {
            // Only the new parameters can be returned as a single slice here.
            &self.new_params
        }
    }

    /// Update the new (trainable) parameters in place.
    pub fn update_parameters(&mut self, new_values: &[f64]) -> QuantRS2Result<()> {
        if new_values.len() != self.new_params.len() {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Expected {} parameters, got {}",
                self.new_params.len(),
                new_values.len()
            )));
        }

        self.new_params.copy_from_slice(new_values);
        Ok(())
    }

    /// Number of trainable parameters, depending on whether the pretrained
    /// parameters are frozen.
    pub fn num_trainable(&self) -> usize {
        if self.config.freeze_pretrained {
            self.new_params.len()
        } else {
            self.pretrained_params.len() + self.new_params.len()
        }
    }
}

/// Strategy for combining predictions from ensemble members.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum VotingStrategy {
    /// Majority vote on the (binarized) predictions.
    Hard,
    /// Average of the raw predictions.
    Soft,
    /// Weighted average using the per-model weights.
    Weighted,
}

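/// Ensemble of quantum models, stored as parameter vectors and combined with a
/// `VotingStrategy`.
///
/// A minimal usage sketch (illustrative parameters and predictions):
///
/// ```ignore
/// let mut ensemble = QuantumEnsemble::new(2, VotingStrategy::Weighted);
/// ensemble.add_model(vec![0.1, 0.2], 3.0);
/// ensemble.add_model(vec![0.3, 0.4], 1.0);
/// let combined = ensemble.combine_predictions(&[1.0, 0.0]).unwrap(); // 0.75
/// ```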
pub struct QuantumEnsemble {
    /// Parameter vectors of the member models.
    models: Vec<Vec<f64>>,
    /// Voting weight of each member model.
    weights: Vec<f64>,
    /// Strategy used to combine predictions.
    voting: VotingStrategy,
    /// Number of qubits each member model acts on.
    num_qubits: usize,
}

impl QuantumEnsemble {
    /// Create an empty ensemble.
    pub fn new(num_qubits: usize, voting: VotingStrategy) -> Self {
        Self {
            models: Vec::new(),
            weights: Vec::new(),
            voting,
            num_qubits,
        }
    }

    /// Add a model (as a parameter vector) with the given voting weight.
    pub fn add_model(&mut self, params: Vec<f64>, weight: f64) {
        self.models.push(params);
        self.weights.push(weight);
    }

    /// Number of models in the ensemble.
    pub fn num_models(&self) -> usize {
        self.models.len()
    }

    /// Combine per-model predictions according to the voting strategy.
    pub fn combine_predictions(&self, predictions: &[f64]) -> QuantRS2Result<f64> {
        if predictions.len() != self.models.len() {
            return Err(QuantRS2Error::InvalidInput(
                "Predictions count doesn't match models".to_string(),
            ));
        }

        match self.voting {
            VotingStrategy::Hard => {
                // Majority vote: output 1.0 if the average prediction exceeds 0.5.
                let sum: f64 = predictions.iter().sum();
                Ok(if sum > 0.5 * predictions.len() as f64 {
                    1.0
                } else {
                    0.0
                })
            }
            VotingStrategy::Soft => {
                // Plain average of the raw predictions.
                let avg = predictions.iter().sum::<f64>() / predictions.len() as f64;
                Ok(avg)
            }
            VotingStrategy::Weighted => {
                // Weighted average of the predictions.
                let total_weight: f64 = self.weights.iter().sum();
                let weighted_sum: f64 = predictions
                    .iter()
                    .zip(self.weights.iter())
                    .map(|(p, w)| p * w)
                    .sum();
                Ok(weighted_sum / total_weight)
            }
        }
    }

    /// Draw a bootstrap sample (with replacement) of `sample_size` rows from `data`.
    pub fn bagging_sample(data: &Array2<f64>, sample_size: usize, seed: u64) -> Array2<f64> {
        use scirs2_core::random::prelude::*;
        let mut rng = seeded_rng(seed);

        let n_samples = data.nrows();
        let n_features = data.ncols();

        let mut sampled = Array2::zeros((sample_size, n_features));
        for i in 0..sample_size {
            let idx = rng.gen_range(0..n_samples);
            sampled.row_mut(i).assign(&data.row(idx));
        }

        sampled
    }
}

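/// Classification metrics for evaluating QML models, using 0.5 as the positive
/// threshold for precision, recall, and F1.
///
/// A minimal usage sketch (illustrative predictions and labels):
///
/// ```ignore
/// let acc = QMLMetrics::accuracy(&[1.0, 0.0, 1.0], &[1.0, 0.0, 0.0]);
/// assert!((acc - 2.0 / 3.0).abs() < 1e-10);
/// ```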
pub struct QMLMetrics;

impl QMLMetrics {
    /// Fraction of predictions within 0.5 of their labels.
    pub fn accuracy(predictions: &[f64], labels: &[f64]) -> f64 {
        if predictions.len() != labels.len() {
            return 0.0;
        }

        let correct: usize = predictions
            .iter()
            .zip(labels.iter())
            .filter(|(&p, &l)| (p - l).abs() < 0.5)
            .count();

        correct as f64 / predictions.len() as f64
    }

    /// Precision: TP / (TP + FP), or 0.0 when there are no positive predictions.
    pub fn precision(predictions: &[f64], labels: &[f64]) -> f64 {
        let (tp, fp, _, _) = Self::confusion_counts(predictions, labels);
        if tp + fp == 0 {
            0.0
        } else {
            tp as f64 / (tp + fp) as f64
        }
    }

    /// Recall: TP / (TP + FN), or 0.0 when there are no positive labels.
    pub fn recall(predictions: &[f64], labels: &[f64]) -> f64 {
        let (tp, _, _, fn_) = Self::confusion_counts(predictions, labels);
        if tp + fn_ == 0 {
            0.0
        } else {
            tp as f64 / (tp + fn_) as f64
        }
    }

    /// F1 score: harmonic mean of precision and recall.
    pub fn f1_score(predictions: &[f64], labels: &[f64]) -> f64 {
        let precision = Self::precision(predictions, labels);
        let recall = Self::recall(predictions, labels);

        if precision + recall == 0.0 {
            0.0
        } else {
            2.0 * precision * recall / (precision + recall)
        }
    }

    /// Confusion-matrix counts (TP, FP, TN, FN) using 0.5 as the positive threshold.
    fn confusion_counts(predictions: &[f64], labels: &[f64]) -> (usize, usize, usize, usize) {
        let mut tp = 0;
        let mut fp = 0;
        let mut tn = 0;
        let mut fn_ = 0;

        for (&p, &l) in predictions.iter().zip(labels.iter()) {
            let pred_pos = p >= 0.5;
            let label_pos = l >= 0.5;

            match (pred_pos, label_pos) {
                (true, true) => tp += 1,
                (true, false) => fp += 1,
                (false, true) => fn_ += 1,
                (false, false) => tn += 1,
            }
        }

        (tp, fp, tn, fn_)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_quantum_kernel_config_default() {
        let config = QuantumKernelConfig::default();
        assert_eq!(config.num_qubits, 4);
        assert_eq!(config.reps, 2);
    }

    #[test]
    fn test_quantum_kernel_creation() {
        let config = QuantumKernelConfig {
            num_qubits: 2,
            ..Default::default()
        };
        let kernel = QuantumKernel::new(config);
        assert!(kernel.kernel_cache.is_none());
    }

    #[test]
    fn test_quantum_kernel_value() {
        let config = QuantumKernelConfig {
            num_qubits: 2,
            reps: 1,
            ..Default::default()
        };
        let kernel = QuantumKernel::new(config);

        let x1 = vec![0.5, 0.3];
        let x2 = vec![0.5, 0.3];

        let k = kernel.kernel(&x1, &x2).unwrap();
        assert!(k >= 0.0, "Kernel value should be non-negative");
        assert!(k <= 1.0 + 1e-10, "Kernel value should not exceed 1");
        assert!((k - 1.0).abs() < 1e-10, "Identical inputs should give k ~ 1");
    }

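    // Additional sanity checks (a minimal sketch): for normalized encoded states
    // the fidelity kernel should be symmetric, bounded by 1, and ~1 on identical
    // inputs, and the Gram matrix should be symmetric with a unit diagonal.
    #[test]
    fn test_quantum_kernel_symmetry_and_self_similarity() {
        let config = QuantumKernelConfig {
            num_qubits: 2,
            reps: 1,
            ..Default::default()
        };
        let kernel = QuantumKernel::new(config);

        let x1 = vec![0.1, 0.7];
        let x2 = vec![0.4, 0.2];

        let k12 = kernel.kernel(&x1, &x2).unwrap();
        let k21 = kernel.kernel(&x2, &x1).unwrap();
        assert!((k12 - k21).abs() < 1e-10);

        let k11 = kernel.kernel(&x1, &x1).unwrap();
        assert!((k11 - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_kernel_matrix_properties() {
        let config = QuantumKernelConfig {
            num_qubits: 2,
            reps: 1,
            ..Default::default()
        };
        let mut kernel = QuantumKernel::new(config);

        let data = Array2::from_shape_vec((3, 2), vec![0.1, 0.2, 0.3, 0.4, 0.5, 0.6]).unwrap();
        let k = kernel.kernel_matrix(&data).unwrap();

        assert_eq!(k.dim(), (3, 3));
        for i in 0..3 {
            assert!((k[[i, i]] - 1.0).abs() < 1e-10);
            for j in 0..3 {
                assert!((k[[i, j]] - k[[j, i]]).abs() < 1e-10);
            }
        }
        assert!(kernel.kernel_cache.is_some());
    }
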
    #[test]
    fn test_quantum_svm_creation() {
        let config = QuantumKernelConfig {
            num_qubits: 2,
            ..Default::default()
        };
        let qsvm = QuantumSVM::new(config);
        assert!(qsvm.support_vectors.is_empty());
    }

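    // A minimal end-to-end check of the simplified dual solver: it should find
    // at least one support vector and produce labels in {-1, +1}.
    #[test]
    fn test_quantum_svm_fit_predict() {
        let config = QuantumKernelConfig {
            num_qubits: 2,
            reps: 1,
            ..Default::default()
        };
        let mut qsvm = QuantumSVM::new(config);

        let data = Array2::from_shape_vec(
            (4, 2),
            vec![0.1, 0.1, 0.2, 0.1, 2.0, 2.1, 2.2, 1.9],
        )
        .unwrap();
        let labels = vec![-1.0, -1.0, 1.0, 1.0];

        qsvm.fit(&data, &labels, 1.0).unwrap();
        assert!(!qsvm.support_vectors.is_empty());

        let pred = qsvm.predict(&[0.15, 0.1]).unwrap();
        assert!(pred == 1.0 || pred == -1.0);
    }
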
    #[test]
    fn test_transfer_learning_creation() {
        let pretrained = vec![0.1, 0.2, 0.3, 0.4];
        let config = TransferLearningConfig::default();
        let model = QuantumTransferLearning::from_pretrained(pretrained.clone(), 2, config);

        assert_eq!(model.pretrained_params.len(), 4);
        assert_eq!(model.new_params.len(), 6); // 3 new parameters per qubit
    }

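    // A minimal check of parameter handling with the default (frozen) config:
    // only the new head is trainable, and size mismatches are rejected.
    #[test]
    fn test_transfer_learning_update_parameters() {
        let pretrained = vec![0.1, 0.2, 0.3, 0.4];
        let config = TransferLearningConfig::default();
        let mut model = QuantumTransferLearning::from_pretrained(pretrained, 2, config);

        assert_eq!(model.num_trainable(), 6);
        assert_eq!(model.parameters().len(), 10);

        let updated = vec![0.5; 6];
        model.update_parameters(&updated).unwrap();
        assert_eq!(model.trainable_parameters(), updated.as_slice());

        assert!(model.update_parameters(&[0.0; 3]).is_err());
    }
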
    #[test]
    fn test_ensemble_creation() {
        let mut ensemble = QuantumEnsemble::new(2, VotingStrategy::Soft);
        ensemble.add_model(vec![0.1, 0.2], 1.0);
        ensemble.add_model(vec![0.3, 0.4], 1.0);

        assert_eq!(ensemble.num_models(), 2);
    }

    #[test]
    fn test_ensemble_voting() {
        let mut ensemble = QuantumEnsemble::new(2, VotingStrategy::Hard);
        ensemble.add_model(vec![0.1], 1.0);
        ensemble.add_model(vec![0.2], 1.0);
        ensemble.add_model(vec![0.3], 1.0);

        // Average prediction below 0.5 -> hard vote of 0.0.
        let predictions = vec![0.2, 0.3, 0.4];
        let result = ensemble.combine_predictions(&predictions).unwrap();
        assert_eq!(result, 0.0);

        // Average prediction above 0.5 -> hard vote of 1.0.
        let predictions = vec![0.6, 0.7, 0.8];
        let result = ensemble.combine_predictions(&predictions).unwrap();
        assert_eq!(result, 1.0);
    }

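    // A minimal check of weighted voting: predictions 1.0 and 0.0 with weights
    // 3.0 and 1.0 should average to 0.75.
    #[test]
    fn test_ensemble_weighted_voting() {
        let mut ensemble = QuantumEnsemble::new(2, VotingStrategy::Weighted);
        ensemble.add_model(vec![0.1], 3.0);
        ensemble.add_model(vec![0.2], 1.0);

        let result = ensemble.combine_predictions(&[1.0, 0.0]).unwrap();
        assert!((result - 0.75).abs() < 1e-10);
    }
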
    #[test]
    fn test_metrics_accuracy() {
        let predictions = vec![1.0, 0.0, 1.0, 1.0];
        let labels = vec![1.0, 0.0, 0.0, 1.0];

        let acc = QMLMetrics::accuracy(&predictions, &labels);
        assert_eq!(acc, 0.75);
    }

    #[test]
    fn test_metrics_precision_recall() {
        let predictions = vec![1.0, 1.0, 0.0, 0.0];
        let labels = vec![1.0, 0.0, 0.0, 1.0];

        let precision = QMLMetrics::precision(&predictions, &labels);
        let recall = QMLMetrics::recall(&predictions, &labels);

        // TP = 1, FP = 1, FN = 1.
        assert_eq!(precision, 0.5);
        assert_eq!(recall, 0.5);
    }

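    // A minimal check of the F1 score for the same confusion matrix as above
    // (precision = recall = 0.5 implies F1 = 0.5).
    #[test]
    fn test_metrics_f1_score() {
        let predictions = vec![1.0, 1.0, 0.0, 0.0];
        let labels = vec![1.0, 0.0, 0.0, 1.0];

        let f1 = QMLMetrics::f1_score(&predictions, &labels);
        assert!((f1 - 0.5).abs() < 1e-10);
    }
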
    #[test]
    fn test_bagging_sample() {
        let data = Array2::from_shape_vec((10, 3), (0..30).map(|x| x as f64).collect()).unwrap();
        let sample = QuantumEnsemble::bagging_sample(&data, 5, 42);
        assert_eq!(sample.nrows(), 5);
        assert_eq!(sample.ncols(), 3);
    }
}