use crate::error::QuantRS2Result;
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::Complex64 as Complex;

/// Hybrid variational optimizer: a classical optimizer drives the
/// parameters of a quantum circuit evaluated by `CircuitEvaluator`.
pub struct VariationalQCOptimizer {
    /// Classical parameter-update rule (gradient descent, Adam, ...).
    optimizer: Box<dyn ClassicalOptimizer>,
    /// Evaluates the cost function of the parameterized circuit.
    circuit_evaluator: CircuitEvaluator,
    /// Optimization settings.
    config: VQCConfig,
}

#[derive(Debug, Clone)]
pub struct VQCConfig {
    /// Maximum number of optimization iterations.
    pub max_iterations: usize,
    /// Convergence threshold on the change in cost between iterations.
    pub tolerance: f64,
    /// Step size for the classical optimizer.
    pub learning_rate: f64,
    /// Number of measurement shots per cost evaluation.
    pub shots_per_evaluation: usize,
    /// Use the parameter-shift rule for gradients; otherwise finite differences.
    pub use_parameter_shift: bool,
}

impl Default for VQCConfig {
    fn default() -> Self {
        Self {
            max_iterations: 1000,
            tolerance: 1e-6,
            learning_rate: 0.01,
            shots_per_evaluation: 1000,
            use_parameter_shift: true,
        }
    }
}

/// Interface for the classical half of the hybrid loop.
pub trait ClassicalOptimizer {
    /// Apply one update step and return the new parameter vector.
    fn step(&mut self, params: &[f64], gradient: &[f64]) -> Vec<f64>;

    /// Current parameter vector.
    fn get_params(&self) -> &[f64];
}

pub struct GradientDescentOptimizer {
    params: Vec<f64>,
    learning_rate: f64,
}

impl GradientDescentOptimizer {
    pub const fn new(initial_params: Vec<f64>, learning_rate: f64) -> Self {
        Self {
            params: initial_params,
            learning_rate,
        }
    }
}

impl ClassicalOptimizer for GradientDescentOptimizer {
    fn step(&mut self, _params: &[f64], gradient: &[f64]) -> Vec<f64> {
        // Plain gradient descent: θ ← θ − η ∇f(θ).
        for (param, &grad) in self.params.iter_mut().zip(gradient.iter()) {
            *param -= self.learning_rate * grad;
        }
        self.params.clone()
    }

    fn get_params(&self) -> &[f64] {
        &self.params
    }
}

pub struct AdamOptimizer {
    params: Vec<f64>,
    learning_rate: f64,
    beta1: f64,
    beta2: f64,
    epsilon: f64,
    /// First-moment (mean) estimate of the gradient.
    m: Vec<f64>,
    /// Second-moment (uncentered variance) estimate of the gradient.
    v: Vec<f64>,
    /// Timestep counter for bias correction.
    t: usize,
}

impl AdamOptimizer {
    pub fn new(initial_params: Vec<f64>, learning_rate: f64) -> Self {
        let n = initial_params.len();
        Self {
            params: initial_params,
            learning_rate,
            beta1: 0.9,
            beta2: 0.999,
            epsilon: 1e-8,
            m: vec![0.0; n],
            v: vec![0.0; n],
            t: 0,
        }
    }
}

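// Adam keeps exponential moving averages of the gradient (m) and its square
// (v), applies bias correction, and scales each step per-parameter:
//   m_t = β1·m_{t-1} + (1 − β1)·g_t
//   v_t = β2·v_{t-1} + (1 − β2)·g_t²
//   θ_t = θ_{t-1} − η·m̂_t / (√v̂_t + ε)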
impl ClassicalOptimizer for AdamOptimizer {
    fn step(&mut self, _params: &[f64], gradient: &[f64]) -> Vec<f64> {
        self.t += 1;

        for i in 0..self.params.len() {
            // Update the biased moment estimates.
            self.m[i] = self
                .beta1
                .mul_add(self.m[i], (1.0 - self.beta1) * gradient[i]);
            self.v[i] = self
                .beta2
                .mul_add(self.v[i], (1.0 - self.beta2) * gradient[i].powi(2));

            // Bias-corrected estimates.
            let m_hat = self.m[i] / (1.0 - self.beta1.powi(self.t as i32));
            let v_hat = self.v[i] / (1.0 - self.beta2.powi(self.t as i32));

            self.params[i] -= self.learning_rate * m_hat / (v_hat.sqrt() + self.epsilon);
        }

        self.params.clone()
    }

    fn get_params(&self) -> &[f64] {
        &self.params
    }
}

/// Evaluates the cost function of a parameterized quantum circuit.
pub struct CircuitEvaluator {
    num_qubits: usize,
    circuit_structure: Vec<LayerSpec>,
}

/// Description of a single gate layer in the circuit.
#[derive(Debug, Clone)]
pub struct LayerSpec {
    pub gate_type: String,
    pub qubits: Vec<usize>,
    pub is_parameterized: bool,
}

impl CircuitEvaluator {
    pub const fn new(num_qubits: usize, circuit_structure: Vec<LayerSpec>) -> Self {
        Self {
            num_qubits,
            circuit_structure,
        }
    }

    /// Placeholder cost function: the mean of cos(θ_i) over all parameters.
    /// A full implementation would simulate `circuit_structure` on
    /// `num_qubits` qubits and estimate an observable from measurement shots.
    pub fn evaluate(&self, params: &[f64]) -> QuantRS2Result<f64> {
        Ok(params.iter().map(|x| x.cos()).sum::<f64>() / params.len() as f64)
    }

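    /// Gradient via the parameter-shift rule. For gates generated by Pauli
    /// operators the derivative is exact:
    ///   ∂f/∂θ_i = [f(θ_i + π/2) − f(θ_i − π/2)] / 2.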
    pub fn compute_gradient(&self, params: &[f64]) -> QuantRS2Result<Vec<f64>> {
        let shift = std::f64::consts::PI / 2.0;
        let mut gradient = Vec::new();

        for i in 0..params.len() {
            let mut params_plus = params.to_vec();
            let mut params_minus = params.to_vec();

            params_plus[i] += shift;
            params_minus[i] -= shift;

            let value_plus = self.evaluate(&params_plus)?;
            let value_minus = self.evaluate(&params_minus)?;

            gradient.push((value_plus - value_minus) / 2.0);
        }

        Ok(gradient)
    }

    /// Central finite-difference gradient with step size `eps`:
    ///   ∂f/∂θ_i ≈ [f(θ_i + ε) − f(θ_i − ε)] / (2ε).
    pub fn compute_gradient_finite_diff(
        &self,
        params: &[f64],
        eps: f64,
    ) -> QuantRS2Result<Vec<f64>> {
        let mut gradient = Vec::new();

        for i in 0..params.len() {
            let mut params_plus = params.to_vec();
            let mut params_minus = params.to_vec();

            params_plus[i] += eps;
            params_minus[i] -= eps;

            let value_plus = self.evaluate(&params_plus)?;
            let value_minus = self.evaluate(&params_minus)?;

            gradient.push((value_plus - value_minus) / (2.0 * eps));
        }

        Ok(gradient)
    }
}

impl VariationalQCOptimizer {
    pub fn new(
        optimizer: Box<dyn ClassicalOptimizer>,
        circuit_evaluator: CircuitEvaluator,
        config: VQCConfig,
    ) -> Self {
        Self {
            optimizer,
            circuit_evaluator,
            config,
        }
    }

    /// Run the hybrid optimization loop until convergence or the iteration cap.
    pub fn optimize(&mut self) -> QuantRS2Result<OptimizationResult> {
        let mut history = Vec::new();
        let mut best_value = f64::INFINITY;
        let mut best_params = self.optimizer.get_params().to_vec();

        for iteration in 0..self.config.max_iterations {
            let params = self.optimizer.get_params().to_vec();

            let cost = self.circuit_evaluator.evaluate(&params)?;
            history.push(cost);

            // `cost` was evaluated at `params`, so record those as the best
            // parameters, not the post-step ones.
            if cost < best_value {
                best_value = cost;
                best_params.clone_from(&params);
            }

            let gradient = if self.config.use_parameter_shift {
                self.circuit_evaluator.compute_gradient(&params)?
            } else {
                self.circuit_evaluator
                    .compute_gradient_finite_diff(&params, 1e-5)?
            };

            self.optimizer.step(&params, &gradient);

            // Stop once successive costs change by less than the tolerance.
            if iteration > 0
                && (history[iteration] - history[iteration - 1]).abs() < self.config.tolerance
            {
                break;
            }
        }

        let iterations = history.len();
        Ok(OptimizationResult {
            best_params,
            best_value,
            history,
            iterations,
        })
    }
}

/// Outcome of a variational optimization run.
#[derive(Debug, Clone)]
pub struct OptimizationResult {
    pub best_params: Vec<f64>,
    pub best_value: f64,
    pub history: Vec<f64>,
    pub iterations: usize,
}

/// Hybrid network: classical layers -> quantum layer -> classical layers.
pub struct QuantumClassicalNN {
    classical_pre: Vec<ClassicalLayer>,
    quantum_layer: QuantumLayer,
    classical_post: Vec<ClassicalLayer>,
}

/// Dense (fully connected) classical layer.
pub struct ClassicalLayer {
    weights: Array2<f64>,
    biases: Array1<f64>,
    activation: ActivationFunction,
}

#[derive(Debug, Clone, Copy)]
pub enum ActivationFunction {
    ReLU,
    Sigmoid,
    Tanh,
    Linear,
}

impl ClassicalLayer {
    pub fn new(input_dim: usize, output_dim: usize, activation: ActivationFunction) -> Self {
        Self {
            // Zero initialization is a placeholder; a trainable network
            // would use random (e.g. Xavier/He) initialization.
            weights: Array2::zeros((output_dim, input_dim)),
            biases: Array1::zeros(output_dim),
            activation,
        }
    }

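    /// Affine map followed by an elementwise activation: y = σ(W·x + b).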
    pub fn forward(&self, input: &Array1<f64>) -> Array1<f64> {
        let mut output = self.weights.dot(input) + &self.biases;

        match self.activation {
            ActivationFunction::ReLU => {
                output.mapv_inplace(|x| x.max(0.0));
            }
            ActivationFunction::Sigmoid => {
                output.mapv_inplace(|x| 1.0 / (1.0 + (-x).exp()));
            }
            ActivationFunction::Tanh => {
                output.mapv_inplace(|x| x.tanh());
            }
            ActivationFunction::Linear => {}
        }

        output
    }
}

/// Parameterized quantum layer embedded in the classical network.
pub struct QuantumLayer {
    num_qubits: usize,
    circuit: Vec<LayerSpec>,
    params: Vec<f64>,
}

impl QuantumLayer {
    pub fn new(num_qubits: usize, circuit: Vec<LayerSpec>) -> Self {
        // One trainable parameter per parameterized layer.
        let num_params = circuit.iter().filter(|l| l.is_parameterized).count();
        Self {
            num_qubits,
            circuit,
            params: vec![0.0; num_params],
        }
    }

    /// Forward pass: encode the classical input as a quantum state, apply
    /// the circuit, then measure back to a classical vector.
    pub fn forward(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let quantum_state = self.encode_input(input)?;
        let output_state = self.apply_circuit(&quantum_state)?;
        let classical_output = self.decode_output(&output_state)?;

        Ok(classical_output)
    }

    fn encode_input(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
        let dim = 2_usize.pow(self.num_qubits as u32);
        let mut state = Array1::zeros(dim);

        // Amplitude encoding; an all-zero input falls back to |0...0>
        // instead of dividing by zero.
        let norm = input.iter().map(|x| x.powi(2)).sum::<f64>().sqrt();
        if norm == 0.0 {
            state[0] = Complex::new(1.0, 0.0);
            return Ok(state);
        }
        for i in 0..input.len().min(dim) {
            state[i] = Complex::new(input[i] / norm, 0.0);
        }

        Ok(state)
    }

    fn apply_circuit(&self, state: &Array1<Complex>) -> QuantRS2Result<Array1<Complex>> {
        // Identity placeholder: a full implementation would apply the gates
        // in `self.circuit` with the angles in `self.params`.
        Ok(state.clone())
    }

    fn decode_output(&self, state: &Array1<Complex>) -> QuantRS2Result<Array1<f64>> {
        let output_dim = self.num_qubits;
        let mut output = Array1::zeros(output_dim);

        // Simplified readout: output i is the total probability of the first
        // 2^i basis states, a placeholder for per-qubit expectation values.
        for i in 0..output_dim {
            output[i] = state
                .iter()
                .take(2_usize.pow(i as u32))
                .map(|x| x.norm_sqr())
                .sum();
        }

        Ok(output)
    }
}

impl QuantumClassicalNN {
    pub const fn new(
        classical_pre: Vec<ClassicalLayer>,
        quantum_layer: QuantumLayer,
        classical_post: Vec<ClassicalLayer>,
    ) -> Self {
        Self {
            classical_pre,
            quantum_layer,
            classical_post,
        }
    }

    pub fn forward(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
        let mut current = input.clone();

        for layer in &self.classical_pre {
            current = layer.forward(&current);
        }

        current = self.quantum_layer.forward(&current)?;

        for layer in &self.classical_post {
            current = layer.forward(&current);
        }

        Ok(current)
    }

    /// Forward-only training loop: tracks the mean squared error per epoch.
    /// NOTE: the backward pass (parameter updates) is omitted, so the
    /// learning rate is currently unused; see the finite-difference sketch
    /// below for one way to fill it in.
    pub fn train(
        &mut self,
        training_data: &[(Array1<f64>, Array1<f64>)],
        epochs: usize,
        _learning_rate: f64,
    ) -> QuantRS2Result<Vec<f64>> {
        let mut loss_history = Vec::new();

        for _epoch in 0..epochs {
            let mut total_loss = 0.0;

            for (input, target) in training_data {
                let output = self.forward(input)?;

                // Squared-error loss for this sample.
                let loss: f64 = output
                    .iter()
                    .zip(target.iter())
                    .map(|(o, t)| (o - t).powi(2))
                    .sum();
                total_loss += loss;
            }

            let avg_loss = total_loss / training_data.len() as f64;
            loss_history.push(avg_loss);
        }

        Ok(loss_history)
    }
}
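
// A minimal sketch (not part of the original API) of how the omitted backward
// pass could be filled in: estimate the gradient of the per-sample squared
// error with respect to the quantum layer's parameters by central finite
// differences, then take one gradient-descent step. The helper names and the
// `eps`/`lr` step sizes are hypothetical choices for illustration.
#[allow(dead_code)]
impl QuantumClassicalNN {
    /// Squared-error loss for a single (input, target) pair.
    fn sample_loss(&self, input: &Array1<f64>, target: &Array1<f64>) -> QuantRS2Result<f64> {
        let output = self.forward(input)?;
        Ok(output
            .iter()
            .zip(target.iter())
            .map(|(o, t)| (o - t).powi(2))
            .sum())
    }

    /// One finite-difference gradient step on the quantum layer's parameters.
    fn quantum_step_finite_diff(
        &mut self,
        input: &Array1<f64>,
        target: &Array1<f64>,
        eps: f64,
        lr: f64,
    ) -> QuantRS2Result<()> {
        for i in 0..self.quantum_layer.params.len() {
            let original = self.quantum_layer.params[i];

            // Central difference: ∂L/∂θ_i ≈ [L(θ_i + ε) − L(θ_i − ε)] / (2ε).
            self.quantum_layer.params[i] = original + eps;
            let loss_plus = self.sample_loss(input, target)?;
            self.quantum_layer.params[i] = original - eps;
            let loss_minus = self.sample_loss(input, target)?;

            let grad = (loss_plus - loss_minus) / (2.0 * eps);
            self.quantum_layer.params[i] = original - lr * grad;
        }
        Ok(())
    }
}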

/// Encodes classical data into quantum states for kernel methods.
pub struct QuantumFeatureMap {
    num_qubits: usize,
    feature_map_type: FeatureMapType,
}

#[derive(Debug, Clone, Copy)]
pub enum FeatureMapType {
    /// Data stored in the state's amplitudes.
    Amplitude,
    /// Data stored as rotation angles, one feature per qubit.
    Angle,
    /// Binary features mapped to a computational basis state.
    Basis,
    /// Instantaneous quantum polynomial (IQP) style encoding.
    IQP,
    /// Encoding via Pauli rotations.
    Pauli,
}

impl QuantumFeatureMap {
    pub const fn new(num_qubits: usize, feature_map_type: FeatureMapType) -> Self {
        Self {
            num_qubits,
            feature_map_type,
        }
    }

    pub fn encode(&self, data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
        match self.feature_map_type {
            FeatureMapType::Amplitude => self.amplitude_encoding(data),
            FeatureMapType::Angle => self.angle_encoding(data),
            FeatureMapType::Basis => self.basis_encoding(data),
            FeatureMapType::IQP => self.iqp_encoding(data),
            FeatureMapType::Pauli => self.pauli_encoding(data),
        }
    }

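    /// Amplitude encoding: |ψ⟩ = Σ_i (x_i / ‖x‖) |i⟩, so an n-qubit register
    /// holds up to 2^n features.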
    fn amplitude_encoding(&self, data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
        let dim = 2_usize.pow(self.num_qubits as u32);
        let mut state = Array1::zeros(dim);

        // An all-zero input falls back to |0...0> instead of dividing by zero.
        let norm = data.iter().map(|x| x.powi(2)).sum::<f64>().sqrt();
        if norm == 0.0 {
            state[0] = Complex::new(1.0, 0.0);
            return Ok(state);
        }
        for i in 0..data.len().min(dim) {
            state[i] = Complex::new(data[i] / norm, 0.0);
        }

        Ok(state)
    }

    fn angle_encoding(&self, _data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
        // Placeholder: returns |0...0> without applying the data-dependent
        // rotations (e.g. RY(x_i) on qubit i) that angle encoding prescribes.
        let dim = 2_usize.pow(self.num_qubits as u32);
        let mut state = Array1::zeros(dim);
        state[0] = Complex::new(1.0, 0.0);

        Ok(state)
    }

    fn basis_encoding(&self, data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
        let dim = 2_usize.pow(self.num_qubits as u32);
        let mut state = Array1::zeros(dim);

        // Binarize each feature at 0.5 and set the matching bit of the
        // basis-state index.
        let mut index = 0usize;
        for (i, &val) in data.iter().enumerate().take(self.num_qubits) {
            if val > 0.5 {
                index |= 1 << i;
            }
        }

        state[index] = Complex::new(1.0, 0.0);
        Ok(state)
    }

    fn iqp_encoding(&self, _data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
        // Placeholder: the uniform superposition produced by the initial
        // Hadamard layer; the data-dependent diagonal phase layer of an IQP
        // circuit is omitted.
        let dim = 2_usize.pow(self.num_qubits as u32);
        let hadamard_coeff = 1.0 / (dim as f64).sqrt();
        let state = Array1::from_elem(dim, Complex::new(hadamard_coeff, 0.0));

        Ok(state)
    }

    fn pauli_encoding(&self, _data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
        // Placeholder: returns |0...0>; the data-dependent Pauli rotations
        // are not implemented.
        let dim = 2_usize.pow(self.num_qubits as u32);
        let mut state = Array1::zeros(dim);
        state[0] = Complex::new(1.0, 0.0);

        Ok(state)
    }

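    /// Quantum kernel: K(x, y) = |⟨ψ(x)|ψ(y)⟩|², the squared overlap
    /// (fidelity) between the two encoded states.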
    pub fn kernel(&self, data1: &Array1<f64>, data2: &Array1<f64>) -> QuantRS2Result<f64> {
        let state1 = self.encode(data1)?;
        let state2 = self.encode(data2)?;

        let overlap: Complex = state1
            .iter()
            .zip(state2.iter())
            .map(|(a, b)| a.conj() * b)
            .sum();

        Ok(overlap.norm_sqr())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_gradient_descent_optimizer() {
        let initial_params = vec![1.0, 2.0, 3.0];
        let mut optimizer = GradientDescentOptimizer::new(initial_params.clone(), 0.1);

        let gradient = vec![1.0, 1.0, 1.0];
        let new_params = optimizer.step(&initial_params, &gradient);

        // Approximate comparisons: exact float equality is fragile.
        assert!((new_params[0] - 0.9).abs() < 1e-12);
        assert!((new_params[1] - 1.9).abs() < 1e-12);
        assert!((new_params[2] - 2.9).abs() < 1e-12);
    }

    #[test]
    fn test_adam_optimizer() {
        let initial_params = vec![1.0, 2.0, 3.0];
        let mut optimizer = AdamOptimizer::new(initial_params, 0.01);

        let gradient = vec![1.0, 1.0, 1.0];
        let new_params = optimizer.step(&[], &gradient);

        assert!(new_params[0] < 1.0);
    }
680
681 #[test]
682 fn test_quantum_feature_map_amplitude() {
683 let feature_map = QuantumFeatureMap::new(2, FeatureMapType::Amplitude);
684 let data = Array1::from_vec(vec![1.0, 0.0, 0.0, 0.0]);
685
686 let state = feature_map
687 .encode(&data)
688 .expect("Failed to encode data in quantum feature map");
689
690 assert!((state[0].norm() - 1.0).abs() < 1e-10);
692 }

    #[test]
    fn test_quantum_kernel() {
        let feature_map = QuantumFeatureMap::new(2, FeatureMapType::Amplitude);
        let data1 = Array1::from_vec(vec![1.0, 0.0, 0.0, 0.0]);
        let data2 = Array1::from_vec(vec![1.0, 0.0, 0.0, 0.0]);

        let kernel_value = feature_map
            .kernel(&data1, &data2)
            .expect("Failed to compute quantum kernel");

        assert!((kernel_value - 1.0).abs() < 1e-10);
    }
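
    // A sketch test (added for illustration) of the basis encoding: features
    // are binarized at 0.5, so [1.0, 0.0] sets bit 0 only, i.e. index 1.
    #[test]
    fn test_basis_encoding() {
        let feature_map = QuantumFeatureMap::new(2, FeatureMapType::Basis);
        let data = Array1::from_vec(vec![1.0, 0.0]);

        let state = feature_map
            .encode(&data)
            .expect("Failed to encode data in quantum feature map");

        assert!((state[1].norm() - 1.0).abs() < 1e-10);
        assert!(state[0].norm() < 1e-10);
    }

    // A sketch test (added for illustration) running the full variational
    // loop against the placeholder cos-based cost; it checks only that the
    // loop runs and that the best value never exceeds the initial cost.
    #[test]
    fn test_vqc_optimizer_runs() {
        let optimizer = Box::new(GradientDescentOptimizer::new(vec![0.5, 1.0], 0.1));
        let evaluator = CircuitEvaluator::new(2, vec![]);
        let mut vqc = VariationalQCOptimizer::new(optimizer, evaluator, VQCConfig::default());

        let result = vqc.optimize().expect("optimization should run");
        assert!(!result.history.is_empty());
        assert!(result.best_value <= result.history[0]);
    }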
}