1use crate::{
16 error::{QuantRS2Error, QuantRS2Result},
17 gate::GateOp,
18};
19use scirs2_core::ndarray::{Array1, Array2};
20use scirs2_core::Complex64 as Complex;
21use std::collections::HashMap;
22
/// Hybrid variational optimizer: pairs a classical parameter-update strategy
/// with a quantum circuit cost evaluator.
pub struct VariationalQCOptimizer {
    /// Classical optimizer that owns and updates the parameter vector.
    optimizer: Box<dyn ClassicalOptimizer>,
    /// Evaluates circuit cost and gradients for a parameter vector.
    circuit_evaluator: CircuitEvaluator,
    /// Loop settings: iteration cap, tolerance, gradient method, shot count.
    config: VQCConfig,
}
36
/// Configuration for the variational optimization loop.
#[derive(Debug, Clone)]
pub struct VQCConfig {
    /// Maximum number of optimization iterations.
    pub max_iterations: usize,
    /// Convergence threshold on the change between consecutive cost values.
    pub tolerance: f64,
    /// Step size handed to the classical optimizer.
    pub learning_rate: f64,
    /// Number of measurement shots per cost evaluation.
    pub shots_per_evaluation: usize,
    /// Use the parameter-shift rule (`true`) or finite differences (`false`).
    pub use_parameter_shift: bool,
}

impl Default for VQCConfig {
    /// Sensible defaults: 1000 iterations, 1e-6 tolerance, lr 0.01,
    /// 1000 shots, parameter-shift gradients.
    fn default() -> Self {
        VQCConfig {
            max_iterations: 1_000,
            tolerance: 1e-6,
            learning_rate: 1e-2,
            shots_per_evaluation: 1_000,
            use_parameter_shift: true,
        }
    }
}
63
/// Interface for classical parameter-update strategies used in the hybrid loop.
pub trait ClassicalOptimizer {
    /// Applies one update using `gradient` and returns the new parameter vector.
    fn step(&mut self, params: &[f64], gradient: &[f64]) -> Vec<f64>;

    /// Returns the optimizer's current parameter vector.
    fn get_params(&self) -> &[f64];
}

/// Plain gradient descent: `p <- p - lr * g`.
pub struct GradientDescentOptimizer {
    params: Vec<f64>,
    learning_rate: f64,
}

impl GradientDescentOptimizer {
    /// Creates an optimizer starting at `initial_params` with step size `learning_rate`.
    pub fn new(initial_params: Vec<f64>, learning_rate: f64) -> Self {
        GradientDescentOptimizer {
            params: initial_params,
            learning_rate,
        }
    }
}

impl ClassicalOptimizer for GradientDescentOptimizer {
    /// Updates the internally stored parameters; the `_params` argument is ignored.
    fn step(&mut self, _params: &[f64], gradient: &[f64]) -> Vec<f64> {
        let lr = self.learning_rate;
        self.params
            .iter_mut()
            .zip(gradient.iter())
            .for_each(|(p, &g)| *p -= lr * g);
        self.params.clone()
    }

    fn get_params(&self) -> &[f64] {
        self.params.as_slice()
    }
}
101
102pub struct AdamOptimizer {
104 params: Vec<f64>,
105 learning_rate: f64,
106 beta1: f64,
107 beta2: f64,
108 epsilon: f64,
109 m: Vec<f64>, v: Vec<f64>, t: usize, }
113
114impl AdamOptimizer {
115 pub fn new(initial_params: Vec<f64>, learning_rate: f64) -> Self {
117 let n = initial_params.len();
118 Self {
119 params: initial_params,
120 learning_rate,
121 beta1: 0.9,
122 beta2: 0.999,
123 epsilon: 1e-8,
124 m: vec![0.0; n],
125 v: vec![0.0; n],
126 t: 0,
127 }
128 }
129}
130
131impl ClassicalOptimizer for AdamOptimizer {
132 fn step(&mut self, _params: &[f64], gradient: &[f64]) -> Vec<f64> {
133 self.t += 1;
134
135 for i in 0..self.params.len() {
136 self.m[i] = self.beta1 * self.m[i] + (1.0 - self.beta1) * gradient[i];
138
139 self.v[i] = self.beta2 * self.v[i] + (1.0 - self.beta2) * gradient[i].powi(2);
141
142 let m_hat = self.m[i] / (1.0 - self.beta1.powi(self.t as i32));
144
145 let v_hat = self.v[i] / (1.0 - self.beta2.powi(self.t as i32));
147
148 self.params[i] -= self.learning_rate * m_hat / (v_hat.sqrt() + self.epsilon);
150 }
151
152 self.params.clone()
153 }
154
155 fn get_params(&self) -> &[f64] {
156 &self.params
157 }
158}
159
/// Evaluates a parameterized quantum circuit, producing cost values and gradients.
pub struct CircuitEvaluator {
    /// Number of qubits the circuit acts on.
    num_qubits: usize,
    /// Ordered gate layers describing the circuit ansatz.
    circuit_structure: Vec<LayerSpec>,
}
167
/// Description of a single gate layer in a circuit ansatz.
#[derive(Debug, Clone)]
pub struct LayerSpec {
    /// Gate name identifier (free-form string; vocabulary defined by consumers).
    pub gate_type: String,
    /// Qubit indices the gate acts on.
    pub qubits: Vec<usize>,
    /// Whether this layer consumes a trainable parameter.
    pub is_parameterized: bool,
}
178
179impl CircuitEvaluator {
180 pub fn new(num_qubits: usize, circuit_structure: Vec<LayerSpec>) -> Self {
182 Self {
183 num_qubits,
184 circuit_structure,
185 }
186 }
187
188 pub fn evaluate(&self, params: &[f64]) -> QuantRS2Result<f64> {
190 Ok(params.iter().map(|x| x.cos()).sum::<f64>() / params.len() as f64)
193 }
194
195 pub fn compute_gradient(&self, params: &[f64]) -> QuantRS2Result<Vec<f64>> {
197 let shift = std::f64::consts::PI / 2.0;
198 let mut gradient = Vec::new();
199
200 for i in 0..params.len() {
201 let mut params_plus = params.to_vec();
202 let mut params_minus = params.to_vec();
203
204 params_plus[i] += shift;
205 params_minus[i] -= shift;
206
207 let value_plus = self.evaluate(¶ms_plus)?;
208 let value_minus = self.evaluate(¶ms_minus)?;
209
210 gradient.push((value_plus - value_minus) / 2.0);
211 }
212
213 Ok(gradient)
214 }
215
216 pub fn compute_gradient_finite_diff(
218 &self,
219 params: &[f64],
220 eps: f64,
221 ) -> QuantRS2Result<Vec<f64>> {
222 let mut gradient = Vec::new();
223
224 for i in 0..params.len() {
225 let mut params_plus = params.to_vec();
226 let mut params_minus = params.to_vec();
227
228 params_plus[i] += eps;
229 params_minus[i] -= eps;
230
231 let value_plus = self.evaluate(¶ms_plus)?;
232 let value_minus = self.evaluate(¶ms_minus)?;
233
234 gradient.push((value_plus - value_minus) / (2.0 * eps));
235 }
236
237 Ok(gradient)
238 }
239}
240
241impl VariationalQCOptimizer {
242 pub fn new(
244 optimizer: Box<dyn ClassicalOptimizer>,
245 circuit_evaluator: CircuitEvaluator,
246 config: VQCConfig,
247 ) -> Self {
248 Self {
249 optimizer,
250 circuit_evaluator,
251 config,
252 }
253 }
254
255 pub fn optimize(&mut self) -> QuantRS2Result<OptimizationResult> {
257 let mut history = Vec::new();
258 let mut best_value = f64::INFINITY;
259 let mut best_params = self.optimizer.get_params().to_vec();
260
261 for iteration in 0..self.config.max_iterations {
262 let params = self.optimizer.get_params().to_vec();
263
264 let cost = self.circuit_evaluator.evaluate(¶ms)?;
266
267 let gradient = if self.config.use_parameter_shift {
269 self.circuit_evaluator.compute_gradient(¶ms)?
270 } else {
271 self.circuit_evaluator
272 .compute_gradient_finite_diff(¶ms, 1e-5)?
273 };
274
275 let new_params = self.optimizer.step(¶ms, &gradient);
277
278 history.push(cost);
279
280 if cost < best_value {
281 best_value = cost;
282 best_params = new_params.clone();
283 }
284
285 if iteration > 0
287 && (history[iteration] - history[iteration - 1]).abs() < self.config.tolerance
288 {
289 break;
290 }
291 }
292
293 let iterations = history.len();
294 Ok(OptimizationResult {
295 best_params,
296 best_value,
297 history,
298 iterations,
299 })
300 }
301}
302
/// Outcome of a variational optimization run.
#[derive(Debug, Clone)]
pub struct OptimizationResult {
    /// Parameters associated with the best (lowest) cost seen.
    pub best_params: Vec<f64>,
    /// Lowest cost value observed during the run.
    pub best_value: f64,
    /// Cost value recorded at each iteration.
    pub history: Vec<f64>,
    /// Number of iterations actually executed.
    pub iterations: usize,
}
315
/// Hybrid network: classical dense layers sandwiching a quantum layer.
pub struct QuantumClassicalNN {
    /// Classical layers applied before the quantum layer.
    classical_pre: Vec<ClassicalLayer>,
    /// Quantum layer in the middle of the pipeline.
    quantum_layer: QuantumLayer,
    /// Classical layers applied after the quantum layer.
    classical_post: Vec<ClassicalLayer>,
}
329
/// Dense (fully connected) classical layer: y = activation(W x + b).
pub struct ClassicalLayer {
    /// Weight matrix of shape (output_dim, input_dim) — see `new`.
    weights: Array2<f64>,
    /// Bias vector of length output_dim.
    biases: Array1<f64>,
    /// Element-wise nonlinearity applied after the affine transform.
    activation: ActivationFunction,
}
339
/// Element-wise activation functions supported by `ClassicalLayer`.
#[derive(Debug, Clone, Copy)]
pub enum ActivationFunction {
    /// max(x, 0)
    ReLU,
    /// 1 / (1 + e^-x)
    Sigmoid,
    /// tanh(x)
    Tanh,
    /// Identity (no nonlinearity).
    Linear,
}
347
348impl ClassicalLayer {
349 pub fn new(input_dim: usize, output_dim: usize, activation: ActivationFunction) -> Self {
351 Self {
352 weights: Array2::zeros((output_dim, input_dim)),
353 biases: Array1::zeros(output_dim),
354 activation,
355 }
356 }
357
358 pub fn forward(&self, input: &Array1<f64>) -> Array1<f64> {
360 let mut output = self.weights.dot(input) + &self.biases;
361
362 match self.activation {
364 ActivationFunction::ReLU => {
365 output.mapv_inplace(|x| x.max(0.0));
366 }
367 ActivationFunction::Sigmoid => {
368 output.mapv_inplace(|x| 1.0 / (1.0 + (-x).exp()));
369 }
370 ActivationFunction::Tanh => {
371 output.mapv_inplace(|x| x.tanh());
372 }
373 ActivationFunction::Linear => {}
374 }
375
376 output
377 }
378}
379
/// Parameterized quantum layer used inside a hybrid network.
pub struct QuantumLayer {
    /// Number of qubits; state vectors carry 2^num_qubits amplitudes.
    num_qubits: usize,
    /// Gate layers describing the circuit ansatz.
    circuit: Vec<LayerSpec>,
    /// Trainable parameters, one per parameterized layer (see `new`).
    params: Vec<f64>,
}
389
390impl QuantumLayer {
391 pub fn new(num_qubits: usize, circuit: Vec<LayerSpec>) -> Self {
393 let num_params = circuit.iter().filter(|l| l.is_parameterized).count();
394 Self {
395 num_qubits,
396 circuit,
397 params: vec![0.0; num_params],
398 }
399 }
400
401 pub fn forward(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
403 let quantum_state = self.encode_input(input)?;
405
406 let output_state = self.apply_circuit(&quantum_state)?;
408
409 let classical_output = self.decode_output(&output_state)?;
411
412 Ok(classical_output)
413 }
414
415 fn encode_input(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
417 let dim = 2_usize.pow(self.num_qubits as u32);
418 let mut state = Array1::zeros(dim);
419
420 let norm = input.iter().map(|x| x.powi(2)).sum::<f64>().sqrt();
422 for i in 0..input.len().min(dim) {
423 state[i] = Complex::new(input[i] / norm, 0.0);
424 }
425
426 Ok(state)
427 }
428
429 fn apply_circuit(&self, state: &Array1<Complex>) -> QuantRS2Result<Array1<Complex>> {
431 Ok(state.clone())
433 }
434
435 fn decode_output(&self, state: &Array1<Complex>) -> QuantRS2Result<Array1<f64>> {
437 let output_dim = self.num_qubits;
439 let mut output = Array1::zeros(output_dim);
440
441 for i in 0..output_dim {
442 output[i] = state
443 .iter()
444 .take(2_usize.pow(i as u32))
445 .map(|x| x.norm_sqr())
446 .sum();
447 }
448
449 Ok(output)
450 }
451}
452
453impl QuantumClassicalNN {
454 pub fn new(
456 classical_pre: Vec<ClassicalLayer>,
457 quantum_layer: QuantumLayer,
458 classical_post: Vec<ClassicalLayer>,
459 ) -> Self {
460 Self {
461 classical_pre,
462 quantum_layer,
463 classical_post,
464 }
465 }
466
467 pub fn forward(&self, input: &Array1<f64>) -> QuantRS2Result<Array1<f64>> {
469 let mut current = input.clone();
470
471 for layer in &self.classical_pre {
473 current = layer.forward(¤t);
474 }
475
476 current = self.quantum_layer.forward(¤t)?;
478
479 for layer in &self.classical_post {
481 current = layer.forward(¤t);
482 }
483
484 Ok(current)
485 }
486
487 pub fn train(
489 &mut self,
490 training_data: &[(Array1<f64>, Array1<f64>)],
491 epochs: usize,
492 learning_rate: f64,
493 ) -> QuantRS2Result<Vec<f64>> {
494 let mut loss_history = Vec::new();
495
496 for epoch in 0..epochs {
497 let mut total_loss = 0.0;
498
499 for (input, target) in training_data {
500 let output = self.forward(input)?;
502
503 let loss: f64 = output
505 .iter()
506 .zip(target.iter())
507 .map(|(o, t)| (o - t).powi(2))
508 .sum();
509 total_loss += loss;
510
511 }
513
514 let avg_loss = total_loss / training_data.len() as f64;
515 loss_history.push(avg_loss);
516 }
517
518 Ok(loss_history)
519 }
520}
521
/// Encodes classical data vectors into quantum states for kernel methods.
pub struct QuantumFeatureMap {
    /// Number of qubits; encoded states carry 2^num_qubits amplitudes.
    num_qubits: usize,
    /// Encoding scheme that `encode` dispatches to.
    feature_map_type: FeatureMapType,
}
533
/// Supported data-encoding schemes for `QuantumFeatureMap`.
#[derive(Debug, Clone, Copy)]
pub enum FeatureMapType {
    /// Data values become normalized state amplitudes.
    Amplitude,
    /// Angle encoding (current implementation is a stub returning |0...0>).
    Angle,
    /// Thresholded data selects a single computational basis state.
    Basis,
    /// IQP-style encoding (current implementation is a stub: uniform superposition).
    IQP,
    /// Pauli feature map (current implementation is a stub returning |0...0>).
    Pauli,
}
547
548impl QuantumFeatureMap {
549 pub fn new(num_qubits: usize, feature_map_type: FeatureMapType) -> Self {
551 Self {
552 num_qubits,
553 feature_map_type,
554 }
555 }
556
557 pub fn encode(&self, data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
559 match self.feature_map_type {
560 FeatureMapType::Amplitude => self.amplitude_encoding(data),
561 FeatureMapType::Angle => self.angle_encoding(data),
562 FeatureMapType::Basis => self.basis_encoding(data),
563 FeatureMapType::IQP => self.iqp_encoding(data),
564 FeatureMapType::Pauli => self.pauli_encoding(data),
565 }
566 }
567
568 fn amplitude_encoding(&self, data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
570 let dim = 2_usize.pow(self.num_qubits as u32);
571 let mut state = Array1::zeros(dim);
572
573 let norm = data.iter().map(|x| x.powi(2)).sum::<f64>().sqrt();
574 for i in 0..data.len().min(dim) {
575 state[i] = Complex::new(data[i] / norm, 0.0);
576 }
577
578 Ok(state)
579 }
580
581 fn angle_encoding(&self, data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
583 let dim = 2_usize.pow(self.num_qubits as u32);
584 let mut state = Array1::zeros(dim);
585 state[0] = Complex::new(1.0, 0.0);
586
587 Ok(state)
590 }
591
592 fn basis_encoding(&self, data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
594 let dim = 2_usize.pow(self.num_qubits as u32);
595 let mut state = Array1::zeros(dim);
596
597 let mut index = 0usize;
599 for (i, &val) in data.iter().enumerate().take(self.num_qubits) {
600 if val > 0.5 {
601 index |= 1 << i;
602 }
603 }
604
605 state[index] = Complex::new(1.0, 0.0);
606 Ok(state)
607 }
608
609 fn iqp_encoding(&self, data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
611 let dim = 2_usize.pow(self.num_qubits as u32);
613 let hadamard_coeff = 1.0 / (dim as f64).sqrt();
614 let mut state = Array1::from_elem(dim, Complex::new(hadamard_coeff, 0.0));
615
616 Ok(state)
619 }
620
621 fn pauli_encoding(&self, data: &Array1<f64>) -> QuantRS2Result<Array1<Complex>> {
623 let dim = 2_usize.pow(self.num_qubits as u32);
624 let mut state = Array1::zeros(dim);
625 state[0] = Complex::new(1.0, 0.0);
626
627 Ok(state)
630 }
631
632 pub fn kernel(&self, data1: &Array1<f64>, data2: &Array1<f64>) -> QuantRS2Result<f64> {
634 let state1 = self.encode(data1)?;
635 let state2 = self.encode(data2)?;
636
637 let overlap: Complex = state1
639 .iter()
640 .zip(state2.iter())
641 .map(|(a, b)| a.conj() * b)
642 .sum();
643
644 Ok(overlap.norm_sqr())
645 }
646}
647
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_gradient_descent_optimizer() {
        // One step with lr = 0.1 and a unit gradient shifts each parameter by -0.1.
        let start = vec![1.0, 2.0, 3.0];
        let mut opt = GradientDescentOptimizer::new(start.clone(), 0.1);

        let updated = opt.step(&start, &[1.0, 1.0, 1.0]);

        assert_eq!(updated[0], 0.9);
        assert_eq!(updated[1], 1.9);
        assert_eq!(updated[2], 2.9);
    }

    #[test]
    fn test_adam_optimizer() {
        let mut opt = AdamOptimizer::new(vec![1.0, 2.0, 3.0], 0.01);

        let updated = opt.step(&[], &[1.0, 1.0, 1.0]);

        // A positive gradient must decrease the first parameter.
        assert!(updated[0] < 1.0);
    }

    #[test]
    fn test_quantum_feature_map_amplitude() {
        let map = QuantumFeatureMap::new(2, FeatureMapType::Amplitude);
        let data = Array1::from_vec(vec![1.0, 0.0, 0.0, 0.0]);

        let state = map.encode(&data).unwrap();

        // A unit basis vector encodes to |00> with unit amplitude.
        assert!((state[0].norm() - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_quantum_kernel() {
        let map = QuantumFeatureMap::new(2, FeatureMapType::Amplitude);
        let x = Array1::from_vec(vec![1.0, 0.0, 0.0, 0.0]);
        let y = Array1::from_vec(vec![1.0, 0.0, 0.0, 0.0]);

        // Identical inputs give a kernel value of 1.
        let k = map.kernel(&x, &y).unwrap();
        assert!((k - 1.0).abs() < 1e-10);
    }
}