// quantrs2_core/variational.rs

//! Variational quantum gates with automatic differentiation support
//!
//! This module provides variational quantum gates whose parameters can be optimized
//! using gradient-based methods. It includes automatic differentiation for computing
//! parameter gradients efficiently.
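//!
//! # Example
//!
//! A minimal usage sketch (marked `ignore` since the exact crate paths are an
//! assumption; this file suggests a `quantrs2_core` crate with a `variational` module):
//!
//! ```ignore
//! use quantrs2_core::qubit::QubitId;
//! use quantrs2_core::variational::VariationalGate;
//!
//! // Build a parameterized RX gate and differentiate a toy loss (the real part
//! // of the matrix trace) with the default parameter-shift rule.
//! let gate = VariationalGate::rx(QubitId(0), "theta".to_string(), 0.3);
//! let grad = gate
//!     .gradient(|m| (m[[0, 0]] + m[[1, 1]]).re)
//!     .expect("gradient computation succeeds");
//! assert_eq!(grad.len(), 1);
//! ```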
6
7use crate::{
8    error::{QuantRS2Error, QuantRS2Result},
9    gate::GateOp,
10    qubit::QubitId,
11};
12use rustc_hash::FxHashMap;
13use scirs2_core::ndarray::{Array1, Array2};
14use scirs2_core::Complex;
15use std::f64::consts::PI;
16use std::sync::Arc;
17
18/// Automatic differentiation mode
19#[derive(Debug, Clone, Copy, PartialEq)]
20pub enum DiffMode {
21    /// Forward-mode automatic differentiation
22    Forward,
23    /// Reverse-mode automatic differentiation (backpropagation)
24    Reverse,
25    /// Parameter shift rule for quantum circuits
26    ParameterShift,
27    /// Finite differences approximation
28    FiniteDiff { epsilon: f64 },
29}
30
/// Dual number for forward-mode autodiff
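///
/// # Example
///
/// A small forward-mode sketch (marked `ignore` because the crate path is an
/// assumption): for `f(x) = x * sin(x)`, seeding `x` as a variable propagates
/// `f'(x) = sin(x) + x * cos(x)` through ordinary arithmetic.
///
/// ```ignore
/// use quantrs2_core::variational::Dual;
///
/// let x = Dual::variable(2.0);
/// let f = x * x.sin();
/// assert!((f.real - 2.0 * 2.0_f64.sin()).abs() < 1e-12);
/// assert!((f.dual - (2.0_f64.sin() + 2.0 * 2.0_f64.cos())).abs() < 1e-12);
/// ```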
32#[derive(Debug, Clone, Copy)]
33pub struct Dual {
34    /// Real part (value)
35    pub real: f64,
36    /// Dual part (derivative)
37    pub dual: f64,
38}
39
40impl Dual {
41    /// Create a new dual number
42    pub const fn new(real: f64, dual: f64) -> Self {
43        Self { real, dual }
44    }
45
46    /// Create a constant (no derivative)
47    pub const fn constant(value: f64) -> Self {
48        Self {
49            real: value,
50            dual: 0.0,
51        }
52    }
53
54    /// Create a variable (unit derivative)
55    pub const fn variable(value: f64) -> Self {
56        Self {
57            real: value,
58            dual: 1.0,
59        }
60    }
61}
62
63// Arithmetic operations for dual numbers
64impl std::ops::Add for Dual {
65    type Output = Self;
66
67    fn add(self, other: Self) -> Self {
68        Self {
69            real: self.real + other.real,
70            dual: self.dual + other.dual,
71        }
72    }
73}
74
75impl std::ops::Sub for Dual {
76    type Output = Self;
77
78    fn sub(self, other: Self) -> Self {
79        Self {
80            real: self.real - other.real,
81            dual: self.dual - other.dual,
82        }
83    }
84}
85
86impl std::ops::Mul for Dual {
87    type Output = Self;
88
89    fn mul(self, other: Self) -> Self {
90        Self {
91            real: self.real * other.real,
92            dual: self.real.mul_add(other.dual, self.dual * other.real),
93        }
94    }
95}
96
97impl std::ops::Div for Dual {
98    type Output = Self;
99
100    fn div(self, other: Self) -> Self {
101        Self {
102            real: self.real / other.real,
103            dual: self.dual.mul_add(other.real, -(self.real * other.dual))
104                / (other.real * other.real),
105        }
106    }
107}
108
109// Trigonometric functions for dual numbers
110impl Dual {
111    #[must_use]
112    pub fn sin(self) -> Self {
113        Self {
114            real: self.real.sin(),
115            dual: self.dual * self.real.cos(),
116        }
117    }
118
119    #[must_use]
120    pub fn cos(self) -> Self {
121        Self {
122            real: self.real.cos(),
123            dual: -self.dual * self.real.sin(),
124        }
125    }
126
127    #[must_use]
128    pub fn exp(self) -> Self {
129        let exp_real = self.real.exp();
130        Self {
131            real: exp_real,
132            dual: self.dual * exp_real,
133        }
134    }
135
136    #[must_use]
137    pub fn sqrt(self) -> Self {
138        let sqrt_real = self.real.sqrt();
139        Self {
140            real: sqrt_real,
141            dual: self.dual / (2.0 * sqrt_real),
142        }
143    }
144}
145
146/// Computation graph node for reverse-mode autodiff
147#[derive(Debug, Clone)]
148pub struct Node {
149    /// Node identifier
150    pub id: usize,
151    /// Value at this node
152    pub value: Complex<f64>,
153    /// Gradient accumulated at this node
154    pub grad: Complex<f64>,
155    /// Operation that produced this node
156    pub op: Operation,
157    /// Parent nodes
158    pub parents: Vec<usize>,
159}
160
161/// Operations in the computation graph
162#[derive(Debug, Clone)]
163pub enum Operation {
164    /// Input parameter
165    Parameter(String),
166    /// Constant value
167    Constant,
168    /// Addition
169    Add,
170    /// Multiplication
171    Mul,
172    /// Complex conjugate
173    Conj,
174    /// Matrix multiplication
175    MatMul,
176    /// Exponential of imaginary number
177    ExpI,
178}
179
180/// Computation graph for reverse-mode autodiff
181#[derive(Debug)]
182pub struct ComputationGraph {
183    /// Nodes in the graph
184    nodes: Vec<Node>,
185    /// Parameter name to node ID mapping
186    params: FxHashMap<String, usize>,
187    /// Next available node ID
188    next_id: usize,
189}
190
191impl ComputationGraph {
192    /// Create a new computation graph
193    pub fn new() -> Self {
194        Self {
195            nodes: Vec::new(),
196            params: FxHashMap::default(),
197            next_id: 0,
198        }
199    }
200
201    /// Add a parameter node
202    pub fn parameter(&mut self, name: String, value: f64) -> usize {
203        let id = self.next_id;
204        self.next_id += 1;
205
206        let node = Node {
207            id,
208            value: Complex::new(value, 0.0),
209            grad: Complex::new(0.0, 0.0),
210            op: Operation::Parameter(name.clone()),
211            parents: vec![],
212        };
213
214        self.nodes.push(node);
215        self.params.insert(name, id);
216        id
217    }
218
219    /// Add a constant node
220    pub fn constant(&mut self, value: Complex<f64>) -> usize {
221        let id = self.next_id;
222        self.next_id += 1;
223
224        let node = Node {
225            id,
226            value,
227            grad: Complex::new(0.0, 0.0),
228            op: Operation::Constant,
229            parents: vec![],
230        };
231
232        self.nodes.push(node);
233        id
234    }
235
236    /// Add two nodes
237    pub fn add(&mut self, a: usize, b: usize) -> usize {
238        let id = self.next_id;
239        self.next_id += 1;
240
241        let value = self.nodes[a].value + self.nodes[b].value;
242
243        let node = Node {
244            id,
245            value,
246            grad: Complex::new(0.0, 0.0),
247            op: Operation::Add,
248            parents: vec![a, b],
249        };
250
251        self.nodes.push(node);
252        id
253    }
254
255    /// Multiply two nodes
256    pub fn mul(&mut self, a: usize, b: usize) -> usize {
257        let id = self.next_id;
258        self.next_id += 1;
259
260        let value = self.nodes[a].value * self.nodes[b].value;
261
262        let node = Node {
263            id,
264            value,
265            grad: Complex::new(0.0, 0.0),
266            op: Operation::Mul,
267            parents: vec![a, b],
268        };
269
270        self.nodes.push(node);
271        id
272    }
273
274    /// Exponential of i times a real parameter
275    pub fn exp_i(&mut self, theta: usize) -> usize {
276        let id = self.next_id;
277        self.next_id += 1;
278
279        let theta_val = self.nodes[theta].value.re;
280        let value = Complex::new(theta_val.cos(), theta_val.sin());
281
282        let node = Node {
283            id,
284            value,
285            grad: Complex::new(0.0, 0.0),
286            op: Operation::ExpI,
287            parents: vec![theta],
288        };
289
290        self.nodes.push(node);
291        id
292    }
293
294    /// Backward pass to compute gradients
295    pub fn backward(&mut self, output: usize) {
296        // Initialize output gradient
297        self.nodes[output].grad = Complex::new(1.0, 0.0);
298
299        // Traverse in reverse topological order
300        for i in (0..=output).rev() {
301            let grad = self.nodes[i].grad;
302            let parents = self.nodes[i].parents.clone();
303            let op = self.nodes[i].op.clone();
304
305            match op {
306                Operation::Add => {
307                    // d/da (a + b) = 1, d/db (a + b) = 1
308                    if !parents.is_empty() {
309                        self.nodes[parents[0]].grad += grad;
310                        self.nodes[parents[1]].grad += grad;
311                    }
312                }
313                Operation::Mul => {
314                    // d/da (a * b) = b, d/db (a * b) = a
315                    if !parents.is_empty() {
316                        let a = parents[0];
317                        let b = parents[1];
318                        let b_value = self.nodes[b].value;
319                        let a_value = self.nodes[a].value;
320                        self.nodes[a].grad += grad * b_value;
321                        self.nodes[b].grad += grad * a_value;
322                    }
323                }
324                Operation::ExpI => {
325                    // d/dθ e^(iθ) = i * e^(iθ)
326                    if !parents.is_empty() {
327                        let theta = parents[0];
328                        let node_value = self.nodes[i].value;
329                        self.nodes[theta].grad += grad * Complex::new(0.0, 1.0) * node_value;
330                    }
331                }
332                _ => {}
333            }
334        }
335    }
336
337    /// Get gradient for a parameter
338    pub fn get_gradient(&self, param: &str) -> Option<f64> {
339        self.params.get(param).map(|&id| self.nodes[id].grad.re)
340    }
341}
342
343/// Variational quantum gate with autodiff support
344#[derive(Clone)]
345pub struct VariationalGate {
346    /// Gate name
347    pub name: String,
348    /// Target qubits
349    pub qubits: Vec<QubitId>,
350    /// Parameter names
351    pub params: Vec<String>,
352    /// Current parameter values
353    pub values: Vec<f64>,
354    /// Gate generator function
355    pub generator: Arc<dyn Fn(&[f64]) -> Array2<Complex<f64>> + Send + Sync>,
356    /// Differentiation mode
357    pub diff_mode: DiffMode,
358}
359
360impl VariationalGate {
361    /// Create a variational rotation gate around X axis
362    pub fn rx(qubit: QubitId, param_name: String, initial_value: f64) -> Self {
363        let generator = Arc::new(|params: &[f64]| {
364            let theta = params[0];
365            let cos_half = (theta / 2.0).cos();
366            let sin_half = (theta / 2.0).sin();
367
368            Array2::from_shape_vec(
369                (2, 2),
370                vec![
371                    Complex::new(cos_half, 0.0),
372                    Complex::new(0.0, -sin_half),
373                    Complex::new(0.0, -sin_half),
374                    Complex::new(cos_half, 0.0),
375                ],
376            )
377            .expect("RX gate matrix shape is always valid")
378        });
379
380        Self {
381            name: format!("RX({param_name})"),
382            qubits: vec![qubit],
383            params: vec![param_name],
384            values: vec![initial_value],
385            generator,
386            diff_mode: DiffMode::ParameterShift,
387        }
388    }
389
390    /// Create a variational rotation gate around Y axis
391    pub fn ry(qubit: QubitId, param_name: String, initial_value: f64) -> Self {
392        let generator = Arc::new(|params: &[f64]| {
393            let theta = params[0];
394            let cos_half = (theta / 2.0).cos();
395            let sin_half = (theta / 2.0).sin();
396
397            Array2::from_shape_vec(
398                (2, 2),
399                vec![
400                    Complex::new(cos_half, 0.0),
401                    Complex::new(-sin_half, 0.0),
402                    Complex::new(sin_half, 0.0),
403                    Complex::new(cos_half, 0.0),
404                ],
405            )
406            .expect("RY gate matrix shape is always valid")
407        });
408
409        Self {
410            name: format!("RY({param_name})"),
411            qubits: vec![qubit],
412            params: vec![param_name],
413            values: vec![initial_value],
414            generator,
415            diff_mode: DiffMode::ParameterShift,
416        }
417    }
418
419    /// Create a variational rotation gate around Z axis
420    pub fn rz(qubit: QubitId, param_name: String, initial_value: f64) -> Self {
421        let generator = Arc::new(|params: &[f64]| {
422            let theta = params[0];
423            let exp_pos = Complex::new((theta / 2.0).cos(), (theta / 2.0).sin());
424            let exp_neg = Complex::new((theta / 2.0).cos(), -(theta / 2.0).sin());
425
426            Array2::from_shape_vec(
427                (2, 2),
428                vec![
429                    exp_neg,
430                    Complex::new(0.0, 0.0),
431                    Complex::new(0.0, 0.0),
432                    exp_pos,
433                ],
434            )
435            .expect("RZ gate matrix shape is always valid")
436        });
437
438        Self {
439            name: format!("RZ({param_name})"),
440            qubits: vec![qubit],
441            params: vec![param_name],
442            values: vec![initial_value],
443            generator,
444            diff_mode: DiffMode::ParameterShift,
445        }
446    }
447
448    /// Create a variational controlled rotation gate
449    pub fn cry(control: QubitId, target: QubitId, param_name: String, initial_value: f64) -> Self {
450        let generator = Arc::new(|params: &[f64]| {
451            let theta = params[0];
452            let cos_half = (theta / 2.0).cos();
453            let sin_half = (theta / 2.0).sin();
454
455            let mut matrix = Array2::eye(4).mapv(|x| Complex::new(x, 0.0));
456            // Apply RY to target when control is |1⟩
457            matrix[[2, 2]] = Complex::new(cos_half, 0.0);
458            matrix[[2, 3]] = Complex::new(-sin_half, 0.0);
459            matrix[[3, 2]] = Complex::new(sin_half, 0.0);
460            matrix[[3, 3]] = Complex::new(cos_half, 0.0);
461
462            matrix
463        });
464
465        Self {
466            name: format!("CRY({}, {})", param_name, control.0),
467            qubits: vec![control, target],
468            params: vec![param_name],
469            values: vec![initial_value],
470            generator,
471            diff_mode: DiffMode::ParameterShift,
472        }
473    }
474
475    /// Get current parameter values
476    pub fn get_params(&self) -> &[f64] {
477        &self.values
478    }
479
480    /// Set parameter values
481    pub fn set_params(&mut self, values: Vec<f64>) -> QuantRS2Result<()> {
482        if values.len() != self.params.len() {
483            return Err(QuantRS2Error::InvalidInput(format!(
484                "Expected {} parameters, got {}",
485                self.params.len(),
486                values.len()
487            )));
488        }
489        self.values = values;
490        Ok(())
491    }
492
493    /// Compute gradient with respect to parameters
494    pub fn gradient(
495        &self,
496        loss_fn: impl Fn(&Array2<Complex<f64>>) -> f64,
497    ) -> QuantRS2Result<Vec<f64>> {
498        match self.diff_mode {
499            DiffMode::ParameterShift => self.parameter_shift_gradient(loss_fn),
500            DiffMode::FiniteDiff { epsilon } => self.finite_diff_gradient(loss_fn, epsilon),
501            DiffMode::Forward => self.forward_mode_gradient(loss_fn),
502            DiffMode::Reverse => self.reverse_mode_gradient(loss_fn),
503        }
504    }
505
    /// Parameter shift rule for gradient computation
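    ///
    /// For a gate of the form `exp(-i θ G / 2)` with `G² = I`, the shift rule gives
    /// the exact derivative of a quantum expectation value:
    ///
    /// `∂f/∂θ = [f(θ + π/2) - f(θ - π/2)] / 2`
    ///
    /// For an arbitrary function of the gate matrix (as used here) it is only an
    /// approximation to the true derivative.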
507    fn parameter_shift_gradient(
508        &self,
509        loss_fn: impl Fn(&Array2<Complex<f64>>) -> f64,
510    ) -> QuantRS2Result<Vec<f64>> {
511        let mut gradients = vec![0.0; self.params.len()];
512
513        for (i, &value) in self.values.iter().enumerate() {
514            // Shift parameter by +π/2
515            let mut params_plus = self.values.clone();
516            params_plus[i] = value + PI / 2.0;
517            let matrix_plus = (self.generator)(&params_plus);
518            let loss_plus = loss_fn(&matrix_plus);
519
520            // Shift parameter by -π/2
521            let mut params_minus = self.values.clone();
522            params_minus[i] = value - PI / 2.0;
523            let matrix_minus = (self.generator)(&params_minus);
524            let loss_minus = loss_fn(&matrix_minus);
525
526            // Gradient via parameter shift rule
527            gradients[i] = (loss_plus - loss_minus) / 2.0;
528        }
529
530        Ok(gradients)
531    }
532
533    /// Finite differences gradient approximation
534    fn finite_diff_gradient(
535        &self,
536        loss_fn: impl Fn(&Array2<Complex<f64>>) -> f64,
537        epsilon: f64,
538    ) -> QuantRS2Result<Vec<f64>> {
539        let mut gradients = vec![0.0; self.params.len()];
540
541        for (i, &value) in self.values.iter().enumerate() {
542            // Forward difference
543            let mut params_plus = self.values.clone();
544            params_plus[i] = value + epsilon;
545            let matrix_plus = (self.generator)(&params_plus);
546            let loss_plus = loss_fn(&matrix_plus);
547
548            // Current value
549            let matrix = (self.generator)(&self.values);
550            let loss = loss_fn(&matrix);
551
552            // Gradient approximation
553            gradients[i] = (loss_plus - loss) / epsilon;
554        }
555
556        Ok(gradients)
557    }
558
559    /// Forward-mode automatic differentiation
560    fn forward_mode_gradient(
561        &self,
562        loss_fn: impl Fn(&Array2<Complex<f64>>) -> f64,
563    ) -> QuantRS2Result<Vec<f64>> {
        // Simplified implementation: a full version would thread dual numbers
        // through the generator. For now, fall back to finite differences.
        self.finite_diff_gradient(loss_fn, 1e-8)
569    }
570
571    /// Reverse-mode automatic differentiation
572    fn reverse_mode_gradient(
573        &self,
574        loss_fn: impl Fn(&Array2<Complex<f64>>) -> f64,
575    ) -> QuantRS2Result<Vec<f64>> {
576        // Build computation graph
577        let mut graph = ComputationGraph::new();
578
579        // Add parameters to graph
580        let _param_nodes: Vec<_> = self
581            .params
582            .iter()
583            .zip(&self.values)
584            .map(|(name, &value)| graph.parameter(name.clone(), value))
585            .collect();
586
587        // Compute matrix elements using graph
588        // This is simplified - full implementation would build entire matrix computation
589
590        // For now, use parameter shift as fallback
591        self.parameter_shift_gradient(loss_fn)
592    }
593}
594
595impl std::fmt::Debug for VariationalGate {
596    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
597        f.debug_struct("VariationalGate")
598            .field("name", &self.name)
599            .field("qubits", &self.qubits)
600            .field("params", &self.params)
601            .field("values", &self.values)
602            .field("diff_mode", &self.diff_mode)
603            .finish()
604    }
605}
606
607impl GateOp for VariationalGate {
608    fn name(&self) -> &'static str {
        // Leak the string to obtain a 'static lifetime. Note that this leaks a
        // small allocation every time `name()` is called; acceptable for gates
        // named once, but worth caching if called in a hot loop.
        Box::leak(self.name.clone().into_boxed_str())
612    }
613
614    fn qubits(&self) -> Vec<QubitId> {
615        self.qubits.clone()
616    }
617
618    fn is_parameterized(&self) -> bool {
619        true
620    }
621
622    fn matrix(&self) -> QuantRS2Result<Vec<Complex<f64>>> {
623        let mat = (self.generator)(&self.values);
624        Ok(mat.iter().copied().collect())
625    }
626
627    fn as_any(&self) -> &dyn std::any::Any {
628        self
629    }
630
631    fn clone_gate(&self) -> Box<dyn GateOp> {
632        Box::new(self.clone())
633    }
634}
635
636/// Variational quantum circuit with multiple parameterized gates
#[derive(Debug, Clone)]
638pub struct VariationalCircuit {
639    /// List of gates in the circuit
640    pub gates: Vec<VariationalGate>,
641    /// Parameter name to gate indices mapping
642    pub param_map: FxHashMap<String, Vec<usize>>,
643    /// Number of qubits
644    pub num_qubits: usize,
645}
646
647impl VariationalCircuit {
648    /// Create a new variational circuit
649    pub fn new(num_qubits: usize) -> Self {
650        Self {
651            gates: Vec::new(),
652            param_map: FxHashMap::default(),
653            num_qubits,
654        }
655    }
656
657    /// Add a variational gate to the circuit
658    pub fn add_gate(&mut self, gate: VariationalGate) {
659        let gate_idx = self.gates.len();
660
661        // Update parameter map
662        for param in &gate.params {
663            self.param_map
664                .entry(param.clone())
665                .or_insert_with(Vec::new)
666                .push(gate_idx);
667        }
668
669        self.gates.push(gate);
670    }
671
672    /// Get all parameter names
673    pub fn parameter_names(&self) -> Vec<String> {
674        let mut names: Vec<_> = self.param_map.keys().cloned().collect();
675        names.sort();
676        names
677    }
678
679    /// Get current parameter values
680    pub fn get_parameters(&self) -> FxHashMap<String, f64> {
681        let mut params = FxHashMap::default();
682
683        for gate in &self.gates {
684            for (name, &value) in gate.params.iter().zip(&gate.values) {
685                params.insert(name.clone(), value);
686            }
687        }
688
689        params
690    }
691
692    /// Set parameter values
693    pub fn set_parameters(&mut self, params: &FxHashMap<String, f64>) -> QuantRS2Result<()> {
694        for (param_name, &value) in params {
695            if let Some(gate_indices) = self.param_map.get(param_name) {
696                for &idx in gate_indices {
697                    if let Some(param_idx) =
698                        self.gates[idx].params.iter().position(|p| p == param_name)
699                    {
700                        self.gates[idx].values[param_idx] = value;
701                    }
702                }
703            }
704        }
705
706        Ok(())
707    }
708
709    /// Compute gradients for all parameters
710    pub fn compute_gradients(
711        &self,
712        loss_fn: impl Fn(&[VariationalGate]) -> f64,
713    ) -> QuantRS2Result<FxHashMap<String, f64>> {
714        let mut gradients = FxHashMap::default();
715
716        // Use parameter shift rule for each parameter
717        for param_name in self.parameter_names() {
718            let grad = self.parameter_gradient(param_name.as_str(), &loss_fn)?;
719            gradients.insert(param_name, grad);
720        }
721
722        Ok(gradients)
723    }
724
725    /// Compute gradient for a single parameter
726    fn parameter_gradient(
727        &self,
728        param_name: &str,
729        loss_fn: &impl Fn(&[VariationalGate]) -> f64,
730    ) -> QuantRS2Result<f64> {
731        let current_params = self.get_parameters();
732        let current_value = *current_params.get(param_name).ok_or_else(|| {
733            QuantRS2Error::InvalidInput(format!("Parameter {param_name} not found"))
734        })?;
735
736        // Create circuit copies with shifted parameters
737        let mut circuit_plus = self.clone_circuit();
738        let mut params_plus = current_params.clone();
739        params_plus.insert(param_name.to_string(), current_value + PI / 2.0);
740        circuit_plus.set_parameters(&params_plus)?;
741
742        let mut circuit_minus = self.clone_circuit();
743        let mut params_minus = current_params;
744        params_minus.insert(param_name.to_string(), current_value - PI / 2.0);
745        circuit_minus.set_parameters(&params_minus)?;
746
747        // Compute gradient via parameter shift
748        let loss_plus = loss_fn(&circuit_plus.gates);
749        let loss_minus = loss_fn(&circuit_minus.gates);
750
751        Ok((loss_plus - loss_minus) / 2.0)
752    }
753
754    /// Clone the circuit structure
755    fn clone_circuit(&self) -> Self {
756        Self {
757            gates: self.gates.clone(),
758            param_map: self.param_map.clone(),
759            num_qubits: self.num_qubits,
760        }
761    }
762}
763
/// Gradient-based optimizer for variational circuits
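///
/// Implements stochastic gradient descent with momentum: each `step` updates
/// `v <- momentum * v - learning_rate * grad` and then `theta <- theta + v`
/// for every parameter present in the gradient map.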
765#[derive(Debug, Clone)]
766pub struct VariationalOptimizer {
767    /// Learning rate
768    pub learning_rate: f64,
769    /// Momentum coefficient
770    pub momentum: f64,
771    /// Accumulated momentum
772    velocities: FxHashMap<String, f64>,
773}
774
775impl VariationalOptimizer {
776    /// Create a new optimizer
777    pub fn new(learning_rate: f64, momentum: f64) -> Self {
778        Self {
779            learning_rate,
780            momentum,
781            velocities: FxHashMap::default(),
782        }
783    }
784
785    /// Perform one optimization step
786    pub fn step(
787        &mut self,
788        circuit: &mut VariationalCircuit,
789        gradients: &FxHashMap<String, f64>,
790    ) -> QuantRS2Result<()> {
791        let mut new_params = circuit.get_parameters();
792
793        for (param_name, &grad) in gradients {
794            // Update velocity with momentum
795            let velocity = self.velocities.entry(param_name.clone()).or_insert(0.0);
796            *velocity = self
797                .momentum
798                .mul_add(*velocity, -(self.learning_rate * grad));
799
800            // Update parameter
801            if let Some(value) = new_params.get_mut(param_name) {
802                *value += *velocity;
803            }
804        }
805
806        circuit.set_parameters(&new_params)
807    }
808}
809
810/// Quantum autoencoder for data compression and feature learning
811#[derive(Debug, Clone)]
812pub struct QuantumAutoencoder {
813    /// Number of input qubits
814    pub input_qubits: usize,
815    /// Number of latent qubits (compressed representation)
816    pub latent_qubits: usize,
817    /// Encoder circuit
818    pub encoder: VariationalCircuit,
819    /// Decoder circuit
820    pub decoder: VariationalCircuit,
821    /// Training parameters
822    pub learning_rate: f64,
823    /// Optimizer for training
824    optimizer: VariationalOptimizer,
825}
826
827impl QuantumAutoencoder {
828    /// Create a new quantum autoencoder
829    pub fn new(input_qubits: usize, latent_qubits: usize, learning_rate: f64) -> Self {
830        let total_qubits = input_qubits + latent_qubits;
831
832        // Create encoder circuit (input + latent qubits)
833        let mut encoder = VariationalCircuit::new(total_qubits);
834
835        // Add parameterized encoding layers
836        for i in 0..input_qubits {
837            encoder.add_gate(VariationalGate::ry(
838                QubitId(i as u32),
839                format!("enc_ry_{i}"),
840                0.1 * (i as f64 + 1.0),
841            ));
842        }
843
844        // Add entangling gates between input and latent qubits
845        for i in 0..input_qubits {
846            for j in input_qubits..(input_qubits + latent_qubits) {
847                encoder.add_gate(VariationalGate::cry(
848                    QubitId(i as u32),
849                    QubitId(j as u32),
850                    format!("enc_cry_{i}_{j}"),
851                    0.05 * ((i + j) as f64 + 1.0),
852                ));
853            }
854        }
855
856        // Create decoder circuit (latent + output qubits)
857        let mut decoder = VariationalCircuit::new(total_qubits);
858
859        // Add decoding layers
860        for j in input_qubits..(input_qubits + latent_qubits) {
861            for i in 0..input_qubits {
862                decoder.add_gate(VariationalGate::cry(
863                    QubitId(j as u32),
864                    QubitId(i as u32),
865                    format!("dec_cry_{j}_{i}"),
866                    0.05 * ((i + j) as f64 + 1.0),
867                ));
868            }
869        }
870
871        for i in 0..input_qubits {
872            decoder.add_gate(VariationalGate::ry(
873                QubitId(i as u32),
874                format!("dec_ry_{i}"),
875                0.1 * (i as f64 + 1.0),
876            ));
877        }
878
879        Self {
880            input_qubits,
881            latent_qubits,
882            encoder,
883            decoder,
884            learning_rate,
885            optimizer: VariationalOptimizer::new(learning_rate, 0.9),
886        }
887    }
888
889    /// Train the autoencoder on a batch of training data
890    pub fn train_step(&mut self, training_data: &[Array1<Complex<f64>>]) -> QuantRS2Result<f64> {
891        let mut total_loss = 0.0;
892        let mut encoder_gradients = FxHashMap::default();
893        let mut decoder_gradients = FxHashMap::default();
894
895        for input_state in training_data {
896            // Forward pass
897            let encoded = self.encode(input_state)?;
898            let reconstructed = self.decode(&encoded)?;
899
900            // Compute reconstruction loss (fidelity-based)
901            let loss = self.reconstruction_loss(input_state, &reconstructed);
902            total_loss += loss;
903
904            // Compute gradients using parameter shift rule
905            let enc_grads = self.compute_encoder_gradients(input_state, &reconstructed)?;
906            let dec_grads = self.compute_decoder_gradients(&encoded, input_state)?;
907
908            // Accumulate gradients
909            for (param, grad) in enc_grads {
910                *encoder_gradients.entry(param).or_insert(0.0) += grad;
911            }
912            for (param, grad) in dec_grads {
913                *decoder_gradients.entry(param).or_insert(0.0) += grad;
914            }
915        }
916
917        // Average gradients
918        let batch_size = training_data.len() as f64;
919        for grad in encoder_gradients.values_mut() {
920            *grad /= batch_size;
921        }
922        for grad in decoder_gradients.values_mut() {
923            *grad /= batch_size;
924        }
925
926        // Update parameters
927        self.optimizer.step(&mut self.encoder, &encoder_gradients)?;
928        self.optimizer.step(&mut self.decoder, &decoder_gradients)?;
929
930        Ok(total_loss / batch_size)
931    }
932
933    /// Encode input data to latent representation
934    pub fn encode(
935        &self,
936        input_state: &Array1<Complex<f64>>,
937    ) -> QuantRS2Result<Array1<Complex<f64>>> {
938        if input_state.len() != 1 << self.input_qubits {
939            return Err(QuantRS2Error::InvalidInput(
940                "Input state dimension mismatch".to_string(),
941            ));
942        }
943
944        // Apply encoder circuit to input state
945        let full_state = self.apply_encoder_circuit(input_state)?;
946
947        // Extract latent qubits by tracing out input qubits
948        self.extract_latent_state(&full_state)
949    }
950
951    /// Decode latent representation back to output
952    pub fn decode(
953        &self,
954        latent_state: &Array1<Complex<f64>>,
955    ) -> QuantRS2Result<Array1<Complex<f64>>> {
956        if latent_state.len() != 1 << self.latent_qubits {
957            return Err(QuantRS2Error::InvalidInput(
958                "Latent state dimension mismatch".to_string(),
959            ));
960        }
961
962        // Prepare full state with latent qubits
963        let full_state = self.prepare_full_state_for_decoding(latent_state)?;
964
965        // Apply decoder circuit
966        let decoded_state = self.apply_decoder_circuit(&full_state)?;
967
968        // Extract output qubits
969        self.extract_output_state(&decoded_state)
970    }
971
972    /// Compute reconstruction loss between original and reconstructed states
973    fn reconstruction_loss(
974        &self,
975        original: &Array1<Complex<f64>>,
976        reconstructed: &Array1<Complex<f64>>,
977    ) -> f64 {
978        // Use negative fidelity as loss (want to maximize fidelity)
979        let fidelity = original
980            .iter()
981            .zip(reconstructed.iter())
982            .map(|(a, b)| a * b.conj())
983            .sum::<Complex<f64>>()
984            .norm_sqr();
985
986        1.0 - fidelity
987    }
988
989    /// Apply encoder circuit to input state
990    fn apply_encoder_circuit(
991        &self,
992        input_state: &Array1<Complex<f64>>,
993    ) -> QuantRS2Result<Array1<Complex<f64>>> {
994        // This is a simplified implementation
995        let total_dim = 1 << (self.input_qubits + self.latent_qubits);
996        let mut full_state = Array1::zeros(total_dim);
997
998        // Initialize with input state on input qubits, |0...0⟩ on latent qubits
999        for (i, &amp) in input_state.iter().enumerate() {
1000            full_state[i] = amp;
1001        }
1002
1003        Ok(full_state)
1004    }
1005
1006    /// Extract latent state by partial trace
1007    fn extract_latent_state(
1008        &self,
1009        full_state: &Array1<Complex<f64>>,
1010    ) -> QuantRS2Result<Array1<Complex<f64>>> {
1011        let latent_dim = 1 << self.latent_qubits;
1012        let mut latent_state: Array1<Complex<f64>> = Array1::zeros(latent_dim);
1013
1014        // Simplified partial trace over input qubits
1015        let input_dim = 1 << self.input_qubits;
1016        for j in 0..latent_dim {
1017            for i in 0..input_dim {
1018                let full_idx = i + j * input_dim;
1019                if full_idx < full_state.len() {
1020                    latent_state[j] += full_state[full_idx] * full_state[full_idx].conj();
1021                }
1022            }
1023            latent_state[j] = latent_state[j].sqrt();
1024        }
1025
1026        Ok(latent_state)
1027    }
1028
1029    /// Prepare full state for decoding
1030    fn prepare_full_state_for_decoding(
1031        &self,
1032        latent_state: &Array1<Complex<f64>>,
1033    ) -> QuantRS2Result<Array1<Complex<f64>>> {
1034        let total_dim = 1 << (self.input_qubits + self.latent_qubits);
1035        let mut full_state = Array1::zeros(total_dim);
1036
1037        // Place latent state in latent qubits, initialize output qubits to |0...0⟩
1038        let input_dim = 1 << self.input_qubits;
1039        for (j, &amp) in latent_state.iter().enumerate() {
1040            full_state[j * input_dim] = amp;
1041        }
1042
1043        Ok(full_state)
1044    }
1045
1046    /// Apply decoder circuit to state
1047    fn apply_decoder_circuit(
1048        &self,
1049        state: &Array1<Complex<f64>>,
1050    ) -> QuantRS2Result<Array1<Complex<f64>>> {
1051        Ok(state.clone())
1052    }
1053
1054    /// Extract output state from full state
1055    fn extract_output_state(
1056        &self,
1057        full_state: &Array1<Complex<f64>>,
1058    ) -> QuantRS2Result<Array1<Complex<f64>>> {
1059        let output_dim = 1 << self.input_qubits;
1060        let mut output_state = Array1::zeros(output_dim);
1061
1062        // Extract first part as output state (simplified)
1063        for i in 0..output_dim.min(full_state.len()) {
1064            output_state[i] = full_state[i];
1065        }
1066
1067        // Normalize
1068        let norm = output_state
1069            .iter()
1070            .map(|x| x.norm_sqr())
1071            .sum::<f64>()
1072            .sqrt();
1073        if norm > 1e-10 {
1074            for element in &mut output_state {
1075                *element /= norm;
1076            }
1077        }
1078
1079        Ok(output_state)
1080    }
1081
1082    /// Compute encoder gradients
1083    fn compute_encoder_gradients(
1084        &self,
1085        input_state: &Array1<Complex<f64>>,
1086        reconstructed: &Array1<Complex<f64>>,
1087    ) -> QuantRS2Result<FxHashMap<String, f64>> {
1088        let mut gradients = FxHashMap::default();
1089
1090        let loss = self.reconstruction_loss(input_state, reconstructed);
1091
        // Placeholder: assign each parameter a gradient proportional to the loss
        // (a full implementation would use parameter shift or finite differences)
1093        for param_name in self.encoder.parameter_names() {
1094            gradients.insert(param_name, loss * 0.1);
1095        }
1096
1097        Ok(gradients)
1098    }
1099
1100    /// Compute decoder gradients
1101    fn compute_decoder_gradients(
1102        &self,
1103        latent_state: &Array1<Complex<f64>>,
1104        target: &Array1<Complex<f64>>,
1105    ) -> QuantRS2Result<FxHashMap<String, f64>> {
1106        let mut gradients = FxHashMap::default();
1107
1108        let reconstructed = self.decode(latent_state)?;
1109        let loss = self.reconstruction_loss(target, &reconstructed);
1110
        // Placeholder: assign each parameter a gradient proportional to the loss
        // (a full implementation would use parameter shift or finite differences)
1112        for param_name in self.decoder.parameter_names() {
1113            gradients.insert(param_name, loss * 0.1);
1114        }
1115
1116        Ok(gradients)
1117    }
1118}
1119
1120/// Variational Quantum Eigensolver (VQE) with improved optimization
1121#[derive(Debug, Clone)]
1122pub struct VariationalQuantumEigensolver {
1123    /// Hamiltonian to diagonalize
1124    pub hamiltonian: Array2<Complex<f64>>,
1125    /// Ansatz circuit
1126    pub ansatz: VariationalCircuit,
1127    /// Optimizer
1128    optimizer: VariationalOptimizer,
1129    /// Energy history
1130    pub energy_history: Vec<f64>,
1131    /// Gradient history
1132    pub gradient_history: Vec<Vec<f64>>,
1133    /// Convergence tolerance
1134    pub tolerance: f64,
1135    /// Maximum iterations
1136    pub max_iterations: usize,
1137}
1138
1139impl VariationalQuantumEigensolver {
1140    /// Create a new VQE instance
1141    pub fn new(
1142        hamiltonian: Array2<Complex<f64>>,
1143        ansatz: VariationalCircuit,
1144        learning_rate: f64,
1145        tolerance: f64,
1146        max_iterations: usize,
1147    ) -> Self {
1148        Self {
1149            hamiltonian,
1150            ansatz,
1151            optimizer: VariationalOptimizer::new(learning_rate, 0.9),
1152            energy_history: Vec::new(),
1153            gradient_history: Vec::new(),
1154            tolerance,
1155            max_iterations,
1156        }
1157    }
1158
1159    /// Run VQE optimization to find ground state energy
1160    pub fn optimize(&mut self) -> QuantRS2Result<f64> {
1161        let mut prev_energy = f64::INFINITY;
1162
1163        for _iteration in 0..self.max_iterations {
1164            // Compute current energy
1165            let energy = self.compute_energy()?;
1166            self.energy_history.push(energy);
1167
1168            // Check convergence
1169            if (energy - prev_energy).abs() < self.tolerance {
1170                return Ok(energy);
1171            }
1172
1173            // Compute gradients
1174            let gradients = self.compute_energy_gradients()?;
1175            self.gradient_history
1176                .push(gradients.values().copied().collect());
1177
1178            // Update parameters
1179            self.optimizer.step(&mut self.ansatz, &gradients)?;
1180
1181            prev_energy = energy;
1182        }
1183
1184        Ok(self.energy_history.last().copied().unwrap_or(f64::INFINITY))
1185    }
1186
1187    /// Compute expectation value of Hamiltonian
1188    fn compute_energy(&self) -> QuantRS2Result<f64> {
1189        // Get current ansatz state
1190        let state = self.prepare_ansatz_state()?;
1191
1192        // Compute ⟨ψ|H|ψ⟩
1193        let h_psi = self.hamiltonian.dot(&state);
1194        let energy = state
1195            .iter()
1196            .zip(h_psi.iter())
1197            .map(|(psi, h_psi)| (psi.conj() * h_psi).re)
1198            .sum();
1199
1200        Ok(energy)
1201    }
1202
1203    /// Prepare the ansatz state vector
1204    fn prepare_ansatz_state(&self) -> QuantRS2Result<Array1<Complex<f64>>> {
1205        let dim = 1 << self.ansatz.num_qubits;
1206        let mut state = Array1::zeros(dim);
1207        state[0] = Complex::new(1.0, 0.0); // Start with |0...0⟩
1208
1209        // Apply ansatz gates (simplified implementation)
1210        for gate in &self.ansatz.gates {
1211            state = self.apply_gate_to_state(&state, gate)?;
1212        }
1213
1214        Ok(state)
1215    }
1216
1217    /// Apply a single gate to the state vector
1218    fn apply_gate_to_state(
1219        &self,
1220        state: &Array1<Complex<f64>>,
1221        gate: &VariationalGate,
1222    ) -> QuantRS2Result<Array1<Complex<f64>>> {
1223        // Simplified gate application
1224        let mut new_state = state.clone();
1225
1226        if gate.qubits.len() == 1 {
1227            // Single-qubit gate
1228            let matrix_vec = gate.matrix()?;
1229            let matrix = Array2::from_shape_vec((2, 2), matrix_vec).map_err(|e| {
1230                QuantRS2Error::InvalidInput(format!("Invalid gate matrix shape: {e}"))
1231            })?;
1232
1233            // Apply to specific qubit (simplified)
1234            let qubit_idx = gate.qubits[0].0;
1235            if qubit_idx < self.ansatz.num_qubits as u32 {
1236                // Simplified application - would need proper tensor product
1237                for i in 0..new_state.len() {
1238                    let bit = (i >> qubit_idx) & 1;
1239                    let new_bit = 1 - bit;
1240                    let j = i ^ (1 << qubit_idx);
1241
1242                    let old_val = new_state[i];
1243                    new_state[i] = matrix[[bit, bit]] * old_val + matrix[[bit, new_bit]] * state[j];
1244                }
1245            }
1246        }
1247
1248        Ok(new_state)
1249    }
1250
    /// Compute gradients of energy with respect to parameters
    fn compute_energy_gradients(&self) -> QuantRS2Result<FxHashMap<String, f64>> {
        // Evaluate ⟨ψ|H|ψ⟩ from the gate list supplied by the parameter-shift
        // routine, so that shifted parameters actually change the loss value.
        let loss_fn = |gates: &[VariationalGate]| -> f64 {
            let dim = 1 << self.ansatz.num_qubits;
            let mut state = Array1::zeros(dim);
            state[0] = Complex::new(1.0, 0.0);
            for gate in gates {
                if let Ok(next) = self.apply_gate_to_state(&state, gate) {
                    state = next;
                }
            }
            let h_psi = self.hamiltonian.dot(&state);
            state
                .iter()
                .zip(h_psi.iter())
                .map(|(psi, h)| (psi.conj() * h).re)
                .sum()
        };

        self.ansatz.compute_gradients(loss_fn)
    }
1260
1261    /// Get the optimized parameters
1262    pub fn get_optimal_parameters(&self) -> FxHashMap<String, f64> {
1263        self.ansatz.get_parameters()
1264    }
1265
1266    /// Get the final ground state
1267    pub fn get_ground_state(&self) -> QuantRS2Result<Array1<Complex<f64>>> {
1268        self.prepare_ansatz_state()
1269    }
1270}
1271
1272/// Hardware-efficient ansatz for VQE
1273pub struct HardwareEfficientAnsatz;
1274
1275impl HardwareEfficientAnsatz {
1276    /// Create a hardware-efficient ansatz circuit
1277    pub fn create(num_qubits: usize, num_layers: usize) -> VariationalCircuit {
1278        let mut circuit = VariationalCircuit::new(num_qubits);
1279
1280        for layer in 0..num_layers {
1281            // Single-qubit rotations
1282            for qubit in 0..num_qubits {
1283                circuit.add_gate(VariationalGate::ry(
1284                    QubitId(qubit as u32),
1285                    format!("ry_{layer}_{qubit}"),
1286                    0.1 * (layer as f64 + qubit as f64 + 1.0),
1287                ));
1288            }
1289
1290            // Entangling gates
1291            for qubit in 0..(num_qubits - 1) {
1292                circuit.add_gate(VariationalGate::cry(
1293                    QubitId(qubit as u32),
1294                    QubitId((qubit + 1) as u32),
1295                    format!("cry_{layer}_{qubit}"),
1296                    0.05 * (layer as f64 + qubit as f64 + 1.0),
1297                ));
1298            }
1299        }
1300
1301        circuit
1302    }
1303}
1304
1305/// QAOA (Quantum Approximate Optimization Algorithm) ansatz
1306pub struct QAOAAnsatz;
1307
1308impl QAOAAnsatz {
1309    /// Create QAOA ansatz for MaxCut problem
1310    pub fn create_maxcut(
1311        num_qubits: usize,
1312        num_layers: usize,
1313        edges: &[(usize, usize)],
1314    ) -> VariationalCircuit {
1315        let mut circuit = VariationalCircuit::new(num_qubits);
1316
1317        for layer in 0..num_layers {
            // Problem Hamiltonian evolution (CRY gates stand in for ZZ interactions here)
1319            for (i, &(u, v)) in edges.iter().enumerate() {
1320                if u < num_qubits && v < num_qubits {
1321                    circuit.add_gate(VariationalGate::cry(
1322                        QubitId(u as u32),
1323                        QubitId(v as u32),
1324                        format!("gamma_{layer}_{u}_{v}"),
1325                        0.1 * (layer as f64 + i as f64 + 1.0),
1326                    ));
1327                }
1328            }
1329
1330            // Mixer Hamiltonian evolution (X rotations)
1331            for qubit in 0..num_qubits {
1332                circuit.add_gate(VariationalGate::rx(
1333                    QubitId(qubit as u32),
1334                    format!("beta_{layer}_{qubit}"),
1335                    0.1 * (layer as f64 + qubit as f64 + 1.0),
1336                ));
1337            }
1338        }
1339
1340        circuit
1341    }
1342}
1343
1344#[cfg(test)]
1345mod tests {
1346    use super::*;
1347    use crate::matrix_ops::{DenseMatrix, QuantumMatrix};
1348
1349    #[test]
1350    fn test_dual_arithmetic() {
1351        let a = Dual::variable(2.0);
1352        let b = Dual::constant(3.0);
1353
1354        let c = a + b;
1355        assert_eq!(c.real, 5.0);
1356        assert_eq!(c.dual, 1.0);
1357
1358        let d = a * b;
1359        assert_eq!(d.real, 6.0);
1360        assert_eq!(d.dual, 3.0);
1361
1362        let e = a.sin();
1363        assert!((e.real - 2.0_f64.sin()).abs() < 1e-10);
1364        assert!((e.dual - 2.0_f64.cos()).abs() < 1e-10);
1365    }
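
    // A small sketch exercising the reverse-mode computation graph: for
    // f(a, b) = a * b the accumulated gradients are df/da = b and df/db = a,
    // and for e^(iθ) the real part of the derivative i·e^(iθ) is -sin(θ).
    #[test]
    fn test_computation_graph_backward() {
        let mut graph = ComputationGraph::new();
        let a = graph.parameter("a".to_string(), 2.0);
        let b = graph.parameter("b".to_string(), 3.0);
        let c = graph.mul(a, b);
        graph.backward(c);

        assert!((graph.get_gradient("a").expect("gradient for a") - 3.0).abs() < 1e-12);
        assert!((graph.get_gradient("b").expect("gradient for b") - 2.0).abs() < 1e-12);

        let mut graph2 = ComputationGraph::new();
        let theta = graph2.parameter("theta".to_string(), PI / 3.0);
        let node = graph2.exp_i(theta);
        graph2.backward(node);

        let grad = graph2.get_gradient("theta").expect("gradient for theta");
        assert!((grad - (-(PI / 3.0).sin())).abs() < 1e-12);
    }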
1366
1367    #[test]
1368    fn test_variational_rx_gate() {
1369        let gate = VariationalGate::rx(QubitId(0), "theta".to_string(), PI / 4.0);
1370
1371        let matrix_vec = gate.matrix().expect("RX gate matrix should be valid");
1372        assert_eq!(matrix_vec.len(), 4);
1373
1374        // Convert to Array2 for unitary check
1375        let matrix = Array2::from_shape_vec((2, 2), matrix_vec).expect("matrix shape is valid 2x2");
1376        let mat = DenseMatrix::new(matrix).expect("DenseMatrix creation should succeed");
1377        assert!(mat
1378            .is_unitary(1e-10)
1379            .expect("unitarity check should succeed"));
1380    }
1381
1382    #[test]
1383    fn test_parameter_shift_gradient() {
1384        // Use a specific angle
1385        let theta = PI / 3.0;
1386        let gate = VariationalGate::ry(QubitId(0), "phi".to_string(), theta);
1387
1388        // Simple loss function: expectation value of Z
1389        let loss_fn = |matrix: &Array2<Complex<f64>>| -> f64 {
            // Toy loss: the real part of the matrix trace, which is 2*cos(θ/2) for RY(θ)
            // (not a physical expectation value, but easy to reason about)
1392            (matrix[[0, 0]] + matrix[[1, 1]]).re
1393        };
1394
1395        let gradients = gate
1396            .gradient(loss_fn)
1397            .expect("gradient computation should succeed");
1398        assert_eq!(gradients.len(), 1);
1399
1400        // For RY(θ), the matrix trace is 2*cos(θ/2)
1401        // Using parameter shift rule with shifts of ±π/2:
1402        // gradient = [f(θ+π/2) - f(θ-π/2)] / 2
1403        // = [2*cos((θ+π/2)/2) - 2*cos((θ-π/2)/2)] / 2
1404        // = cos(θ/2 + π/4) - cos(θ/2 - π/4)
1405        let plus_shift = 2.0 * ((theta + PI / 2.0) / 2.0).cos();
1406        let minus_shift = 2.0 * ((theta - PI / 2.0) / 2.0).cos();
1407        let expected = (plus_shift - minus_shift) / 2.0;
1408
1409        // Allow for numerical precision
1410        assert!(
1411            (gradients[0] - expected).abs() < 1e-5,
1412            "Expected gradient: {}, got: {}",
1413            expected,
1414            gradients[0]
1415        );
1416    }
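
    // Sketch of the finite-difference mode: for the same trace-based loss, a
    // forward difference should approximate the analytic derivative
    // d/dθ 2*cos(θ/2) = -sin(θ/2).
    #[test]
    fn test_finite_difference_gradient() {
        let theta = PI / 3.0;
        let mut gate = VariationalGate::ry(QubitId(0), "phi".to_string(), theta);
        gate.diff_mode = DiffMode::FiniteDiff { epsilon: 1e-6 };

        let loss_fn =
            |matrix: &Array2<Complex<f64>>| -> f64 { (matrix[[0, 0]] + matrix[[1, 1]]).re };

        let gradients = gate
            .gradient(loss_fn)
            .expect("finite-difference gradient should succeed");

        let expected = -(theta / 2.0).sin();
        assert!((gradients[0] - expected).abs() < 1e-4);
    }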
1417
1418    #[test]
1419    fn test_variational_circuit() {
1420        let mut circuit = VariationalCircuit::new(2);
1421
1422        circuit.add_gate(VariationalGate::rx(QubitId(0), "theta1".to_string(), 0.1));
1423        circuit.add_gate(VariationalGate::ry(QubitId(1), "theta2".to_string(), 0.2));
1424        circuit.add_gate(VariationalGate::cry(
1425            QubitId(0),
1426            QubitId(1),
1427            "theta3".to_string(),
1428            0.3,
1429        ));
1430
1431        assert_eq!(circuit.gates.len(), 3);
1432        assert_eq!(circuit.parameter_names().len(), 3);
1433
1434        // Update parameters
1435        let mut new_params = FxHashMap::default();
1436        new_params.insert("theta1".to_string(), 0.5);
1437        new_params.insert("theta2".to_string(), 0.6);
1438        new_params.insert("theta3".to_string(), 0.7);
1439
1440        circuit
1441            .set_parameters(&new_params)
1442            .expect("set_parameters should succeed");
1443
1444        let params = circuit.get_parameters();
1445        assert_eq!(params.get("theta1"), Some(&0.5));
1446        assert_eq!(params.get("theta2"), Some(&0.6));
1447        assert_eq!(params.get("theta3"), Some(&0.7));
1448    }
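
    // Sketch of circuit-level gradients: with a single RY gate and a trace-based
    // loss, compute_gradients should return the parameter-shift estimate
    // [f(θ + π/2) - f(θ - π/2)] / 2.
    #[test]
    fn test_circuit_compute_gradients() {
        let theta = PI / 3.0;
        let mut circuit = VariationalCircuit::new(1);
        circuit.add_gate(VariationalGate::ry(QubitId(0), "phi".to_string(), theta));

        // Loss: sum of Re(trace) over all gate matrices
        let loss_fn = |gates: &[VariationalGate]| -> f64 {
            gates
                .iter()
                .filter_map(|g| g.matrix().ok())
                .map(|m| (m[0] + m[3]).re)
                .sum()
        };

        let gradients = circuit
            .compute_gradients(loss_fn)
            .expect("gradient computation should succeed");

        let expected = ((theta + PI / 2.0) / 2.0).cos() - ((theta - PI / 2.0) / 2.0).cos();
        assert!((gradients["phi"] - expected).abs() < 1e-10);
    }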
1449
1450    #[test]
1451    fn test_optimizer() {
1452        let mut circuit = VariationalCircuit::new(1);
1453        circuit.add_gate(VariationalGate::rx(QubitId(0), "theta".to_string(), 0.0));
1454
1455        let mut optimizer = VariationalOptimizer::new(0.1, 0.9);
1456
1457        // Dummy gradients
1458        let mut gradients = FxHashMap::default();
1459        gradients.insert("theta".to_string(), 1.0);
1460
1461        // Take optimization step
1462        optimizer
1463            .step(&mut circuit, &gradients)
1464            .expect("optimizer step should succeed");
1465
1466        let params = circuit.get_parameters();
1467        assert!(
1468            params
1469                .get("theta")
1470                .expect("theta parameter should exist")
1471                .abs()
1472                > 0.0
1473        );
1474    }
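
    // Sketch of momentum accumulation: two steps with the same unit gradient.
    // With lr = 0.1 and momentum = 0.9 the velocity is -0.1, then -0.19, so the
    // parameter moves from 0.0 to -0.1 and then to -0.29.
    #[test]
    fn test_optimizer_momentum_accumulation() {
        let mut circuit = VariationalCircuit::new(1);
        circuit.add_gate(VariationalGate::rx(QubitId(0), "theta".to_string(), 0.0));

        let mut optimizer = VariationalOptimizer::new(0.1, 0.9);
        let mut gradients = FxHashMap::default();
        gradients.insert("theta".to_string(), 1.0);

        optimizer
            .step(&mut circuit, &gradients)
            .expect("first step should succeed");
        optimizer
            .step(&mut circuit, &gradients)
            .expect("second step should succeed");

        let theta = circuit.get_parameters()["theta"];
        assert!((theta - (-0.29)).abs() < 1e-10);
    }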
1475
1476    #[test]
1477    fn test_quantum_autoencoder() {
1478        let mut autoencoder = QuantumAutoencoder::new(2, 1, 0.01);
1479
1480        // Create dummy training data
1481        let mut training_data = Vec::new();
1482        let state1 = Array1::from_vec(vec![
1483            Complex::new(1.0, 0.0),
1484            Complex::new(0.0, 0.0),
1485            Complex::new(0.0, 0.0),
1486            Complex::new(0.0, 0.0),
1487        ]);
1488        training_data.push(state1);
1489
1490        // Run one training step
1491        let loss = autoencoder
1492            .train_step(&training_data)
1493            .expect("train_step should succeed");
1494        assert!(loss >= 0.0);
1495
1496        // Test encoding and decoding
1497        let input = Array1::from_vec(vec![
1498            Complex::new(0.6, 0.0),
1499            Complex::new(0.8, 0.0),
1500            Complex::new(0.0, 0.0),
1501            Complex::new(0.0, 0.0),
1502        ]);
1503
1504        let encoded = autoencoder.encode(&input).expect("encode should succeed");
1505        assert_eq!(encoded.len(), 2); // 2^1 = 2 for 1 latent qubit
1506
1507        let decoded = autoencoder.decode(&encoded).expect("decode should succeed");
1508        assert_eq!(decoded.len(), 4); // 2^2 = 4 for 2 input qubits
1509    }
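
    // Sanity sketch for the fidelity-based loss: a normalized state compared
    // against itself should give (close to) zero loss.
    #[test]
    fn test_reconstruction_loss_identity() {
        let autoencoder = QuantumAutoencoder::new(2, 1, 0.01);
        let state = Array1::from_vec(vec![
            Complex::new(0.6, 0.0),
            Complex::new(0.8, 0.0),
            Complex::new(0.0, 0.0),
            Complex::new(0.0, 0.0),
        ]);

        let loss = autoencoder.reconstruction_loss(&state, &state);
        assert!(loss.abs() < 1e-10);
    }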
1510
1511    #[test]
1512    fn test_vqe() {
1513        // Create a simple 2x2 Hamiltonian (Pauli-Z)
1514        let hamiltonian = Array2::from_shape_vec(
1515            (2, 2),
1516            vec![
1517                Complex::new(1.0, 0.0),
1518                Complex::new(0.0, 0.0),
1519                Complex::new(0.0, 0.0),
1520                Complex::new(-1.0, 0.0),
1521            ],
1522        )
1523        .expect("Pauli-Z Hamiltonian shape is always valid 2x2");
1524
1525        // Create hardware-efficient ansatz
1526        let ansatz = HardwareEfficientAnsatz::create(1, 2);
1527
1528        let mut vqe = VariationalQuantumEigensolver::new(hamiltonian, ansatz, 0.01, 1e-6, 10);
1529
1530        let energy = vqe.optimize().expect("VQE optimization should succeed");
1531
1532        // For Pauli-Z, ground state energy should be close to -1 (or at least converging)
1533        // Note: This is a simplified VQE test, may not converge to exact ground state
1534        assert!(
1535            vqe.energy_history.len() <= 10,
1536            "Should not exceed max iterations"
1537        );
1538
1539        // Check that energy is reasonable (not NaN or infinite)
1540        assert!(energy.is_finite(), "Energy should be finite");
1541    }
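
    // Sketch checking that the simplified ansatz preparation keeps the state
    // normalized and yields a Pauli-Z expectation value inside [-1, 1].
    #[test]
    fn test_vqe_state_normalization() {
        let hamiltonian = Array2::from_shape_vec(
            (2, 2),
            vec![
                Complex::new(1.0, 0.0),
                Complex::new(0.0, 0.0),
                Complex::new(0.0, 0.0),
                Complex::new(-1.0, 0.0),
            ],
        )
        .expect("Pauli-Z Hamiltonian shape is always valid 2x2");

        let ansatz = HardwareEfficientAnsatz::create(1, 2);
        let vqe = VariationalQuantumEigensolver::new(hamiltonian, ansatz, 0.01, 1e-6, 10);

        let state = vqe.get_ground_state().expect("ansatz state should be valid");
        let norm: f64 = state.iter().map(|x| x.norm_sqr()).sum();
        assert!((norm - 1.0).abs() < 1e-10);

        let energy = vqe.compute_energy().expect("energy should be computable");
        assert!((-1.0..=1.0).contains(&energy));
    }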
1542
1543    #[test]
1544    fn test_qaoa_ansatz() {
1545        let edges = vec![(0, 1), (1, 2), (2, 0)]; // Triangle graph
1546        let circuit = QAOAAnsatz::create_maxcut(3, 2, &edges);
1547
1548        assert_eq!(circuit.num_qubits, 3);
1549        assert!(!circuit.gates.is_empty());
1550
1551        // Should have both gamma and beta parameters
1552        let param_names = circuit.parameter_names();
1553        let has_gamma = param_names.iter().any(|name| name.starts_with("gamma"));
1554        let has_beta = param_names.iter().any(|name| name.starts_with("beta"));
1555
1556        assert!(has_gamma, "Should have gamma parameters");
1557        assert!(has_beta, "Should have beta parameters");
1558    }
1559
1560    #[test]
1561    fn test_hardware_efficient_ansatz() {
1562        let circuit = HardwareEfficientAnsatz::create(3, 2);
1563
1564        assert_eq!(circuit.num_qubits, 3);
1565        assert!(!circuit.gates.is_empty());
1566
1567        // Should have RY and CRY gates
1568        let param_names = circuit.parameter_names();
1569        let has_ry = param_names.iter().any(|name| name.starts_with("ry"));
1570        let has_cry = param_names.iter().any(|name| name.starts_with("cry"));
1571
1572        assert!(has_ry, "Should have RY parameters");
1573        assert!(has_cry, "Should have CRY parameters");
1574    }
1575}