use crate::circuit_integration::{QuantumLayer, QuantumMLExecutor};
use crate::error::{MLError, Result};
use crate::simulator_backends::{DynamicCircuit, Observable, SimulationResult, SimulatorBackend};
use quantrs2_circuit::prelude::*;
use quantrs2_core::prelude::*;
use scirs2_core::ndarray::{s, Array1, Array2, Array3, Array4, ArrayD, Axis};
use std::collections::HashMap;
use std::sync::Arc;

/// A parameterized quantum circuit exposed as a differentiable layer.
pub struct QuantumCircuitLayer {
    /// The 8-qubit circuit template evaluated by this layer.
    circuit: Circuit<8>,
    /// Names of the symbolic parameters, in binding order.
    symbols: Vec<String>,
    /// Observable whose expectation value is the layer output.
    observable: Observable,
    /// Simulator backend used for expectation values and gradients.
    backend: Arc<dyn SimulatorBackend>,
    /// Whether gradients may be requested from this layer.
    differentiable: bool,
    /// Optional number of circuit execution repetitions.
    repetitions: Option<usize>,
}

impl QuantumCircuitLayer {
    pub fn new(
        circuit: Circuit<8>,
        symbols: Vec<String>,
        observable: Observable,
        backend: Arc<dyn SimulatorBackend>,
    ) -> Self {
        Self {
            circuit,
            symbols,
            observable,
            backend,
            differentiable: true,
            repetitions: None,
        }
    }

    pub fn set_differentiable(mut self, differentiable: bool) -> Self {
        self.differentiable = differentiable;
        self
    }

    pub fn set_repetitions(mut self, repetitions: usize) -> Self {
        self.repetitions = Some(repetitions);
        self
    }

    /// Evaluates the circuit for each sample in the batch and returns the
    /// expectation value of the observable.
    pub fn forward(&self, inputs: &Array2<f64>, parameters: &Array2<f64>) -> Result<Array1<f64>> {
        let batch_size = inputs.nrows();
        let mut outputs = Array1::zeros(batch_size);

        for batch_idx in 0..batch_size {
            let input_data = inputs.row(batch_idx);
            let param_data = parameters.row(batch_idx % parameters.nrows());
            // Inputs are bound first, followed by the trainable parameters.
            let combined_params: Vec<f64> = input_data
                .iter()
                .chain(param_data.iter())
                .copied()
                .collect();

            let dynamic_circuit =
                crate::simulator_backends::DynamicCircuit::from_circuit(self.circuit.clone())?;
            let expectation = self.backend.expectation_value(
                &dynamic_circuit,
                &combined_params,
                &self.observable,
            )?;

            outputs[batch_idx] = expectation;
        }

        Ok(outputs)
    }

    /// Computes input and parameter gradients via the parameter-shift rule,
    /// scaled by the upstream gradients.
    pub fn compute_gradients(
        &self,
        inputs: &Array2<f64>,
        parameters: &Array2<f64>,
        upstream_gradients: &Array1<f64>,
    ) -> Result<(Array2<f64>, Array2<f64>)> {
        if !self.differentiable {
            return Err(MLError::InvalidConfiguration(
                "Layer is not differentiable".to_string(),
            ));
        }

        let batch_size = inputs.nrows();
        let num_input_params = inputs.ncols();
        let num_trainable_params = parameters.ncols();

        let mut input_gradients = Array2::zeros((batch_size, num_input_params));
        let mut param_gradients = Array2::zeros((batch_size, num_trainable_params));

        for batch_idx in 0..batch_size {
            let input_data = inputs.row(batch_idx);
            let param_data = parameters.row(batch_idx % parameters.nrows());
            let combined_params: Vec<f64> = input_data
                .iter()
                .chain(param_data.iter())
                .copied()
                .collect();

            let dynamic_circuit =
                crate::simulator_backends::DynamicCircuit::from_circuit(self.circuit.clone())?;
            let gradients = self.backend.compute_gradients(
                &dynamic_circuit,
                &combined_params,
                &self.observable,
                crate::simulator_backends::GradientMethod::ParameterShift,
            )?;

            // The first `num_input_params` gradient entries correspond to the
            // encoded inputs; the remainder belong to the trainable parameters.
            let upstream_grad = upstream_gradients[batch_idx];
            for (i, grad) in gradients.iter().enumerate() {
                if i < num_input_params {
                    input_gradients[[batch_idx, i]] = grad * upstream_grad;
                } else {
                    param_gradients[[batch_idx, i - num_input_params]] = grad * upstream_grad;
                }
            }
        }

        Ok((input_gradients, param_gradients))
    }
}
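
// Illustrative usage sketch (not part of the public API): builds a small
// two-qubit circuit with the same builder calls used in the tests below,
// wraps it in a `QuantumCircuitLayer`, and runs one batched forward pass.
// The `StatevectorBackend` and the error conversions behind `?` are assumed
// to behave as in the tests at the bottom of this file.
#[allow(dead_code)]
fn example_quantum_circuit_layer() -> Result<Array1<f64>> {
    use crate::simulator_backends::StatevectorBackend;

    let mut builder = CircuitBuilder::new();
    builder.ry(0, 0.0)?;
    builder.ry(1, 0.0)?;
    builder.cnot(0, 1)?;
    let circuit = builder.build();

    let layer = QuantumCircuitLayer::new(
        circuit,
        vec!["x0".to_string(), "theta0".to_string()],
        Observable::PauliZ(vec![0]),
        Arc::new(StatevectorBackend::new(8)),
    )
    .set_repetitions(1024);

    // Two samples, each contributing one input feature and one trainable
    // parameter (inputs are bound before the trainable parameters).
    let inputs = Array2::from_shape_vec((2, 1), vec![0.1, 0.2]).expect("valid input shape");
    let parameters =
        Array2::from_shape_vec((2, 1), vec![0.5, 0.5]).expect("valid parameter shape");
    layer.forward(&inputs, &parameters)
}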

/// A parameterized quantum circuit (PQC) layer with input scaling,
/// configurable parameter initialization, and optional regularization.
pub struct PQCLayer {
    layer: QuantumCircuitLayer,
    input_scaling: f64,
    init_strategy: ParameterInitStrategy,
    regularization: Option<RegularizationType>,
}

/// Strategy used to initialize trainable parameters.
#[derive(Debug, Clone)]
pub enum ParameterInitStrategy {
    RandomNormal { mean: f64, std: f64 },
    RandomUniform { low: f64, high: f64 },
    Zeros,
    Ones,
    Custom(Vec<f64>),
}

/// Regularization applied to the parameter gradients.
#[derive(Debug, Clone)]
pub enum RegularizationType {
    L1(f64),
    L2(f64),
    Dropout(f64),
}

impl PQCLayer {
    pub fn new(
        circuit: Circuit<8>,
        symbols: Vec<String>,
        observable: Observable,
        backend: Arc<dyn SimulatorBackend>,
    ) -> Self {
        let layer = QuantumCircuitLayer::new(circuit, symbols, observable, backend);

        Self {
            layer,
            input_scaling: 1.0,
            init_strategy: ParameterInitStrategy::RandomNormal {
                mean: 0.0,
                std: 0.1,
            },
            regularization: None,
        }
    }

    pub fn with_input_scaling(mut self, scaling: f64) -> Self {
        self.input_scaling = scaling;
        self
    }

    pub fn with_initialization(mut self, strategy: ParameterInitStrategy) -> Self {
        self.init_strategy = strategy;
        self
    }

    pub fn with_regularization(mut self, regularization: RegularizationType) -> Self {
        self.regularization = Some(regularization);
        self
    }

    /// Creates a `(batch_size, num_params)` parameter matrix according to the
    /// configured initialization strategy.
    pub fn initialize_parameters(&self, batch_size: usize, num_params: usize) -> Array2<f64> {
        match &self.init_strategy {
            ParameterInitStrategy::RandomNormal { mean, std } => {
                Array2::from_shape_fn((batch_size, num_params), |_| {
                    // Box-Muller transform: two uniform samples yield one
                    // standard-normal sample.
                    let u1 = fastrand::f64();
                    let u2 = fastrand::f64();
                    let z0 = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
                    mean + std * z0
                })
            }
            ParameterInitStrategy::RandomUniform { low, high } => {
                Array2::from_shape_fn((batch_size, num_params), |_| {
                    fastrand::f64() * (high - low) + low
                })
            }
            ParameterInitStrategy::Zeros => Array2::zeros((batch_size, num_params)),
            ParameterInitStrategy::Ones => Array2::ones((batch_size, num_params)),
            ParameterInitStrategy::Custom(values) => {
                let mut params = Array2::zeros((batch_size, num_params));
                for i in 0..batch_size {
                    for j in 0..num_params.min(values.len()) {
                        params[[i, j]] = values[j];
                    }
                }
                params
            }
        }
    }

    pub fn forward(&self, inputs: &Array2<f64>, parameters: &Array2<f64>) -> Result<Array1<f64>> {
        let scaled_inputs = inputs * self.input_scaling;

        let outputs = self.layer.forward(&scaled_inputs, parameters)?;

        Ok(outputs)
    }

    pub fn compute_gradients(
        &self,
        inputs: &Array2<f64>,
        parameters: &Array2<f64>,
        upstream_gradients: &Array1<f64>,
    ) -> Result<(Array2<f64>, Array2<f64>)> {
        let scaled_inputs = inputs * self.input_scaling;
        let (mut input_grads, mut param_grads) =
            self.layer
                .compute_gradients(&scaled_inputs, parameters, upstream_gradients)?;

        // Chain rule through the input scaling.
        input_grads *= self.input_scaling;

        if let Some(ref reg) = self.regularization {
            match reg {
                RegularizationType::L1(lambda) => {
                    param_grads += &(parameters.mapv(|x| lambda * x.signum()));
                }
                RegularizationType::L2(lambda) => {
                    param_grads += &(parameters * (2.0 * lambda));
                }
                RegularizationType::Dropout(_) => {
                    // Dropout does not modify the parameter gradients here.
                }
            }
        }

        Ok((input_grads, param_grads))
    }
}
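
// Configuration sketch for `PQCLayer` (illustrative only; the builder calls
// mirror those in the tests below, and the scaling/initialization/
// regularization choices here are arbitrary examples, not recommended
// defaults).
#[allow(dead_code)]
fn example_pqc_layer_setup() -> Result<Array2<f64>> {
    use crate::simulator_backends::StatevectorBackend;

    let mut builder = CircuitBuilder::new();
    builder.ry(0, 0.0)?;
    let circuit = builder.build();

    let pqc = PQCLayer::new(
        circuit,
        vec!["theta0".to_string()],
        Observable::PauliZ(vec![0]),
        Arc::new(StatevectorBackend::new(8)),
    )
    .with_input_scaling(std::f64::consts::PI)
    .with_initialization(ParameterInitStrategy::RandomUniform {
        low: -0.1,
        high: 0.1,
    })
    .with_regularization(RegularizationType::L2(1e-3));

    // One parameter row per sample for a batch of four samples.
    Ok(pqc.initialize_parameters(4, 1))
}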

pub struct QuantumConvolutionalLayer {
    pqc: PQCLayer,
    filter_size: (usize, usize),
    stride: (usize, usize),
    padding: PaddingType,
}

#[derive(Debug, Clone)]
pub enum PaddingType {
    Valid,
    Same,
    Custom(usize),
}

impl QuantumConvolutionalLayer {
    pub fn new(
        circuit: Circuit<8>,
        symbols: Vec<String>,
        observable: Observable,
        backend: Arc<dyn SimulatorBackend>,
        filter_size: (usize, usize),
    ) -> Self {
        let pqc = PQCLayer::new(circuit, symbols, observable, backend);

        Self {
            pqc,
            filter_size,
            stride: (1, 1),
            padding: PaddingType::Valid,
        }
    }

    pub fn with_stride(mut self, stride: (usize, usize)) -> Self {
        self.stride = stride;
        self
    }

    pub fn with_padding(mut self, padding: PaddingType) -> Self {
        self.padding = padding;
        self
    }

    /// Slides the quantum filter over an NHWC input and returns a
    /// single-channel feature map.
    ///
    /// NOTE: output dimensions are computed for valid padding only; the
    /// configured `padding` is not yet applied here.
    pub fn forward(&self, inputs: &Array4<f64>, parameters: &Array2<f64>) -> Result<Array4<f64>> {
        let (batch_size, height, width, channels) = inputs.dim();
        let (filter_h, filter_w) = self.filter_size;
        let (stride_h, stride_w) = self.stride;

        let output_h = (height - filter_h) / stride_h + 1;
        let output_w = (width - filter_w) / stride_w + 1;

        let mut outputs = Array4::zeros((batch_size, output_h, output_w, 1));

        for batch in 0..batch_size {
            for out_y in 0..output_h {
                for out_x in 0..output_w {
                    let start_y = out_y * stride_h;
                    let start_x = out_x * stride_w;

                    // Flatten the patch into a single input row for the PQC.
                    let mut patch_data = Array2::zeros((1, filter_h * filter_w * channels));
                    let mut patch_idx = 0;

                    for dy in 0..filter_h {
                        for dx in 0..filter_w {
                            for c in 0..channels {
                                if start_y + dy < height && start_x + dx < width {
                                    patch_data[[0, patch_idx]] =
                                        inputs[[batch, start_y + dy, start_x + dx, c]];
                                }
                                patch_idx += 1;
                            }
                        }
                    }

                    let result = self.pqc.forward(&patch_data, parameters)?;
                    outputs[[batch, out_y, out_x, 0]] = result[0];
                }
            }
        }

        Ok(outputs)
    }
}
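
// Worked sizing example for the valid-padding formula used by
// `QuantumConvolutionalLayer::forward` above:
// output = (input - filter) / stride + 1 (integer division per spatial axis).
// Illustrative helper only.
#[allow(dead_code)]
fn example_valid_output_size(
    input: (usize, usize),
    filter: (usize, usize),
    stride: (usize, usize),
) -> (usize, usize) {
    // e.g. a 28x28 input with a 3x3 filter and stride 2 yields a 13x13 map.
    (
        (input.0 - filter.0) / stride.0 + 1,
        (input.1 - filter.1) / stride.1 + 1,
    )
}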

pub struct TFQModel {
    layers: Vec<Box<dyn TFQLayer>>,
    input_shape: Vec<usize>,
    loss_function: TFQLossFunction,
    optimizer: TFQOptimizer,
}

pub trait TFQLayer: Send + Sync {
    fn forward(&self, inputs: &ArrayD<f64>) -> Result<ArrayD<f64>>;

    fn backward(&self, upstream_gradients: &ArrayD<f64>) -> Result<ArrayD<f64>>;

    fn get_parameters(&self) -> Vec<Array1<f64>>;

    fn set_parameters(&mut self, params: Vec<Array1<f64>>) -> Result<()>;

    fn name(&self) -> &str;
}

#[derive(Debug, Clone)]
pub enum TFQLossFunction {
    MeanSquaredError,
    BinaryCrossentropy,
    CategoricalCrossentropy,
    Hinge,
    Custom(String),
}

#[derive(Debug, Clone)]
pub enum TFQOptimizer {
    Adam {
        learning_rate: f64,
        beta1: f64,
        beta2: f64,
        epsilon: f64,
    },
    SGD {
        learning_rate: f64,
        momentum: f64,
    },
    RMSprop {
        learning_rate: f64,
        rho: f64,
        epsilon: f64,
    },
}

impl TFQModel {
    pub fn new(input_shape: Vec<usize>) -> Self {
        Self {
            layers: Vec::new(),
            input_shape,
            loss_function: TFQLossFunction::MeanSquaredError,
            optimizer: TFQOptimizer::Adam {
                learning_rate: 0.001,
                beta1: 0.9,
                beta2: 0.999,
                epsilon: 1e-8,
            },
        }
    }

    pub fn add_layer(&mut self, layer: Box<dyn TFQLayer>) {
        self.layers.push(layer);
    }

    pub fn set_loss(mut self, loss: TFQLossFunction) -> Self {
        self.loss_function = loss;
        self
    }

    pub fn set_optimizer(mut self, optimizer: TFQOptimizer) -> Self {
        self.optimizer = optimizer;
        self
    }

    /// Validates the model configuration before training.
    pub fn compile(&mut self) -> Result<()> {
        if self.layers.is_empty() {
            return Err(MLError::InvalidConfiguration(
                "Model must have at least one layer".to_string(),
            ));
        }

        Ok(())
    }

    /// Runs a forward pass through every layer in sequence.
    pub fn predict(&self, inputs: &ArrayD<f64>) -> Result<ArrayD<f64>> {
        let mut current = inputs.clone();

        for layer in &self.layers {
            current = layer.forward(&current)?;
        }

        Ok(current)
    }

    /// Performs one forward/backward pass and returns the loss value.
    pub fn train_step(&mut self, inputs: &ArrayD<f64>, targets: &ArrayD<f64>) -> Result<f64> {
        let predictions = self.predict(inputs)?;

        let loss = self.compute_loss(&predictions, targets)?;

        let mut gradients = self.compute_loss_gradients(&predictions, targets)?;

        // Backpropagate through the layers in reverse order.
        for layer in self.layers.iter().rev() {
            gradients = layer.backward(&gradients)?;
        }

        self.update_parameters()?;

        Ok(loss)
    }

    fn compute_loss(&self, predictions: &ArrayD<f64>, targets: &ArrayD<f64>) -> Result<f64> {
        match &self.loss_function {
            TFQLossFunction::MeanSquaredError => {
                let diff = predictions - targets;
                diff.mapv(|x| x * x).mean().ok_or_else(|| {
                    MLError::InvalidConfiguration("Cannot compute mean of empty array".to_string())
                })
            }
            TFQLossFunction::BinaryCrossentropy => {
                let epsilon = 1e-15;
                let clipped_preds = predictions.mapv(|x| x.max(epsilon).min(1.0 - epsilon));
                let loss = targets * clipped_preds.mapv(|x| x.ln())
                    + (1.0 - targets) * clipped_preds.mapv(|x| (1.0 - x).ln());
                let mean_loss = loss.mean().ok_or_else(|| {
                    MLError::InvalidConfiguration("Cannot compute mean of empty array".to_string())
                })?;
                Ok(-mean_loss)
            }
            _ => Err(MLError::InvalidConfiguration(
                "Loss function not implemented".to_string(),
            )),
        }
    }

    fn compute_loss_gradients(
        &self,
        predictions: &ArrayD<f64>,
        targets: &ArrayD<f64>,
    ) -> Result<ArrayD<f64>> {
        match &self.loss_function {
            TFQLossFunction::MeanSquaredError => {
                Ok(2.0 * (predictions - targets) / predictions.len() as f64)
            }
            TFQLossFunction::BinaryCrossentropy => {
                let epsilon = 1e-15;
                let clipped_preds = predictions.mapv(|x| x.max(epsilon).min(1.0 - epsilon));
                Ok((clipped_preds.clone() - targets)
                    / (clipped_preds.clone() * (1.0 - &clipped_preds)))
            }
            _ => Err(MLError::InvalidConfiguration(
                "Loss gradient not implemented".to_string(),
            )),
        }
    }

    fn update_parameters(&mut self) -> Result<()> {
        // Optimizer-driven parameter updates are not implemented yet.
        Ok(())
    }
}
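
// Model configuration sketch (illustrative only): mirrors the builder-style
// setup used in `test_tfq_model` below. Note that `compile` rejects a model
// without layers, so a `TFQLayer` implementation must be added via
// `add_layer` before compiling or training.
#[allow(dead_code)]
fn example_tfq_model_config() -> TFQModel {
    TFQModel::new(vec![2])
        .set_loss(TFQLossFunction::BinaryCrossentropy)
        .set_optimizer(TFQOptimizer::SGD {
            learning_rate: 0.05,
            momentum: 0.9,
        })
}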

pub struct QuantumDataset {
    circuits: Vec<DynamicCircuit>,
    parameters: Array2<f64>,
    labels: Array1<f64>,
    batch_size: usize,
}

impl QuantumDataset {
    pub fn new(
        circuits: Vec<Circuit<8>>,
        parameters: Array2<f64>,
        labels: Array1<f64>,
        batch_size: usize,
    ) -> Result<Self> {
        let dynamic_circuits: std::result::Result<Vec<DynamicCircuit>, crate::error::MLError> =
            circuits
                .into_iter()
                .map(|c| DynamicCircuit::from_circuit(c))
                .collect();

        Ok(Self {
            circuits: dynamic_circuits?,
            parameters,
            labels,
            batch_size,
        })
    }

    pub fn batches(&self) -> QuantumDatasetIterator {
        QuantumDatasetIterator::new(self)
    }

    pub fn shuffle(&mut self) {
        let n = self.circuits.len();
        let mut indices: Vec<usize> = (0..n).collect();

        // Fisher-Yates shuffle of the index permutation.
        for i in (1..n).rev() {
            let j = fastrand::usize(0..=i);
            indices.swap(i, j);
        }

        let mut new_circuits = Vec::with_capacity(n);
        let mut new_parameters = Array2::zeros(self.parameters.dim());
        let mut new_labels = Array1::zeros(self.labels.dim());

        for (new_idx, &old_idx) in indices.iter().enumerate() {
            new_circuits.push(self.circuits[old_idx].clone());
            new_parameters
                .row_mut(new_idx)
                .assign(&self.parameters.row(old_idx));
            new_labels[new_idx] = self.labels[old_idx];
        }

        self.circuits = new_circuits;
        self.parameters = new_parameters;
        self.labels = new_labels;
    }
}
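
// Usage sketch for `QuantumDataset` (illustrative only): builds a tiny
// dataset from empty circuits, as in `test_quantum_dataset` below, and
// iterates over mini-batches of size 2.
#[allow(dead_code)]
fn example_quantum_dataset() -> Result<()> {
    let circuits = vec![
        CircuitBuilder::new().build(),
        CircuitBuilder::new().build(),
        CircuitBuilder::new().build(),
    ];
    let parameters =
        Array2::from_shape_vec((3, 2), vec![0.1, 0.2, 0.3, 0.4, 0.5, 0.6]).expect("valid shape");
    let labels = Array1::from_vec(vec![0.0, 1.0, 0.0]);

    let mut dataset = QuantumDataset::new(circuits, parameters, labels, 2)?;
    dataset.shuffle();

    for (batch_circuits, batch_parameters, batch_labels) in dataset.batches() {
        // The final batch may be smaller than the configured batch size.
        assert_eq!(batch_circuits.len(), batch_parameters.nrows());
        assert_eq!(batch_parameters.nrows(), batch_labels.len());
    }
    Ok(())
}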

pub struct QuantumDatasetIterator<'a> {
    dataset: &'a QuantumDataset,
    current_batch: usize,
    total_batches: usize,
}

impl<'a> QuantumDatasetIterator<'a> {
    fn new(dataset: &'a QuantumDataset) -> Self {
        let total_batches = (dataset.circuits.len() + dataset.batch_size - 1) / dataset.batch_size;
        Self {
            dataset,
            current_batch: 0,
            total_batches,
        }
    }
}

impl<'a> Iterator for QuantumDatasetIterator<'a> {
    type Item = (Vec<DynamicCircuit>, Array2<f64>, Array1<f64>);

    fn next(&mut self) -> Option<Self::Item> {
        if self.current_batch >= self.total_batches {
            return None;
        }

        let start_idx = self.current_batch * self.dataset.batch_size;
        let end_idx =
            ((self.current_batch + 1) * self.dataset.batch_size).min(self.dataset.circuits.len());

        let batch_circuits = self.dataset.circuits[start_idx..end_idx].to_vec();
        let batch_parameters = self
            .dataset
            .parameters
            .slice(s![start_idx..end_idx, ..])
            .to_owned();
        let batch_labels = self.dataset.labels.slice(s![start_idx..end_idx]).to_owned();

        self.current_batch += 1;
        Some((batch_circuits, batch_parameters, batch_labels))
    }
}

pub mod tfq_utils {
    use super::*;

    pub fn circuit_to_tfq_format(circuit: &DynamicCircuit) -> Result<TFQCircuitFormat> {
        // Gate-by-gate translation is not implemented yet, so the gate list
        // is left empty and only the qubit count is carried over.
        let tfq_gates: Vec<TFQGate> = Vec::new();

        Ok(TFQCircuitFormat {
            gates: tfq_gates,
            num_qubits: circuit.num_qubits(),
        })
    }

    pub fn create_data_encoding_circuit(
        num_qubits: usize,
        encoding_type: DataEncodingType,
    ) -> Result<DynamicCircuit> {
        let mut builder: Circuit<8> = CircuitBuilder::new();

        match encoding_type {
            DataEncodingType::Amplitude => {
                for qubit in 0..num_qubits {
                    builder.ry(qubit, 0.0)?;
                }
            }
            DataEncodingType::Angle => {
                for qubit in 0..num_qubits {
                    builder.rz(qubit, 0.0)?;
                }
            }
            DataEncodingType::Basis => {
                for qubit in 0..num_qubits {
                    builder.x(qubit)?;
                }
            }
        }

        let circuit = builder.build();
        DynamicCircuit::from_circuit(circuit)
    }

    pub fn create_hardware_efficient_ansatz(
        num_qubits: usize,
        layers: usize,
    ) -> Result<DynamicCircuit> {
        let mut builder: Circuit<8> = CircuitBuilder::new();

        for layer in 0..layers {
            // Single-qubit rotation layer.
            for qubit in 0..num_qubits {
                builder.ry(qubit, 0.0)?;
                builder.rz(qubit, 0.0)?;
            }

            // Linear entangling layer.
            for qubit in 0..num_qubits - 1 {
                builder.cnot(qubit, qubit + 1)?;
            }

            // Close the entangling ring between layers.
            if layer < layers - 1 && num_qubits > 2 {
                builder.cnot(num_qubits - 1, 0)?;
            }
        }

        let circuit = builder.build();
        DynamicCircuit::from_circuit(circuit)
    }

    pub fn batch_execute_circuits(
        circuits: &[DynamicCircuit],
        parameters: &Array2<f64>,
        observables: &[Observable],
        backend: &dyn SimulatorBackend,
    ) -> Result<Array2<f64>> {
        let batch_size = circuits.len();
        let num_observables = observables.len();
        let mut results = Array2::zeros((batch_size, num_observables));

        for (circuit_idx, circuit) in circuits.iter().enumerate() {
            let params = parameters.row(circuit_idx % parameters.nrows());
            let params_slice = params.as_slice().ok_or_else(|| {
                MLError::InvalidConfiguration("Parameters must be contiguous in memory".to_string())
            })?;

            for (obs_idx, observable) in observables.iter().enumerate() {
                let expectation = backend.expectation_value(circuit, params_slice, observable)?;
                results[[circuit_idx, obs_idx]] = expectation;
            }
        }

        Ok(results)
    }
}
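
// Usage sketch for the helpers in `tfq_utils` (illustrative only): creates an
// angle-encoding circuit and a two-layer hardware-efficient ansatz, as in
// `test_tfq_utils` below, and checks their qubit counts.
#[allow(dead_code)]
fn example_tfq_utils_usage() -> Result<()> {
    let encoder = tfq_utils::create_data_encoding_circuit(3, DataEncodingType::Angle)?;
    let ansatz = tfq_utils::create_hardware_efficient_ansatz(4, 2)?;

    assert_eq!(encoder.num_qubits(), 3);
    assert_eq!(ansatz.num_qubits(), 4);
    Ok(())
}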

#[derive(Debug, Clone)]
pub struct TFQCircuitFormat {
    gates: Vec<TFQGate>,
    num_qubits: usize,
}

#[derive(Debug, Clone)]
pub struct TFQGate {
    gate_type: String,
    qubits: Vec<usize>,
    parameters: Vec<f64>,
}

#[derive(Debug, Clone)]
pub enum DataEncodingType {
    Amplitude,
    Angle,
    Basis,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::simulator_backends::{BackendCapabilities, StatevectorBackend};

    #[test]
    #[ignore]
    fn test_quantum_circuit_layer() {
        let mut builder = CircuitBuilder::new();
        builder.ry(0, 0.0).expect("RY gate should succeed");
        builder.ry(1, 0.0).expect("RY gate should succeed");
        builder.cnot(0, 1).expect("CNOT gate should succeed");
        let circuit = builder.build();

        let symbols = vec!["theta1".to_string(), "theta2".to_string()];
        let observable = Observable::PauliZ(vec![0, 1]);
        let backend = Arc::new(StatevectorBackend::new(8));

        let layer = QuantumCircuitLayer::new(circuit, symbols, observable, backend);

        let inputs = Array2::from_shape_vec((2, 2), vec![0.1, 0.2, 0.3, 0.4])
            .expect("Valid shape for inputs");
        let parameters = Array2::from_shape_vec((2, 2), vec![0.5, 0.6, 0.7, 0.8])
            .expect("Valid shape for parameters");

        let result = layer.forward(&inputs, &parameters);
        assert!(result.is_ok());
    }

    #[test]
    fn test_pqc_layer_initialization() -> Result<()> {
        let mut builder = CircuitBuilder::new();
        builder.h(0)?;
        let circuit = builder.build();

        let symbols = vec!["param1".to_string()];
        let observable = Observable::PauliZ(vec![0]);
        let backend = Arc::new(StatevectorBackend::new(8));

        let pqc = PQCLayer::new(circuit, symbols, observable, backend).with_initialization(
            ParameterInitStrategy::RandomNormal {
                mean: 0.0,
                std: 0.1,
            },
        );

        let params = pqc.initialize_parameters(5, 3);
        assert_eq!(params.shape(), &[5, 3]);
        Ok(())
    }

    #[test]
    #[ignore]
    fn test_tfq_utils() {
        let circuit = tfq_utils::create_data_encoding_circuit(3, DataEncodingType::Angle)
            .expect("Data encoding circuit creation should succeed");
        assert_eq!(circuit.num_qubits(), 3);

        let ansatz = tfq_utils::create_hardware_efficient_ansatz(4, 2)
            .expect("Hardware efficient ansatz creation should succeed");
        assert_eq!(ansatz.num_qubits(), 4);
    }

    #[test]
    fn test_quantum_dataset() -> Result<()> {
        let circuits = vec![CircuitBuilder::new().build(), CircuitBuilder::new().build()];
        let parameters = Array2::from_shape_vec((2, 3), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
            .expect("Valid shape for parameters");
        let labels = Array1::from_vec(vec![0.0, 1.0]);

        let dataset = QuantumDataset::new(circuits, parameters, labels, 1)?;
        let batches: Vec<_> = dataset.batches().collect();

        assert_eq!(batches.len(), 2);
        assert_eq!(batches[0].0.len(), 1);
        Ok(())
    }

    #[test]
    #[ignore]
    fn test_tfq_model() {
        let mut model = TFQModel::new(vec![2, 2])
            .set_loss(TFQLossFunction::MeanSquaredError)
            .set_optimizer(TFQOptimizer::Adam {
                learning_rate: 0.01,
                beta1: 0.9,
                beta2: 0.999,
                epsilon: 1e-8,
            });

        // A model with no layers must fail to compile (see `compile`).
        assert!(model.compile().is_err());
    }
}