use crate::error::Result;
use scirs2_core::ndarray::{Array1, Array2, Array3, ArrayD, IxDyn};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Configuration for a quantum neural ODE (QNODE).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QNODEConfig {
    /// Number of qubits in the circuit.
    pub num_qubits: usize,
    /// Number of ansatz layers.
    pub num_layers: usize,
    /// Numerical integration method used by the solver.
    pub integration_method: IntegrationMethod,
    /// Relative tolerance for adaptive integration.
    pub rtol: f64,
    /// Absolute tolerance for adaptive integration.
    pub atol: f64,
    /// Integration time span (start, end).
    pub time_span: (f64, f64),
    /// Whether adaptive step sizing is requested.
    pub adaptive_steps: bool,
    /// Maximum number of derivative evaluations for adaptive solvers.
    pub max_evals: usize,
    /// Variational ansatz family used to build the circuit.
    pub ansatz_type: AnsatzType,
    /// Optional hardware noise model.
    pub noise_model: Option<NoiseModel>,
    /// Strategy used to compute gradients.
    pub optimization_strategy: OptimizationStrategy,
}

impl Default for QNODEConfig {
    fn default() -> Self {
        Self {
            num_qubits: 4,
            num_layers: 3,
            integration_method: IntegrationMethod::RungeKutta4,
            rtol: 1e-3,
            atol: 1e-5,
            time_span: (0.0, 1.0),
            adaptive_steps: false,
            max_evals: 10000,
            ansatz_type: AnsatzType::HardwareEfficient,
            noise_model: None,
            optimization_strategy: OptimizationStrategy::Adjoint,
        }
    }
}

/// Numerical integration schemes available to the solver.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum IntegrationMethod {
    /// Forward Euler (first order, fixed step).
    Euler,
    /// Classical fourth-order Runge-Kutta (fixed step).
    RungeKutta4,
    /// Embedded Runge-Kutta pair with adaptive step size (Dormand-Prince style).
    DormandPrince,
    /// Cash-Karp embedded Runge-Kutta pair.
    CashKarp,
    /// Adaptive stepping driven by a quantum error estimate.
    QuantumAdaptive,
    /// Variational time evolution (not implemented in this module).
    Variational,
}

/// Variational circuit ansatz families.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AnsatzType {
    /// RY/RZ rotations on every qubit with a linear chain of CNOTs.
    HardwareEfficient,
    /// RY rotations only, with a linear chain of CNOTs.
    RealAmplitudes,
    /// Alternating layers of single-qubit rotations and CZ entanglers.
    AlternatingLayered,
    /// QAOA-style ansatz (not implemented in this module).
    QAOA,
    /// User-defined ansatz identified by name.
    Custom(String),
}

/// Gradient computation strategies.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum OptimizationStrategy {
    /// Adjoint sensitivity method.
    Adjoint,
    /// Parameter-shift rule.
    ParameterShift,
    /// Finite-difference gradients.
    FiniteDifferences,
    /// Quantum natural gradient.
    QuantumNaturalGradient,
    /// Combination of several strategies.
    Hybrid,
}

/// Quantum neural ordinary differential equation model.
#[derive(Debug, Clone)]
pub struct QuantumNeuralODE {
    config: QNODEConfig,
    quantum_circuit: QuantumCircuit,
    parameters: Array1<f64>,
    training_history: Vec<TrainingMetrics>,
    solver_state: Option<SolverState>,
}

/// Parameterized quantum circuit described as a flat gate list.
#[derive(Debug, Clone)]
pub struct QuantumCircuit {
    gates: Vec<QuantumGate>,
    num_qubits: usize,
    depth: usize,
}

/// A single gate in the circuit.
#[derive(Debug, Clone)]
pub struct QuantumGate {
    gate_type: GateType,
    qubits: Vec<usize>,
    parameters: Vec<f64>,
    is_parametric: bool,
}

/// Supported gate types.
#[derive(Debug, Clone)]
pub enum GateType {
    RX,
    RY,
    RZ,
    CNOT,
    CZ,
    Hadamard,
    Toffoli,
    Custom(String),
}

/// State carried by the ODE solver between steps.
#[derive(Debug, Clone)]
pub struct SolverState {
    current_time: f64,
    current_state: Array1<f64>,
    step_size: f64,
    error_estimate: f64,
    function_evaluations: usize,
}

/// Per-epoch training metrics.
#[derive(Debug, Clone)]
pub struct TrainingMetrics {
    epoch: usize,
    loss: f64,
    gradient_norm: f64,
    integration_time: f64,
    quantum_fidelity: f64,
    classical_equivalent_loss: Option<f64>,
}

/// Simple hardware noise model.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NoiseModel {
    gate_errors: HashMap<String, f64>,
    measurement_errors: f64,
    decoherence_times: Array1<f64>,
    crosstalk_matrix: Array2<f64>,
}

impl QuantumNeuralODE {
    /// Creates a new QNODE with zero-initialized circuit parameters.
    pub fn new(config: QNODEConfig) -> Result<Self> {
        let quantum_circuit = Self::build_quantum_circuit(&config)?;
        let num_parameters = Self::count_parameters(&quantum_circuit);
        let parameters = Array1::zeros(num_parameters);

        Ok(Self {
            config,
            quantum_circuit,
            parameters,
            training_history: Vec::new(),
            solver_state: None,
        })
    }

    /// Builds the ansatz circuit described by the configuration.
    fn build_quantum_circuit(config: &QNODEConfig) -> Result<QuantumCircuit> {
        let mut gates = Vec::new();

        match config.ansatz_type {
            AnsatzType::HardwareEfficient => {
                for _layer in 0..config.num_layers {
                    // Parameterized single-qubit rotations.
                    for qubit in 0..config.num_qubits {
                        gates.push(QuantumGate {
                            gate_type: GateType::RY,
                            qubits: vec![qubit],
                            parameters: vec![0.0],
                            is_parametric: true,
                        });
                        gates.push(QuantumGate {
                            gate_type: GateType::RZ,
                            qubits: vec![qubit],
                            parameters: vec![0.0],
                            is_parametric: true,
                        });
                    }

                    // Linear chain of entangling CNOTs.
                    for qubit in 0..config.num_qubits - 1 {
                        gates.push(QuantumGate {
                            gate_type: GateType::CNOT,
                            qubits: vec![qubit, qubit + 1],
                            parameters: vec![],
                            is_parametric: false,
                        });
                    }
                }
            }
            AnsatzType::RealAmplitudes => {
                for _layer in 0..config.num_layers {
                    for qubit in 0..config.num_qubits {
                        gates.push(QuantumGate {
                            gate_type: GateType::RY,
                            qubits: vec![qubit],
                            parameters: vec![0.0],
                            is_parametric: true,
                        });
                    }

                    for qubit in 0..config.num_qubits - 1 {
                        gates.push(QuantumGate {
                            gate_type: GateType::CNOT,
                            qubits: vec![qubit, qubit + 1],
                            parameters: vec![],
                            is_parametric: false,
                        });
                    }
                }
            }
            AnsatzType::AlternatingLayered => {
                for layer in 0..config.num_layers {
                    if layer % 2 == 0 {
                        // Even layers: parameterized rotations.
                        for qubit in 0..config.num_qubits {
                            gates.push(QuantumGate {
                                gate_type: GateType::RX,
                                qubits: vec![qubit],
                                parameters: vec![0.0],
                                is_parametric: true,
                            });
                            gates.push(QuantumGate {
                                gate_type: GateType::RZ,
                                qubits: vec![qubit],
                                parameters: vec![0.0],
                                is_parametric: true,
                            });
                        }
                    } else {
                        // Odd layers: CZ entanglers.
                        for qubit in 0..config.num_qubits - 1 {
                            gates.push(QuantumGate {
                                gate_type: GateType::CZ,
                                qubits: vec![qubit, qubit + 1],
                                parameters: vec![],
                                is_parametric: false,
                            });
                        }
                    }
                }
            }
            _ => {
                return Err(crate::error::MLError::InvalidConfiguration(
                    "Unsupported ansatz type for basic implementation".to_string(),
                ));
            }
        }

        Ok(QuantumCircuit {
            gates,
            num_qubits: config.num_qubits,
            depth: config.num_layers,
        })
    }

    /// Counts the number of trainable parameters in the circuit.
    fn count_parameters(circuit: &QuantumCircuit) -> usize {
        circuit
            .gates
            .iter()
            .filter(|gate| gate.is_parametric)
            .map(|gate| gate.parameters.len())
            .sum()
    }

    /// Integrates the quantum ODE from the initial state over the given time span.
    pub fn forward(
        &mut self,
        initial_state: &Array1<f64>,
        time_span: (f64, f64),
    ) -> Result<Array1<f64>> {
        match self.config.integration_method {
            IntegrationMethod::Euler => self.solve_euler(initial_state, time_span),
            IntegrationMethod::RungeKutta4 => self.solve_runge_kutta4(initial_state, time_span),
            IntegrationMethod::DormandPrince => self.solve_dormand_prince(initial_state, time_span),
            IntegrationMethod::QuantumAdaptive => {
                self.solve_quantum_adaptive(initial_state, time_span)
            }
            _ => Err(crate::error::MLError::InvalidConfiguration(
                "Integration method not implemented".to_string(),
            )),
        }
    }

    /// Fixed-step forward Euler integration.
    fn solve_euler(
        &mut self,
        initial_state: &Array1<f64>,
        time_span: (f64, f64),
    ) -> Result<Array1<f64>> {
        let num_steps = 1000;
        let dt = (time_span.1 - time_span.0) / num_steps as f64;
        let mut state = initial_state.clone();
        let mut time = time_span.0;

        for _ in 0..num_steps {
            let derivative = self.quantum_derivative(&state, time)?;
            state = &state + &(derivative * dt);
            time += dt;
        }

        Ok(state)
    }

    /// Fixed-step classical fourth-order Runge-Kutta integration.
    fn solve_runge_kutta4(
        &mut self,
        initial_state: &Array1<f64>,
        time_span: (f64, f64),
    ) -> Result<Array1<f64>> {
        let num_steps = 1000;
        let dt = (time_span.1 - time_span.0) / num_steps as f64;
        let mut state = initial_state.clone();
        let mut time = time_span.0;

        for _ in 0..num_steps {
            let k1 = self.quantum_derivative(&state, time)?;
            let k2 = self.quantum_derivative(&(&state + &(&k1 * (dt / 2.0))), time + dt / 2.0)?;
            let k3 = self.quantum_derivative(&(&state + &(&k2 * (dt / 2.0))), time + dt / 2.0)?;
            let k4 = self.quantum_derivative(&(&state + &(&k3 * dt)), time + dt)?;

            state = &state + &((&k1 + &(&k2 * 2.0) + &(&k3 * 2.0) + &k4) * (dt / 6.0));
            time += dt;
        }

        Ok(state)
    }

    /// Adaptive integration using a simplified embedded Runge-Kutta pair.
    fn solve_dormand_prince(
        &mut self,
        initial_state: &Array1<f64>,
        time_span: (f64, f64),
    ) -> Result<Array1<f64>> {
        let mut state = initial_state.clone();
        let mut time = time_span.0;
        let mut dt = 0.01;
        let target_time = time_span.1;

        while time < target_time {
            let (next_state, error, optimal_dt) = self.dormand_prince_step(&state, time, dt)?;

            // Accept the step only if the error estimate meets the tolerance;
            // otherwise retry from the same point with the suggested step size.
            if error < self.config.rtol {
                state = next_state;
                time += dt;
                if time >= target_time {
                    break;
                }
            }

            dt = optimal_dt.min(target_time - time);

            if dt < 1e-12 {
                return Err(crate::error::MLError::NumericalError(
                    "Step size too small in adaptive integration".to_string(),
                ));
            }
        }

        Ok(state)
    }

    /// One embedded Runge-Kutta step. Only the first three stages of the
    /// Dormand-Prince tableau are used, so the "5th"/"4th"-order labels on the
    /// two solutions are nominal; their difference still serves as an error estimate.
    fn dormand_prince_step(
        &mut self,
        state: &Array1<f64>,
        time: f64,
        dt: f64,
    ) -> Result<(Array1<f64>, f64, f64)> {
        let k1 = self.quantum_derivative(state, time)?;
        let k2 = self.quantum_derivative(&(state + &(&k1 * (dt / 5.0))), time + dt / 5.0)?;
        let k3 = self.quantum_derivative(
            &(state + &(&k1 * (3.0 * dt / 40.0)) + &(&k2 * (9.0 * dt / 40.0))),
            time + 3.0 * dt / 10.0,
        )?;

        let next_state_5th = state
            + &(&k1 * (35.0 * dt / 384.0))
            + &(&k2 * (500.0 * dt / 1113.0))
            + &(&k3 * (125.0 * dt / 192.0));

        let next_state_4th = state
            + &(&k1 * (5179.0 * dt / 57600.0))
            + &(&k2 * (7571.0 * dt / 16695.0))
            + &(&k3 * (393.0 * dt / 640.0));

        // Max-norm of the difference between the two embedded solutions.
        let error_vec = &next_state_5th - &next_state_4th;
        let error = error_vec.iter().map(|x| x.abs()).fold(0.0, f64::max);

        // Standard step-size controller with a safety factor.
        let safety_factor = 0.9;
        let optimal_dt = if error > 0.0 {
            dt * safety_factor * (self.config.rtol / error).powf(0.2)
        } else {
            dt * 2.0
        };

        Ok((next_state_5th, error, optimal_dt))
    }

    /// Adaptive Euler integration driven by a quantum error estimate.
    fn solve_quantum_adaptive(
        &mut self,
        initial_state: &Array1<f64>,
        time_span: (f64, f64),
    ) -> Result<Array1<f64>> {
        let mut state = initial_state.clone();
        let mut time = time_span.0;
        let target_time = time_span.1;
        let mut dt = 0.01;
        let mut evaluations = 0usize;

        while time < target_time {
            let quantum_error = self.estimate_quantum_error(&state, time, dt)?;

            // Accept the step only if the error estimate meets the tolerance.
            if quantum_error < self.config.rtol {
                let derivative = self.quantum_derivative(&state, time)?;
                state = &state + &(derivative * dt);
                time += dt;
            }

            dt = self
                .adaptive_step_size_quantum(quantum_error, dt)?
                .min(target_time - time);

            // Guard against stalling when the tolerance cannot be met.
            evaluations += 1;
            if evaluations > self.config.max_evals {
                return Err(crate::error::MLError::NumericalError(
                    "Maximum number of evaluations exceeded in quantum-adaptive integration"
                        .to_string(),
                ));
            }
        }

        Ok(state)
    }

    /// Evaluates the right-hand side of the ODE: the classical state is
    /// amplitude-encoded, evolved through the parameterized circuit, decoded
    /// back to per-qubit expectation values, and modulated by a time-dependent factor.
    fn quantum_derivative(&self, state: &Array1<f64>, time: f64) -> Result<Array1<f64>> {
        let quantum_state = self.encode_classical_state(state)?;
        let evolved_state = self.apply_quantum_circuit(&quantum_state, time)?;
        let classical_output = self.decode_quantum_state(&evolved_state)?;

        // Simple explicit time dependence.
        let time_factor = (time * 2.0 * std::f64::consts::PI).sin();
        Ok(&classical_output * time_factor)
    }

    /// Amplitude-encodes a classical vector into a normalized state over
    /// `2^num_qubits` real amplitudes.
    fn encode_classical_state(&self, state: &Array1<f64>) -> Result<Array1<f64>> {
        let num_amplitudes = 1 << self.config.num_qubits;
        let mut quantum_state = Array1::zeros(num_amplitudes);

        let norm = state.iter().map(|x| x * x).sum::<f64>().sqrt();
        if norm > 1e-10 {
            for (i, &val) in state.iter().enumerate() {
                if i < num_amplitudes {
                    quantum_state[i] = val / norm;
                }
            }
        } else {
            // Fall back to the |0...0> basis state for (near-)zero inputs.
            quantum_state[0] = 1.0;
        }

        Ok(quantum_state)
    }

    /// Applies the parameterized circuit to a real-amplitude state vector.
    /// Only RY, RZ, and CNOT are simulated here; other gates are skipped.
    fn apply_quantum_circuit(&self, quantum_state: &Array1<f64>, time: f64) -> Result<Array1<f64>> {
        let mut state = quantum_state.clone();
        let mut param_idx = 0;

        for gate in &self.quantum_circuit.gates {
            match gate.gate_type {
                GateType::RY => {
                    let angle = if gate.is_parametric {
                        // Trainable angle with a small explicit time dependence.
                        self.parameters[param_idx] + time * 0.1
                    } else {
                        gate.parameters[0]
                    };
                    state = self.apply_ry_gate(&state, gate.qubits[0], angle)?;
                    if gate.is_parametric {
                        param_idx += 1;
                    }
                }
                GateType::RZ => {
                    let angle = if gate.is_parametric {
                        self.parameters[param_idx] + time * 0.05
                    } else {
                        gate.parameters[0]
                    };
                    state = self.apply_rz_gate(&state, gate.qubits[0], angle)?;
                    if gate.is_parametric {
                        param_idx += 1;
                    }
                }
                GateType::CNOT => {
                    state = self.apply_cnot_gate(&state, gate.qubits[0], gate.qubits[1])?;
                }
                _ => {
                    // Unsupported gates are skipped, but their parameters must
                    // still be consumed to keep the parameter indexing aligned.
                    if gate.is_parametric {
                        param_idx += gate.parameters.len();
                    }
                }
            }
        }

        Ok(state)
    }

    /// Applies an RY rotation to the given qubit of a real-amplitude state.
    fn apply_ry_gate(&self, state: &Array1<f64>, qubit: usize, angle: f64) -> Result<Array1<f64>> {
        let mut new_state = state.clone();
        let cos_half = (angle / 2.0).cos();
        let sin_half = (angle / 2.0).sin();

        let num_states = state.len();
        let qubit_mask = 1 << qubit;

        // Rotate each |...0...> / |...1...> amplitude pair.
        for i in 0..num_states {
            if i & qubit_mask == 0 {
                let j = i | qubit_mask;
                if j < num_states {
                    let state_0 = state[i];
                    let state_1 = state[j];
                    new_state[i] = cos_half * state_0 - sin_half * state_1;
                    new_state[j] = sin_half * state_0 + cos_half * state_1;
                }
            }
        }

        Ok(new_state)
    }

    /// Applies a simplified RZ "rotation". A true RZ multiplies amplitudes by
    /// the complex phases exp(-i*angle/2) and exp(+i*angle/2); since this
    /// simulator uses real amplitudes, real exponentials are used instead,
    /// which is not unitary.
    fn apply_rz_gate(&self, state: &Array1<f64>, qubit: usize, angle: f64) -> Result<Array1<f64>> {
        let mut new_state = state.clone();
        let phase_0 = (-angle / 2.0).exp();
        let phase_1 = (angle / 2.0).exp();
        let qubit_mask = 1 << qubit;

        for i in 0..state.len() {
            if i & qubit_mask == 0 {
                new_state[i] *= phase_0;
            } else {
                new_state[i] *= phase_1;
            }
        }

        Ok(new_state)
    }

    /// Applies a CNOT gate: for every basis state with the control bit set,
    /// the amplitude is swapped with the basis state whose target bit is flipped.
    fn apply_cnot_gate(
        &self,
        state: &Array1<f64>,
        control: usize,
        target: usize,
    ) -> Result<Array1<f64>> {
        let mut new_state = state.clone();
        let control_mask = 1 << control;
        let target_mask = 1 << target;

        for i in 0..state.len() {
            if i & control_mask != 0 {
                let j = i ^ target_mask;
                new_state[i] = state[j];
            }
        }

        Ok(new_state)
    }

    /// Decodes a quantum state into per-qubit expectation values
    /// (probability of |1> minus probability of |0>).
    fn decode_quantum_state(&self, quantum_state: &Array1<f64>) -> Result<Array1<f64>> {
        let mut classical_state = Array1::zeros(self.config.num_qubits);

        for qubit in 0..self.config.num_qubits {
            let qubit_mask = 1 << qubit;
            let mut expectation = 0.0;

            for (i, &amplitude) in quantum_state.iter().enumerate() {
                if i & qubit_mask != 0 {
                    expectation += amplitude * amplitude;
                } else {
                    expectation -= amplitude * amplitude;
                }
            }

            classical_state[qubit] = expectation;
        }

        Ok(classical_state)
    }

    /// Estimates the local error of an Euler step by comparing the derivative
    /// at the current point with the derivative after a trial step.
    fn estimate_quantum_error(&self, state: &Array1<f64>, time: f64, dt: f64) -> Result<f64> {
        let state_t = self.quantum_derivative(state, time)?;
        let state_t_plus_dt = self.quantum_derivative(&(state + &(&state_t * dt)), time + dt)?;

        let error_estimate = (&state_t - &state_t_plus_dt)
            .iter()
            .map(|x| x * x)
            .sum::<f64>()
            .sqrt();

        Ok(error_estimate)
    }

    /// Proposes a new step size from the current error estimate,
    /// clamped to a reasonable range.
    fn adaptive_step_size_quantum(&self, error: f64, current_dt: f64) -> Result<f64> {
        let target_error = self.config.rtol;
        let safety_factor = 0.8;

        let new_dt = if error > 0.0 {
            current_dt * safety_factor * (target_error / error).powf(0.25)
        } else {
            current_dt * 1.5
        };

        Ok(new_dt.clamp(1e-6, 0.1))
    }

    /// Trains the model with shift-based gradients and plain gradient descent.
    pub fn train(
        &mut self,
        training_data: &[(Array1<f64>, Array1<f64>)],
        epochs: usize,
    ) -> Result<()> {
        let learning_rate = 0.01;

        for epoch in 0..epochs {
            let mut total_loss = 0.0;
            let mut total_gradient_norm = 0.0;

            for (input, target) in training_data {
                let output = self.forward(input, self.config.time_span)?;

                let loss = self.compute_loss(&output, target)?;
                total_loss += loss;

                let gradients = self.compute_gradients(input, target)?;
                total_gradient_norm += gradients.iter().map(|g| g * g).sum::<f64>().sqrt();

                self.update_parameters(&gradients, learning_rate)?;
            }

            let avg_loss = total_loss / training_data.len() as f64;
            let avg_gradient_norm = total_gradient_norm / training_data.len() as f64;

            let metrics = TrainingMetrics {
                epoch,
                loss: avg_loss,
                gradient_norm: avg_gradient_norm,
                integration_time: 0.0, // Not measured yet.
                quantum_fidelity: self.compute_quantum_fidelity()?,
                classical_equivalent_loss: None,
            };

            self.training_history.push(metrics);

            if epoch % 10 == 0 {
                println!(
                    "Epoch {}: Loss = {:.6}, Gradient Norm = {:.6}",
                    epoch, avg_loss, avg_gradient_norm
                );
            }
        }

        Ok(())
    }

    /// Mean squared error between the model output and the target.
    fn compute_loss(&self, output: &Array1<f64>, target: &Array1<f64>) -> Result<f64> {
        let mse = output
            .iter()
            .zip(target.iter())
            .map(|(o, t)| (o - t).powi(2))
            .sum::<f64>()
            / output.len() as f64;

        Ok(mse)
    }

    /// Central-difference gradients in the style of the parameter-shift rule:
    /// each parameter is shifted by plus and minus pi/2 and the loss difference is halved.
    fn compute_gradients(&self, input: &Array1<f64>, target: &Array1<f64>) -> Result<Array1<f64>> {
        let mut gradients = Array1::zeros(self.parameters.len());
        let shift = std::f64::consts::PI / 2.0;

        for i in 0..self.parameters.len() {
            let mut params_plus = self.parameters.clone();
            params_plus[i] += shift;
            let output_plus = self.forward_with_params(input, &params_plus)?;
            let loss_plus = self.compute_loss(&output_plus, target)?;

            let mut params_minus = self.parameters.clone();
            params_minus[i] -= shift;
            let output_minus = self.forward_with_params(input, &params_minus)?;
            let loss_minus = self.compute_loss(&output_minus, target)?;

            gradients[i] = (loss_plus - loss_minus) / 2.0;
        }

        Ok(gradients)
    }

    /// Runs a forward pass on a temporary clone with the given parameters,
    /// leaving `self` unchanged.
    fn forward_with_params(
        &self,
        input: &Array1<f64>,
        params: &Array1<f64>,
    ) -> Result<Array1<f64>> {
        let mut temp_self = self.clone();
        temp_self.parameters = params.clone();
        temp_self.forward(input, self.config.time_span)
    }

    /// Plain gradient-descent update of the circuit parameters.
    fn update_parameters(&mut self, gradients: &Array1<f64>, learning_rate: f64) -> Result<()> {
        for i in 0..self.parameters.len() {
            self.parameters[i] -= learning_rate * gradients[i];
        }
        Ok(())
    }

    /// Heuristic fidelity proxy derived from the parameter norm
    /// (not a true quantum state fidelity).
    fn compute_quantum_fidelity(&self) -> Result<f64> {
        let norm = self.parameters.iter().map(|p| p * p).sum::<f64>().sqrt();
        Ok((1.0 + (-norm).exp()) / 2.0)
    }

    /// Returns the recorded training history.
    pub fn get_training_history(&self) -> &[TrainingMetrics] {
        &self.training_history
    }

    /// Saves the parameters to the given path (placeholder: not yet implemented).
    pub fn save_parameters(&self, path: &str) -> Result<()> {
        println!("Parameters saved to {}", path);
        Ok(())
    }

    /// Loads parameters from the given path (placeholder: not yet implemented).
    pub fn load_parameters(&mut self, path: &str) -> Result<()> {
        println!("Parameters loaded from {}", path);
        Ok(())
    }
}

/// Convenience constructor for a hardware-efficient ansatz circuit.
pub fn create_hardware_efficient_ansatz(
    num_qubits: usize,
    num_layers: usize,
) -> Result<QuantumCircuit> {
    let config = QNODEConfig {
        num_qubits,
        num_layers,
        ansatz_type: AnsatzType::HardwareEfficient,
        ..Default::default()
    };
    QuantumNeuralODE::build_quantum_circuit(&config)
}

/// Convenience constructor for a real-amplitudes ansatz circuit.
pub fn create_real_amplitudes_ansatz(
    num_qubits: usize,
    num_layers: usize,
) -> Result<QuantumCircuit> {
    let config = QNODEConfig {
        num_qubits,
        num_layers,
        ansatz_type: AnsatzType::RealAmplitudes,
        ..Default::default()
    };
    QuantumNeuralODE::build_quantum_circuit(&config)
}

/// Benchmarks the QNODE on test data. The classical baseline is currently a
/// placeholder derived from the quantum results rather than a real model.
pub fn benchmark_qnode_vs_classical(
    qnode: &mut QuantumNeuralODE,
    test_data: &[(Array1<f64>, Array1<f64>)],
) -> Result<BenchmarkResults> {
    let start_time = std::time::Instant::now();

    let mut quantum_loss = 0.0;
    for (input, target) in test_data {
        let output = qnode.forward(input, qnode.config.time_span)?;
        quantum_loss += qnode.compute_loss(&output, target)?;
    }
    quantum_loss /= test_data.len() as f64;

    let quantum_time = start_time.elapsed();

    // Placeholder classical reference values.
    let classical_loss = quantum_loss * 1.1;
    let classical_time = quantum_time * 2;

    Ok(BenchmarkResults {
        quantum_loss,
        classical_loss,
        quantum_time: quantum_time.as_secs_f64(),
        classical_time: classical_time.as_secs_f64(),
        quantum_advantage: classical_loss / quantum_loss,
    })
}

/// Results of a quantum-vs-classical benchmark run.
#[derive(Debug)]
pub struct BenchmarkResults {
    pub quantum_loss: f64,
    pub classical_loss: f64,
    pub quantum_time: f64,
    pub classical_time: f64,
    pub quantum_advantage: f64,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_qnode_creation() {
        let config = QNODEConfig::default();
        let qnode = QuantumNeuralODE::new(config);
        assert!(qnode.is_ok());
    }
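
    // Sanity check for the amplitude encoding implemented above: a non-zero
    // input should produce a unit-norm state over 2^num_qubits amplitudes.
    #[test]
    fn test_encoding_is_normalized() {
        let config = QNODEConfig::default();
        let qnode = QuantumNeuralODE::new(config).unwrap();
        let input = Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4]);
        let encoded = qnode.encode_classical_state(&input).unwrap();
        assert_eq!(encoded.len(), 1 << 4);
        let norm: f64 = encoded.iter().map(|x| x * x).sum::<f64>().sqrt();
        assert!((norm - 1.0).abs() < 1e-10);
    }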

    #[test]
    fn test_forward_pass() {
        let config = QNODEConfig::default();
        let mut qnode = QuantumNeuralODE::new(config).unwrap();
        let input = Array1::from_vec(vec![0.1, 0.2, 0.3, 0.4]);
        let result = qnode.forward(&input, (0.0, 1.0));
        assert!(result.is_ok());
    }
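
    // The hardware-efficient ansatz places one RY and one RZ on every qubit in
    // every layer, so it should expose 2 * num_qubits * num_layers parameters.
    #[test]
    fn test_parameter_count_hardware_efficient() {
        let circuit = create_hardware_efficient_ansatz(4, 3).unwrap();
        assert_eq!(QuantumNeuralODE::count_parameters(&circuit), 2 * 4 * 3);
    }

    // The loss is a plain mean squared error; check it against a hand-computed value.
    #[test]
    fn test_compute_loss_is_mse() {
        let qnode = QuantumNeuralODE::new(QNODEConfig::default()).unwrap();
        let output = Array1::from_vec(vec![0.0, 0.0]);
        let target = Array1::from_vec(vec![1.0, 3.0]);
        // ((0 - 1)^2 + (0 - 3)^2) / 2 = 5.0
        let loss = qnode.compute_loss(&output, &target).unwrap();
        assert!((loss - 5.0).abs() < 1e-12);
    }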

    #[test]
    fn test_training() {
        let config = QNODEConfig {
            num_qubits: 2,
            num_layers: 1,
            max_evals: 100,
            ..Default::default()
        };
        let mut qnode = QuantumNeuralODE::new(config).unwrap();

        let training_data = vec![(
            Array1::from_vec(vec![0.1, 0.2]),
            Array1::from_vec(vec![0.5, 0.6]),
        )];

        let result = qnode.train(&training_data, 1);
        assert!(result.is_ok());
        assert!(!qnode.get_training_history().is_empty());
    }
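
    // RY is implemented as a real rotation on amplitude pairs, so it should
    // preserve the state norm; CNOT only permutes amplitudes, so applying it
    // twice should restore the original state.
    #[test]
    fn test_gate_properties() {
        let qnode = QuantumNeuralODE::new(QNODEConfig {
            num_qubits: 2,
            num_layers: 1,
            ..Default::default()
        })
        .unwrap();

        let state = Array1::from_vec(vec![0.5, 0.5, 0.5, 0.5]);

        let rotated = qnode.apply_ry_gate(&state, 0, 0.7).unwrap();
        let norm: f64 = rotated.iter().map(|x| x * x).sum::<f64>().sqrt();
        assert!((norm - 1.0).abs() < 1e-10);

        let once = qnode.apply_cnot_gate(&state, 0, 1).unwrap();
        let twice = qnode.apply_cnot_gate(&once, 0, 1).unwrap();
        for (a, b) in twice.iter().zip(state.iter()) {
            assert!((a - b).abs() < 1e-12);
        }
    }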
}