scirs2_spatial/quantum_inspired/algorithms/quantum_machine_learning.rs

//! Quantum-Inspired Machine Learning Algorithms
//!
//! This module provides quantum-inspired machine learning algorithms for spatial
//! classification and pattern recognition tasks. The algorithms leverage quantum
//! computing principles — specifically quantum feature maps and kernel methods —
//! to achieve enhanced classification performance on complex spatial data.
//!
//! # Algorithms
//!
//! - [`QuantumSVMModel`]: Quantum-enhanced Support Vector Machine using quantum
//!   kernel functions computed via random Fourier features (Rahimi-Recht method)
//! - [`QuantumClassifier`]: Hybrid quantum-classical classifier wrapping the
//!   quantum SVM with optional preprocessing
//!
//! # Theoretical Foundation
//!
//! The quantum feature map φ: ℝᵈ → ℝᴰ (D >> d) approximates a quantum kernel
//! `k(x, z) = ⟨φ(x), φ(z)⟩ ≈ exp(-‖x - z‖² / (2σ²))` by drawing random
//! frequencies ω from a distribution whose Fourier transform is the kernel's
//! spectral density. This is the Rahimi-Recht random Fourier feature approach.
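//!
//! The key identity behind this construction (the standard random Fourier
//! feature argument, restated here for reference) is
//!
//! `2 · E_{ω,b}[cos(ωᵀx + b) · cos(ωᵀz + b)] = E_ω[cos(ωᵀ(x − z))] = exp(-‖x - z‖² / (2σ²))`
//!
//! for ω ~ N(0, I/σ²) and b ~ Uniform[0, 2π], so averaging D such cosine
//! features gives a Monte-Carlo estimate of the RBF kernel with O(1/√D) error.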
//!
//! The SVM dual problem is solved via Sequential Minimal Optimization (SMO)
//! to find support vectors and dual coefficients α.
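//!
//! The learned decision function then has the usual kernel-SVM form (this is the
//! standard dual solution, and it is exactly what [`QuantumSVMModel::predict`]
//! evaluates): `f(x) = Σᵢ αᵢ yᵢ k(xᵢ, x) + b`, with the predicted label `sign(f(x))`.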

use crate::error::{SpatialError, SpatialResult};
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
use scirs2_core::random::{Rng, RngExt};
use std::f64::consts::PI;

/// Quantum-Enhanced Support Vector Machine
///
/// Implements a kernel SVM where the kernel is computed via a quantum-inspired
/// random Fourier feature map (Rahimi-Recht approximation of the RBF kernel).
/// The feature map φ(x) = √(2/D) · [cos(ω₁ᵀx + b₁), …, cos(ω_Dᵀx + b_D)]ᵀ
/// is constructed so that φ(x)ᵀφ(z) approximates `exp(-‖x - z‖² / (2σ²))` for
/// random ωᵢ ~ N(0, I/σ²) and bᵢ ~ Uniform[0, 2π].
///
/// # Training Algorithm
///
/// Binary classification is performed via a coordinate-descent dual SVM solver
/// (simplified SMO) on the quantum kernel matrix, respecting the box constraint
/// 0 ≤ αᵢ ≤ C for all support vectors.
///
/// # Example
/// ```rust
/// use scirs2_core::ndarray::{Array1, Array2};
/// use scirs2_spatial::quantum_inspired::algorithms::quantum_machine_learning::QuantumSVMModel;
///
/// # fn example() -> Result<(), Box<dyn std::error::Error>> {
/// // Two-class separable dataset (4 points, 2 features)
/// let x = Array2::from_shape_vec((4, 2), vec![
///     0.0, 0.0,
///     1.0, 0.0,
///     0.0, 1.0,
///     5.0, 5.0,
/// ])?;
/// let y = Array1::from_vec(vec![1.0, 1.0, 1.0, -1.0]);
///
/// let mut model = QuantumSVMModel::new(4, 1.0);
/// model.fit(&x, &y)?;
///
/// let x_test = Array2::from_shape_vec((1, 2), vec![0.5, 0.5])?;
/// let preds = model.predict(&x_test)?;
/// assert_eq!(preds.len(), 1);
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct QuantumSVMModel {
    /// Number of qubits (determines random Fourier feature dimension D = 2^n_qubits)
    n_qubits: usize,
    /// Regularisation parameter C (box constraint for dual variables)
    regularization: f64,
    /// Stored support vectors (a subset of training points)
    support_vectors: Vec<Array1<f64>>,
    /// Dual coefficients αᵢ · yᵢ for each support vector
    alphas: Vec<f64>,
    /// Bias term learned during training
    bias: f64,
    /// Random frequency matrix Ω ∈ ℝ^{D × d}, rows are ωᵢ
    random_weights: Option<Array2<f64>>,
    /// Random phase offsets bᵢ ~ Uniform[0, 2π] (length D)
    random_offsets: Option<Array1<f64>>,
    /// RBF bandwidth parameter σ
    bandwidth: f64,
}

impl QuantumSVMModel {
    /// Construct a new `QuantumSVMModel`.
    ///
    /// # Arguments
    /// * `n_qubits` - Number of qubits; the random Fourier feature dimension
    ///   is `D = 2^n_qubits` (clamped to [4, 256] for practical use).
    /// * `regularization` - The SVM regularisation constant C > 0.
    ///
    /// # Panics
    /// Does not panic — invalid parameters are caught in [`Self::fit`].
    pub fn new(n_qubits: usize, regularization: f64) -> Self {
        Self {
            n_qubits,
            regularization,
            support_vectors: Vec::new(),
            alphas: Vec::new(),
            bias: 0.0,
            random_weights: None,
            random_offsets: None,
            bandwidth: 1.0,
        }
    }

    /// Return the number of qubits controlling the feature map dimension.
    pub fn n_qubits(&self) -> usize {
        self.n_qubits
    }

    /// Return the regularisation parameter C.
    pub fn regularization(&self) -> f64 {
        self.regularization
    }

    /// Return the number of support vectors (0 before training).
    pub fn num_support_vectors(&self) -> usize {
        self.support_vectors.len()
    }

    /// Compute the quantum feature dimension D = 2^n_qubits, clamped to [4, 256].
    fn feature_dim(&self) -> usize {
        // Cap the shift amount so a very large `n_qubits` cannot overflow the
        // shift; the result is clamped to [4, 256] anyway.
        let raw = 1usize << self.n_qubits.min(8);
        raw.clamp(4, 256)
    }

    /// Initialise random Fourier feature parameters for input dimension `d`.
    ///
    /// Draws ωᵢ from N(0, I / σ²) and bᵢ from Uniform[0, 2π].
    fn init_random_features(&mut self, d: usize) {
        let big_d = self.feature_dim();
        let mut rng = scirs2_core::random::rng();

        // Draw ωᵢ ~ N(0, I/σ²) via Box-Muller: each standard-normal sample is
        // scaled by 1/σ, where σ is the bandwidth.
        let scale = 1.0 / self.bandwidth;
        let mut weights = Array2::<f64>::zeros((big_d, d));
        let mut offsets = Array1::<f64>::zeros(big_d);

        for i in 0..big_d {
            for j in 0..d {
                // Box-Muller: u1, u2 ~ Uniform(0,1) → z ~ N(0,1)
                let u1: f64 = rng.random_range(1e-10_f64..1.0_f64);
                let u2: f64 = rng.random_range(0.0_f64..1.0_f64);
                let z = (-2.0 * u1.ln()).sqrt() * (2.0 * PI * u2).cos();
                weights[[i, j]] = z * scale;
            }
            offsets[i] = rng.random_range(0.0_f64..(2.0 * PI));
        }

        self.random_weights = Some(weights);
        self.random_offsets = Some(offsets);
    }

    /// Map a single sample `x ∈ ℝᵈ` to its quantum feature vector `φ(x) ∈ ℝᴰ`.
    fn quantum_feature_map(&self, x: &ArrayView1<'_, f64>) -> SpatialResult<Array1<f64>> {
        let weights = self.random_weights.as_ref().ok_or_else(|| {
            SpatialError::InvalidInput("Model not fitted: call fit() first".to_string())
        })?;
        let offsets = self.random_offsets.as_ref().ok_or_else(|| {
            SpatialError::InvalidInput("Model not fitted: call fit() first".to_string())
        })?;

        let big_d = weights.nrows();
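        // √(2/D) normalisation: with this scaling, φ(x)ᵀφ(z) is a Monte-Carlo
        // estimate of the RBF kernel (see the module-level docs).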
        let scale = (2.0 / big_d as f64).sqrt();

        let mut phi = Array1::<f64>::zeros(big_d);
        for i in 0..big_d {
            let row = weights.row(i);
            // dot product ωᵢᵀ x
            let dot: f64 = row.iter().zip(x.iter()).map(|(w, xi)| w * xi).sum();
            phi[i] = scale * (dot + offsets[i]).cos();
        }
        Ok(phi)
    }

    /// Compute the quantum kernel k(a, b) = φ(a)ᵀ φ(b).
    fn quantum_kernel(
        &self,
        a: &ArrayView1<'_, f64>,
        b: &ArrayView1<'_, f64>,
    ) -> SpatialResult<f64> {
        let phi_a = self.quantum_feature_map(a)?;
        let phi_b = self.quantum_feature_map(b)?;
        Ok(phi_a.iter().zip(phi_b.iter()).map(|(ai, bi)| ai * bi).sum())
    }

    /// Train the model on labelled data.
    ///
    /// # Arguments
    /// * `x` - Training features matrix of shape `(n_samples, n_features)`.
    /// * `y` - Binary labels `{+1, -1}` of shape `(n_samples,)`.
    ///
    /// # Errors
    /// Returns [`SpatialError::InvalidInput`] for empty inputs, shape mismatch,
    /// invalid label values, or non-positive `regularization`.
    pub fn fit(&mut self, x: &Array2<f64>, y: &Array1<f64>) -> SpatialResult<()> {
        let (n, d) = x.dim();

        if n == 0 {
            return Err(SpatialError::InvalidInput(
                "Training set must be non-empty".to_string(),
            ));
        }
        if y.len() != n {
            return Err(SpatialError::InvalidInput(format!(
                "x has {} rows but y has {} elements",
                n,
                y.len()
            )));
        }
        if self.regularization <= 0.0 {
            return Err(SpatialError::InvalidInput(
                "regularization (C) must be positive".to_string(),
            ));
        }
        // Validate labels are ±1
        for (i, &yi) in y.iter().enumerate() {
            if (yi - 1.0).abs() > 1e-9 && (yi + 1.0).abs() > 1e-9 {
                return Err(SpatialError::InvalidInput(format!(
                    "Label y[{}] = {} is not in {{-1, +1}}",
                    i, yi
                )));
            }
        }

        // Auto-tune bandwidth via median heuristic on pairwise distances
        let mut sq_dists: Vec<f64> = Vec::with_capacity(n * (n - 1) / 2);
        for i in 0..n {
            for j in (i + 1)..n {
                let sq: f64 = (0..d).map(|k| (x[[i, k]] - x[[j, k]]).powi(2)).sum();
                sq_dists.push(sq);
            }
        }
        if !sq_dists.is_empty() {
            sq_dists.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
            let median = sq_dists[sq_dists.len() / 2];
            self.bandwidth = median.sqrt().max(1e-6);
        }

        // Initialise quantum random Fourier features
        self.init_random_features(d);

        // Build kernel matrix K[i,j] = k(xᵢ, xⱼ)
        let mut kernel_matrix = Array2::<f64>::zeros((n, n));
        for i in 0..n {
            for j in i..n {
                let kij = self.quantum_kernel(&x.row(i), &x.row(j))?;
                kernel_matrix[[i, j]] = kij;
                kernel_matrix[[j, i]] = kij;
            }
        }

        // Simplified SMO: coordinate ascent on the dual
        // max  Σᵢ αᵢ - ½ Σᵢⱼ αᵢ αⱼ yᵢ yⱼ K[i,j]
        // s.t. 0 ≤ αᵢ ≤ C
        let mut alpha = vec![0.0f64; n];
        let max_smo_iter = 200;
        let tol = 1e-4;

        for _ in 0..max_smo_iter {
            let mut changed = false;

            for i in 0..n {
                // Compute decision function at xᵢ
                let fi: f64 = alpha
                    .iter()
                    .enumerate()
                    .map(|(j, &aj)| aj * y[j] * kernel_matrix[[i, j]])
                    .sum::<f64>()
                    + self.bias;

                let ri = fi * y[i] - 1.0;

                // Check KKT violation
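                // (αᵢ = 0 requires yᵢ·f(xᵢ) ≥ 1, 0 < αᵢ < C requires yᵢ·f(xᵢ) = 1,
                //  and αᵢ = C requires yᵢ·f(xᵢ) ≤ 1; rᵢ = yᵢ·f(xᵢ) - 1 measures the slack.)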
                let kkt_violated = (ri < -tol && alpha[i] < self.regularization - tol)
                    || (ri > tol && alpha[i] > tol);

                if !kkt_violated {
                    continue;
                }

                // Pick second variable heuristically: most violating
                let j = (0..n)
                    .filter(|&k| k != i)
                    .max_by(|&a, &b| {
                        let fa: f64 = alpha
                            .iter()
                            .enumerate()
                            .map(|(l, &al)| al * y[l] * kernel_matrix[[a, l]])
                            .sum::<f64>()
                            + self.bias;
                        let fb: f64 = alpha
                            .iter()
                            .enumerate()
                            .map(|(l, &al)| al * y[l] * kernel_matrix[[b, l]])
                            .sum::<f64>()
                            + self.bias;
                        (fa * y[a] - 1.0)
                            .abs()
                            .partial_cmp(&(fb * y[b] - 1.0).abs())
                            .unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .unwrap_or((i + 1) % n);

                // Analytic step
                let eta =
                    kernel_matrix[[i, i]] + kernel_matrix[[j, j]] - 2.0 * kernel_matrix[[i, j]];

                if eta <= 1e-12 {
                    continue;
                }

                let fj: f64 = alpha
                    .iter()
                    .enumerate()
                    .map(|(l, &al)| al * y[l] * kernel_matrix[[j, l]])
                    .sum::<f64>()
                    + self.bias;

                // Use SMO error terms E_i = f(x_i) - y_i, E_j = f(x_j) - y_j
                // to correctly kick alpha off zero on the first iteration
                let e_i = fi - y[i];
                let e_j = fj - y[j];
                let alpha_j_new =
                    (alpha[j] + y[j] * (e_i - e_j) / eta).clamp(0.0, self.regularization);
                let alpha_i_new = alpha[i] + y[i] * y[j] * (alpha[j] - alpha_j_new);
                let alpha_i_new = alpha_i_new.clamp(0.0, self.regularization);

                if (alpha_i_new - alpha[i]).abs() > 1e-8 {
                    let delta_i = alpha_i_new - alpha[i];
                    let delta_j = alpha_j_new - alpha[j];
                    alpha[i] = alpha_i_new;
                    alpha[j] = alpha_j_new;

                    // Update bias from the SMO error terms (the previous bias is
                    // already contained in e_i and e_j, so b_i / b_j are increments)
                    let b_i = -e_i
                        - y[i] * delta_i * kernel_matrix[[i, i]]
                        - y[j] * delta_j * kernel_matrix[[i, j]];
                    let b_j = -e_j
                        - y[i] * delta_i * kernel_matrix[[i, j]]
                        - y[j] * delta_j * kernel_matrix[[j, j]];

                    if alpha[i] > tol && alpha[i] < self.regularization - tol {
                        self.bias += b_i;
                    } else if alpha[j] > tol && alpha[j] < self.regularization - tol {
                        self.bias += b_j;
                    } else {
                        self.bias += (b_i + b_j) * 0.5;
                    }

                    changed = true;
                }
            }

            if !changed {
                break;
            }
        }

        // Store support vectors (αᵢ > threshold)
        let sv_threshold = 1e-6;
        self.support_vectors.clear();
        self.alphas.clear();
        for i in 0..n {
            if alpha[i] > sv_threshold {
                self.support_vectors.push(x.row(i).to_owned());
                self.alphas.push(alpha[i] * y[i]);
            }
        }

        Ok(())
    }

    /// Predict class labels for new samples.
    ///
    /// # Arguments
    /// * `x` - Test features matrix of shape `(n_test, n_features)`.
    ///
    /// # Returns
    /// Binary predictions `{+1, -1}` of shape `(n_test,)`.
    ///
    /// # Errors
    /// Returns error if the model has not been fitted or if `x` has a different
    /// number of features than the training data.
    pub fn predict(&self, x: &Array2<f64>) -> SpatialResult<Array1<f64>> {
        if self.support_vectors.is_empty() {
            return Err(SpatialError::InvalidInput(
                "Model not fitted: call fit() first".to_string(),
            ));
        }
        // Enforce the documented contract: feature count must match the training data.
        let expected_d = self.support_vectors[0].len();
        if x.ncols() != expected_d {
            return Err(SpatialError::InvalidInput(format!(
                "Expected {} features but got {}",
                expected_d,
                x.ncols()
            )));
        }

        let n_test = x.nrows();
        let mut preds = Array1::<f64>::zeros(n_test);

        for (idx, row) in x.outer_iter().enumerate() {
            let mut decision: f64 = self.bias;
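            // f(x) = Σᵢ (αᵢ yᵢ) · k(svᵢ, x) + b; `alphas` already stores the products αᵢ·yᵢ.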
            for (sv, &alpha) in self.support_vectors.iter().zip(self.alphas.iter()) {
                let kval = self.quantum_kernel(&row, &sv.view())?;
                decision += alpha * kval;
            }
            preds[idx] = if decision >= 0.0 { 1.0 } else { -1.0 };
        }

        Ok(preds)
    }
}

/// Quantum-Classical Hybrid Classifier
///
/// A high-level classifier that wraps [`QuantumSVMModel`] with optional
/// per-feature standardisation preprocessing. The preprocessing computes
/// z-score normalisation (zero mean, unit variance) from training data and
/// applies it consistently at test time.
///
/// # Example
/// ```rust
/// use scirs2_core::ndarray::{Array1, Array2};
/// use scirs2_spatial::quantum_inspired::algorithms::quantum_machine_learning::QuantumClassifier;
///
/// # fn example() -> Result<(), Box<dyn std::error::Error>> {
/// let x = Array2::from_shape_vec((4, 2), vec![
///     0.0, 0.0,
///     1.0, 0.0,
///     0.0, 1.0,
///     5.0, 5.0,
/// ])?;
/// let y = Array1::from_vec(vec![1.0, 1.0, 1.0, -1.0]);
///
/// let mut clf = QuantumClassifier::new(4, 1.0, true);
/// clf.fit(&x, &y)?;
///
/// let x_test = Array2::from_shape_vec((1, 2), vec![0.5, 0.5])?;
/// let preds = clf.predict(&x_test)?;
/// assert_eq!(preds.len(), 1);
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct QuantumClassifier {
    /// The underlying quantum SVM
    svm: QuantumSVMModel,
    /// Whether to apply z-score normalisation before passing data to the SVM
    standardise: bool,
    /// Per-feature means (set during fit)
    feature_means: Option<Array1<f64>>,
    /// Per-feature standard deviations (set during fit)
    feature_stds: Option<Array1<f64>>,
}

impl QuantumClassifier {
    /// Construct a new `QuantumClassifier`.
    ///
    /// # Arguments
    /// * `n_qubits` - Passed through to [`QuantumSVMModel`].
    /// * `regularization` - SVM regularisation constant C.
    /// * `standardise` - If `true`, z-score normalise features before fitting.
    pub fn new(n_qubits: usize, regularization: f64, standardise: bool) -> Self {
        Self {
            svm: QuantumSVMModel::new(n_qubits, regularization),
            standardise,
            feature_means: None,
            feature_stds: None,
        }
    }

    /// Train the classifier.
    ///
    /// If `standardise` was set to `true`, this computes feature statistics
    /// from `x` and stores them for consistent use at predict time.
    ///
    /// # Errors
    /// Propagates errors from [`QuantumSVMModel::fit`].
    pub fn fit(&mut self, x: &Array2<f64>, y: &Array1<f64>) -> SpatialResult<()> {
        let x_proc = if self.standardise {
            self.compute_and_store_stats(x)?
        } else {
            x.clone()
        };
        self.svm.fit(&x_proc, y)
    }

    /// Predict labels for new samples.
    ///
    /// Applies the same normalisation (if any) that was used during training.
    ///
    /// # Errors
    /// Propagates errors from [`QuantumSVMModel::predict`].
    pub fn predict(&self, x: &Array2<f64>) -> SpatialResult<Array1<f64>> {
        let x_proc = if self.standardise {
            self.apply_stored_stats(x)?
        } else {
            x.clone()
        };
        self.svm.predict(&x_proc)
    }

    /// Compute per-feature mean and std from `x`, store them, and return
    /// the standardised copy.
    fn compute_and_store_stats(&mut self, x: &Array2<f64>) -> SpatialResult<Array2<f64>> {
        let (n, d) = x.dim();
        if n == 0 {
            return Err(SpatialError::InvalidInput(
                "Cannot standardise an empty matrix".to_string(),
            ));
        }

        let mut means = Array1::<f64>::zeros(d);
        let mut stds = Array1::<f64>::zeros(d);

        for j in 0..d {
            let col = x.column(j);
            let mean = col.iter().sum::<f64>() / n as f64;
            let variance = col.iter().map(|&v| (v - mean).powi(2)).sum::<f64>() / n as f64;
            means[j] = mean;
            stds[j] = variance.sqrt().max(1e-8);
        }

        self.feature_means = Some(means.clone());
        self.feature_stds = Some(stds.clone());

        let mut x_std = x.clone();
        for j in 0..d {
            for i in 0..n {
                x_std[[i, j]] = (x_std[[i, j]] - means[j]) / stds[j];
            }
        }
        Ok(x_std)
    }

    /// Apply previously stored statistics to standardise `x`.
    fn apply_stored_stats(&self, x: &Array2<f64>) -> SpatialResult<Array2<f64>> {
        let means = self.feature_means.as_ref().ok_or_else(|| {
            SpatialError::InvalidInput("Model not fitted: call fit() first".to_string())
        })?;
        let stds = self.feature_stds.as_ref().ok_or_else(|| {
            SpatialError::InvalidInput("Model not fitted: call fit() first".to_string())
        })?;

        let (n, d) = x.dim();
        if d != means.len() {
            return Err(SpatialError::InvalidInput(format!(
                "Expected {} features but got {}",
                means.len(),
                d
            )));
        }

        let mut x_std = x.clone();
        for j in 0..d {
            for i in 0..n {
                x_std[[i, j]] = (x_std[[i, j]] - means[j]) / stds[j];
            }
        }
        Ok(x_std)
    }

    /// Return the number of support vectors in the underlying SVM.
    pub fn num_support_vectors(&self) -> usize {
        self.svm.num_support_vectors()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::{Array1, Array2};

    /// Build a simple two-class dataset with two well-separated clusters.
    fn two_class_data() -> (Array2<f64>, Array1<f64>) {
        // 3 points near origin (+1) and 3 points far from origin (-1)
        let x = Array2::from_shape_vec(
            (6, 2),
            vec![0.1, 0.1, -0.1, 0.2, 0.2, -0.1, 5.0, 5.0, 5.5, 5.0, 5.0, 5.5],
        )
        .expect("shape is valid");
        let y = Array1::from_vec(vec![1.0, 1.0, 1.0, -1.0, -1.0, -1.0]);
        (x, y)
    }

    #[test]
    fn test_quantum_svm_fit_and_predict() {
        let (x, y) = two_class_data();
        let mut model = QuantumSVMModel::new(3, 1.0);
        model.fit(&x, &y).expect("fit should succeed");
        assert!(
            model.num_support_vectors() > 0,
            "model must produce support vectors"
        );

        let preds = model.predict(&x).expect("predict should succeed");
        assert_eq!(preds.len(), x.nrows());

        // All predicted labels must be ±1
        for &p in preds.iter() {
            assert!(
                (p - 1.0).abs() < 1e-9 || (p + 1.0).abs() < 1e-9,
                "prediction {p} is not ±1"
            );
        }
    }

    #[test]
    fn test_quantum_classifier_with_standardisation() {
        let (x, y) = two_class_data();
        let mut clf = QuantumClassifier::new(3, 1.0, true);
        clf.fit(&x, &y).expect("fit should succeed");

        let preds = clf.predict(&x).expect("predict should succeed");
        assert_eq!(preds.len(), x.nrows());

        // Verify all outputs are binary
        for &p in preds.iter() {
            assert!(
                (p - 1.0).abs() < 1e-9 || (p + 1.0).abs() < 1e-9,
                "prediction {p} is not ±1"
            );
        }
    }

    #[test]
    fn test_svm_rejects_bad_labels() {
        let x = Array2::from_shape_vec((2, 2), vec![0.0, 0.0, 1.0, 1.0]).expect("shape is valid");
        let y_bad = Array1::from_vec(vec![0.0, 1.0]); // 0 is not a valid label
        let mut model = QuantumSVMModel::new(2, 1.0);
        assert!(model.fit(&x, &y_bad).is_err());
    }

    #[test]
    fn test_predict_before_fit_errors() {
        let model = QuantumSVMModel::new(2, 1.0);
        let x_test = Array2::from_shape_vec((1, 2), vec![0.0, 0.0]).expect("shape is valid");
        assert!(model.predict(&x_test).is_err());
    }
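
    /// Extra sanity checks on the quantum kernel itself. This is a sketch of two
    /// properties the random-feature construction satisfies by design: symmetry,
    /// and k(x, x) = (2/D)·Σ cos²(ωᵢᵀx + bᵢ) ∈ [0, 2].
    #[test]
    fn test_quantum_kernel_symmetry_and_self_kernel_bounds() {
        let (x, y) = two_class_data();
        let mut model = QuantumSVMModel::new(3, 1.0);
        model.fit(&x, &y).expect("fit should succeed");

        let a = x.row(0);
        let b = x.row(3);
        let k_ab = model.quantum_kernel(&a, &b).expect("kernel should succeed");
        let k_ba = model.quantum_kernel(&b, &a).expect("kernel should succeed");
        assert!((k_ab - k_ba).abs() < 1e-12, "kernel must be symmetric");

        let k_aa = model.quantum_kernel(&a, &a).expect("kernel should succeed");
        assert!(
            (0.0..=2.0).contains(&k_aa),
            "self-kernel {k_aa} must lie in [0, 2] for cosine features"
        );
    }

    /// The hybrid classifier should also refuse to predict before fitting,
    /// mirroring `test_predict_before_fit_errors` for the wrapper type.
    #[test]
    fn test_classifier_predict_before_fit_errors() {
        let clf = QuantumClassifier::new(2, 1.0, true);
        let x_test = Array2::from_shape_vec((1, 2), vec![0.0, 0.0]).expect("shape is valid");
        assert!(clf.predict(&x_test).is_err());
    }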
}