// scirs2_transform/kernel/kpca.rs
1//! Kernel PCA (Principal Component Analysis)
2//!
3//! Kernel PCA is a nonlinear extension of PCA that uses the kernel trick to
4//! perform PCA in a high-dimensional (potentially infinite-dimensional) feature
5//! space without explicitly computing the feature map.
6//!
7//! ## Algorithm
8//!
9//! 1. Compute the kernel (Gram) matrix K
10//! 2. Center K in feature space
11//! 3. Eigendecompose the centered K
12//! 4. Project data using the top eigenvectors
13//!
14//! ## Features
15//!
16//! - Nonlinear dimensionality reduction via kernel trick
17//! - Pre-image estimation (approximate reconstruction to input space)
18//! - Automatic kernel parameter selection via grid search on reconstruction error
19//! - Support for all kernel types in the kernels module
20
21use scirs2_core::ndarray::{Array1, Array2, ArrayBase, Data, Ix2};
22use scirs2_core::numeric::{Float, NumCast};
23use scirs2_linalg::eigh;
24
25use super::kernels::{
26    center_kernel_matrix, center_kernel_matrix_test, cross_gram_matrix, gram_matrix, KernelType,
27};
28use crate::error::{Result, TransformError};
29
30/// Kernel PCA for nonlinear dimensionality reduction
31///
32/// # Example
33///
34/// ```rust,no_run
35/// use scirs2_transform::kernel::{KernelPCA, KernelType};
36/// use scirs2_core::ndarray::Array2;
37///
38/// let data = Array2::<f64>::zeros((50, 10));
39/// let mut kpca = KernelPCA::new(2, KernelType::RBF { gamma: 0.1 });
40/// let embedding = kpca.fit_transform(&data).expect("should succeed");
41/// assert_eq!(embedding.shape(), &[50, 2]);
42/// ```
#[derive(Debug, Clone)]
pub struct KernelPCA {
    /// Number of components to retain
    n_components: usize,
    /// Kernel function type
    kernel: KernelType,
    /// Whether to center the kernel matrix in feature space (default: true)
    center: bool,
    /// Top `n_components` eigenvalues of the centered kernel matrix,
    /// in descending order (populated by `fit`)
    eigenvalues: Option<Array1<f64>>,
    /// Eigenvectors (alphas) scaled by 1/sqrt(eigenvalue),
    /// shape (n_train, n_components) (populated by `fit`)
    alphas: Option<Array2<f64>>,
    /// The centered training kernel matrix (reused when re-projecting the
    /// training data in `transform_training_data`)
    k_train_centered: Option<Array2<f64>>,
    /// The raw training kernel matrix, needed to center test kernel rows
    /// consistently with the training centering (for test centering)
    k_train_raw: Option<Array2<f64>>,
    /// Training data converted to f64 (for pre-image and out-of-sample)
    training_data: Option<Array2<f64>>,
    /// Fraction of total (non-negative) spectrum captured per component
    explained_variance_ratio: Option<Array1<f64>>,
}
64
65impl KernelPCA {
66    /// Create a new KernelPCA instance
67    ///
68    /// # Arguments
69    /// * `n_components` - Number of principal components to retain
70    /// * `kernel` - The kernel function to use
71    pub fn new(n_components: usize, kernel: KernelType) -> Self {
72        KernelPCA {
73            n_components,
74            kernel,
75            center: true,
76            eigenvalues: None,
77            alphas: None,
78            k_train_centered: None,
79            k_train_raw: None,
80            training_data: None,
81            explained_variance_ratio: None,
82        }
83    }
84
85    /// Set whether to center the kernel matrix (default: true)
86    pub fn with_centering(mut self, center: bool) -> Self {
87        self.center = center;
88        self
89    }
90
    /// Get the kernel type
    pub fn kernel(&self) -> &KernelType {
        &self.kernel
    }

    /// Get the top eigenvalues (descending order); `None` before `fit`
    pub fn eigenvalues(&self) -> Option<&Array1<f64>> {
        self.eigenvalues.as_ref()
    }

    /// Get the explained variance ratio per component; `None` before `fit`
    pub fn explained_variance_ratio(&self) -> Option<&Array1<f64>> {
        self.explained_variance_ratio.as_ref()
    }

    /// Get the principal axes (eigenvectors scaled by 1/sqrt(eigenvalue));
    /// `None` before `fit`
    pub fn alphas(&self) -> Option<&Array2<f64>> {
        self.alphas.as_ref()
    }
110
111    /// Fit the Kernel PCA model to the input data
112    ///
113    /// # Arguments
114    /// * `x` - Input data, shape (n_samples, n_features)
115    pub fn fit<S>(&mut self, x: &ArrayBase<S, Ix2>) -> Result<()>
116    where
117        S: Data,
118        S::Elem: Float + NumCast,
119    {
120        let n_samples = x.nrows();
121        let n_features = x.ncols();
122
123        if n_samples == 0 || n_features == 0 {
124            return Err(TransformError::InvalidInput("Empty input data".to_string()));
125        }
126
127        if self.n_components > n_samples {
128            return Err(TransformError::InvalidInput(format!(
129                "n_components={} must be <= n_samples={}",
130                self.n_components, n_samples
131            )));
132        }
133
134        // Convert to f64
135        let x_f64 = x.mapv(|v| NumCast::from(v).unwrap_or(0.0));
136
137        // Compute kernel matrix
138        let k = gram_matrix(&x_f64.view(), &self.kernel)?;
139
140        // Center kernel matrix if requested
141        let k_centered_raw = if self.center {
142            center_kernel_matrix(&k)?
143        } else {
144            k.clone()
145        };
146
147        // Enforce numerical symmetry (centering can introduce tiny asymmetries)
148        let mut k_centered = Array2::zeros((n_samples, n_samples));
149        for i in 0..n_samples {
150            for j in i..n_samples {
151                let sym = 0.5 * (k_centered_raw[[i, j]] + k_centered_raw[[j, i]]);
152                k_centered[[i, j]] = sym;
153                k_centered[[j, i]] = sym;
154            }
155        }
156
157        // Eigendecomposition of the centered kernel matrix
158        let (eigenvalues, eigenvectors) =
159            eigh(&k_centered.view(), None).map_err(TransformError::LinalgError)?;
160
161        // Sort eigenvalues in descending order
162        let mut indices: Vec<usize> = (0..n_samples).collect();
163        indices.sort_by(|&i, &j| {
164            eigenvalues[j]
165                .partial_cmp(&eigenvalues[i])
166                .unwrap_or(std::cmp::Ordering::Equal)
167        });
168
169        // Extract top n_components
170        let mut top_eigenvalues = Array1::zeros(self.n_components);
171        let mut top_eigenvectors = Array2::zeros((n_samples, self.n_components));
172
173        for j in 0..self.n_components {
174            let idx = indices[j];
175            let eigval = eigenvalues[idx].max(0.0);
176            top_eigenvalues[j] = eigval;
177
178            // Normalize eigenvectors by sqrt(eigenvalue)
179            let scale = if eigval > 1e-15 {
180                1.0 / eigval.sqrt()
181            } else {
182                0.0
183            };
184
185            for i in 0..n_samples {
186                top_eigenvectors[[i, j]] = eigenvectors[[i, idx]] * scale;
187            }
188        }
189
190        // Compute explained variance ratio
191        let total_variance: f64 = eigenvalues.iter().map(|&v| v.max(0.0)).sum();
192        let explained_variance_ratio = if total_variance > 1e-15 {
193            top_eigenvalues.mapv(|v| v / total_variance)
194        } else {
195            Array1::zeros(self.n_components)
196        };
197
198        self.eigenvalues = Some(top_eigenvalues);
199        self.alphas = Some(top_eigenvectors);
200        self.k_train_centered = Some(k_centered);
201        self.k_train_raw = Some(k);
202        self.training_data = Some(x_f64);
203        self.explained_variance_ratio = Some(explained_variance_ratio);
204
205        Ok(())
206    }
207
208    /// Transform data using the fitted model
209    ///
210    /// # Arguments
211    /// * `x` - Input data, shape (n_samples, n_features)
212    ///
213    /// # Returns
214    /// * Projected data, shape (n_samples, n_components)
215    pub fn transform<S>(&self, x: &ArrayBase<S, Ix2>) -> Result<Array2<f64>>
216    where
217        S: Data,
218        S::Elem: Float + NumCast,
219    {
220        let alphas = self
221            .alphas
222            .as_ref()
223            .ok_or_else(|| TransformError::NotFitted("KernelPCA not fitted".to_string()))?;
224        let training_data = self
225            .training_data
226            .as_ref()
227            .ok_or_else(|| TransformError::NotFitted("Training data not available".to_string()))?;
228        let eigenvalues = self
229            .eigenvalues
230            .as_ref()
231            .ok_or_else(|| TransformError::NotFitted("Eigenvalues not available".to_string()))?;
232
233        let x_f64 = x.mapv(|v| NumCast::from(v).unwrap_or(0.0));
234
235        // Check if this is the same data
236        if self.is_same_data(&x_f64, training_data) {
237            return self.transform_training_data();
238        }
239
240        // Compute cross-kernel matrix between test and training
241        let k_test = cross_gram_matrix(&x_f64.view(), &training_data.view(), &self.kernel)?;
242
243        // Center the test kernel matrix
244        let k_test_centered = if self.center {
245            let k_train = self.k_train_raw.as_ref().ok_or_else(|| {
246                TransformError::NotFitted("Training kernel matrix not available".to_string())
247            })?;
248            center_kernel_matrix_test(&k_test, k_train)?
249        } else {
250            k_test
251        };
252
253        // Project: X_proj = K_test_centered * alpha
254        let n_test = x_f64.nrows();
255        let n_train = training_data.nrows();
256        let mut projected = Array2::zeros((n_test, self.n_components));
257
258        for i in 0..n_test {
259            for j in 0..self.n_components {
260                let mut sum = 0.0;
261                for k in 0..n_train {
262                    sum += k_test_centered[[i, k]] * alphas[[k, j]];
263                }
264                // Scale by sqrt(eigenvalue) to get proper projection
265                projected[[i, j]] = sum * eigenvalues[j].max(0.0).sqrt();
266            }
267        }
268
269        Ok(projected)
270    }
271
272    /// Transform the training data (using stored centered kernel)
273    fn transform_training_data(&self) -> Result<Array2<f64>> {
274        let alphas = self
275            .alphas
276            .as_ref()
277            .ok_or_else(|| TransformError::NotFitted("KernelPCA not fitted".to_string()))?;
278        let k_centered = self.k_train_centered.as_ref().ok_or_else(|| {
279            TransformError::NotFitted("Centered kernel not available".to_string())
280        })?;
281        let eigenvalues = self
282            .eigenvalues
283            .as_ref()
284            .ok_or_else(|| TransformError::NotFitted("Eigenvalues not available".to_string()))?;
285
286        let n_samples = k_centered.nrows();
287        let mut projected = Array2::zeros((n_samples, self.n_components));
288
289        for i in 0..n_samples {
290            for j in 0..self.n_components {
291                let mut sum = 0.0;
292                for k in 0..n_samples {
293                    sum += k_centered[[i, k]] * alphas[[k, j]];
294                }
295                projected[[i, j]] = sum * eigenvalues[j].max(0.0).sqrt();
296            }
297        }
298
299        Ok(projected)
300    }
301
302    /// Fit and transform in one step
303    pub fn fit_transform<S>(&mut self, x: &ArrayBase<S, Ix2>) -> Result<Array2<f64>>
304    where
305        S: Data,
306        S::Elem: Float + NumCast,
307    {
308        self.fit(x)?;
309        self.transform(x)
310    }
311
312    /// Pre-image estimation: approximate reconstruction from kernel space to input space
313    ///
314    /// Uses an iterative fixed-point method (Mika et al., 1998; Kwok & Tsang, 2004)
315    /// to find an approximate pre-image for the kernel PCA projection.
316    ///
317    /// # Arguments
318    /// * `projected` - Projected data in kernel PCA space, shape (n_samples, n_components)
319    /// * `max_iter` - Maximum number of iterations for the fixed-point method
320    /// * `tol` - Convergence tolerance
321    ///
322    /// # Returns
323    /// * Approximate reconstruction in input space, shape (n_samples, n_features)
324    pub fn inverse_transform(
325        &self,
326        projected: &Array2<f64>,
327        max_iter: usize,
328        tol: f64,
329    ) -> Result<Array2<f64>> {
330        let training_data = self
331            .training_data
332            .as_ref()
333            .ok_or_else(|| TransformError::NotFitted("Training data not available".to_string()))?;
334        let alphas = self
335            .alphas
336            .as_ref()
337            .ok_or_else(|| TransformError::NotFitted("KernelPCA not fitted".to_string()))?;
338        let eigenvalues = self
339            .eigenvalues
340            .as_ref()
341            .ok_or_else(|| TransformError::NotFitted("Eigenvalues not available".to_string()))?;
342
343        let n_samples = projected.nrows();
344        let n_features = training_data.ncols();
345        let n_train = training_data.nrows();
346
347        let mut reconstructed = Array2::zeros((n_samples, n_features));
348
349        for i in 0..n_samples {
350            // Initialize with the nearest training point in kernel space
351            let mut best_idx = 0;
352            let mut best_dist = f64::INFINITY;
353
354            // Compute the target kernel representation
355            let mut target_kernel_rep = Array1::zeros(self.n_components);
356            for j in 0..self.n_components {
357                target_kernel_rep[j] = projected[[i, j]];
358            }
359
360            // Find nearest training point by kernel space distance
361            for t in 0..n_train {
362                let mut dist = 0.0;
363                for j in 0..self.n_components {
364                    let mut train_proj = 0.0;
365                    let k_centered = self.k_train_centered.as_ref().ok_or_else(|| {
366                        TransformError::NotFitted("Centered kernel not available".to_string())
367                    })?;
368                    for k in 0..n_train {
369                        train_proj += k_centered[[t, k]] * alphas[[k, j]];
370                    }
371                    train_proj *= eigenvalues[j].max(0.0).sqrt();
372                    let diff = projected[[i, j]] - train_proj;
373                    dist += diff * diff;
374                }
375                if dist < best_dist {
376                    best_dist = dist;
377                    best_idx = t;
378                }
379            }
380
381            // Start with the nearest training point
382            let mut x_approx = training_data.row(best_idx).to_owned();
383
384            // Iterative fixed-point pre-image estimation
385            for _iter in 0..max_iter {
386                // Compute weights based on kernel between x_approx and training points
387                let mut weights = Array1::zeros(n_train);
388                let mut weight_sum = 0.0;
389
390                for t in 0..n_train {
391                    // Compute k(x_approx, x_t) weighted by the kernel PCA coefficients
392                    let k_val = match &self.kernel {
393                        KernelType::RBF { gamma } => {
394                            let mut dist_sq = 0.0;
395                            for d in 0..n_features {
396                                let diff = x_approx[d] - training_data[[t, d]];
397                                dist_sq += diff * diff;
398                            }
399                            (-gamma * dist_sq).exp()
400                        }
401                        KernelType::Laplacian { gamma } => {
402                            let mut l1_dist = 0.0;
403                            for d in 0..n_features {
404                                l1_dist += (x_approx[d] - training_data[[t, d]]).abs();
405                            }
406                            (-gamma * l1_dist).exp()
407                        }
408                        _ => {
409                            // For other kernels, use distance-based weighting
410                            let mut dist_sq = 0.0;
411                            for d in 0..n_features {
412                                let diff = x_approx[d] - training_data[[t, d]];
413                                dist_sq += diff * diff;
414                            }
415                            if dist_sq > 1e-15 {
416                                1.0 / (1.0 + dist_sq.sqrt())
417                            } else {
418                                1e10
419                            }
420                        }
421                    };
422
423                    // Weight by kernel PCA coefficients
424                    let mut coeff_weight = 0.0;
425                    for j in 0..self.n_components {
426                        coeff_weight += alphas[[t, j]] * projected[[i, j]];
427                    }
428
429                    weights[t] = k_val * coeff_weight.abs();
430                    weight_sum += weights[t];
431                }
432
433                // Normalize weights
434                if weight_sum > 1e-15 {
435                    weights.mapv_inplace(|w| w / weight_sum);
436                } else {
437                    // Fallback: uniform weights
438                    weights = Array1::from_elem(n_train, 1.0 / n_train as f64);
439                }
440
441                // Update x_approx as weighted sum of training points
442                let mut x_new = Array1::zeros(n_features);
443                for t in 0..n_train {
444                    for d in 0..n_features {
445                        x_new[d] += weights[t] * training_data[[t, d]];
446                    }
447                }
448
449                // Check convergence
450                let mut change = 0.0;
451                for d in 0..n_features {
452                    let diff = x_new[d] - x_approx[d];
453                    change += diff * diff;
454                }
455
456                x_approx = x_new;
457
458                if change.sqrt() < tol {
459                    break;
460                }
461            }
462
463            for d in 0..n_features {
464                reconstructed[[i, d]] = x_approx[d];
465            }
466        }
467
468        Ok(reconstructed)
469    }
470
471    /// Automatic kernel parameter selection via grid search
472    ///
473    /// Tries multiple gamma values and selects the one that minimizes
474    /// the reconstruction error (for RBF/Laplacian kernels).
475    ///
476    /// # Arguments
477    /// * `x` - Input data, shape (n_samples, n_features)
478    /// * `gamma_values` - List of gamma values to try
479    /// * `preimage_iter` - Number of pre-image iterations
480    ///
481    /// # Returns
482    /// * The best gamma value and corresponding reconstruction error
483    pub fn auto_select_gamma<S>(
484        x: &ArrayBase<S, Ix2>,
485        n_components: usize,
486        gamma_values: &[f64],
487        preimage_iter: usize,
488    ) -> Result<(f64, f64)>
489    where
490        S: Data,
491        S::Elem: Float + NumCast,
492    {
493        if gamma_values.is_empty() {
494            return Err(TransformError::InvalidInput(
495                "gamma_values must not be empty".to_string(),
496            ));
497        }
498
499        let x_f64: Array2<f64> = x.mapv(|v| NumCast::from(v).unwrap_or(0.0));
500
501        let mut best_gamma = gamma_values[0];
502        let mut best_error = f64::INFINITY;
503
504        for &gamma in gamma_values {
505            let kernel = KernelType::RBF { gamma };
506            let mut kpca = KernelPCA::new(n_components, kernel);
507
508            match kpca.fit(&x_f64.view()) {
509                Ok(()) => {}
510                Err(_) => continue,
511            }
512
513            let projected = match kpca.transform(&x_f64.view()) {
514                Ok(p) => p,
515                Err(_) => continue,
516            };
517
518            let reconstructed = match kpca.inverse_transform(&projected, preimage_iter, 1e-6) {
519                Ok(r) => r,
520                Err(_) => continue,
521            };
522
523            // Compute reconstruction error
524            let mut error = 0.0;
525            let n_samples = x_f64.nrows();
526            let n_features = x_f64.ncols();
527            for i in 0..n_samples {
528                for j in 0..n_features {
529                    let diff = x_f64[[i, j]] - reconstructed[[i, j]];
530                    error += diff * diff;
531                }
532            }
533            error /= n_samples as f64;
534
535            if error < best_error {
536                best_error = error;
537                best_gamma = gamma;
538            }
539        }
540
541        if best_error.is_infinite() {
542            return Err(TransformError::ComputationError(
543                "All gamma values failed".to_string(),
544            ));
545        }
546
547        Ok((best_gamma, best_error))
548    }
549
550    /// Check if two data matrices are identical
551    fn is_same_data(&self, x: &Array2<f64>, training_data: &Array2<f64>) -> bool {
552        if x.dim() != training_data.dim() {
553            return false;
554        }
555        let (n, m) = x.dim();
556        for i in 0..n {
557            for j in 0..m {
558                if (x[[i, j]] - training_data[[i, j]]).abs() > 1e-10 {
559                    return false;
560                }
561            }
562        }
563        true
564    }
565}
566
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array;

    // Builds `n` points on a slowly expanding circle: a small 2-D dataset
    // with nonlinear structure, suitable for exercising kernel PCA.
    fn make_circular_data(n: usize) -> Array2<f64> {
        let mut data = Vec::with_capacity(n * 2);
        for i in 0..n {
            let t = 2.0 * std::f64::consts::PI * i as f64 / n as f64;
            let r = 1.0 + 0.3 * (i as f64 / n as f64);
            data.push(r * t.cos());
            data.push(r * t.sin());
        }
        Array::from_shape_vec((n, 2), data).expect("Failed to create data")
    }

    // RBF kernel: fit_transform yields the requested shape with finite values.
    #[test]
    fn test_kpca_rbf_basic() {
        let data = make_circular_data(30);
        let mut kpca = KernelPCA::new(2, KernelType::RBF { gamma: 1.0 });
        let projected = kpca
            .fit_transform(&data)
            .expect("KPCA fit_transform failed");

        assert_eq!(projected.shape(), &[30, 2]);
        for val in projected.iter() {
            assert!(val.is_finite());
        }
    }

    // Linear kernel: checks shape/finiteness and that the explained variance
    // ratio is a valid, positive fraction of the total.
    #[test]
    fn test_kpca_linear_matches_pca() {
        // Linear kernel PCA should give similar results to regular PCA
        let data = Array::from_shape_vec(
            (6, 3),
            vec![
                1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
                9.0, 10.0,
            ],
        )
        .expect("Failed");

        let mut kpca = KernelPCA::new(2, KernelType::Linear);
        let projected = kpca
            .fit_transform(&data)
            .expect("KPCA fit_transform failed");

        assert_eq!(projected.shape(), &[6, 2]);
        for val in projected.iter() {
            assert!(val.is_finite());
        }

        // Check explained variance ratio sums to less than or equal to 1
        let evr = kpca.explained_variance_ratio().expect("EVR should exist");
        assert!(evr.sum() <= 1.0 + 1e-10);
        assert!(evr.sum() > 0.0);
    }

    // Polynomial kernel smoke test: shape and finiteness only.
    #[test]
    fn test_kpca_polynomial() {
        let data = make_circular_data(20);
        let kernel = KernelType::Polynomial {
            gamma: 1.0,
            coef0: 1.0,
            degree: 2,
        };
        let mut kpca = KernelPCA::new(2, kernel);
        let projected = kpca
            .fit_transform(&data)
            .expect("KPCA fit_transform failed");

        assert_eq!(projected.shape(), &[20, 2]);
        for val in projected.iter() {
            assert!(val.is_finite());
        }
    }

    // Laplacian kernel smoke test: shape and finiteness only.
    #[test]
    fn test_kpca_laplacian() {
        let data = make_circular_data(20);
        let mut kpca = KernelPCA::new(2, KernelType::Laplacian { gamma: 1.0 });
        let projected = kpca
            .fit_transform(&data)
            .expect("KPCA fit_transform failed");

        assert_eq!(projected.shape(), &[20, 2]);
        for val in projected.iter() {
            assert!(val.is_finite());
        }
    }

    // Out-of-sample projection: transform on unseen points after fit.
    #[test]
    fn test_kpca_out_of_sample() {
        let data = make_circular_data(30);
        let mut kpca = KernelPCA::new(2, KernelType::RBF { gamma: 0.5 });
        kpca.fit(&data).expect("KPCA fit failed");

        let test_data =
            Array::from_shape_vec((3, 2), vec![0.5, 0.5, -0.5, 0.5, 0.0, -1.0]).expect("Failed");

        let test_projected = kpca.transform(&test_data).expect("KPCA transform failed");
        assert_eq!(test_projected.shape(), &[3, 2]);
        for val in test_projected.iter() {
            assert!(val.is_finite());
        }
    }

    // Eigenvalues are non-negative and sorted in descending order.
    #[test]
    fn test_kpca_eigenvalues() {
        let data = make_circular_data(20);
        let mut kpca = KernelPCA::new(3, KernelType::RBF { gamma: 0.5 });
        kpca.fit(&data).expect("KPCA fit failed");

        let eigenvalues = kpca.eigenvalues().expect("Eigenvalues should exist");
        assert_eq!(eigenvalues.len(), 3);

        // Eigenvalues should be non-negative and in descending order
        for i in 0..eigenvalues.len() {
            assert!(eigenvalues[i] >= -1e-10);
            if i > 0 {
                assert!(eigenvalues[i] <= eigenvalues[i - 1] + 1e-10);
            }
        }
    }

    // Pre-image estimation returns a finite reconstruction of input shape.
    #[test]
    fn test_kpca_preimage() {
        let data = make_circular_data(15);
        let mut kpca = KernelPCA::new(2, KernelType::RBF { gamma: 0.5 });
        let projected = kpca
            .fit_transform(&data)
            .expect("KPCA fit_transform failed");

        let reconstructed = kpca
            .inverse_transform(&projected, 50, 1e-6)
            .expect("Pre-image estimation failed");

        assert_eq!(reconstructed.shape(), &[15, 2]);
        for val in reconstructed.iter() {
            assert!(val.is_finite());
        }
    }

    // Grid search picks a positive gamma with a finite reconstruction error.
    #[test]
    fn test_kpca_auto_gamma() {
        let data = make_circular_data(15);
        let gammas = vec![0.01, 0.1, 0.5, 1.0, 5.0];

        let (best_gamma, best_error) = KernelPCA::auto_select_gamma(&data.view(), 2, &gammas, 10)
            .expect("Auto gamma selection failed");

        assert!(best_gamma > 0.0);
        assert!(best_error >= 0.0);
        assert!(best_error.is_finite());
    }

    // Fitting on an empty matrix must fail with an error, not panic.
    #[test]
    fn test_kpca_empty_data() {
        let data: Array2<f64> = Array2::zeros((0, 5));
        let mut kpca = KernelPCA::new(2, KernelType::RBF { gamma: 1.0 });
        assert!(kpca.fit(&data).is_err());
    }

    // n_components > n_samples must be rejected.
    #[test]
    fn test_kpca_too_many_components() {
        let data = make_circular_data(5);
        let mut kpca = KernelPCA::new(10, KernelType::RBF { gamma: 1.0 });
        assert!(kpca.fit(&data).is_err());
    }

    // transform before fit must return a NotFitted-style error.
    #[test]
    fn test_kpca_not_fitted() {
        let kpca = KernelPCA::new(2, KernelType::RBF { gamma: 1.0 });
        let data = make_circular_data(10);
        assert!(kpca.transform(&data).is_err());
    }

    // Sigmoid kernel smoke test: shape and finiteness only.
    #[test]
    fn test_kpca_sigmoid() {
        let data = make_circular_data(20);
        let kernel = KernelType::Sigmoid {
            gamma: 0.1,
            coef0: 0.0,
        };
        let mut kpca = KernelPCA::new(2, kernel);
        let projected = kpca
            .fit_transform(&data)
            .expect("KPCA fit_transform failed");

        assert_eq!(projected.shape(), &[20, 2]);
        for val in projected.iter() {
            assert!(val.is_finite());
        }
    }

    // Centering disabled via the builder: pipeline still produces finite output.
    #[test]
    fn test_kpca_no_centering() {
        let data = make_circular_data(20);
        let mut kpca = KernelPCA::new(2, KernelType::RBF { gamma: 0.5 }).with_centering(false);
        let projected = kpca
            .fit_transform(&data)
            .expect("KPCA fit_transform failed");

        assert_eq!(projected.shape(), &[20, 2]);
        for val in projected.iter() {
            assert!(val.is_finite());
        }
    }
}
775}