1use ferrolearn_core::error::FerroError;
44use ferrolearn_core::pipeline::{FittedPipelineTransformer, PipelineTransformer};
45use ferrolearn_core::traits::{Fit, Transform};
46use ndarray::{Array1, Array2};
47use num_traits::Float;
48
/// Kernel functions supported by `KernelPCA`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Kernel {
    /// Plain dot product: k(x, y) = <x, y>.
    Linear,
    /// Gaussian radial basis function: k(x, y) = exp(-gamma * ||x - y||^2).
    RBF,
    /// Polynomial: k(x, y) = (gamma * <x, y> + coef0)^degree.
    Polynomial,
    /// Hyperbolic tangent: k(x, y) = tanh(gamma * <x, y> + coef0).
    Sigmoid,
}
65
/// Kernel Principal Component Analysis configuration (unfitted).
///
/// Projects data onto the leading eigenvectors of a centred kernel (Gram)
/// matrix, enabling non-linear dimensionality reduction. Configure via the
/// builder methods, then call `fit`.
#[derive(Debug, Clone)]
pub struct KernelPCA<F> {
    // Number of components to keep; validated at fit time (>= 1, <= n_samples).
    n_components: usize,
    // Kernel function used to build the Gram matrix.
    kernel: Kernel,
    // Kernel coefficient; `None` means "auto" (1 / n_features at fit time).
    gamma: Option<f64>,
    // Degree of the polynomial kernel (ignored by the other kernels).
    degree: usize,
    // Independent term for the polynomial and sigmoid kernels.
    coef0: f64,
    // Zero-sized marker tying the configuration to the float type `F`.
    _marker: std::marker::PhantomData<F>,
}
89
90impl<F: Float + Send + Sync + 'static> KernelPCA<F> {
91 #[must_use]
96 pub fn new(n_components: usize) -> Self {
97 Self {
98 n_components,
99 kernel: Kernel::Linear,
100 gamma: None,
101 degree: 3,
102 coef0: 0.0,
103 _marker: std::marker::PhantomData,
104 }
105 }
106
107 #[must_use]
109 pub fn with_kernel(mut self, kernel: Kernel) -> Self {
110 self.kernel = kernel;
111 self
112 }
113
114 #[must_use]
116 pub fn with_gamma(mut self, gamma: f64) -> Self {
117 self.gamma = Some(gamma);
118 self
119 }
120
121 #[must_use]
123 pub fn with_degree(mut self, degree: usize) -> Self {
124 self.degree = degree;
125 self
126 }
127
128 #[must_use]
130 pub fn with_coef0(mut self, coef0: f64) -> Self {
131 self.coef0 = coef0;
132 self
133 }
134
135 #[must_use]
137 pub fn n_components(&self) -> usize {
138 self.n_components
139 }
140
141 #[must_use]
143 pub fn kernel(&self) -> Kernel {
144 self.kernel
145 }
146
147 #[must_use]
149 pub fn gamma(&self) -> Option<f64> {
150 self.gamma
151 }
152
153 #[must_use]
155 pub fn degree(&self) -> usize {
156 self.degree
157 }
158
159 #[must_use]
161 pub fn coef0(&self) -> f64 {
162 self.coef0
163 }
164}
165
/// A fitted Kernel PCA model produced by `KernelPCA::fit`.
///
/// Stores everything needed to project new data: the scaled eigenvectors,
/// the training data (to evaluate kernels against), and the centring
/// statistics of the training kernel matrix.
#[derive(Debug, Clone)]
pub struct FittedKernelPCA<F> {
    // Eigenvectors of the centred training kernel matrix, scaled by
    // 1/sqrt(eigenvalue); shape (n_train, n_components).
    alphas_: Array2<F>,

    // Top eigenvalues (clamped to non-negative), in descending order.
    eigenvalues_: Array1<F>,

    // Training data retained so transform() can evaluate the kernel
    // between new samples and the training set.
    x_fit_: Array2<F>,

    // Column means of the *uncentred* training kernel matrix.
    k_fit_col_means_: Array1<F>,

    // Grand mean of the *uncentred* training kernel matrix.
    k_fit_grand_mean_: F,

    // Kernel and hyperparameters as resolved at fit time.
    kernel: Kernel,
    // Resolved gamma (auto value substituted when it was `None`).
    gamma: f64,
    // Polynomial degree used at fit time.
    degree: usize,
    // Independent term used at fit time.
    coef0: f64,
}
202
203impl<F: Float + Send + Sync + 'static> FittedKernelPCA<F> {
204 #[must_use]
206 pub fn alphas(&self) -> &Array2<F> {
207 &self.alphas_
208 }
209
210 #[must_use]
212 pub fn eigenvalues(&self) -> &Array1<F> {
213 &self.eigenvalues_
214 }
215}
216
217fn kernel_value<F: Float>(
223 x: &[F],
224 y: &[F],
225 kernel: Kernel,
226 gamma: f64,
227 degree: usize,
228 coef0: f64,
229) -> F {
230 let gamma_f = F::from(gamma).unwrap();
231 let coef0_f = F::from(coef0).unwrap();
232
233 match kernel {
234 Kernel::Linear => {
235 let mut dot = F::zero();
236 for (&a, &b) in x.iter().zip(y.iter()) {
237 dot = dot + a * b;
238 }
239 dot
240 }
241 Kernel::RBF => {
242 let mut sq_dist = F::zero();
243 for (&a, &b) in x.iter().zip(y.iter()) {
244 let diff = a - b;
245 sq_dist = sq_dist + diff * diff;
246 }
247 (-gamma_f * sq_dist).exp()
248 }
249 Kernel::Polynomial => {
250 let mut dot = F::zero();
251 for (&a, &b) in x.iter().zip(y.iter()) {
252 dot = dot + a * b;
253 }
254 let base = gamma_f * dot + coef0_f;
255 let mut result = F::one();
256 for _ in 0..degree {
257 result = result * base;
258 }
259 result
260 }
261 Kernel::Sigmoid => {
262 let mut dot = F::zero();
263 for (&a, &b) in x.iter().zip(y.iter()) {
264 dot = dot + a * b;
265 }
266 (gamma_f * dot + coef0_f).tanh()
267 }
268 }
269}
270
271fn compute_kernel_matrix<F: Float>(
273 x1: &Array2<F>,
274 x2: &Array2<F>,
275 kernel: Kernel,
276 gamma: f64,
277 degree: usize,
278 coef0: f64,
279) -> Array2<F> {
280 let n1 = x1.nrows();
281 let n2 = x2.nrows();
282 let mut k = Array2::<F>::zeros((n1, n2));
283
284 for i in 0..n1 {
285 let row_i: Vec<F> = x1.row(i).to_vec();
286 for j in 0..n2 {
287 let row_j: Vec<F> = x2.row(j).to_vec();
288 k[[i, j]] = kernel_value(&row_i, &row_j, kernel, gamma, degree, coef0);
289 }
290 }
291
292 k
293}
294
295fn centre_kernel_matrix<F: Float>(k: &mut Array2<F>) {
300 let n = k.nrows();
301 let n_f = F::from(n).unwrap();
302
303 let mut col_means = Array1::<F>::zeros(n);
305 for j in 0..n {
306 let mut sum = F::zero();
307 for i in 0..n {
308 sum = sum + k[[i, j]];
309 }
310 col_means[j] = sum / n_f;
311 }
312
313 let grand_mean = col_means.iter().copied().fold(F::zero(), |a, b| a + b) / n_f;
315
316 for i in 0..n {
319 for j in 0..n {
320 k[[i, j]] = k[[i, j]] - col_means[i] - col_means[j] + grand_mean;
321 }
322 }
323}
324
325fn jacobi_eigen_symmetric<F: Float + Send + Sync + 'static>(
327 a: &Array2<F>,
328 max_iter: usize,
329) -> Result<(Array1<F>, Array2<F>), FerroError> {
330 let n = a.nrows();
331 if n == 0 {
332 return Ok((Array1::zeros(0), Array2::zeros((0, 0))));
333 }
334 if n == 1 {
335 let eigenvalues = Array1::from_vec(vec![a[[0, 0]]]);
336 let eigenvectors = Array2::from_shape_vec((1, 1), vec![F::one()]).unwrap();
337 return Ok((eigenvalues, eigenvectors));
338 }
339
340 let mut mat = a.to_owned();
341 let mut v = Array2::<F>::zeros((n, n));
342 for i in 0..n {
343 v[[i, i]] = F::one();
344 }
345
346 let tol = F::from(1e-12).unwrap_or(F::epsilon());
347
348 for _iteration in 0..max_iter {
349 let mut max_off = F::zero();
350 let mut p = 0;
351 let mut q = 1;
352 for i in 0..n {
353 for j in (i + 1)..n {
354 let val = mat[[i, j]].abs();
355 if val > max_off {
356 max_off = val;
357 p = i;
358 q = j;
359 }
360 }
361 }
362
363 if max_off < tol {
364 let eigenvalues = Array1::from_shape_fn(n, |i| mat[[i, i]]);
365 return Ok((eigenvalues, v));
366 }
367
368 let app = mat[[p, p]];
369 let aqq = mat[[q, q]];
370 let apq = mat[[p, q]];
371
372 let theta = if (app - aqq).abs() < tol {
373 F::from(std::f64::consts::FRAC_PI_4).unwrap_or(F::one())
374 } else {
375 let tau = (aqq - app) / (F::from(2.0).unwrap() * apq);
376 let t = if tau >= F::zero() {
377 F::one() / (tau.abs() + (F::one() + tau * tau).sqrt())
378 } else {
379 -F::one() / (tau.abs() + (F::one() + tau * tau).sqrt())
380 };
381 t.atan()
382 };
383
384 let c = theta.cos();
385 let s = theta.sin();
386
387 let mut new_mat = mat.clone();
388 for i in 0..n {
389 if i != p && i != q {
390 let mip = mat[[i, p]];
391 let miq = mat[[i, q]];
392 new_mat[[i, p]] = c * mip - s * miq;
393 new_mat[[p, i]] = new_mat[[i, p]];
394 new_mat[[i, q]] = s * mip + c * miq;
395 new_mat[[q, i]] = new_mat[[i, q]];
396 }
397 }
398
399 new_mat[[p, p]] = c * c * app - F::from(2.0).unwrap() * s * c * apq + s * s * aqq;
400 new_mat[[q, q]] = s * s * app + F::from(2.0).unwrap() * s * c * apq + c * c * aqq;
401 new_mat[[p, q]] = F::zero();
402 new_mat[[q, p]] = F::zero();
403
404 mat = new_mat;
405
406 for i in 0..n {
407 let vip = v[[i, p]];
408 let viq = v[[i, q]];
409 v[[i, p]] = c * vip - s * viq;
410 v[[i, q]] = s * vip + c * viq;
411 }
412 }
413
414 Err(FerroError::ConvergenceFailure {
415 iterations: max_iter,
416 message: "Jacobi eigendecomposition did not converge in KernelPCA".into(),
417 })
418}
419
420impl<F: Float + Send + Sync + 'static> Fit<Array2<F>, ()> for KernelPCA<F> {
425 type Fitted = FittedKernelPCA<F>;
426 type Error = FerroError;
427
428 fn fit(&self, x: &Array2<F>, _y: &()) -> Result<FittedKernelPCA<F>, FerroError> {
439 let (n_samples, n_features) = x.dim();
440
441 if self.n_components == 0 {
442 return Err(FerroError::InvalidParameter {
443 name: "n_components".into(),
444 reason: "must be at least 1".into(),
445 });
446 }
447 if n_samples < 2 {
448 return Err(FerroError::InsufficientSamples {
449 required: 2,
450 actual: n_samples,
451 context: "KernelPCA::fit requires at least 2 samples".into(),
452 });
453 }
454 if self.n_components > n_samples {
455 return Err(FerroError::InvalidParameter {
456 name: "n_components".into(),
457 reason: format!(
458 "n_components ({}) exceeds n_samples ({})",
459 self.n_components, n_samples
460 ),
461 });
462 }
463
464 let effective_gamma = self.gamma.unwrap_or(1.0 / n_features as f64);
466
467 let mut k =
469 compute_kernel_matrix(x, x, self.kernel, effective_gamma, self.degree, self.coef0);
470
471 let n_f = F::from(n_samples).unwrap();
473 let mut k_col_means = Array1::<F>::zeros(n_samples);
474 for j in 0..n_samples {
475 let mut sum = F::zero();
476 for i in 0..n_samples {
477 sum = sum + k[[i, j]];
478 }
479 k_col_means[j] = sum / n_f;
480 }
481 let k_grand_mean = k_col_means.iter().copied().fold(F::zero(), |a, b| a + b) / n_f;
482
483 centre_kernel_matrix(&mut k);
485
486 let max_iter = n_samples * n_samples * 100 + 1000;
488 let (eigenvalues, eigenvectors) = jacobi_eigen_symmetric(&k, max_iter)?;
489
490 let mut indices: Vec<usize> = (0..n_samples).collect();
492 indices.sort_by(|&a, &b| {
493 eigenvalues[b]
494 .partial_cmp(&eigenvalues[a])
495 .unwrap_or(std::cmp::Ordering::Equal)
496 });
497
498 let n_comp = self.n_components;
499 let mut alphas = Array2::<F>::zeros((n_samples, n_comp));
500 let mut top_eigenvalues = Array1::<F>::zeros(n_comp);
501
502 for (k_idx, &eigen_idx) in indices.iter().take(n_comp).enumerate() {
503 let eigval = eigenvalues[eigen_idx];
504 let eigval_clamped = if eigval > F::zero() {
505 eigval
506 } else {
507 F::zero()
508 };
509 top_eigenvalues[k_idx] = eigval_clamped;
510
511 let scale = if eigval_clamped > F::from(1e-12).unwrap_or(F::epsilon()) {
513 F::one() / eigval_clamped.sqrt()
514 } else {
515 F::zero()
516 };
517
518 for i in 0..n_samples {
519 alphas[[i, k_idx]] = eigenvectors[[i, eigen_idx]] * scale;
520 }
521 }
522
523 Ok(FittedKernelPCA {
524 alphas_: alphas,
525 eigenvalues_: top_eigenvalues,
526 x_fit_: x.to_owned(),
527 k_fit_col_means_: k_col_means,
528 k_fit_grand_mean_: k_grand_mean,
529 kernel: self.kernel,
530 gamma: effective_gamma,
531 degree: self.degree,
532 coef0: self.coef0,
533 })
534 }
535}
536
537impl<F: Float + Send + Sync + 'static> Transform<Array2<F>> for FittedKernelPCA<F> {
538 type Output = Array2<F>;
539 type Error = FerroError;
540
541 fn transform(&self, x: &Array2<F>) -> Result<Array2<F>, FerroError> {
551 let n_features = self.x_fit_.ncols();
552 if x.ncols() != n_features {
553 return Err(FerroError::ShapeMismatch {
554 expected: vec![x.nrows(), n_features],
555 actual: vec![x.nrows(), x.ncols()],
556 context: "FittedKernelPCA::transform".into(),
557 });
558 }
559
560 let n_test = x.nrows();
561 let n_train = self.x_fit_.nrows();
562 let n_f = F::from(n_train).unwrap();
563
564 let k_test = compute_kernel_matrix(
566 x,
567 &self.x_fit_,
568 self.kernel,
569 self.gamma,
570 self.degree,
571 self.coef0,
572 );
573
574 let mut k_centered = Array2::<F>::zeros((n_test, n_train));
580 for i in 0..n_test {
581 let mut row_mean = F::zero();
583 for j in 0..n_train {
584 row_mean = row_mean + k_test[[i, j]];
585 }
586 row_mean = row_mean / n_f;
587
588 for j in 0..n_train {
589 k_centered[[i, j]] =
590 k_test[[i, j]] - self.k_fit_col_means_[j] - row_mean + self.k_fit_grand_mean_;
591 }
592 }
593
594 Ok(k_centered.dot(&self.alphas_))
596 }
597}
598
599impl<F: Float + Send + Sync + 'static> PipelineTransformer<F> for KernelPCA<F> {
604 fn fit_pipeline(
612 &self,
613 x: &Array2<F>,
614 _y: &Array1<F>,
615 ) -> Result<Box<dyn FittedPipelineTransformer<F>>, FerroError> {
616 let fitted = self.fit(x, &())?;
617 Ok(Box::new(fitted))
618 }
619}
620
621impl<F: Float + Send + Sync + 'static> FittedPipelineTransformer<F> for FittedKernelPCA<F> {
622 fn transform_pipeline(&self, x: &Array2<F>) -> Result<Array2<F>, FerroError> {
628 self.transform(x)
629 }
630}
631
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;
    use ndarray::array;

    /// Points on two concentric circles (radius 1 and 2).
    fn circle_dataset() -> Array2<f64> {
        array![
            [1.0, 0.0],
            [0.0, 1.0],
            [-1.0, 0.0],
            [0.0, -1.0],
            [2.0, 0.0],
            [0.0, 2.0],
            [-2.0, 0.0],
            [0.0, -2.0],
        ]
    }

    /// Perfectly collinear points (rank-1 structure after centring).
    fn linear_dataset() -> Array2<f64> {
        array![
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [7.0, 8.0, 9.0],
            [10.0, 11.0, 12.0],
            [13.0, 14.0, 15.0],
        ]
    }

    #[test]
    fn test_kernel_pca_linear_basic() {
        let kpca = KernelPCA::<f64>::new(2).with_kernel(Kernel::Linear);
        let x = linear_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        let projected = fitted.transform(&x).unwrap();
        assert_eq!(projected.dim(), (5, 2));
    }

    #[test]
    fn test_kernel_pca_rbf_basic() {
        let kpca = KernelPCA::<f64>::new(2)
            .with_kernel(Kernel::RBF)
            .with_gamma(0.5);
        let x = circle_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        let projected = fitted.transform(&x).unwrap();
        assert_eq!(projected.dim(), (8, 2));
    }

    #[test]
    fn test_kernel_pca_polynomial_basic() {
        let kpca = KernelPCA::<f64>::new(2)
            .with_kernel(Kernel::Polynomial)
            .with_degree(2)
            .with_gamma(1.0)
            .with_coef0(1.0);
        let x = circle_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        let projected = fitted.transform(&x).unwrap();
        assert_eq!(projected.dim(), (8, 2));
    }

    #[test]
    fn test_kernel_pca_sigmoid_basic() {
        let kpca = KernelPCA::<f64>::new(2)
            .with_kernel(Kernel::Sigmoid)
            .with_gamma(0.01)
            .with_coef0(0.0);
        let x = linear_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        let projected = fitted.transform(&x).unwrap();
        assert_eq!(projected.dim(), (5, 2));
    }

    #[test]
    fn test_kernel_pca_eigenvalues_non_negative() {
        let kpca = KernelPCA::<f64>::new(3)
            .with_kernel(Kernel::RBF)
            .with_gamma(0.1);
        let x = circle_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        for &ev in fitted.eigenvalues().iter() {
            assert!(ev >= 0.0, "eigenvalue should be non-negative, got {ev}");
        }
    }

    #[test]
    fn test_kernel_pca_eigenvalues_sorted_descending() {
        let kpca = KernelPCA::<f64>::new(3)
            .with_kernel(Kernel::RBF)
            .with_gamma(0.1);
        let x = circle_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        let ev = fitted.eigenvalues();
        for i in 1..ev.len() {
            assert!(
                ev[i - 1] >= ev[i] - 1e-10,
                "eigenvalues not sorted: ev[{}]={} < ev[{}]={}",
                i - 1,
                ev[i - 1],
                i,
                ev[i]
            );
        }
    }

    #[test]
    fn test_kernel_pca_single_component() {
        let kpca = KernelPCA::<f64>::new(1)
            .with_kernel(Kernel::RBF)
            .with_gamma(0.5);
        let x = circle_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        assert_eq!(fitted.alphas().ncols(), 1);
        assert_eq!(fitted.eigenvalues().len(), 1);
        let projected = fitted.transform(&x).unwrap();
        assert_eq!(projected.ncols(), 1);
    }

    #[test]
    fn test_kernel_pca_invalid_n_components_zero() {
        let kpca = KernelPCA::<f64>::new(0);
        let x = linear_dataset();
        assert!(kpca.fit(&x, &()).is_err());
    }

    #[test]
    fn test_kernel_pca_invalid_n_components_too_large() {
        let kpca = KernelPCA::<f64>::new(20);
        let x = linear_dataset();
        assert!(kpca.fit(&x, &()).is_err());
    }

    #[test]
    fn test_kernel_pca_insufficient_samples() {
        let kpca = KernelPCA::<f64>::new(1);
        let x = array![[1.0, 2.0]];
        assert!(kpca.fit(&x, &()).is_err());
    }

    #[test]
    fn test_kernel_pca_shape_mismatch_transform() {
        let kpca = KernelPCA::<f64>::new(1).with_kernel(Kernel::Linear);
        let x = linear_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        let x_bad = array![[1.0, 2.0]];
        assert!(fitted.transform(&x_bad).is_err());
    }

    #[test]
    fn test_kernel_pca_transform_new_data() {
        let kpca = KernelPCA::<f64>::new(2)
            .with_kernel(Kernel::RBF)
            .with_gamma(0.5);
        let x_train = circle_dataset();
        let fitted = kpca.fit(&x_train, &()).unwrap();
        let x_test = array![[1.5, 0.5], [-0.5, 1.5]];
        let projected = fitted.transform(&x_test).unwrap();
        assert_eq!(projected.dim(), (2, 2));
    }

    #[test]
    fn test_kernel_pca_auto_gamma() {
        let kpca = KernelPCA::<f64>::new(2).with_kernel(Kernel::RBF);
        let x = linear_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        let projected = fitted.transform(&x).unwrap();
        assert_eq!(projected.dim(), (5, 2));
    }

    #[test]
    fn test_kernel_pca_getters() {
        let kpca = KernelPCA::<f64>::new(3)
            .with_kernel(Kernel::Polynomial)
            .with_gamma(0.5)
            .with_degree(4)
            .with_coef0(2.0);
        assert_eq!(kpca.n_components(), 3);
        assert_eq!(kpca.kernel(), Kernel::Polynomial);
        assert_eq!(kpca.gamma(), Some(0.5));
        assert_eq!(kpca.degree(), 4);
        assert_abs_diff_eq!(kpca.coef0(), 2.0);
    }

    #[test]
    fn test_kernel_pca_f32() {
        let kpca = KernelPCA::<f32>::new(1).with_kernel(Kernel::Linear);
        let x: Array2<f32> = array![[1.0f32, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]];
        let fitted = kpca.fit(&x, &()).unwrap();
        let projected = fitted.transform(&x).unwrap();
        assert_eq!(projected.ncols(), 1);
    }

    #[test]
    fn test_kernel_pca_linear_resembles_pca() {
        // Collinear, evenly spaced points should project to evenly spaced
        // values along the first component.
        let kpca = KernelPCA::<f64>::new(1).with_kernel(Kernel::Linear);
        let x = linear_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        let projected = fitted.transform(&x).unwrap();
        assert_eq!(projected.ncols(), 1);
        let diffs: Vec<f64> = (1..projected.nrows())
            .map(|i| (projected[[i, 0]] - projected[[i - 1, 0]]).abs())
            .collect();
        for d in &diffs {
            assert_abs_diff_eq!(*d, diffs[0], epsilon = 1e-6);
        }
    }

    #[test]
    fn test_kernel_pca_pipeline_integration() {
        use ferrolearn_core::pipeline::{FittedPipelineEstimator, Pipeline, PipelineEstimator};
        use ferrolearn_core::traits::Predict;

        struct SumEstimator;

        impl PipelineEstimator<f64> for SumEstimator {
            fn fit_pipeline(
                &self,
                _x: &Array2<f64>,
                _y: &Array1<f64>,
            ) -> Result<Box<dyn FittedPipelineEstimator<f64>>, FerroError> {
                Ok(Box::new(FittedSumEstimator))
            }
        }

        struct FittedSumEstimator;

        impl FittedPipelineEstimator<f64> for FittedSumEstimator {
            fn predict_pipeline(&self, x: &Array2<f64>) -> Result<Array1<f64>, FerroError> {
                let sums: Vec<f64> = x.rows().into_iter().map(|r| r.sum()).collect();
                Ok(Array1::from_vec(sums))
            }
        }

        let pipeline = Pipeline::new()
            .transform_step(
                "kpca",
                Box::new(
                    KernelPCA::<f64>::new(2)
                        .with_kernel(Kernel::RBF)
                        .with_gamma(0.5),
                ),
            )
            .estimator_step("sum", Box::new(SumEstimator));

        let x = circle_dataset();
        let y = Array1::from_vec(vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]);

        let fitted = pipeline.fit(&x, &y).unwrap();
        let preds = fitted.predict(&x).unwrap();
        assert_eq!(preds.len(), 8);
    }

    #[test]
    fn test_kernel_pca_max_components_equals_samples() {
        let kpca = KernelPCA::<f64>::new(5).with_kernel(Kernel::Linear);
        let x = linear_dataset();
        let fitted = kpca.fit(&x, &()).unwrap();
        assert_eq!(fitted.eigenvalues().len(), 5);
    }

    #[test]
    fn test_kernel_pca_rbf_sensitivity_to_gamma() {
        let kpca_small = KernelPCA::<f64>::new(2)
            .with_kernel(Kernel::RBF)
            .with_gamma(0.01);
        let kpca_large = KernelPCA::<f64>::new(2)
            .with_kernel(Kernel::RBF)
            .with_gamma(10.0);
        let x = circle_dataset();
        let fitted_small = kpca_small.fit(&x, &()).unwrap();
        let fitted_large = kpca_large.fit(&x, &()).unwrap();
        let proj_small = fitted_small.transform(&x).unwrap();
        let proj_large = fitted_large.transform(&x).unwrap();
        let mut diff_sum = 0.0;
        for (a, b) in proj_small.iter().zip(proj_large.iter()) {
            diff_sum += (a - b).abs();
        }
        assert!(
            diff_sum > 1e-6,
            "different gamma should produce different projections"
        );
    }
}