sklears_kernel_approximation/
rbf_sampler.rs

1//! Random Fourier Features for RBF Kernel Approximation
2use scirs2_core::ndarray::{Array1, Array2, Axis};
3use scirs2_core::random::essentials::{Normal as RandNormal, Uniform as RandUniform};
4use scirs2_core::random::rngs::StdRng as RealStdRng;
5use scirs2_core::random::Rng;
6use scirs2_core::random::{thread_rng, SeedableRng};
7use scirs2_core::Cauchy;
8use sklears_core::{
9    error::{Result, SklearsError},
10    prelude::{Fit, Transform},
11    traits::{Estimator, Trained, Untrained},
12    types::Float,
13};
14use std::marker::PhantomData;
15
/// Random Fourier Features for RBF kernel approximation
///
/// Approximates the RBF kernel K(x,y) = exp(-gamma * ||x-y||²) using
/// random Fourier features (Random Kitchen Sinks).
///
/// # Parameters
///
/// * `gamma` - RBF kernel parameter (default: 1.0)
/// * `n_components` - Number of Monte Carlo samples (default: 100)
/// * `random_state` - Random seed for reproducibility
///
/// # Examples
///
/// ```rust,ignore
/// use sklears_kernel_approximation::RBFSampler;
/// use sklears_core::traits::{Transform, Fit, Untrained};
/// use scirs2_core::ndarray::array;
///
/// let X = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
///
/// let rbf = RBFSampler::new(100);
/// let fitted_rbf = rbf.fit(&X, &()).unwrap();
/// let X_transformed = fitted_rbf.transform(&X).unwrap();
/// assert_eq!(X_transformed.shape(), &[3, 100]);
/// ```
#[derive(Debug, Clone)]
/// Random Fourier Feature sampler for the RBF (Gaussian) kernel.
pub struct RBFSampler<State = Untrained> {
    /// RBF kernel parameter (the gamma in exp(-gamma * ||x-y||²)); must be > 0
    pub gamma: Float,
    /// Number of Monte Carlo samples (dimensionality of the transformed output)
    pub n_components: usize,
    /// Random seed; `None` draws a fresh seed from the thread RNG at fit time
    pub random_state: Option<u64>,

    // Fitted attributes — `None` until `fit` runs.
    // Projection matrix of shape (n_features, n_components); entries ~ N(0, 2*gamma).
    random_weights_: Option<Array2<Float>>,
    // Phase offsets of length n_components; entries ~ Uniform(0, 2π).
    random_offset_: Option<Array1<Float>>,

    // Zero-sized typestate marker (Untrained / Trained).
    _state: PhantomData<State>,
}
57
58impl RBFSampler<Untrained> {
59    /// Create a new RBF sampler
60    pub fn new(n_components: usize) -> Self {
61        Self {
62            gamma: 1.0,
63            n_components,
64            random_state: None,
65            random_weights_: None,
66            random_offset_: None,
67            _state: PhantomData,
68        }
69    }
70
71    /// Set the gamma parameter
72    pub fn gamma(mut self, gamma: Float) -> Self {
73        self.gamma = gamma;
74        self
75    }
76
77    /// Set random state for reproducibility
78    pub fn random_state(mut self, seed: u64) -> Self {
79        self.random_state = Some(seed);
80        self
81    }
82}
83
impl Estimator for RBFSampler<Untrained> {
    type Config = ();
    type Error = SklearsError;
    type Float = Float;

    /// This estimator is configuration-free, so the unit value is the
    /// config; `&()` is a valid `'static` reference.
    fn config(&self) -> &Self::Config {
        &()
    }
}
93
94impl Fit<Array2<Float>, ()> for RBFSampler<Untrained> {
95    type Fitted = RBFSampler<Trained>;
96
97    fn fit(self, x: &Array2<Float>, _y: &()) -> Result<Self::Fitted> {
98        let (_, n_features) = x.dim();
99
100        if self.gamma <= 0.0 {
101            return Err(SklearsError::InvalidInput(
102                "gamma must be positive".to_string(),
103            ));
104        }
105
106        if self.n_components == 0 {
107            return Err(SklearsError::InvalidInput(
108                "n_components must be positive".to_string(),
109            ));
110        }
111
112        let mut rng = if let Some(seed) = self.random_state {
113            RealStdRng::seed_from_u64(seed)
114        } else {
115            RealStdRng::from_seed(thread_rng().gen())
116        };
117
118        // Sample random weights from N(0, 2*gamma)
119        let normal = RandNormal::new(0.0, (2.0 * self.gamma).sqrt()).unwrap();
120        let mut random_weights = Array2::zeros((n_features, self.n_components));
121        for mut col in random_weights.columns_mut() {
122            for val in col.iter_mut() {
123                *val = rng.sample(normal);
124            }
125        }
126
127        // Sample random offsets from Uniform(0, 2π)
128        let uniform = RandUniform::new(0.0, 2.0 * std::f64::consts::PI).unwrap();
129        let mut random_offset = Array1::zeros(self.n_components);
130        for val in random_offset.iter_mut() {
131            *val = rng.sample(uniform);
132        }
133
134        Ok(RBFSampler {
135            gamma: self.gamma,
136            n_components: self.n_components,
137            random_state: self.random_state,
138            random_weights_: Some(random_weights),
139            random_offset_: Some(random_offset),
140            _state: PhantomData,
141        })
142    }
143}
144
145impl Transform<Array2<Float>, Array2<Float>> for RBFSampler<Trained> {
146    fn transform(&self, x: &Array2<Float>) -> Result<Array2<Float>> {
147        let (_n_samples, n_features) = x.dim();
148        let weights = self.random_weights_.as_ref().unwrap();
149        let offset = self.random_offset_.as_ref().unwrap();
150
151        if n_features != weights.nrows() {
152            return Err(SklearsError::InvalidInput(format!(
153                "X has {} features, but RBFSampler was fitted with {} features",
154                n_features,
155                weights.nrows()
156            )));
157        }
158
159        // Compute projection: X @ weights + offset
160        let projection = x.dot(weights) + offset.view().insert_axis(Axis(0));
161
162        // Apply cosine and normalize: sqrt(2/n_components) * cos(projection)
163        let normalization = (2.0 / self.n_components as Float).sqrt();
164        let result = projection.mapv(|v| normalization * v.cos());
165
166        Ok(result)
167    }
168}
169
impl RBFSampler<Trained> {
    /// Projection matrix W of shape (n_features, n_components).
    ///
    /// Always `Some` in the `Trained` state, so the unwrap cannot panic.
    pub fn random_weights(&self) -> &Array2<Float> {
        self.random_weights_.as_ref().unwrap()
    }

    /// Phase offsets b of length n_components.
    pub fn random_offset(&self) -> &Array1<Float> {
        self.random_offset_.as_ref().unwrap()
    }
}
181
/// Laplacian kernel approximation using Random Fourier Features
///
/// Approximates the Laplacian kernel K(x,y) = exp(-gamma * ||x-y||₁) using
/// random Fourier features with Cauchy distribution.
///
/// # Parameters
///
/// * `gamma` - Laplacian kernel parameter (default: 1.0)
/// * `n_components` - Number of Monte Carlo samples (default: 100)
/// * `random_state` - Random seed for reproducibility
///
/// # Examples
///
/// ```rust,ignore
/// use sklears_kernel_approximation::LaplacianSampler;
/// use sklears_core::traits::{Transform, Fit, Untrained};
/// use scirs2_core::ndarray::array;
///
/// let X = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
///
/// let laplacian = LaplacianSampler::new(100);
/// let fitted_laplacian = laplacian.fit(&X, &()).unwrap();
/// let X_transformed = fitted_laplacian.transform(&X).unwrap();
/// assert_eq!(X_transformed.shape(), &[3, 100]);
/// ```
#[derive(Debug, Clone)]
/// Random Fourier Feature sampler for the Laplacian kernel.
pub struct LaplacianSampler<State = Untrained> {
    /// Laplacian kernel parameter (the gamma in exp(-gamma * ||x-y||₁)); must be > 0
    pub gamma: Float,
    /// Number of Monte Carlo samples (dimensionality of the transformed output)
    pub n_components: usize,
    /// Random seed; `None` draws a fresh seed from the thread RNG at fit time
    pub random_state: Option<u64>,

    // Fitted attributes — `None` until `fit` runs.
    // Projection matrix of shape (n_features, n_components); entries ~ Cauchy(0, gamma).
    random_weights_: Option<Array2<Float>>,
    // Phase offsets of length n_components; entries ~ Uniform(0, 2π).
    random_offset_: Option<Array1<Float>>,

    // Zero-sized typestate marker (Untrained / Trained).
    _state: PhantomData<State>,
}
223
224impl LaplacianSampler<Untrained> {
225    /// Create a new Laplacian sampler
226    pub fn new(n_components: usize) -> Self {
227        Self {
228            gamma: 1.0,
229            n_components,
230            random_state: None,
231            random_weights_: None,
232            random_offset_: None,
233            _state: PhantomData,
234        }
235    }
236
237    /// Set the gamma parameter
238    pub fn gamma(mut self, gamma: Float) -> Self {
239        self.gamma = gamma;
240        self
241    }
242
243    /// Set random state for reproducibility
244    pub fn random_state(mut self, seed: u64) -> Self {
245        self.random_state = Some(seed);
246        self
247    }
248}
249
impl Estimator for LaplacianSampler<Untrained> {
    type Config = ();
    type Error = SklearsError;
    type Float = Float;

    /// This estimator is configuration-free, so the unit value is the
    /// config; `&()` is a valid `'static` reference.
    fn config(&self) -> &Self::Config {
        &()
    }
}
259
260impl Fit<Array2<Float>, ()> for LaplacianSampler<Untrained> {
261    type Fitted = LaplacianSampler<Trained>;
262
263    fn fit(self, x: &Array2<Float>, _y: &()) -> Result<Self::Fitted> {
264        let (_, n_features) = x.dim();
265
266        if self.gamma <= 0.0 {
267            return Err(SklearsError::InvalidInput(
268                "gamma must be positive".to_string(),
269            ));
270        }
271
272        if self.n_components == 0 {
273            return Err(SklearsError::InvalidInput(
274                "n_components must be positive".to_string(),
275            ));
276        }
277
278        let mut rng = if let Some(seed) = self.random_state {
279            RealStdRng::seed_from_u64(seed)
280        } else {
281            RealStdRng::from_seed(thread_rng().gen())
282        };
283
284        // Sample random weights from Cauchy distribution (location=0, scale=gamma)
285        let cauchy = Cauchy::new(0.0, self.gamma).unwrap();
286        let mut random_weights = Array2::zeros((n_features, self.n_components));
287        for mut col in random_weights.columns_mut() {
288            for val in col.iter_mut() {
289                *val = rng.sample(cauchy);
290            }
291        }
292
293        // Sample random offsets from Uniform(0, 2π)
294        let uniform = RandUniform::new(0.0, 2.0 * std::f64::consts::PI).unwrap();
295        let mut random_offset = Array1::zeros(self.n_components);
296        for val in random_offset.iter_mut() {
297            *val = rng.sample(uniform);
298        }
299
300        Ok(LaplacianSampler {
301            gamma: self.gamma,
302            n_components: self.n_components,
303            random_state: self.random_state,
304            random_weights_: Some(random_weights),
305            random_offset_: Some(random_offset),
306            _state: PhantomData,
307        })
308    }
309}
310
311impl Transform<Array2<Float>, Array2<Float>> for LaplacianSampler<Trained> {
312    fn transform(&self, x: &Array2<Float>) -> Result<Array2<Float>> {
313        let (_n_samples, n_features) = x.dim();
314        let weights = self.random_weights_.as_ref().unwrap();
315        let offset = self.random_offset_.as_ref().unwrap();
316
317        if n_features != weights.nrows() {
318            return Err(SklearsError::InvalidInput(format!(
319                "X has {} features, but LaplacianSampler was fitted with {} features",
320                n_features,
321                weights.nrows()
322            )));
323        }
324
325        // Compute projection: X @ weights + offset
326        let projection = x.dot(weights) + offset.view().insert_axis(Axis(0));
327
328        // Apply cosine and normalize: sqrt(2/n_components) * cos(projection)
329        let normalization = (2.0 / self.n_components as Float).sqrt();
330        let result = projection.mapv(|v| normalization * v.cos());
331
332        Ok(result)
333    }
334}
335
impl LaplacianSampler<Trained> {
    /// Projection matrix W of shape (n_features, n_components).
    ///
    /// Always `Some` in the `Trained` state, so the unwrap cannot panic.
    pub fn random_weights(&self) -> &Array2<Float> {
        self.random_weights_.as_ref().unwrap()
    }

    /// Phase offsets b of length n_components.
    pub fn random_offset(&self) -> &Array1<Float> {
        self.random_offset_.as_ref().unwrap()
    }
}
347
/// Polynomial kernel approximation using Random Fourier Features
///
/// Approximates the polynomial kernel K(x,y) = (gamma * <x,y> + coef0)^degree using
/// random Fourier features based on the binomial theorem expansion.
///
/// # Parameters
///
/// * `gamma` - Polynomial kernel parameter (default: 1.0)
/// * `coef0` - Independent term in polynomial kernel (default: 1.0)
/// * `degree` - Degree of polynomial kernel (default: 3)
/// * `n_components` - Number of Monte Carlo samples (default: 100)
/// * `random_state` - Random seed for reproducibility
///
/// # Examples
///
/// ```rust,ignore
/// use sklears_kernel_approximation::rbf_sampler::PolynomialSampler;
/// use sklears_core::traits::{Transform, Fit, Untrained};
/// use scirs2_core::ndarray::array;
///
/// let X = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
///
/// let poly = PolynomialSampler::new(100).degree(3).gamma(1.0).coef0(1.0);
/// let fitted_poly = poly.fit(&X, &()).unwrap();
/// let X_transformed = fitted_poly.transform(&X).unwrap();
/// assert_eq!(X_transformed.shape(), &[3, 100]);
/// ```
#[derive(Debug, Clone)]
/// Random-feature sampler approximating the polynomial kernel.
pub struct PolynomialSampler<State = Untrained> {
    /// Polynomial kernel parameter (scales the inner product); must be > 0
    pub gamma: Float,
    /// Independent term in polynomial kernel
    pub coef0: Float,
    /// Degree of polynomial kernel; must be >= 1
    pub degree: u32,
    /// Number of Monte Carlo samples (dimensionality of the transformed output)
    pub n_components: usize,
    /// Random seed; `None` draws a fresh seed from the thread RNG at fit time
    pub random_state: Option<u64>,

    // Fitted attributes — `None` until `fit` runs.
    // Projection matrix of shape (n_features, n_components); columns are
    // uniform unit-sphere directions scaled by sqrt(gamma).
    random_weights_: Option<Array2<Float>>,
    // Phase offsets of length n_components; entries ~ Uniform(0, 2π).
    random_offset_: Option<Array1<Float>>,

    // Zero-sized typestate marker (Untrained / Trained).
    _state: PhantomData<State>,
}
395
396impl PolynomialSampler<Untrained> {
397    /// Create a new polynomial sampler
398    pub fn new(n_components: usize) -> Self {
399        Self {
400            gamma: 1.0,
401            coef0: 1.0,
402            degree: 3,
403            n_components,
404            random_state: None,
405            random_weights_: None,
406            random_offset_: None,
407            _state: PhantomData,
408        }
409    }
410
411    /// Set the gamma parameter
412    pub fn gamma(mut self, gamma: Float) -> Self {
413        self.gamma = gamma;
414        self
415    }
416
417    /// Set the coef0 parameter
418    pub fn coef0(mut self, coef0: Float) -> Self {
419        self.coef0 = coef0;
420        self
421    }
422
423    /// Set the degree parameter
424    pub fn degree(mut self, degree: u32) -> Self {
425        self.degree = degree;
426        self
427    }
428
429    /// Set random state for reproducibility
430    pub fn random_state(mut self, seed: u64) -> Self {
431        self.random_state = Some(seed);
432        self
433    }
434}
435
impl Estimator for PolynomialSampler<Untrained> {
    type Config = ();
    type Error = SklearsError;
    type Float = Float;

    /// This estimator is configuration-free, so the unit value is the
    /// config; `&()` is a valid `'static` reference.
    fn config(&self) -> &Self::Config {
        &()
    }
}
445
446impl Fit<Array2<Float>, ()> for PolynomialSampler<Untrained> {
447    type Fitted = PolynomialSampler<Trained>;
448
449    fn fit(self, x: &Array2<Float>, _y: &()) -> Result<Self::Fitted> {
450        let (_, n_features) = x.dim();
451
452        if self.gamma <= 0.0 {
453            return Err(SklearsError::InvalidInput(
454                "gamma must be positive".to_string(),
455            ));
456        }
457
458        if self.degree == 0 {
459            return Err(SklearsError::InvalidInput(
460                "degree must be positive".to_string(),
461            ));
462        }
463
464        if self.n_components == 0 {
465            return Err(SklearsError::InvalidInput(
466                "n_components must be positive".to_string(),
467            ));
468        }
469
470        let mut rng = if let Some(seed) = self.random_state {
471            RealStdRng::seed_from_u64(seed)
472        } else {
473            RealStdRng::from_seed(thread_rng().gen())
474        };
475
476        // For polynomial kernels, we use a different approach:
477        // Sample random projections from uniform sphere and scaling factors
478        let normal = RandNormal::new(0.0, 1.0).unwrap();
479        let mut random_weights = Array2::zeros((n_features, self.n_components));
480
481        for mut col in random_weights.columns_mut() {
482            // Sample from standard normal and normalize to get uniform direction on sphere
483            for val in col.iter_mut() {
484                *val = rng.sample(normal);
485            }
486            let norm = (col.dot(&col) as Float).sqrt();
487            if norm > 1e-12 {
488                col /= norm;
489            }
490
491            // Scale by gamma
492            col *= self.gamma.sqrt();
493        }
494
495        // Sample random offsets from Uniform(0, 2π)
496        let uniform = RandUniform::new(0.0, 2.0 * std::f64::consts::PI).unwrap();
497        let mut random_offset = Array1::zeros(self.n_components);
498        for val in random_offset.iter_mut() {
499            *val = rng.sample(uniform);
500        }
501
502        Ok(PolynomialSampler {
503            gamma: self.gamma,
504            coef0: self.coef0,
505            degree: self.degree,
506            n_components: self.n_components,
507            random_state: self.random_state,
508            random_weights_: Some(random_weights),
509            random_offset_: Some(random_offset),
510            _state: PhantomData,
511        })
512    }
513}
514
515impl Transform<Array2<Float>, Array2<Float>> for PolynomialSampler<Trained> {
516    fn transform(&self, x: &Array2<Float>) -> Result<Array2<Float>> {
517        let (_n_samples, n_features) = x.dim();
518        let weights = self.random_weights_.as_ref().unwrap();
519        let offset = self.random_offset_.as_ref().unwrap();
520
521        if n_features != weights.nrows() {
522            return Err(SklearsError::InvalidInput(format!(
523                "X has {} features, but PolynomialSampler was fitted with {} features",
524                n_features,
525                weights.nrows()
526            )));
527        }
528
529        // Compute projection: X @ weights + offset
530        let projection = x.dot(weights) + offset.view().insert_axis(Axis(0));
531
532        // For polynomial kernels, we apply: (cos(projection) + coef0)^degree
533        // This approximates the polynomial kernel through trigonometric expansion
534        let normalization = (2.0 / self.n_components as Float).sqrt();
535        let result = projection.mapv(|v| {
536            let cos_val = v.cos() + self.coef0;
537            normalization * cos_val.powf(self.degree as Float)
538        });
539
540        Ok(result)
541    }
542}
543
impl PolynomialSampler<Trained> {
    /// Projection matrix W of shape (n_features, n_components).
    ///
    /// Always `Some` in the `Trained` state, so the unwrap cannot panic.
    pub fn random_weights(&self) -> &Array2<Float> {
        self.random_weights_.as_ref().unwrap()
    }

    /// Phase offsets b of length n_components.
    pub fn random_offset(&self) -> &Array1<Float> {
        self.random_offset_.as_ref().unwrap()
    }

    /// The `gamma` value this sampler was fitted with.
    pub fn gamma(&self) -> Float {
        self.gamma
    }

    /// The `coef0` value this sampler was fitted with.
    pub fn coef0(&self) -> Float {
        self.coef0
    }

    /// The polynomial degree this sampler was fitted with.
    pub fn degree(&self) -> u32 {
        self.degree
    }
}
570
/// Arc-cosine kernel approximation using Random Fourier Features
///
/// Approximates the arc-cosine kernel which corresponds to infinite-width neural networks.
/// The arc-cosine kernel of degree n is defined as:
/// K_n(x,y) = (1/π) * ||x|| * ||y|| * J_n(θ)
/// where θ is the angle between x and y.
///
/// # Parameters
///
/// * `degree` - Degree of the arc-cosine kernel (0, 1, or 2)
/// * `n_components` - Number of Monte Carlo samples (default: 100)
/// * `random_state` - Random seed for reproducibility
///
/// # Examples
///
/// ```rust,ignore
/// use sklears_kernel_approximation::rbf_sampler::ArcCosineSampler;
/// use sklears_core::traits::{Transform, Fit, Untrained};
/// use scirs2_core::ndarray::array;
///
/// let X = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
///
/// let arc_cosine = ArcCosineSampler::new(100).degree(1);
/// let fitted_arc_cosine = arc_cosine.fit(&X, &()).unwrap();
/// let X_transformed = fitted_arc_cosine.transform(&X).unwrap();
/// assert_eq!(X_transformed.shape(), &[3, 100]);
/// ```
#[derive(Debug, Clone)]
/// Random-feature sampler approximating the arc-cosine kernel.
pub struct ArcCosineSampler<State = Untrained> {
    /// Degree of the arc-cosine kernel (0, 1, or 2; validated in `fit`)
    pub degree: u32,
    /// Number of Monte Carlo samples (dimensionality of the transformed output)
    pub n_components: usize,
    /// Random seed; `None` draws a fresh seed from the thread RNG at fit time
    pub random_state: Option<u64>,

    // Fitted attribute — `None` until `fit` runs.
    // Projection matrix of shape (n_features, n_components); entries ~ N(0, 1).
    // No phase offsets: the arc-cosine feature map is a thresholded activation.
    random_weights_: Option<Array2<Float>>,

    // Zero-sized typestate marker (Untrained / Trained).
    _state: PhantomData<State>,
}
613
614impl ArcCosineSampler<Untrained> {
615    /// Create a new arc-cosine sampler
616    pub fn new(n_components: usize) -> Self {
617        Self {
618            degree: 1,
619            n_components,
620            random_state: None,
621            random_weights_: None,
622            _state: PhantomData,
623        }
624    }
625
626    /// Set the degree parameter (0, 1, or 2)
627    pub fn degree(mut self, degree: u32) -> Self {
628        self.degree = degree;
629        self
630    }
631
632    /// Set random state for reproducibility
633    pub fn random_state(mut self, seed: u64) -> Self {
634        self.random_state = Some(seed);
635        self
636    }
637}
638
impl Estimator for ArcCosineSampler<Untrained> {
    type Config = ();
    type Error = SklearsError;
    type Float = Float;

    /// This estimator is configuration-free, so the unit value is the
    /// config; `&()` is a valid `'static` reference.
    fn config(&self) -> &Self::Config {
        &()
    }
}
648
649impl Fit<Array2<Float>, ()> for ArcCosineSampler<Untrained> {
650    type Fitted = ArcCosineSampler<Trained>;
651
652    fn fit(self, x: &Array2<Float>, _y: &()) -> Result<Self::Fitted> {
653        let (_, n_features) = x.dim();
654
655        if self.degree > 2 {
656            return Err(SklearsError::InvalidInput(
657                "degree must be 0, 1, or 2".to_string(),
658            ));
659        }
660
661        if self.n_components == 0 {
662            return Err(SklearsError::InvalidInput(
663                "n_components must be positive".to_string(),
664            ));
665        }
666
667        let mut rng = if let Some(seed) = self.random_state {
668            RealStdRng::seed_from_u64(seed)
669        } else {
670            RealStdRng::from_seed(thread_rng().gen())
671        };
672
673        // Sample random weights from standard normal distribution
674        let normal = RandNormal::new(0.0, 1.0).unwrap();
675        let mut random_weights = Array2::zeros((n_features, self.n_components));
676
677        for mut col in random_weights.columns_mut() {
678            for val in col.iter_mut() {
679                *val = rng.sample(normal);
680            }
681        }
682
683        Ok(ArcCosineSampler {
684            degree: self.degree,
685            n_components: self.n_components,
686            random_state: self.random_state,
687            random_weights_: Some(random_weights),
688            _state: PhantomData,
689        })
690    }
691}
692
693impl Transform<Array2<Float>, Array2<Float>> for ArcCosineSampler<Trained> {
694    fn transform(&self, x: &Array2<Float>) -> Result<Array2<Float>> {
695        let (_n_samples, n_features) = x.dim();
696        let weights = self.random_weights_.as_ref().unwrap();
697
698        if n_features != weights.nrows() {
699            return Err(SklearsError::InvalidInput(format!(
700                "X has {} features, but ArcCosineSampler was fitted with {} features",
701                n_features,
702                weights.nrows()
703            )));
704        }
705
706        // Compute projection: X @ weights
707        let projection = x.dot(weights);
708
709        // Apply activation function based on degree
710        let normalization = (2.0 / self.n_components as Float).sqrt();
711        let result = match self.degree {
712            0 => {
713                // ReLU: max(0, x)
714                projection.mapv(|v| normalization * v.max(0.0))
715            }
716            1 => {
717                // Sigmoid-like: x * I(x > 0)
718                projection.mapv(|v| if v > 0.0 { normalization * v } else { 0.0 })
719            }
720            2 => {
721                // Quadratic: x² * I(x > 0)
722                projection.mapv(|v| if v > 0.0 { normalization * v * v } else { 0.0 })
723            }
724            _ => unreachable!("degree validation should prevent this"),
725        };
726
727        Ok(result)
728    }
729}
730
impl ArcCosineSampler<Trained> {
    /// Projection matrix W of shape (n_features, n_components).
    ///
    /// Always `Some` in the `Trained` state, so the unwrap cannot panic.
    pub fn random_weights(&self) -> &Array2<Float> {
        self.random_weights_.as_ref().unwrap()
    }

    /// The kernel degree this sampler was fitted with (0, 1, or 2).
    pub fn degree(&self) -> u32 {
        self.degree
    }
}
742
#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::array;

    // ---------- RBFSampler ----------

    #[test]
    fn test_rbf_sampler_basic() {
        let x = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0],];

        let rbf = RBFSampler::new(50).gamma(0.1);
        let fitted = rbf.fit(&x, &()).unwrap();
        let x_transformed = fitted.transform(&x).unwrap();

        assert_eq!(x_transformed.shape(), &[3, 50]);

        // Check that values are in reasonable range for cosine function
        for val in x_transformed.iter() {
            assert!(val.abs() <= 2.0); // |sqrt(2/n) * cos| <= sqrt(2) ≈ 1.414, so 2.0 is a safe bound
        }
    }

    #[test]
    fn test_rbf_sampler_reproducibility() {
        let x = array![[1.0, 2.0], [3.0, 4.0],];

        let rbf1 = RBFSampler::new(10).random_state(42);
        let fitted1 = rbf1.fit(&x, &()).unwrap();
        let result1 = fitted1.transform(&x).unwrap();

        let rbf2 = RBFSampler::new(10).random_state(42);
        let fitted2 = rbf2.fit(&x, &()).unwrap();
        let result2 = fitted2.transform(&x).unwrap();

        // Results should be identical with same random state
        for (a, b) in result1.iter().zip(result2.iter()) {
            assert!((a - b).abs() < 1e-10);
        }
    }

    #[test]
    fn test_rbf_sampler_feature_mismatch() {
        let x_train = array![[1.0, 2.0], [3.0, 4.0],];

        let x_test = array![
            [1.0, 2.0, 3.0], // Wrong number of features
        ];

        let rbf = RBFSampler::new(10);
        let fitted = rbf.fit(&x_train, &()).unwrap();
        let result = fitted.transform(&x_test);

        assert!(result.is_err());
    }

    #[test]
    fn test_rbf_sampler_invalid_gamma() {
        // Negative gamma must be rejected at fit time.
        let x = array![[1.0, 2.0]];
        let rbf = RBFSampler::new(10).gamma(-1.0);
        let result = rbf.fit(&x, &());
        assert!(result.is_err());
    }

    #[test]
    fn test_rbf_sampler_zero_components() {
        // Zero output features is invalid.
        let x = array![[1.0, 2.0]];
        let rbf = RBFSampler::new(0);
        let result = rbf.fit(&x, &());
        assert!(result.is_err());
    }

    // ---------- LaplacianSampler ----------

    #[test]
    fn test_laplacian_sampler_basic() {
        let x = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0],];

        let laplacian = LaplacianSampler::new(50).gamma(0.1);
        let fitted = laplacian.fit(&x, &()).unwrap();
        let x_transformed = fitted.transform(&x).unwrap();

        assert_eq!(x_transformed.shape(), &[3, 50]);

        // Check that values are in reasonable range for cosine function
        for val in x_transformed.iter() {
            assert!(val.abs() <= 2.0); // |sqrt(2/n) * cos| <= sqrt(2) ≈ 1.414, so 2.0 is a safe bound
        }
    }

    #[test]
    fn test_laplacian_sampler_reproducibility() {
        let x = array![[1.0, 2.0], [3.0, 4.0],];

        let laplacian1 = LaplacianSampler::new(10).random_state(42);
        let fitted1 = laplacian1.fit(&x, &()).unwrap();
        let result1 = fitted1.transform(&x).unwrap();

        let laplacian2 = LaplacianSampler::new(10).random_state(42);
        let fitted2 = laplacian2.fit(&x, &()).unwrap();
        let result2 = fitted2.transform(&x).unwrap();

        // Results should be identical with same random state
        for (a, b) in result1.iter().zip(result2.iter()) {
            assert!((a - b).abs() < 1e-10);
        }
    }

    #[test]
    fn test_laplacian_sampler_feature_mismatch() {
        let x_train = array![[1.0, 2.0], [3.0, 4.0],];

        let x_test = array![
            [1.0, 2.0, 3.0], // Wrong number of features
        ];

        let laplacian = LaplacianSampler::new(10);
        let fitted = laplacian.fit(&x_train, &()).unwrap();
        let result = fitted.transform(&x_test);

        assert!(result.is_err());
    }

    #[test]
    fn test_laplacian_sampler_invalid_gamma() {
        // Negative gamma must be rejected at fit time.
        let x = array![[1.0, 2.0]];
        let laplacian = LaplacianSampler::new(10).gamma(-1.0);
        let result = laplacian.fit(&x, &());
        assert!(result.is_err());
    }

    #[test]
    fn test_laplacian_sampler_zero_components() {
        // Zero output features is invalid.
        let x = array![[1.0, 2.0]];
        let laplacian = LaplacianSampler::new(0);
        let result = laplacian.fit(&x, &());
        assert!(result.is_err());
    }

    // ---------- PolynomialSampler ----------

    #[test]
    fn test_polynomial_sampler_basic() {
        let x = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0],];

        let poly = PolynomialSampler::new(50).degree(3).gamma(1.0).coef0(1.0);
        let fitted = poly.fit(&x, &()).unwrap();
        let x_transformed = fitted.transform(&x).unwrap();

        assert_eq!(x_transformed.shape(), &[3, 50]);

        // Check that values are in reasonable range
        for val in x_transformed.iter() {
            assert!(val.is_finite());
        }
    }

    #[test]
    fn test_polynomial_sampler_reproducibility() {
        let x = array![[1.0, 2.0], [3.0, 4.0],];

        let poly1 = PolynomialSampler::new(10).degree(2).random_state(42);
        let fitted1 = poly1.fit(&x, &()).unwrap();
        let result1 = fitted1.transform(&x).unwrap();

        let poly2 = PolynomialSampler::new(10).degree(2).random_state(42);
        let fitted2 = poly2.fit(&x, &()).unwrap();
        let result2 = fitted2.transform(&x).unwrap();

        // Results should be identical with same random state
        for (a, b) in result1.iter().zip(result2.iter()) {
            assert!((a - b).abs() < 1e-10);
        }
    }

    #[test]
    fn test_polynomial_sampler_feature_mismatch() {
        let x_train = array![[1.0, 2.0], [3.0, 4.0],];

        let x_test = array![
            [1.0, 2.0, 3.0], // Wrong number of features
        ];

        let poly = PolynomialSampler::new(10);
        let fitted = poly.fit(&x_train, &()).unwrap();
        let result = fitted.transform(&x_test);

        assert!(result.is_err());
    }

    #[test]
    fn test_polynomial_sampler_invalid_gamma() {
        // Negative gamma must be rejected at fit time.
        let x = array![[1.0, 2.0]];
        let poly = PolynomialSampler::new(10).gamma(-1.0);
        let result = poly.fit(&x, &());
        assert!(result.is_err());
    }

    #[test]
    fn test_polynomial_sampler_zero_degree() {
        // Degree 0 is rejected (a degree-0 polynomial kernel is constant).
        let x = array![[1.0, 2.0]];
        let poly = PolynomialSampler::new(10).degree(0);
        let result = poly.fit(&x, &());
        assert!(result.is_err());
    }

    #[test]
    fn test_polynomial_sampler_zero_components() {
        // Zero output features is invalid.
        let x = array![[1.0, 2.0]];
        let poly = PolynomialSampler::new(0);
        let result = poly.fit(&x, &());
        assert!(result.is_err());
    }

    #[test]
    fn test_polynomial_sampler_different_degrees() {
        let x = array![[1.0, 2.0], [3.0, 4.0],];

        // Test degree 1
        let poly1 = PolynomialSampler::new(10).degree(1);
        let fitted1 = poly1.fit(&x, &()).unwrap();
        let result1 = fitted1.transform(&x).unwrap();
        assert_eq!(result1.shape(), &[2, 10]);

        // Test degree 5
        let poly5 = PolynomialSampler::new(10).degree(5);
        let fitted5 = poly5.fit(&x, &()).unwrap();
        let result5 = fitted5.transform(&x).unwrap();
        assert_eq!(result5.shape(), &[2, 10]);
    }

    // ---------- ArcCosineSampler ----------

    #[test]
    fn test_arc_cosine_sampler_basic() {
        let x = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0],];

        let arc_cosine = ArcCosineSampler::new(50).degree(1);
        let fitted = arc_cosine.fit(&x, &()).unwrap();
        let x_transformed = fitted.transform(&x).unwrap();

        assert_eq!(x_transformed.shape(), &[3, 50]);

        // Check that values are non-negative (thresholded activation)
        for val in x_transformed.iter() {
            assert!(val >= &0.0);
            assert!(val.is_finite());
        }
    }

    #[test]
    fn test_arc_cosine_sampler_reproducibility() {
        let x = array![[1.0, 2.0], [3.0, 4.0],];

        let arc1 = ArcCosineSampler::new(10).degree(1).random_state(42);
        let fitted1 = arc1.fit(&x, &()).unwrap();
        let result1 = fitted1.transform(&x).unwrap();

        let arc2 = ArcCosineSampler::new(10).degree(1).random_state(42);
        let fitted2 = arc2.fit(&x, &()).unwrap();
        let result2 = fitted2.transform(&x).unwrap();

        // Results should be identical with same random state
        for (a, b) in result1.iter().zip(result2.iter()) {
            assert!((a - b).abs() < 1e-10);
        }
    }

    #[test]
    fn test_arc_cosine_sampler_different_degrees() {
        let x = array![[1.0, 2.0], [3.0, 4.0],];

        // Test degree 0 (step activation)
        let arc0 = ArcCosineSampler::new(10).degree(0);
        let fitted0 = arc0.fit(&x, &()).unwrap();
        let result0 = fitted0.transform(&x).unwrap();
        assert_eq!(result0.shape(), &[2, 10]);

        // Test degree 1 (ReLU activation)
        let arc1 = ArcCosineSampler::new(10).degree(1);
        let fitted1 = arc1.fit(&x, &()).unwrap();
        let result1 = fitted1.transform(&x).unwrap();
        assert_eq!(result1.shape(), &[2, 10]);

        // Test degree 2 (squared-ReLU activation)
        let arc2 = ArcCosineSampler::new(10).degree(2);
        let fitted2 = arc2.fit(&x, &()).unwrap();
        let result2 = fitted2.transform(&x).unwrap();
        assert_eq!(result2.shape(), &[2, 10]);
    }

    #[test]
    fn test_arc_cosine_sampler_feature_mismatch() {
        let x_train = array![[1.0, 2.0], [3.0, 4.0],];

        let x_test = array![
            [1.0, 2.0, 3.0], // Wrong number of features
        ];

        let arc_cosine = ArcCosineSampler::new(10);
        let fitted = arc_cosine.fit(&x_train, &()).unwrap();
        let result = fitted.transform(&x_test);

        assert!(result.is_err());
    }

    #[test]
    fn test_arc_cosine_sampler_invalid_degree() {
        // Only degrees 0, 1, 2 are supported.
        let x = array![[1.0, 2.0]];
        let arc_cosine = ArcCosineSampler::new(10).degree(3);
        let result = arc_cosine.fit(&x, &());
        assert!(result.is_err());
    }

    #[test]
    fn test_arc_cosine_sampler_zero_components() {
        // Zero output features is invalid.
        let x = array![[1.0, 2.0]];
        let arc_cosine = ArcCosineSampler::new(0);
        let result = arc_cosine.fit(&x, &());
        assert!(result.is_err());
    }
}
1057}