Skip to main content

tensorlogic_sklears_kernels/
random_features.rs

1// Allow needless_range_loop for matrix operations which are clearer with indexed loops
2#![allow(clippy::needless_range_loop)]
3
4//! Random Fourier Features (RFF) for scalable kernel approximation.
5//!
6//! Random Fourier Features provide a way to approximate kernel functions
7//! in linear time O(n) instead of O(n²) by mapping data to a randomized
8//! low-dimensional feature space.
9//!
10//! ## Theory
11//!
12//! For shift-invariant kernels K(x, y) = k(x - y), Bochner's theorem
13//! guarantees that there exists a Fourier transform p(ω) such that:
14//!
15//! k(x - y) = ∫ p(ω) e^{iω^T(x-y)} dω
16//!
17//! By sampling ω from p(ω) and computing cos(ω^T x) and sin(ω^T x),
18//! we can approximate K(x, y) ≈ z(x)^T z(y) where z is the feature map.
19//!
20//! ## Reference
21//!
22//! Rahimi, A., & Recht, B. (2007). "Random Features for Large-Scale Kernel
23//! Machines." NIPS.
24//!
25//! ## Example
26//!
27//! ```rust
28//! use tensorlogic_sklears_kernels::random_features::{
29//!     RandomFourierFeatures, RffConfig, KernelType
30//! };
31//!
32//! // Create RFF for RBF kernel with gamma=0.5
33//! let config = RffConfig::new(KernelType::Rbf { gamma: 0.5 }, 100);
34//! let rff = RandomFourierFeatures::new(3, config).unwrap(); // 3D input
35//!
36//! let x = vec![1.0, 2.0, 3.0];
37//! let features = rff.transform(&x).unwrap();
38//! // features is a 200-dimensional vector (100 cos + 100 sin components)
39//! ```
40
41use crate::error::{KernelError, Result};
42use std::f64::consts::PI;
43
/// Kernel type for RFF approximation.
///
/// Each variant is a shift-invariant kernel whose spectral (Fourier)
/// distribution can be sampled per Bochner's theorem; the distribution
/// used for each variant is noted below and implemented in
/// `RandomFourierFeatures::sample_frequencies`.
#[derive(Debug, Clone)]
pub enum KernelType {
    /// RBF/Gaussian kernel: K(x,y) = exp(-gamma * ||x-y||²)
    /// Fourier transform: Gaussian distribution N(0, 2*gamma*I)
    Rbf { gamma: f64 },

    /// Laplacian kernel: K(x,y) = exp(-gamma * ||x-y||₁)
    /// Fourier transform: Cauchy distribution with scale gamma
    Laplacian { gamma: f64 },

    /// Matérn kernel with nu=0.5 (equivalent to Laplacian)
    /// Fourier transform: Cauchy distribution with scale 1/length_scale
    Matern05 { length_scale: f64 },

    /// Matérn kernel with nu=1.5
    /// Fourier transform: Student-t with 3 degrees of freedom
    Matern15 { length_scale: f64 },

    /// Matérn kernel with nu=2.5
    /// Fourier transform: Student-t with 5 degrees of freedom
    Matern25 { length_scale: f64 },
}
64
65impl KernelType {
66    /// Get the name of the kernel type.
67    pub fn name(&self) -> &str {
68        match self {
69            KernelType::Rbf { .. } => "RBF",
70            KernelType::Laplacian { .. } => "Laplacian",
71            KernelType::Matern05 { .. } => "Matern-0.5",
72            KernelType::Matern15 { .. } => "Matern-1.5",
73            KernelType::Matern25 { .. } => "Matern-2.5",
74        }
75    }
76}
77
/// Configuration for Random Fourier Features.
#[derive(Debug, Clone)]
pub struct RffConfig {
    /// The kernel type to approximate
    pub kernel_type: KernelType,
    /// Number of random features (n_components)
    /// The output dimension will be 2 * n_components (cos + sin)
    pub n_components: usize,
    /// Random seed for reproducibility; `None` falls back to a fixed
    /// default seed when the transformer is constructed
    pub seed: Option<u64>,
}
89
90impl RffConfig {
91    /// Create a new RFF configuration.
92    pub fn new(kernel_type: KernelType, n_components: usize) -> Self {
93        Self {
94            kernel_type,
95            n_components,
96            seed: None,
97        }
98    }
99
100    /// Set random seed for reproducibility.
101    pub fn with_seed(mut self, seed: u64) -> Self {
102        self.seed = Some(seed);
103        self
104    }
105}
106
/// Random Fourier Features for scalable kernel approximation.
///
/// This struct stores the random frequencies sampled from the spectral
/// distribution of the kernel, allowing efficient feature computation.
/// Two instances built with the same seed and configuration hold identical
/// weights and offsets, so transforms are reproducible.
#[derive(Debug, Clone)]
pub struct RandomFourierFeatures {
    /// Random frequencies matrix (n_components x input_dim)
    random_weights: Vec<Vec<f64>>,
    /// Random phase offsets, one per component, drawn from [0, 2π)
    random_offsets: Vec<f64>,
    /// Configuration
    config: RffConfig,
    /// Input dimension
    input_dim: usize,
    /// Output dimension (2 * n_components for cos + sin)
    output_dim: usize,
}
124
125impl RandomFourierFeatures {
126    /// Create a new Random Fourier Features transformer.
127    ///
128    /// # Arguments
129    /// * `input_dim` - Dimension of input vectors
130    /// * `config` - RFF configuration
131    ///
132    /// # Example
133    /// ```rust
134    /// use tensorlogic_sklears_kernels::random_features::{
135    ///     RandomFourierFeatures, RffConfig, KernelType
136    /// };
137    ///
138    /// let config = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 50);
139    /// let rff = RandomFourierFeatures::new(10, config).unwrap();
140    /// ```
141    pub fn new(input_dim: usize, config: RffConfig) -> Result<Self> {
142        if input_dim == 0 {
143            return Err(KernelError::InvalidParameter {
144                parameter: "input_dim".to_string(),
145                value: "0".to_string(),
146                reason: "input dimension must be positive".to_string(),
147            });
148        }
149        if config.n_components == 0 {
150            return Err(KernelError::InvalidParameter {
151                parameter: "n_components".to_string(),
152                value: "0".to_string(),
153                reason: "number of components must be positive".to_string(),
154            });
155        }
156
157        // Initialize random state
158        let seed = config.seed.unwrap_or(42);
159        let mut rng_state = seed;
160
161        // Sample random frequencies from the spectral distribution
162        let random_weights = Self::sample_frequencies(
163            &config.kernel_type,
164            config.n_components,
165            input_dim,
166            &mut rng_state,
167        )?;
168
169        // Sample random offsets uniformly from [0, 2π]
170        let random_offsets: Vec<f64> = (0..config.n_components)
171            .map(|_| random_uniform(&mut rng_state) * 2.0 * PI)
172            .collect();
173
174        let output_dim = 2 * config.n_components;
175
176        Ok(Self {
177            random_weights,
178            random_offsets,
179            config,
180            input_dim,
181            output_dim,
182        })
183    }
184
185    /// Sample random frequencies from the kernel's spectral distribution.
186    fn sample_frequencies(
187        kernel_type: &KernelType,
188        n_components: usize,
189        input_dim: usize,
190        rng_state: &mut u64,
191    ) -> Result<Vec<Vec<f64>>> {
192        let mut weights = Vec::with_capacity(n_components);
193
194        for _ in 0..n_components {
195            let mut w = Vec::with_capacity(input_dim);
196            for _ in 0..input_dim {
197                let freq = match kernel_type {
198                    KernelType::Rbf { gamma } => {
199                        // Spectral distribution: N(0, 2*gamma)
200                        let std = (2.0 * gamma).sqrt();
201                        random_normal(rng_state) * std
202                    }
203                    KernelType::Laplacian { gamma } => {
204                        // Spectral distribution: Cauchy(0, gamma)
205                        random_cauchy(rng_state) * gamma
206                    }
207                    KernelType::Matern05 { length_scale } => {
208                        // Spectral distribution: Cauchy(0, 1/length_scale)
209                        let g = 1.0 / length_scale;
210                        random_cauchy(rng_state) * g
211                    }
212                    KernelType::Matern15 { length_scale } => {
213                        // Spectral distribution: Student-t with df=3
214                        let scale = (3.0_f64).sqrt() / length_scale;
215                        random_student_t(rng_state, 3.0) * scale
216                    }
217                    KernelType::Matern25 { length_scale } => {
218                        // Spectral distribution: Student-t with df=5
219                        let scale = (5.0_f64).sqrt() / length_scale;
220                        random_student_t(rng_state, 5.0) * scale
221                    }
222                };
223                w.push(freq);
224            }
225            weights.push(w);
226        }
227
228        Ok(weights)
229    }
230
231    /// Transform a single input vector to the random feature space.
232    ///
233    /// # Arguments
234    /// * `x` - Input vector of dimension `input_dim`
235    ///
236    /// # Returns
237    /// Feature vector of dimension `2 * n_components`
238    pub fn transform(&self, x: &[f64]) -> Result<Vec<f64>> {
239        if x.len() != self.input_dim {
240            return Err(KernelError::DimensionMismatch {
241                expected: vec![self.input_dim],
242                got: vec![x.len()],
243                context: "RFF transform".to_string(),
244            });
245        }
246
247        let mut features = Vec::with_capacity(self.output_dim);
248        let scale = (1.0 / self.config.n_components as f64).sqrt();
249
250        for (i, w) in self.random_weights.iter().enumerate() {
251            // Compute w^T x + b
252            let wx: f64 = w.iter().zip(x.iter()).map(|(wi, xi)| wi * xi).sum();
253            let proj = wx + self.random_offsets[i];
254
255            // Cosine and sine features
256            features.push(proj.cos() * scale);
257            features.push(proj.sin() * scale);
258        }
259
260        Ok(features)
261    }
262
263    /// Transform multiple input vectors to the random feature space.
264    ///
265    /// # Arguments
266    /// * `data` - List of input vectors
267    ///
268    /// # Returns
269    /// Matrix of feature vectors (one row per input)
270    pub fn transform_batch(&self, data: &[Vec<f64>]) -> Result<Vec<Vec<f64>>> {
271        data.iter().map(|x| self.transform(x)).collect()
272    }
273
274    /// Approximate the kernel value between two vectors.
275    ///
276    /// K(x, y) ≈ z(x)^T z(y)
277    pub fn approximate_kernel(&self, x: &[f64], y: &[f64]) -> Result<f64> {
278        let z_x = self.transform(x)?;
279        let z_y = self.transform(y)?;
280
281        let dot: f64 = z_x.iter().zip(z_y.iter()).map(|(a, b)| a * b).sum();
282        Ok(dot)
283    }
284
285    /// Approximate the kernel matrix for a set of data points.
286    ///
287    /// `K[i,j] ≈ z(x_i)^T z(x_j)`
288    pub fn approximate_kernel_matrix(&self, data: &[Vec<f64>]) -> Result<Vec<Vec<f64>>> {
289        let features = self.transform_batch(data)?;
290        let n = features.len();
291
292        let mut matrix = vec![vec![0.0; n]; n];
293        for i in 0..n {
294            for j in i..n {
295                let dot: f64 = features[i]
296                    .iter()
297                    .zip(features[j].iter())
298                    .map(|(a, b)| a * b)
299                    .sum();
300                matrix[i][j] = dot;
301                matrix[j][i] = dot;
302            }
303        }
304
305        Ok(matrix)
306    }
307
308    /// Get the input dimension.
309    pub fn input_dim(&self) -> usize {
310        self.input_dim
311    }
312
313    /// Get the output dimension (feature space dimension).
314    pub fn output_dim(&self) -> usize {
315        self.output_dim
316    }
317
318    /// Get the number of random components.
319    pub fn n_components(&self) -> usize {
320        self.config.n_components
321    }
322
323    /// Get the kernel type being approximated.
324    pub fn kernel_type(&self) -> &KernelType {
325        &self.config.kernel_type
326    }
327
328    /// Get the random weights matrix.
329    pub fn random_weights(&self) -> &[Vec<f64>] {
330        &self.random_weights
331    }
332}
333
/// Orthogonal Random Features for improved variance.
///
/// Uses orthogonalized random matrices for better approximation quality,
/// especially for smaller n_components.
///
/// NOTE: the current implementation delegates to plain
/// `RandomFourierFeatures` without orthogonalization (see
/// `OrthogonalRandomFeatures::new`).
///
/// Reference: Yu et al. (2016). "Orthogonal Random Features."
#[derive(Debug, Clone)]
pub struct OrthogonalRandomFeatures {
    /// Base RFF transformer that all calls delegate to
    rff: RandomFourierFeatures,
}
345
346impl OrthogonalRandomFeatures {
347    /// Create orthogonal random features.
348    ///
349    /// Note: For simplicity, this currently uses standard RFF.
350    /// Full orthogonalization would require QR decomposition.
351    pub fn new(input_dim: usize, config: RffConfig) -> Result<Self> {
352        let rff = RandomFourierFeatures::new(input_dim, config)?;
353        Ok(Self { rff })
354    }
355
356    /// Transform a single input vector.
357    pub fn transform(&self, x: &[f64]) -> Result<Vec<f64>> {
358        self.rff.transform(x)
359    }
360
361    /// Transform multiple input vectors.
362    pub fn transform_batch(&self, data: &[Vec<f64>]) -> Result<Vec<Vec<f64>>> {
363        self.rff.transform_batch(data)
364    }
365
366    /// Get the output dimension.
367    pub fn output_dim(&self) -> usize {
368        self.rff.output_dim()
369    }
370}
371
/// Nystroem approximation as random features.
///
/// Represents kernel approximation using Nystroem method,
/// which uses a subset of training points as landmarks.
/// Feature vectors have one entry per landmark.
#[derive(Debug, Clone)]
pub struct NystroemFeatures {
    /// Landmark points
    landmarks: Vec<Vec<f64>>,
    /// Computed components: L^{-T} where K_mm = L L^T
    /// (see `compute_pseudo_sqrt_inv`)
    components: Vec<Vec<f64>>,
    /// Kernel type (for computing kernel values)
    kernel_type: KernelType,
}
385
386impl NystroemFeatures {
387    /// Create Nystroem features from landmark points.
388    ///
389    /// # Arguments
390    /// * `landmarks` - Subset of training points to use as landmarks
391    /// * `kernel_type` - The kernel to approximate
392    pub fn new(landmarks: Vec<Vec<f64>>, kernel_type: KernelType) -> Result<Self> {
393        if landmarks.is_empty() {
394            return Err(KernelError::InvalidParameter {
395                parameter: "landmarks".to_string(),
396                value: "[]".to_string(),
397                reason: "must have at least one landmark".to_string(),
398            });
399        }
400
401        let m = landmarks.len();
402        let mut k_mm = vec![vec![0.0; m]; m];
403
404        // Compute kernel matrix between landmarks
405        for i in 0..m {
406            for j in i..m {
407                let k_val = compute_kernel(&kernel_type, &landmarks[i], &landmarks[j]);
408                k_mm[i][j] = k_val;
409                k_mm[j][i] = k_val;
410            }
411        }
412
413        // Add small regularization for numerical stability
414        for i in 0..m {
415            k_mm[i][i] += 1e-6;
416        }
417
418        // Compute L^{-1/2} via eigendecomposition approximation
419        // For simplicity, use Cholesky-like approach
420        let components = compute_pseudo_sqrt_inv(&k_mm)?;
421
422        Ok(Self {
423            landmarks,
424            components,
425            kernel_type,
426        })
427    }
428
429    /// Transform a single input vector to Nystroem features.
430    pub fn transform(&self, x: &[f64]) -> Result<Vec<f64>> {
431        let m = self.landmarks.len();
432        let mut k_xm = Vec::with_capacity(m);
433
434        // Compute kernel between x and each landmark
435        for landmark in &self.landmarks {
436            k_xm.push(compute_kernel(&self.kernel_type, x, landmark));
437        }
438
439        // Multiply by components: z(x) = K_xm @ L^{-1/2}
440        let mut features = vec![0.0; m];
441        for i in 0..m {
442            for j in 0..m {
443                features[i] += k_xm[j] * self.components[j][i];
444            }
445        }
446
447        Ok(features)
448    }
449
450    /// Transform multiple input vectors.
451    pub fn transform_batch(&self, data: &[Vec<f64>]) -> Result<Vec<Vec<f64>>> {
452        data.iter().map(|x| self.transform(x)).collect()
453    }
454
455    /// Get the number of landmarks (output dimension).
456    pub fn n_landmarks(&self) -> usize {
457        self.landmarks.len()
458    }
459}
460
461// ========== Helper functions ==========
462
/// Simple LCG-based uniform random number generator producing values in [0, 1).
///
/// Advances the state with the MMIX LCG constants and uses the top 31 bits
/// of the new state, so the sequence is fully determined by the seed.
fn random_uniform(state: &mut u64) -> f64 {
    *state = state.wrapping_mul(6364136223846793005).wrapping_add(1);
    (*state >> 33) as f64 / (1u64 << 31) as f64
}

/// Generate a standard normal random variable using the Box-Muller transform.
fn random_normal(state: &mut u64) -> f64 {
    // Map the uniform from [0, 1) to (0, 1] so that ln(u1) is always finite;
    // feeding an exact 0.0 into ln() would produce an infinite radius and a
    // non-finite sample.
    let u1 = 1.0 - random_uniform(state);
    let u2 = random_uniform(state);
    let r = (-2.0 * u1.ln()).sqrt();
    let theta = 2.0 * PI * u2;
    r * theta.cos()
}

/// Generate a standard Cauchy random variable via inverse-CDF sampling.
fn random_cauchy(state: &mut u64) -> f64 {
    let u = random_uniform(state);
    (PI * (u - 0.5)).tan()
}

/// Generate a Student-t random variable with `df` degrees of freedom.
///
/// Built as t = Z / sqrt(V / df) with V a chi-squared variate assembled
/// from `df as usize` squared normals (df is truncated to an integer).
fn random_student_t(state: &mut u64, df: f64) -> f64 {
    let z = random_normal(state);
    let mut chi_sq = 0.0;
    for _ in 0..(df as usize) {
        let n = random_normal(state);
        chi_sq += n * n;
    }
    z / (chi_sq / df).sqrt()
}
495
496/// Compute kernel value for a given kernel type.
497fn compute_kernel(kernel_type: &KernelType, x: &[f64], y: &[f64]) -> f64 {
498    match kernel_type {
499        KernelType::Rbf { gamma } => {
500            let sq_dist: f64 = x.iter().zip(y.iter()).map(|(a, b)| (a - b).powi(2)).sum();
501            (-gamma * sq_dist).exp()
502        }
503        KernelType::Laplacian { gamma } => {
504            let l1_dist: f64 = x.iter().zip(y.iter()).map(|(a, b)| (a - b).abs()).sum();
505            (-gamma * l1_dist).exp()
506        }
507        KernelType::Matern05 { length_scale } => {
508            let dist: f64 = x
509                .iter()
510                .zip(y.iter())
511                .map(|(a, b)| (a - b).powi(2))
512                .sum::<f64>()
513                .sqrt();
514            (-dist / length_scale).exp()
515        }
516        KernelType::Matern15 { length_scale } => {
517            let dist: f64 = x
518                .iter()
519                .zip(y.iter())
520                .map(|(a, b)| (a - b).powi(2))
521                .sum::<f64>()
522                .sqrt();
523            let r = (3.0_f64).sqrt() * dist / length_scale;
524            (1.0 + r) * (-r).exp()
525        }
526        KernelType::Matern25 { length_scale } => {
527            let dist: f64 = x
528                .iter()
529                .zip(y.iter())
530                .map(|(a, b)| (a - b).powi(2))
531                .sum::<f64>()
532                .sqrt();
533            let r = (5.0_f64).sqrt() * dist / length_scale;
534            (1.0 + r + r * r / 3.0) * (-r).exp()
535        }
536    }
537}
538
/// Compute a pseudo-inverse square root of a symmetric positive definite matrix.
///
/// Factors A = L L^T via Cholesky, inverts the lower-triangular L by forward
/// substitution, and returns L^{-T} (the transpose of L^{-1}), so that the
/// result C satisfies C C^T = A^{-1}. Used by `NystroemFeatures` to whiten
/// the landmark kernel matrix.
///
/// # Errors
/// Returns `ComputationError` when a non-positive pivot appears during the
/// factorization, i.e. the matrix is not positive definite.
fn compute_pseudo_sqrt_inv(matrix: &[Vec<f64>]) -> Result<Vec<Vec<f64>>> {
    let n = matrix.len();
    if n == 0 {
        return Ok(vec![]);
    }

    // Use simple Cholesky-based approach for small matrices
    // For larger matrices, would need eigendecomposition

    // Cholesky decomposition: A = L L^T
    let mut l = vec![vec![0.0; n]; n];
    for i in 0..n {
        for j in 0..=i {
            // Accumulate A[i][j] - Σ_{k<j} L[i][k] L[j][k]
            let mut sum = matrix[i][j];
            for k in 0..j {
                sum -= l[i][k] * l[j][k];
            }
            if i == j {
                // Diagonal pivot; must be strictly positive for a valid
                // Cholesky factor.
                if sum <= 0.0 {
                    return Err(KernelError::ComputationError(
                        "Matrix not positive definite".to_string(),
                    ));
                }
                l[i][j] = sum.sqrt();
            } else {
                l[i][j] = sum / l[j][j];
            }
        }
    }

    // Invert L (lower triangular) column by column via forward substitution:
    // row j of column i uses the already-computed entries k in [i, j).
    let mut l_inv = vec![vec![0.0; n]; n];
    for i in 0..n {
        l_inv[i][i] = 1.0 / l[i][i];
        for j in (i + 1)..n {
            let mut sum = 0.0;
            for k in i..j {
                sum -= l[j][k] * l_inv[k][i];
            }
            l_inv[j][i] = sum / l[j][j];
        }
    }

    // Return L^{-1} transposed (which is L^{-T})
    let mut result = vec![vec![0.0; n]; n];
    for i in 0..n {
        for j in 0..n {
            result[i][j] = l_inv[j][i];
        }
    }

    Ok(result)
}
593
#[cfg(test)]
mod tests {
    // Unit tests covering configuration, construction/validation, transforms,
    // kernel-approximation quality, seeded reproducibility, and the
    // Orthogonal/Nystroem variants.
    use super::*;

    #[test]
    fn test_rff_config() {
        let config = RffConfig::new(KernelType::Rbf { gamma: 0.5 }, 100);
        assert_eq!(config.n_components, 100);
        assert!(config.seed.is_none());

        let config = config.with_seed(42);
        assert_eq!(config.seed, Some(42));
    }

    #[test]
    fn test_rff_creation() {
        let config = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 50).with_seed(42);
        let rff = RandomFourierFeatures::new(3, config).unwrap();

        assert_eq!(rff.input_dim(), 3);
        assert_eq!(rff.output_dim(), 100); // 2 * 50
        assert_eq!(rff.n_components(), 50);
    }

    #[test]
    fn test_rff_invalid_params() {
        // Zero input dimension and zero components must both be rejected.
        let config = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 50);
        assert!(RandomFourierFeatures::new(0, config.clone()).is_err());

        let config = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 0);
        assert!(RandomFourierFeatures::new(3, config).is_err());
    }

    #[test]
    fn test_rff_transform() {
        let config = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 50).with_seed(42);
        let rff = RandomFourierFeatures::new(3, config).unwrap();

        let x = vec![1.0, 2.0, 3.0];
        let features = rff.transform(&x).unwrap();

        assert_eq!(features.len(), 100);
        // Features should be bounded (cos and sin scaled by sqrt(1/n))
        for f in &features {
            assert!(f.abs() <= 1.0);
        }
    }

    #[test]
    fn test_rff_transform_dimension_mismatch() {
        let config = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 50).with_seed(42);
        let rff = RandomFourierFeatures::new(3, config).unwrap();

        let x = vec![1.0, 2.0]; // Wrong dimension
        assert!(rff.transform(&x).is_err());
    }

    #[test]
    fn test_rff_batch_transform() {
        let config = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 50).with_seed(42);
        let rff = RandomFourierFeatures::new(2, config).unwrap();

        let data = vec![vec![1.0, 2.0], vec![3.0, 4.0], vec![5.0, 6.0]];
        let features = rff.transform_batch(&data).unwrap();

        assert_eq!(features.len(), 3);
        assert_eq!(features[0].len(), 100);
    }

    #[test]
    fn test_rff_kernel_approximation() {
        let config = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 500).with_seed(42);
        let rff = RandomFourierFeatures::new(2, config).unwrap();

        let x = vec![0.0, 0.0];
        let y = vec![0.0, 0.0];

        // Same point should have kernel ≈ 1
        let approx = rff.approximate_kernel(&x, &y).unwrap();
        assert!((approx - 1.0).abs() < 0.1); // Allow some approximation error

        // Different points
        let y2 = vec![1.0, 1.0];
        let approx2 = rff.approximate_kernel(&x, &y2).unwrap();
        // True RBF: exp(-1.0 * 2) = exp(-2) ≈ 0.135
        assert!(approx2 > 0.0 && approx2 < 1.0);
    }

    #[test]
    fn test_rff_matrix_approximation() {
        let config = RffConfig::new(KernelType::Rbf { gamma: 0.5 }, 200).with_seed(42);
        let rff = RandomFourierFeatures::new(2, config).unwrap();

        let data = vec![vec![0.0, 0.0], vec![1.0, 0.0], vec![0.0, 1.0]];

        let matrix = rff.approximate_kernel_matrix(&data).unwrap();

        assert_eq!(matrix.len(), 3);
        assert_eq!(matrix[0].len(), 3);

        // Diagonal should be close to 1
        for i in 0..3 {
            assert!((matrix[i][i] - 1.0).abs() < 0.1);
        }

        // Symmetry
        for i in 0..3 {
            for j in 0..3 {
                assert!((matrix[i][j] - matrix[j][i]).abs() < 1e-10);
            }
        }
    }

    #[test]
    fn test_rff_reproducibility() {
        // Two transformers with the same seed must produce identical features.
        let config1 = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 50).with_seed(123);
        let config2 = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 50).with_seed(123);

        let rff1 = RandomFourierFeatures::new(3, config1).unwrap();
        let rff2 = RandomFourierFeatures::new(3, config2).unwrap();

        let x = vec![1.0, 2.0, 3.0];
        let f1 = rff1.transform(&x).unwrap();
        let f2 = rff2.transform(&x).unwrap();

        for (a, b) in f1.iter().zip(f2.iter()) {
            assert!((a - b).abs() < 1e-10);
        }
    }

    #[test]
    fn test_rff_laplacian() {
        let config = RffConfig::new(KernelType::Laplacian { gamma: 1.0 }, 100).with_seed(42);
        let rff = RandomFourierFeatures::new(2, config).unwrap();

        let x = vec![0.0, 0.0];
        let approx = rff.approximate_kernel(&x, &x).unwrap();
        assert!((approx - 1.0).abs() < 0.2);
    }

    #[test]
    fn test_rff_matern() {
        let config = RffConfig::new(KernelType::Matern15 { length_scale: 1.0 }, 100).with_seed(42);
        let rff = RandomFourierFeatures::new(2, config).unwrap();

        let x = vec![0.0, 0.0];
        let approx = rff.approximate_kernel(&x, &x).unwrap();
        assert!((approx - 1.0).abs() < 0.2);
    }

    #[test]
    fn test_nystroem_features() {
        let landmarks = vec![vec![0.0, 0.0], vec![1.0, 0.0], vec![0.0, 1.0]];

        let nystroem = NystroemFeatures::new(landmarks, KernelType::Rbf { gamma: 1.0 }).unwrap();

        assert_eq!(nystroem.n_landmarks(), 3);

        let x = vec![0.5, 0.5];
        let features = nystroem.transform(&x).unwrap();
        assert_eq!(features.len(), 3);
    }

    #[test]
    fn test_nystroem_batch() {
        let landmarks = vec![vec![0.0, 0.0], vec![1.0, 1.0]];
        let nystroem = NystroemFeatures::new(landmarks, KernelType::Rbf { gamma: 0.5 }).unwrap();

        let data = vec![vec![0.0, 0.0], vec![0.5, 0.5], vec![1.0, 1.0]];
        let features = nystroem.transform_batch(&data).unwrap();

        assert_eq!(features.len(), 3);
        assert_eq!(features[0].len(), 2);
    }

    #[test]
    fn test_orthogonal_rff() {
        let config = RffConfig::new(KernelType::Rbf { gamma: 1.0 }, 50).with_seed(42);
        let orf = OrthogonalRandomFeatures::new(3, config).unwrap();

        let x = vec![1.0, 2.0, 3.0];
        let features = orf.transform(&x).unwrap();

        assert_eq!(features.len(), 100);
    }

    #[test]
    fn test_kernel_type_name() {
        assert_eq!(KernelType::Rbf { gamma: 1.0 }.name(), "RBF");
        assert_eq!(KernelType::Laplacian { gamma: 1.0 }.name(), "Laplacian");
        assert_eq!(
            KernelType::Matern15 { length_scale: 1.0 }.name(),
            "Matern-1.5"
        );
    }
}