Skip to main content

aprender/decomposition/
ica.rs

1//! Independent Component Analysis (ICA)
2//!
3//! ICA is a computational technique for separating a multivariate signal into
4//! additive, independent components. It's a form of blind source separation.
5//!
6//! # Algorithm: `FastICA`
7//!
8//! The `FastICA` algorithm (Hyvärinen & Oja, 2000) consists of:
9//!
10//! 1. **Centering**: Subtract mean from each feature
11//! 2. **Whitening**: Decorrelate and normalize variance via eigendecomposition
12//! 3. **Optimization**: Iteratively find directions of maximum non-Gaussianity
13//!
14//! # Mathematical Background
15//!
16//! Given observed data X = AS, where:
17//! - X: n×p observed signals (mixed)
18//! - A: p×p mixing matrix
19//! - S: n×p independent sources
20//!
21//! ICA recovers W such that S ≈ XW, where W ≈ A^(-1).
22//!
23//! # Examples
24//!
25//! ```
26//! use aprender::decomposition::ICA;
27//! use aprender::primitives::Matrix;
28//!
29//! // Mixed signals (3 samples, 2 sources)
30//! let mixed = Matrix::from_vec(3, 2, vec![
31//!     1.0, 2.0,
32//!     2.0, 1.0,
33//!     3.0, 4.0,
34//! ]).expect("Valid matrix");
35//!
36//! let mut ica = ICA::new(2); // 2 components
37//! ica.fit(&mixed).expect("ICA should fit");
38//!
39//! let sources = ica.transform(&mixed).expect("Should transform");
40//! ```
41
42use crate::error::{AprenderError, Result};
43use crate::primitives::{Matrix, Vector};
44
/// Independent Component Analysis using `FastICA` algorithm.
///
/// ICA separates multivariate signals into independent, non-Gaussian components.
///
/// Configure with the builder-style `with_*` methods, then call `fit`
/// followed by `transform`.
#[derive(Debug, Clone)]
pub struct ICA {
    /// Number of components to extract
    n_components: usize,

    /// Maximum iterations for `FastICA`
    max_iter: usize,

    /// Convergence tolerance
    tol: f32,

    /// Random state for initialization.
    /// NOTE(review): stored but never read in the visible code — `fastica`
    /// initializes each direction with a deterministic basis vector. Confirm
    /// whether seeded random initialization was intended.
    random_state: Option<u64>,

    // Fitted parameters (all `None` until `fit` succeeds)
    /// Whitening matrix (p × `n_components`)
    whitening_matrix: Option<Matrix<f32>>,

    /// Unmixing matrix (`n_components` × p)
    unmixing_matrix: Option<Matrix<f32>>,

    /// Mean of each feature
    mean: Option<Vector<f32>>,
}
72
73impl ICA {
74    /// Creates a new ICA model.
75    ///
76    /// # Arguments
77    ///
78    /// * `n_components` - Number of independent components to extract
79    ///
80    /// # Examples
81    ///
82    /// ```
83    /// use aprender::decomposition::ICA;
84    ///
85    /// let ica = ICA::new(3); // Extract 3 components
86    /// ```
87    #[must_use]
88    pub fn new(n_components: usize) -> Self {
89        Self {
90            n_components,
91            max_iter: 200,
92            tol: 1e-4,
93            random_state: None,
94            whitening_matrix: None,
95            unmixing_matrix: None,
96            mean: None,
97        }
98    }
99
100    /// Sets the maximum number of iterations.
101    #[must_use]
102    pub fn with_max_iter(mut self, max_iter: usize) -> Self {
103        self.max_iter = max_iter;
104        self
105    }
106
107    /// Sets the convergence tolerance.
108    #[must_use]
109    pub fn with_tolerance(mut self, tol: f32) -> Self {
110        self.tol = tol;
111        self
112    }
113
114    /// Sets the random state for reproducibility.
115    #[must_use]
116    pub fn with_random_state(mut self, seed: u64) -> Self {
117        self.random_state = Some(seed);
118        self
119    }
120
121    /// Fits the ICA model to the data.
122    ///
123    /// # Arguments
124    ///
125    /// * `x` - Data matrix (n × p), where n is samples and p is features
126    ///
127    /// # Errors
128    ///
129    /// Returns error if data dimensions are invalid or optimization fails.
130    #[allow(clippy::similar_names)]
131    pub fn fit(&mut self, x: &Matrix<f32>) -> Result<()> {
132        let n = x.n_rows();
133        let p = x.n_cols();
134
135        if n == 0 || p == 0 {
136            return Err(AprenderError::Other("Data cannot be empty".into()));
137        }
138
139        if self.n_components > p {
140            return Err(AprenderError::Other(format!(
141                "n_components ({}) cannot exceed number of features ({})",
142                self.n_components, p
143            )));
144        }
145
146        // Step 1: Center the data
147        let (x_centered, mean) = Self::center_data(x)?;
148        self.mean = Some(mean);
149
150        // Step 2: Whiten the data
151        let (x_whitened, whitening_matrix) = Self::whiten_data(&x_centered, self.n_components)?;
152        self.whitening_matrix = Some(whitening_matrix);
153
154        // Step 3: Run FastICA to find unmixing matrix
155        let unmixing = self.fastica(&x_whitened)?;
156        self.unmixing_matrix = Some(unmixing);
157
158        Ok(())
159    }
160
161    /// Transforms data using the fitted ICA model.
162    ///
163    /// # Arguments
164    ///
165    /// * `x` - Data matrix (n × p)
166    ///
167    /// # Returns
168    ///
169    /// Independent components (n × `n_components`)
170    pub fn transform(&self, x: &Matrix<f32>) -> Result<Matrix<f32>> {
171        let mean = self
172            .mean
173            .as_ref()
174            .ok_or_else(|| AprenderError::Other("Model not fitted. Call fit() first.".into()))?;
175
176        let whitening = self
177            .whitening_matrix
178            .as_ref()
179            .ok_or_else(|| AprenderError::Other("Model not fitted. Call fit() first.".into()))?;
180
181        let unmixing = self
182            .unmixing_matrix
183            .as_ref()
184            .ok_or_else(|| AprenderError::Other("Model not fitted. Call fit() first.".into()))?;
185
186        let n = x.n_rows();
187        let p = x.n_cols();
188
189        if p != mean.len() {
190            return Err(AprenderError::DimensionMismatch {
191                expected: format!("{} features", mean.len()),
192                actual: format!("{p} features in data"),
193            });
194        }
195
196        // Center
197        let mut x_centered_data = Vec::with_capacity(n * p);
198        for i in 0..n {
199            for j in 0..p {
200                x_centered_data.push(x.get(i, j) - mean[j]);
201            }
202        }
203        let x_centered = Matrix::from_vec(n, p, x_centered_data)
204            .map_err(|e| AprenderError::Other(format!("Centering failed: {e}")))?;
205
206        // Whiten
207        let x_whitened = x_centered
208            .matmul(whitening)
209            .map_err(|e| AprenderError::Other(format!("Whitening failed: {e}")))?;
210
211        // Apply unmixing
212        x_whitened
213            .matmul(unmixing)
214            .map_err(|e| AprenderError::Other(format!("Unmixing failed: {e}")))
215    }
216
217    /// Centers data by subtracting column means.
218    #[allow(clippy::needless_range_loop)]
219    fn center_data(x: &Matrix<f32>) -> Result<(Matrix<f32>, Vector<f32>)> {
220        let n = x.n_rows();
221        let p = x.n_cols();
222
223        // Compute column means
224        let mut means = vec![0.0_f32; p];
225        #[allow(clippy::needless_range_loop)]
226        for j in 0..p {
227            let mut sum = 0.0;
228            for i in 0..n {
229                sum += x.get(i, j);
230            }
231            means[j] = sum / n as f32;
232        }
233
234        // Center data
235        let mut centered_data = Vec::with_capacity(n * p);
236        for i in 0..n {
237            for j in 0..p {
238                centered_data.push(x.get(i, j) - means[j]);
239            }
240        }
241
242        let centered = Matrix::from_vec(n, p, centered_data)
243            .map_err(|e| AprenderError::Other(format!("Failed to center data: {e}")))?;
244
245        Ok((centered, Vector::from_vec(means)))
246    }
247
248    /// Whitens data using eigendecomposition (ZCA whitening).
249    ///
250    /// Returns whitened data and whitening matrix.
251    #[allow(clippy::similar_names)]
252    #[allow(clippy::needless_range_loop)]
253    fn whiten_data(
254        x_centered: &Matrix<f32>,
255        n_components: usize,
256    ) -> Result<(Matrix<f32>, Matrix<f32>)> {
257        let n = x_centered.n_rows();
258        let p = x_centered.n_cols();
259
260        // Compute covariance matrix: (1/n) X^T X
261        let xt = x_centered.transpose();
262        let cov = xt
263            .matmul(x_centered)
264            .map_err(|e| AprenderError::Other(format!("Covariance computation failed: {e}")))?;
265
266        // Scale by 1/n
267        let mut cov_data = vec![0.0_f32; p * p];
268        for i in 0..p {
269            for j in 0..p {
270                cov_data[i * p + j] = cov.get(i, j) / n as f32;
271            }
272        }
273        let cov_scaled = Matrix::from_vec(p, p, cov_data)
274            .map_err(|e| AprenderError::Other(format!("Covariance scaling failed: {e}")))?;
275
276        // Eigen decomposition (simplified - use power iteration for top components)
277        let (eigenvalues, eigenvectors) = Self::eigen_decomposition(&cov_scaled, n_components)?;
278
279        // Compute whitening matrix: V Λ^(-1/2)
280        // where V are eigenvectors and Λ are eigenvalues
281        let mut whitening_data = Vec::with_capacity(p * n_components);
282        for j in 0..n_components {
283            let scale = 1.0 / eigenvalues[j].sqrt();
284            for i in 0..p {
285                whitening_data.push(eigenvectors.get(i, j) * scale);
286            }
287        }
288        let whitening_matrix = Matrix::from_vec(p, n_components, whitening_data)
289            .map_err(|e| AprenderError::Other(format!("Whitening matrix creation failed: {e}")))?;
290
291        // Whiten data: X_white = X_centered * whitening_matrix
292        let x_whitened = x_centered
293            .matmul(&whitening_matrix)
294            .map_err(|e| AprenderError::Other(format!("Data whitening failed: {e}")))?;
295
296        Ok((x_whitened, whitening_matrix))
297    }
298
299    /// Simple eigen decomposition using power iteration for top k eigenvalues/vectors.
300    #[allow(clippy::needless_range_loop)]
301    fn eigen_decomposition(matrix: &Matrix<f32>, k: usize) -> Result<(Vec<f32>, Matrix<f32>)> {
302        let n = matrix.n_rows();
303
304        if matrix.n_cols() != n {
305            return Err(AprenderError::Other(
306                "Eigendecomposition requires square matrix".into(),
307            ));
308        }
309
310        let mut eigenvalues = Vec::with_capacity(k);
311        let mut eigenvectors_data = Vec::with_capacity(n * k);
312
313        let mut residual = matrix.clone();
314
315        for _ in 0..k {
316            // Power iteration to find dominant eigenvector
317            let (eigenvalue, eigenvector) = Self::power_iteration(&residual, 100)?;
318
319            eigenvalues.push(eigenvalue);
320            eigenvectors_data.extend(eigenvector.as_slice());
321
322            // Deflate: A' = A - λvv^T
323            let mut new_residual_data = vec![0.0_f32; n * n];
324            for i in 0..n {
325                for j in 0..n {
326                    let deflation = eigenvalue * eigenvector[i] * eigenvector[j];
327                    new_residual_data[i * n + j] = residual.get(i, j) - deflation;
328                }
329            }
330            residual = Matrix::from_vec(n, n, new_residual_data)
331                .map_err(|e| AprenderError::Other(format!("Deflation failed: {e}")))?;
332        }
333
334        let eigenvectors = Matrix::from_vec(n, k, eigenvectors_data).map_err(|e| {
335            AprenderError::Other(format!("Eigenvector matrix creation failed: {e}"))
336        })?;
337
338        Ok((eigenvalues, eigenvectors))
339    }
340
341    /// Power iteration to find dominant eigenvector.
342    #[allow(clippy::needless_range_loop)]
343    fn power_iteration(matrix: &Matrix<f32>, max_iter: usize) -> Result<(f32, Vector<f32>)> {
344        let n = matrix.n_rows();
345
346        // Initialize with random vector (simple: all ones, then normalize)
347        let mut v = vec![1.0_f32; n];
348        let norm = (v.iter().map(|x| x * x).sum::<f32>()).sqrt();
349        for val in &mut v {
350            *val /= norm;
351        }
352
353        let mut eigenvalue = 0.0;
354
355        for _ in 0..max_iter {
356            // v_new = A * v
357            let mut v_new = vec![0.0_f32; n];
358            for i in 0..n {
359                let mut sum = 0.0;
360                for j in 0..n {
361                    sum += matrix.get(i, j) * v[j];
362                }
363                v_new[i] = sum;
364            }
365
366            // Normalize
367            let norm = (v_new.iter().map(|x| x * x).sum::<f32>()).sqrt();
368            if norm < 1e-10 {
369                return Err(AprenderError::Other(
370                    "Power iteration converged to zero vector".into(),
371                ));
372            }
373
374            for val in &mut v_new {
375                *val /= norm;
376            }
377
378            eigenvalue = norm;
379            v = v_new;
380        }
381
382        Ok((eigenvalue, Vector::from_vec(v)))
383    }
384
    /// `FastICA` algorithm to find unmixing matrix.
    ///
    /// Uses deflation approach with tanh nonlinearity.
    ///
    /// `x_white` is the whitened data (n × p; p equals `n_components` after
    /// whitening). Each direction `w` is refined by the fixed-point update
    /// w ← E[x·g(wᵀx)] − E[g′(wᵀx)]·w with g = tanh, then Gram–Schmidt
    /// orthogonalized against previously extracted directions and normalized.
    ///
    /// NOTE(review): `self.random_state` is not consulted here — each `w` is
    /// initialized to a deterministic basis vector. Confirm whether seeded
    /// random initialization was intended.
    #[allow(clippy::similar_names)]
    #[allow(clippy::needless_range_loop)]
    fn fastica(&self, x_white: &Matrix<f32>) -> Result<Matrix<f32>> {
        let n = x_white.n_rows();
        let p = x_white.n_cols(); // Should equal n_components after whitening

        // Flat row-major storage; row `comp` holds component `comp`'s direction.
        let mut w_vectors = Vec::with_capacity(p * p);

        // Deflation: extract components one by one
        for comp in 0..p {
            // Initialize w deterministically: the comp-th standard basis vector.
            let mut w = vec![0.0_f32; p];
            w[comp % p] = 1.0;

            // Normalize (already unit length for a basis vector; kept for safety)
            let norm = (w.iter().map(|x| x * x).sum::<f32>()).sqrt();
            for val in &mut w {
                *val /= norm;
            }

            // Fixed-point iteration
            for _iter in 0..self.max_iter {
                // Projections w^T x_i for every sample i
                let mut wtx = vec![0.0_f32; n];
                for i in 0..n {
                    let mut sum = 0.0;
                    for j in 0..p {
                        sum += w[j] * x_white.get(i, j);
                    }
                    wtx[i] = sum;
                }

                // E[X g(w^T X)] where g = tanh
                let mut ex_g = vec![0.0_f32; p];
                for j in 0..p {
                    let mut sum = 0.0;
                    for i in 0..n {
                        let g = wtx[i].tanh(); // g(w^T x)
                        sum += x_white.get(i, j) * g;
                    }
                    ex_g[j] = sum / n as f32;
                }

                // E[g'(w^T X)] where g' = 1 - tanh^2
                let mut eg_prime = 0.0;
                for i in 0..n {
                    let tanh_val = wtx[i].tanh();
                    eg_prime += 1.0 - tanh_val * tanh_val;
                }
                eg_prime /= n as f32;

                // Fixed-point update: w_new = E[X g(w^T X)] - E[g'(w^T X)] w
                let mut w_new = vec![0.0_f32; p];
                for j in 0..p {
                    w_new[j] = ex_g[j] - eg_prime * w[j];
                }

                // Gram–Schmidt: orthogonalize against previously found
                // components so each new direction is independent of them.
                for prev_comp in 0..comp {
                    let mut dot = 0.0;
                    for j in 0..p {
                        dot += w_new[j] * w_vectors[prev_comp * p + j];
                    }
                    for j in 0..p {
                        w_new[j] -= dot * w_vectors[prev_comp * p + j];
                    }
                }

                // Normalize; a near-zero vector means deflation removed all signal
                let norm = (w_new.iter().map(|x| x * x).sum::<f32>()).sqrt();
                if norm < 1e-10 {
                    return Err(AprenderError::Other(
                        "FastICA failed: w converged to zero".into(),
                    ));
                }
                for val in &mut w_new {
                    *val /= norm;
                }

                // Convergence check: |<w, w_new>| ≈ 1 means the direction is
                // fixed (sign flips are irrelevant for ICA components).
                let mut dot = 0.0;
                for j in 0..p {
                    dot += w[j] * w_new[j];
                }

                if (1.0 - dot.abs()) < self.tol {
                    w = w_new;
                    break;
                }

                w = w_new;
            }

            // Store this component
            w_vectors.extend(&w);
        }

        // Unmixing matrix is W^T (each row is a component)
        // NOTE(review): `transform` right-multiplies X_white by this matrix as
        // stored (rows = components), i.e. it computes X_white·W rather than
        // the conventional FastICA projection X_white·Wᵀ. W is (approximately)
        // orthogonal, so the output is still an orthogonal rotation of the
        // whitened data — verify component orientation against a reference
        // implementation.
        Matrix::from_vec(p, p, w_vectors)
            .map_err(|e| AprenderError::Other(format!("Failed to create unmixing matrix: {e}")))
    }
489}
490
#[cfg(test)]
mod tests {
    // Unit tests covering: fit/transform round-trips, error paths (empty
    // data, too many components, unfitted model, dimension mismatch),
    // builder options, derived traits, and the numeric helpers
    // (centering, whitening, power iteration, eigendecomposition).
    use super::*;

    #[test]
    fn test_ica_basic() {
        // Simple 2D case: fit and transform should succeed and preserve shape.
        let data = Matrix::from_vec(
            10,
            2,
            vec![
                1.0, 2.0, 2.0, 1.0, 3.0, 4.0, 4.0, 3.0, 5.0, 6.0, 1.5, 2.5, 2.5, 1.5, 3.5, 4.5,
                4.5, 3.5, 5.5, 6.5,
            ],
        )
        .expect("Valid matrix");

        let mut ica = ICA::new(2);
        let result = ica.fit(&data);
        assert!(result.is_ok(), "ICA should fit");

        let sources = ica.transform(&data).expect("Should transform");
        assert_eq!(sources.n_rows(), 10);
        assert_eq!(sources.n_cols(), 2);
    }

    #[test]
    fn test_ica_invalid_n_components() {
        let data = Matrix::from_vec(5, 2, vec![1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 6.0])
            .expect("Valid matrix");

        let mut ica = ICA::new(3); // More components than features
        let result = ica.fit(&data);
        assert!(result.is_err());
    }

    #[test]
    fn test_ica_transform_not_fitted() {
        // transform before fit must error, not panic.
        let ica = ICA::new(2);
        let data =
            Matrix::from_vec(3, 2, vec![1.0, 2.0, 2.0, 3.0, 3.0, 4.0]).expect("Valid matrix");

        let result = ica.transform(&data);
        assert!(result.is_err());
    }

    #[test]
    fn test_ica_dimension_mismatch() {
        let data = Matrix::from_vec(
            5,
            3,
            vec![
                1.0, 2.0, 3.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0,
            ],
        )
        .expect("Valid matrix");

        let mut ica = ICA::new(2);
        ica.fit(&data).expect("Should fit");

        // Try to transform data with wrong dimensions
        let wrong_data =
            Matrix::from_vec(3, 2, vec![1.0, 2.0, 2.0, 3.0, 3.0, 4.0]).expect("Valid matrix");

        let result = ica.transform(&wrong_data);
        assert!(result.is_err());
    }

    #[test]
    fn test_ica_with_options() {
        // Data with some variation between features
        let data = Matrix::from_vec(
            8,
            2,
            vec![
                1.0, 2.0, 2.0, 3.0, 3.0, 5.0, 4.0, 6.0, 5.0, 4.0, 6.0, 5.0, 7.0, 7.0, 8.0, 9.0,
            ],
        )
        .expect("Valid matrix");

        let mut ica = ICA::new(2).with_max_iter(100).with_tolerance(1e-5);

        let result = ica.fit(&data);
        assert!(result.is_ok());
    }

    #[test]
    fn test_center_data() {
        let data =
            Matrix::from_vec(3, 2, vec![1.0, 2.0, 2.0, 4.0, 3.0, 6.0]).expect("Valid matrix");

        let (centered, mean) = ICA::center_data(&data).expect("Should center");

        assert_eq!(mean.len(), 2);
        assert!((mean[0] - 2.0).abs() < 1e-6); // mean of [1,2,3] is 2
        assert!((mean[1] - 4.0).abs() < 1e-6); // mean of [2,4,6] is 4

        // Centered data should have mean ~0
        let mut col0_sum = 0.0;
        let mut col1_sum = 0.0;
        for i in 0..3 {
            col0_sum += centered.get(i, 0);
            col1_sum += centered.get(i, 1);
        }
        assert!(col0_sum.abs() < 1e-6);
        assert!(col1_sum.abs() < 1e-6);
    }

    #[test]
    fn test_power_iteration() {
        // Simple 2x2 matrix with known eigenvalues (4 and 2).
        let matrix = Matrix::from_vec(2, 2, vec![3.0, 1.0, 1.0, 3.0]).expect("Valid matrix");

        let (eigenvalue, eigenvector) =
            ICA::power_iteration(&matrix, 100).expect("Should converge");

        // Largest eigenvalue should be 4.0
        assert!((eigenvalue - 4.0).abs() < 0.1, "Eigenvalue should be ~4.0");

        // Eigenvector should be normalized
        let norm: f32 = eigenvector
            .as_slice()
            .iter()
            .map(|x| x * x)
            .sum::<f32>()
            .sqrt();
        assert!((norm - 1.0).abs() < 1e-6);
    }

    // ============================================================================
    // Additional Coverage Tests
    // ============================================================================

    #[test]
    fn test_ica_with_random_state() {
        let data = Matrix::from_vec(
            8,
            2,
            vec![
                1.0, 2.0, 2.0, 3.0, 3.0, 5.0, 4.0, 6.0, 5.0, 4.0, 6.0, 5.0, 7.0, 7.0, 8.0, 9.0,
            ],
        )
        .expect("Valid matrix");

        let mut ica = ICA::new(2).with_random_state(42);
        let result = ica.fit(&data);
        assert!(result.is_ok());
    }

    #[test]
    fn test_ica_empty_data() {
        // Zero rows must be rejected by fit.
        let data = Matrix::from_vec(0, 2, vec![]).expect("Valid empty matrix");
        let mut ica = ICA::new(2);
        let result = ica.fit(&data);
        assert!(result.is_err());
    }

    #[test]
    fn test_ica_empty_features() {
        // Zero columns must be rejected by fit.
        let data = Matrix::from_vec(5, 0, vec![]).expect("Valid empty matrix");
        let mut ica = ICA::new(1);
        let result = ica.fit(&data);
        assert!(result.is_err());
    }

    #[test]
    fn test_ica_single_component() {
        let data = Matrix::from_vec(
            6,
            3,
            vec![
                1.0, 2.0, 3.0, 2.0, 4.0, 6.0, 3.0, 6.0, 9.0, 4.0, 8.0, 12.0, 5.0, 10.0, 15.0, 6.0,
                12.0, 18.0,
            ],
        )
        .expect("Valid matrix");

        let mut ica = ICA::new(1);
        let result = ica.fit(&data);
        assert!(result.is_ok());

        let sources = ica.transform(&data).expect("Should transform");
        assert_eq!(sources.n_cols(), 1);
    }

    #[test]
    fn test_ica_whitening() {
        let data = Matrix::from_vec(
            10,
            2,
            vec![
                1.0, 2.0, 2.0, 1.0, 3.0, 4.0, 4.0, 3.0, 5.0, 6.0, 1.5, 2.5, 2.5, 1.5, 3.5, 4.5,
                4.5, 3.5, 5.5, 6.5,
            ],
        )
        .expect("Valid matrix");

        // Center and whiten
        let (centered, _mean) = ICA::center_data(&data).expect("Should center");
        let (whitened, _whitening_matrix) = ICA::whiten_data(&centered, 2).expect("Should whiten");

        assert_eq!(whitened.n_rows(), 10);
        assert_eq!(whitened.n_cols(), 2);
    }

    #[test]
    fn test_ica_eigen_decomposition() {
        // Symmetric positive definite matrix
        let matrix = Matrix::from_vec(3, 3, vec![4.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 2.0])
            .expect("Valid matrix");

        let (eigenvalues, eigenvectors) =
            ICA::eigen_decomposition(&matrix, 2).expect("Should decompose");

        assert_eq!(eigenvalues.len(), 2);
        assert_eq!(eigenvectors.n_rows(), 3);
        assert_eq!(eigenvectors.n_cols(), 2);
    }

    #[test]
    fn test_ica_eigen_decomposition_non_square() {
        let matrix =
            Matrix::from_vec(2, 3, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).expect("Valid matrix");

        let result = ICA::eigen_decomposition(&matrix, 2);
        assert!(result.is_err());
    }

    #[test]
    fn test_ica_clone() {
        let ica = ICA::new(3)
            .with_max_iter(100)
            .with_tolerance(1e-5)
            .with_random_state(42);

        let cloned = ica.clone();
        // Just test that clone compiles and works
        assert_eq!(format!("{:?}", ica), format!("{:?}", cloned));
    }

    #[test]
    fn test_ica_debug() {
        let ica = ICA::new(2);
        let debug_str = format!("{:?}", ica);
        assert!(debug_str.contains("ICA"));
        assert!(debug_str.contains("n_components"));
    }

    #[test]
    fn test_ica_fit_then_transform_new_data() {
        let training_data = Matrix::from_vec(
            10,
            2,
            vec![
                1.0, 2.0, 2.0, 1.0, 3.0, 4.0, 4.0, 3.0, 5.0, 6.0, 1.5, 2.5, 2.5, 1.5, 3.5, 4.5,
                4.5, 3.5, 5.5, 6.5,
            ],
        )
        .expect("Valid matrix");

        let mut ica = ICA::new(2);
        ica.fit(&training_data).expect("Should fit");

        // Transform different data with same shape
        let new_data =
            Matrix::from_vec(5, 2, vec![2.0, 3.0, 3.0, 2.0, 4.0, 5.0, 5.0, 4.0, 6.0, 7.0])
                .expect("Valid matrix");

        let transformed = ica.transform(&new_data).expect("Should transform");
        assert_eq!(transformed.n_rows(), 5);
        assert_eq!(transformed.n_cols(), 2);
    }

    #[test]
    fn test_ica_3d_data() {
        // Test with 3 features/components - use data with more variance
        // Note: Highly correlated/linearly dependent data can cause issues
        let data = Matrix::from_vec(
            12,
            3,
            vec![
                1.0, 5.0, 2.0, // More variance between columns
                4.0, 2.0, 6.0, 3.0, 7.0, 1.0, 6.0, 3.0, 4.0, 2.0, 8.0, 5.0, 5.0, 1.0, 3.0, 1.5,
                6.0, 2.5, 4.5, 2.5, 5.5, 3.5, 6.5, 1.5, 5.5, 4.5, 4.5, 2.5, 7.5, 6.5, 6.5, 1.5,
                3.5,
            ],
        )
        .expect("Valid matrix");

        let mut ica = ICA::new(2); // Use 2 components instead of 3 for more stability
        let result = ica.fit(&data);
        assert!(result.is_ok());

        let sources = ica.transform(&data).expect("Should transform");
        assert_eq!(sources.n_rows(), 12);
        assert_eq!(sources.n_cols(), 2);
    }

    #[test]
    fn test_ica_strict_tolerance() {
        let data = Matrix::from_vec(
            8,
            2,
            vec![
                1.0, 2.0, 2.0, 3.0, 3.0, 5.0, 4.0, 6.0, 5.0, 4.0, 6.0, 5.0, 7.0, 7.0, 8.0, 9.0,
            ],
        )
        .expect("Valid matrix");

        // Very strict tolerance
        let mut ica = ICA::new(2).with_tolerance(1e-8).with_max_iter(500);
        let result = ica.fit(&data);
        assert!(result.is_ok());
    }
}
805}