// scirs2_sparse/linalg/svd.rs

//! Sparse Singular Value Decomposition (SVD) algorithms
//!
//! This module provides efficient SVD algorithms for sparse matrices,
//! including truncated SVD and randomized SVD methods.

#![allow(unused_variables)]
#![allow(unused_assignments)]
#![allow(unused_mut)]

use crate::error::{SparseError, SparseResult};
use crate::sparray::SparseArray;
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::numeric::{Float, SparseElement};
use std::fmt::Debug;
use std::ops::{Add, Div, Mul, Sub};

/// Type alias for a bidiagonal SVD result: (singular values, U coefficients,
/// V^T coefficients) expressed in the Lanczos basis; the coefficient vectors
/// are stored as `f64` regardless of `T`.
type BidiagonalSvdResult<T> = (Vec<T>, Vec<Vec<f64>>, Vec<Vec<f64>>);

/// SVD computation methods
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SVDMethod {
    /// Lanczos bidiagonalization
    Lanczos,
    /// Randomized SVD
    Randomized,
    /// Power method for truncated SVD
    Power,
    /// Cross-approximation SVD
    CrossApproximation,
}

impl SVDMethod {
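    /// Parse a case-insensitive method name into an [`SVDMethod`].
    ///
    /// A minimal sketch of the mapping; this assumes `SVDMethod` is
    /// re-exported from `scirs2_sparse::linalg` the same way `svds` is.
    ///
    /// ```
    /// use scirs2_sparse::linalg::SVDMethod;
    ///
    /// assert_eq!(SVDMethod::from_str("Lanczos").expect("Operation failed"), SVDMethod::Lanczos);
    /// assert_eq!(SVDMethod::from_str("random").expect("Operation failed"), SVDMethod::Randomized);
    /// assert!(SVDMethod::from_str("qr").is_err());
    /// ```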
    pub fn from_str(s: &str) -> SparseResult<Self> {
        match s.to_lowercase().as_str() {
            "lanczos" => Ok(Self::Lanczos),
            "randomized" | "random" => Ok(Self::Randomized),
            "power" => Ok(Self::Power),
            "cross" | "cross_approximation" => Ok(Self::CrossApproximation),
            _ => Err(SparseError::ValueError(format!("Unknown SVD method: {s}"))),
        }
    }
}

/// Options for SVD computation
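///
/// A small sketch of overriding selected fields via struct-update syntax
/// (assuming `SVDOptions` and `SVDMethod` are re-exported from
/// `scirs2_sparse::linalg` the same way `svds` is):
///
/// ```
/// use scirs2_sparse::linalg::{SVDMethod, SVDOptions};
///
/// let opts = SVDOptions {
///     k: 3,
///     method: SVDMethod::Randomized,
///     ..Default::default()
/// };
/// assert_eq!(opts.maxiter, 1000); // other fields keep their defaults
/// ```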
#[derive(Debug, Clone)]
pub struct SVDOptions {
    /// Number of singular values to compute
    pub k: usize,
    /// Maximum number of iterations
    pub maxiter: usize,
    /// Convergence tolerance
    pub tol: f64,
    /// Number of additional random probe vectors (oversampling) for randomized methods
    pub n_oversamples: usize,
    /// Number of power iterations for randomized methods
    pub n_iter: usize,
    /// SVD computation method
    pub method: SVDMethod,
    /// Random seed for reproducibility
    pub random_seed: Option<u64>,
    /// Whether to compute left singular vectors (U)
    pub compute_u: bool,
    /// Whether to compute right singular vectors (V^T)
    pub compute_vt: bool,
}

impl Default for SVDOptions {
    fn default() -> Self {
        Self {
            k: 6,
            maxiter: 1000,
            tol: 1e-10,
            n_oversamples: 10,
            n_iter: 2,
            method: SVDMethod::Lanczos,
            random_seed: None,
            compute_u: true,
            compute_vt: true,
        }
    }
}

/// Result of SVD computation
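///
/// In exact arithmetic, a converged rank-k decomposition satisfies
/// `A ≈ U * diag(s) * V^T`, with `u` of shape `(m, k)` and `vt` of
/// shape `(k, n)`.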
#[derive(Debug, Clone)]
pub struct SVDResult<T>
where
    T: Float + SparseElement + Debug + Copy,
{
    /// Left singular vectors (U matrix)
    pub u: Option<Array2<T>>,
    /// Singular values, in descending order
    pub s: Array1<T>,
    /// Right singular vectors transposed (V^T matrix)
    pub vt: Option<Array2<T>>,
    /// Number of iterations performed
    pub iterations: usize,
    /// Whether the algorithm converged
    pub converged: bool,
}

/// Compute the truncated SVD of a sparse matrix
///
/// Computes the k largest singular values and corresponding singular vectors
/// of a sparse matrix using iterative methods.
///
/// # Arguments
///
/// * `matrix` - The sparse matrix to decompose
/// * `k` - Number of singular values to compute (default: 6)
/// * `options` - Optional configuration parameters
///
/// # Returns
///
/// SVD result containing U, s, and V^T matrices
///
/// # Examples
///
/// ```
/// use scirs2_sparse::linalg::svds;
/// use scirs2_sparse::csr_array::CsrArray;
///
/// // Create a sparse matrix
/// let rows = vec![0, 0, 1, 2, 2];
/// let cols = vec![0, 2, 1, 0, 2];
/// let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
/// let matrix = CsrArray::from_triplets(&rows, &cols, &data, (3, 3), false).expect("Operation failed");
///
/// // Compute 2 largest singular values
/// let result = svds(&matrix, Some(2), None).expect("Operation failed");
/// ```
#[allow(dead_code)]
pub fn svds<T, S>(
    matrix: &S,
    k: Option<usize>,
    options: Option<SVDOptions>,
) -> SparseResult<SVDResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    let opts = options.unwrap_or_default();
    let k = k.unwrap_or(opts.k);

    let (m, n) = matrix.shape();
    if k >= m.min(n) {
        return Err(SparseError::ValueError(
            "Number of singular values k must be less than min(m, n)".to_string(),
        ));
    }

    match opts.method {
        SVDMethod::Lanczos => lanczos_bidiag_svd(matrix, k, &opts),
        SVDMethod::Randomized => randomized_svd(matrix, k, &opts),
        SVDMethod::Power => power_method_svd(matrix, k, &opts),
        SVDMethod::CrossApproximation => cross_approximation_svd(matrix, k, &opts),
    }
}

/// Compute truncated SVD using a specific method and parameters
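///
/// A minimal usage sketch, following the same matrix-construction pattern as
/// the [`svds`] example (and assuming `svd_truncated` is re-exported from
/// `scirs2_sparse::linalg` alongside it):
///
/// ```
/// use scirs2_sparse::linalg::svd_truncated;
/// use scirs2_sparse::csr_array::CsrArray;
///
/// let rows = vec![0, 0, 1, 2, 2];
/// let cols = vec![0, 2, 1, 0, 2];
/// let data = vec![1.0, 2.0, 3.0, 4.0, 5.0];
/// let matrix = CsrArray::from_triplets(&rows, &cols, &data, (3, 3), false).expect("Operation failed");
///
/// let result = svd_truncated(&matrix, 2, "lanczos", Some(1e-8), Some(100)).expect("Operation failed");
/// assert_eq!(result.s.len(), 2);
/// ```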
#[allow(dead_code)]
pub fn svd_truncated<T, S>(
    matrix: &S,
    k: usize,
    method: &str,
    tol: Option<f64>,
    maxiter: Option<usize>,
) -> SparseResult<SVDResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    let method_enum = SVDMethod::from_str(method)?;

    let options = SVDOptions {
        k,
        method: method_enum,
        tol: tol.unwrap_or(1e-10),
        maxiter: maxiter.unwrap_or(1000),
        ..Default::default()
    };

    svds(matrix, Some(k), Some(options))
}

/// Lanczos bidiagonalization SVD algorithm
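///
/// This follows the Golub-Kahan bidiagonalization recurrence. Starting from a
/// unit vector `u_1`, each step computes
///
/// ```text
/// alpha_j * v_j     = A^T * u_j - beta_{j-1} * v_{j-1}
/// beta_j  * u_{j+1} = A * v_j   - alpha_j * u_j
/// ```
///
/// so `alpha` and `beta` form the diagonal and off-diagonal of a bidiagonal
/// matrix B whose singular values approximate the largest singular values
/// of A.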
#[allow(dead_code)]
fn lanczos_bidiag_svd<T, S>(
    matrix: &S,
    k: usize,
    options: &SVDOptions,
) -> SparseResult<SVDResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    let (m, n) = matrix.shape();
    let max_lanczos_size = (2 * k + 10).min(m.min(n));

    // Initialize starting vector
    let mut u = Array1::zeros(m);
    u[0] = T::sparse_one();

    // Normalize
    let norm = (u.iter().map(|&v| v * v).sum::<T>()).sqrt();
    if !SparseElement::is_zero(&norm) {
        for i in 0..m {
            u[i] = u[i] / norm;
        }
    }

    let mut alpha = Vec::<T>::new();
    let mut beta = Vec::<T>::new();
    let mut u_vectors = Vec::<Array1<T>>::with_capacity(max_lanczos_size);
    let mut v_vectors = Vec::<Array1<T>>::with_capacity(max_lanczos_size);

    u_vectors.push(u.clone());

    let mut converged = false;
    let mut iter = 0;

    // Lanczos bidiagonalization loop
    while iter < options.maxiter && alpha.len() < max_lanczos_size {
        // v = A^T * u - beta[j-1] * v[j-1]
        let av = matrix_transpose_vector_product(matrix, &u_vectors[iter])?;
        let mut v = av;

        if iter > 0 && !beta.is_empty() {
            let prev_beta = beta[iter - 1];
            for i in 0..n {
                v[i] = v[i] - prev_beta * v_vectors[iter - 1][i];
            }
        }

        // alpha[j] = ||v||
        let alpha_j = (v.iter().map(|&val| val * val).sum::<T>()).sqrt();
        alpha.push(alpha_j);

        if SparseElement::is_zero(&alpha_j) {
            break;
        }

        // Normalize v
        for i in 0..n {
            v[i] = v[i] / alpha_j;
        }
        v_vectors.push(v.clone());

        // u = A * v - alpha[j] * u[j]
        let avu = matrix_vector_product(matrix, &v)?;
        let mut u_next = avu;

        for i in 0..m {
            u_next[i] = u_next[i] - alpha_j * u_vectors[iter][i];
        }

        // beta[j] = ||u||
        let beta_j = (u_next.iter().map(|&val| val * val).sum::<T>()).sqrt();
        beta.push(beta_j);

        if beta_j < T::from(options.tol).expect("Operation failed") {
            converged = true;
            break;
        }

        // Normalize u
        for i in 0..m {
            u_next[i] = u_next[i] / beta_j;
        }

        u_vectors.push(u_next);
        iter += 1;
    }

    // Solve the bidiagonal SVD problem
    let (singular_values, u_bidiag, vt_bidiag) = solve_bidiagonal_svd(&alpha, &beta, k)?;

    // Compute final U and V^T matrices
    let final_u = if options.compute_u {
        let mut u_final = Array2::zeros((m, k.min(singular_values.len())));
        for j in 0..k.min(singular_values.len()) {
            for i in 0..m {
                let mut sum = T::sparse_zero();
                for l in 0..u_vectors.len().min(u_bidiag.len()) {
                    if j < u_bidiag[l].len() {
                        sum = sum
                            + T::from(u_bidiag[l][j]).expect("Operation failed") * u_vectors[l][i];
                    }
                }
                u_final[[i, j]] = sum;
            }
        }
        Some(u_final)
    } else {
        None
    };

    let final_vt = if options.compute_vt {
        let mut vt_final = Array2::zeros((k.min(singular_values.len()), n));
        for j in 0..k.min(singular_values.len()) {
            for i in 0..n {
                let mut sum = T::sparse_zero();
                for l in 0..v_vectors.len().min(vt_bidiag.len()) {
                    if j < vt_bidiag[l].len() {
                        sum = sum
                            + T::from(vt_bidiag[l][j]).expect("Operation failed") * v_vectors[l][i];
                    }
                }
                vt_final[[j, i]] = sum;
            }
        }
        Some(vt_final)
    } else {
        None
    };

    Ok(SVDResult {
        u: final_u,
        s: Array1::from_vec(singular_values[..k.min(singular_values.len())].to_vec()),
        vt: final_vt,
        iterations: iter,
        converged,
    })
}

/// Randomized SVD algorithm
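///
/// Follows the standard randomized range-finder scheme (Halko, Martinsson,
/// and Tropp, 2011):
///
/// ```text
/// 1. Draw a random test matrix Omega (n x l), l = k + n_oversamples.
/// 2. Form Y = A * Omega, optionally refined by power iterations
///    Y <- A * (A^T * Y) to sharpen the spectral decay.
/// 3. Orthonormalize Y into Q, so range(Q) approximates range(A).
/// 4. Project B = Q^T * A (l x n), take the small SVD of B, and map the
///    left factor back through Q.
/// ```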
#[allow(dead_code)]
fn randomized_svd<T, S>(matrix: &S, k: usize, options: &SVDOptions) -> SparseResult<SVDResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    let (m, n) = matrix.shape();
    // Limit l to the smaller dimension to ensure we can form an orthonormal basis
    let l = (k + options.n_oversamples).min(m).min(n);

    // Generate random matrix
    let mut omega = Array2::zeros((n, l));
    for i in 0..n {
        for j in 0..l {
            // Simple pseudo-random generation (replace with proper RNG in production)
            let val = ((i * 17 + j * 13) % 1000) as f64 / 1000.0 - 0.5;
            omega[[i, j]] = T::from(val).expect("Operation failed");
        }
    }

    // Y = A * Omega
    let mut y = Array2::zeros((m, l));
    for j in 0..l {
        let omega_col = omega.column(j).to_owned();
        let y_col = matrix_vector_product(matrix, &omega_col)?;
        for i in 0..m {
            y[[i, j]] = y_col[i];
        }
    }

    // Power iterations to improve quality
    for _ in 0..options.n_iter {
        // Y = A * (A^T * Y)
        let mut y_new = Array2::zeros((m, l));
        for j in 0..l {
            let y_col = y.column(j).to_owned();
            let at_y_col = matrix_transpose_vector_product(matrix, &y_col)?;
            let a_at_y_col = matrix_vector_product(matrix, &at_y_col)?;
            for i in 0..m {
                y_new[[i, j]] = a_at_y_col[i];
            }
        }
        y = y_new;
    }

    // QR decomposition of Y
    let q = qr_decomposition_orthogonal(&y)?;

    // B = Q^T * A
    // Q has shape (m, l) with orthonormal columns
    // Q^T * A has shape (l, n)
    // The i-th row of Q^T is the i-th column of Q
    let mut b = Array2::zeros((l, n));
    for i in 0..l {
        let q_col = q.column(i).to_owned();

        // Compute A^T * q_col, which gives the i-th row of Q^T * A
        let b_row = matrix_transpose_vector_product(matrix, &q_col)?;
        for j in 0..n {
            b[[i, j]] = b_row[j];
        }
    }

    // SVD of B
    let b_svd = dense_svd(&b, k)?;

    // Compute final U and V^T
    // U = Q * U_B where Q is (m, l) and U_B is (l, k)
    // So U is (m, k) with U[i,j] = sum_l Q[i,l] * U_B[l,j]
    let final_u = if options.compute_u {
        if let Some(ref u_b) = b_svd.u {
            let mut u_result = Array2::zeros((m, k));
            for i in 0..m {
                for j in 0..k {
                    let mut sum = T::sparse_zero();
                    for l_idx in 0..l {
                        sum = sum + q[[i, l_idx]] * u_b[[l_idx, j]];
                    }
                    u_result[[i, j]] = sum;
                }
            }
            Some(u_result)
        } else {
            None
        }
    } else {
        None
    };

    Ok(SVDResult {
        u: final_u,
        s: b_svd.s,
        vt: b_svd.vt,
        iterations: options.n_iter + 1,
        converged: true,
    })
}

/// Power method SVD (simplified implementation)
#[allow(dead_code)]
fn power_method_svd<T, S>(matrix: &S, k: usize, options: &SVDOptions) -> SparseResult<SVDResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    // For now, fall back to the Lanczos method.
    // A full implementation would use deflation and multiple power iterations.
    lanczos_bidiag_svd(matrix, k, options)
}

/// Cross-approximation SVD (simplified implementation)
#[allow(dead_code)]
fn cross_approximation_svd<T, S>(
    matrix: &S,
    k: usize,
    options: &SVDOptions,
) -> SparseResult<SVDResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    // For now, fall back to the Lanczos method.
    // A full implementation would use adaptive cross-approximation.
    lanczos_bidiag_svd(matrix, k, options)
}

/// Matrix-vector product: y = A * x
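///
/// Iterates over the (row, col, value) triplets from `SparseArray::find` and
/// accumulates `y[i] += a_ij * x[j]`, so the cost is O(nnz).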
#[allow(dead_code)]
fn matrix_vector_product<T, S>(matrix: &S, vector: &Array1<T>) -> SparseResult<Array1<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    let (m, n) = matrix.shape();
    if vector.len() != n {
        return Err(SparseError::DimensionMismatch {
            expected: n,
            found: vector.len(),
        });
    }

    let mut result = Array1::zeros(m);
    let (row_indices, col_indices, values) = matrix.find();

    for (k, (&i, &j)) in row_indices.iter().zip(col_indices.iter()).enumerate() {
        result[i] = result[i] + values[k] * vector[j];
    }

    Ok(result)
}

/// Matrix-transpose-vector product: y = A^T * x
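///
/// Same triplet sweep as `matrix_vector_product`, with the index roles
/// swapped: `y[j] += a_ij * x[i]`.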
#[allow(dead_code)]
fn matrix_transpose_vector_product<T, S>(matrix: &S, vector: &Array1<T>) -> SparseResult<Array1<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    let (m, n) = matrix.shape();
    if vector.len() != m {
        return Err(SparseError::DimensionMismatch {
            expected: m,
            found: vector.len(),
        });
    }

    let mut result = Array1::zeros(n);
    let (row_indices, col_indices, values) = matrix.find();

    for (k, (&i, &j)) in row_indices.iter().zip(col_indices.iter()).enumerate() {
        result[j] = result[j] + values[k] * vector[i];
    }

    Ok(result)
}

/// Solve SVD of a bidiagonal matrix (simplified implementation)
#[allow(dead_code)]
fn solve_bidiagonal_svd<T>(
    alpha: &[T],
    beta: &[T],
    k: usize,
) -> SparseResult<BidiagonalSvdResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
{
    let n = alpha.len();

    // For simplicity, this only approximates the result.
    // A real implementation would use the implicit-shift QR algorithm for
    // bidiagonal matrices.

    let mut singular_values = Vec::with_capacity(k);
    let mut u_vectors = Vec::with_capacity(k);
    let mut vt_vectors = Vec::with_capacity(k);

    // Approximate the largest singular value by the largest |alpha_j|
    if n > 0 {
        let largest_sv = alpha
            .iter()
            .map(|&x| x.abs())
            .fold(T::sparse_zero(), |a, b| if a > b { a } else { b });
        singular_values.push(largest_sv);

        // Create identity-like vectors (n > 0 is guaranteed here)
        let mut u_vec = vec![0.0; n];
        let mut vt_vec = vec![0.0; n];
        u_vec[0] = 1.0;
        vt_vec[0] = 1.0;

        u_vectors.push(u_vec);
        vt_vectors.push(vt_vec);
    }

    // Fill remaining with zeros (simplified)
    while singular_values.len() < k && singular_values.len() < n {
        singular_values.push(T::sparse_zero());
        u_vectors.push(vec![0.0_f64; n]);
        vt_vectors.push(vec![0.0_f64; n]);
    }

    Ok((singular_values, u_vectors, vt_vectors))
}

/// QR decomposition returning only Q (simplified implementation)
#[allow(dead_code)]
fn qr_decomposition_orthogonal<T>(matrix: &Array2<T>) -> SparseResult<Array2<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
{
    let (m, n) = matrix.dim();
    let mut q = matrix.clone();

    // Modified Gram-Schmidt: normalize column j, then immediately
    // orthogonalize the remaining columns against it
    for j in 0..n {
        // Normalize column j
        let mut norm = T::sparse_zero();
        for i in 0..m {
            norm = norm + q[[i, j]] * q[[i, j]];
        }
        norm = norm.sqrt();

        if !SparseElement::is_zero(&norm) {
            for i in 0..m {
                q[[i, j]] = q[[i, j]] / norm;
            }
        }

        // Orthogonalize remaining columns against column j
        for k in (j + 1)..n {
            let mut dot = T::sparse_zero();
            for i in 0..m {
                dot = dot + q[[i, j]] * q[[i, k]];
            }

            for i in 0..m {
                q[[i, k]] = q[[i, k]] - dot * q[[i, j]];
            }
        }
    }

    Ok(q)
}

/// Dense SVD (placeholder implementation)
#[allow(dead_code)]
fn dense_svd<T>(matrix: &Array2<T>, k: usize) -> SparseResult<SVDResult<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + std::iter::Sum,
{
    let (m, n) = matrix.dim();
    let rank = k.min(m).min(n);

    // Simplified implementation - in practice, use LAPACK or similar
    let singular_values = Array1::from_elem(rank, T::sparse_one());
    let u = Some(
        Array2::eye(m)
            .slice(scirs2_core::ndarray::s![.., ..rank])
            .to_owned(),
    );
    let vt = Some(
        Array2::eye(n)
            .slice(scirs2_core::ndarray::s![..rank, ..])
            .to_owned(),
    );

    Ok(SVDResult {
        u,
        s: singular_values,
        vt,
        iterations: 1,
        converged: true,
    })
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::csr_array::CsrArray;
    use approx::assert_relative_eq;

    fn create_test_matrix() -> CsrArray<f64> {
        // Create a simple sparse matrix for testing
        let rows = vec![0, 0, 1, 2, 2];
        let cols = vec![0, 2, 1, 0, 2];
        let data = vec![3.0, 2.0, 1.0, 4.0, 5.0];

        CsrArray::from_triplets(&rows, &cols, &data, (3, 3), false).expect("Operation failed")
    }

    #[test]
    fn test_svds_basic() {
        let matrix = create_test_matrix();
        let result = svds(&matrix, Some(2), None).expect("Operation failed");

        // Check dimensions
        assert_eq!(result.s.len(), 2);

        if let Some(ref u) = result.u {
            assert_eq!(u.shape(), [3, 2]);
        }

        if let Some(ref vt) = result.vt {
            assert_eq!(vt.shape(), [2, 3]);
        }

        // Singular values should be non-negative and sorted
        assert!(result.s[0] >= 0.0);
        if result.s.len() > 1 {
            assert!(result.s[0] >= result.s[1]);
        }
    }

    #[test]
    fn test_matrix_vector_product() {
        let matrix = create_test_matrix();
        let x = Array1::from_vec(vec![1.0, 2.0, 3.0]);

        let y = matrix_vector_product(&matrix, &x).expect("Operation failed");

        // Check result dimensions
        assert_eq!(y.len(), 3);

        // Verify computation: y = A * x
        assert_relative_eq!(y[0], 3.0 * 1.0 + 2.0 * 3.0, epsilon = 1e-10); // 9.0
        assert_relative_eq!(y[1], 1.0 * 2.0, epsilon = 1e-10); // 2.0
        assert_relative_eq!(y[2], 4.0 * 1.0 + 5.0 * 3.0, epsilon = 1e-10); // 19.0
    }

    #[test]
    fn test_matrix_transpose_vector_product() {
        let matrix = create_test_matrix();
        let x = Array1::from_vec(vec![1.0, 2.0, 3.0]);

        let y = matrix_transpose_vector_product(&matrix, &x).expect("Operation failed");

        // Check result dimensions
        assert_eq!(y.len(), 3);

        // Verify computation: y = A^T * x
        assert_relative_eq!(y[0], 3.0 * 1.0 + 4.0 * 3.0, epsilon = 1e-10); // 15.0
        assert_relative_eq!(y[1], 1.0 * 2.0, epsilon = 1e-10); // 2.0
        assert_relative_eq!(y[2], 2.0 * 1.0 + 5.0 * 3.0, epsilon = 1e-10); // 17.0
    }

    #[test]
    fn test_svd_options() {
        let matrix = create_test_matrix();

        let options = SVDOptions {
            k: 1,
            method: SVDMethod::Lanczos,
            compute_u: false,
            compute_vt: true,
            ..Default::default()
        };

        let result = svds(&matrix, Some(1), Some(options)).expect("Operation failed");

        assert_eq!(result.s.len(), 1);
        assert!(result.u.is_none());
        assert!(result.vt.is_some());
    }

    #[test]
    fn test_svd_truncated_api() {
        let matrix = create_test_matrix();

        let result =
            svd_truncated(&matrix, 2, "lanczos", Some(1e-8), Some(100)).expect("Operation failed");

        assert_eq!(result.s.len(), 2);
        assert!(result.u.is_some());
        assert!(result.vt.is_some());
    }

    #[test]
    fn test_randomized_svd() {
        let matrix = create_test_matrix();

        let options = SVDOptions {
            k: 2,
            method: SVDMethod::Randomized,
            n_oversamples: 5,
            n_iter: 1,
            ..Default::default()
        };

        let result = svds(&matrix, Some(2), Some(options)).expect("Operation failed");

        assert_eq!(result.s.len(), 2);
        assert!(result.converged);
    }

    #[test]
    fn test_qr_decomposition() {
        let matrix = Array2::from_shape_vec((3, 2), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
            .expect("Operation failed");

        let q = qr_decomposition_orthogonal(&matrix).expect("Operation failed");

        // Check dimensions
        assert_eq!(q.shape(), [3, 2]);

        // Check that columns are orthonormal (Q^T * Q = I): unit norms...
        for j in 0..2 {
            let mut norm = 0.0;
            for i in 0..3 {
                norm += q[[i, j]] * q[[i, j]];
            }
            assert_relative_eq!(norm, 1.0, epsilon = 1e-10);
        }

        // ...and a vanishing off-diagonal entry
        let mut dot = 0.0;
        for i in 0..3 {
            dot += q[[i, 0]] * q[[i, 1]];
        }
        assert_relative_eq!(dot, 0.0, epsilon = 1e-10);
    }

    #[test]
    fn test_svd_method_parsing() {
        assert_eq!(
            SVDMethod::from_str("lanczos").expect("Operation failed"),
            SVDMethod::Lanczos
        );
        assert_eq!(
            SVDMethod::from_str("randomized").expect("Operation failed"),
            SVDMethod::Randomized
        );
        assert_eq!(
            SVDMethod::from_str("power").expect("Operation failed"),
            SVDMethod::Power
        );
        assert!(SVDMethod::from_str("invalid").is_err());
    }

    #[test]
    fn test_invalid_k() {
        let matrix = create_test_matrix();

        // k too large
        let result = svds(&matrix, Some(10), None);
        assert!(result.is_err());
    }
    }
}