mdarray_linalg_faer/svd/context.rs
// Singular Value Decomposition (SVD):
// A = U * Σ * V^T
// where:
// - A is m × n (input matrix)
// - U is m × m (left singular vectors, orthogonal)
// - Σ is m × n (diagonal matrix with singular values on the diagonal)
// - V^T is n × n (transpose of right singular vectors, orthogonal)
// - s (Σ) contains min(m, n) singular values (non-negative, sorted in descending order)
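//
// For example, a 3 × 2 input A has min(3, 2) = 2 singular values: U is 3 × 3,
// V^T is 2 × 2, and the allocating methods below hand back the singular values
// on the diagonal of a 2 × 2 matrix `s`.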

use super::simple::svd_faer;
use faer_traits::ComplexField;
use mdarray::{DSlice, DTensor, Dense, Layout, tensor};
use mdarray_linalg::svd::{SVD, SVDDecomp, SVDError};
use num_complex::ComplexFloat;

use crate::Faer;

impl<T> SVD<T> for Faer
where
    T: ComplexFloat
        + ComplexField
        + Default
        + std::convert::From<<T as num_complex::ComplexFloat>::Real>
        + 'static,
{
    /// Compute the full SVD, returning newly allocated matrices
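    ///
    /// # Example
    ///
    /// A minimal sketch (left as `ignore`), assuming the crate is published as
    /// `mdarray_linalg_faer`, that `Faer` is its zero-sized backend handle, and
    /// that `mdarray`'s `tensor!` macro accepts nested row literals:
    ///
    /// ```ignore
    /// use mdarray::tensor;
    /// use mdarray_linalg::svd::SVD;
    /// use mdarray_linalg_faer::Faer;
    ///
    /// // 3 × 2 input, so min(m, n) = 2 singular values.
    /// let mut a = tensor![[1.0_f64, 0.0], [0.0, 2.0], [0.0, 0.0]];
    /// let decomp = Faer.svd(&mut a).expect("backend did not converge");
    /// // decomp.u is 3 × 3, decomp.vt is 2 × 2, and decomp.s is 2 × 2 with the
    /// // singular values on its diagonal, sorted in descending order.
    /// ```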
    fn svd<L: Layout>(&self, a: &mut DSlice<T, 2, L>) -> Result<SVDDecomp<T>, SVDError> {
        let (m, n) = *a.shape();
        let min_mn = m.min(n);
        let mut s_mda = tensor![[T::default(); min_mn]; min_mn];
        let mut u_mda = tensor![[T::default(); m]; m];
        let mut vt_mda = tensor![[T::default(); n]; n];

        // NOTE:
        // These tensors were previously created with `MaybeUninit` to avoid
        // default-initialization. However, after benchmarking, we observed
        // **no measurable performance benefit**, so for the sake of simplicity
        // and safety, `T::default()` is now used instead.
        //
        // LLVM aggressively optimizes trivial memory initializations (like zeroing
        // floats or ints), either lowering them to a single `memset` or eliminating
        // them entirely if they're unused.
        //
        // See:
        // - LLVM memset/memcpy optimizer: https://github.com/llvm/llvm-project/blob/main/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
        //
        // In this context, using `MaybeUninit` adds complexity and potential for
        // undefined behavior with no real performance gain, so we stick to
        // `T::default()`.

        match svd_faer(a, &mut s_mda, Some(&mut u_mda), Some(&mut vt_mda)) {
            // The concrete faer error is discarded here; 0 is a placeholder for
            // the unconverged superdiagonal count.
            Err(_) => Err(SVDError::BackendDidNotConverge {
                superdiagonals: 0,
            }),
            Ok(_) => Ok(SVDDecomp {
                s: s_mda,
                u: u_mda,
                vt: vt_mda,
            }),
        }
    }

    /// Compute only the singular values, returning a newly allocated matrix
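    ///
    /// # Example
    ///
    /// Sketch (left as `ignore`), under the same assumptions as the `svd`
    /// example above:
    ///
    /// ```ignore
    /// use mdarray::tensor;
    /// use mdarray_linalg::svd::SVD;
    /// use mdarray_linalg_faer::Faer;
    ///
    /// let mut a = tensor![[3.0_f64, 0.0], [0.0, 4.0]];
    /// let s = Faer.svd_s(&mut a).expect("backend did not converge");
    /// // `s` is 2 × 2 with the singular values 4.0 and 3.0 on its diagonal.
    /// ```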
    fn svd_s<L: Layout>(&self, a: &mut DSlice<T, 2, L>) -> Result<DTensor<T, 2>, SVDError> {
        let (m, n) = *a.shape();
        let min_mn = m.min(n);
        let mut s_mda = tensor![[T::default(); min_mn]; min_mn];

        // NOTE:
        // Same rationale as in `svd`: `T::default()` is used instead of `MaybeUninit`,
        // because LLVM already optimizes default initializations effectively.

        match svd_faer::<T, L, Dense, Dense, Dense>(a, &mut s_mda, None, None) {
            Err(_) => Err(SVDError::BackendDidNotConverge {
                superdiagonals: 0,
            }),
            Ok(_) => Ok(s_mda),
        }
    }

    /// Compute full SVD, overwriting existing matrices
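    ///
    /// # Example
    ///
    /// Sketch (left as `ignore`), under the same assumptions as the `svd`
    /// example above; here the caller pre-allocates the outputs with the same
    /// shapes the allocating `svd` uses:
    ///
    /// ```ignore
    /// use mdarray::tensor;
    /// use mdarray_linalg::svd::SVD;
    /// use mdarray_linalg_faer::Faer;
    ///
    /// let mut a = tensor![[1.0_f64, 2.0], [3.0, 4.0]];
    /// let mut s = tensor![[0.0_f64; 2]; 2];  // min(m, n) × min(m, n)
    /// let mut u = tensor![[0.0_f64; 2]; 2];  // m × m
    /// let mut vt = tensor![[0.0_f64; 2]; 2]; // n × n
    /// Faer.svd_overwrite(&mut a, &mut s, &mut u, &mut vt)
    ///     .expect("backend did not converge");
    /// ```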
    fn svd_overwrite<L: Layout, Ls: Layout, Lu: Layout, Lvt: Layout>(
        &self,
        a: &mut DSlice<T, 2, L>,
        s: &mut DSlice<T, 2, Ls>,
        u: &mut DSlice<T, 2, Lu>,
        vt: &mut DSlice<T, 2, Lvt>,
    ) -> Result<(), SVDError> {
        svd_faer::<T, L, Ls, Lu, Lvt>(a, s, Some(u), Some(vt))
    }

    /// Compute only singular values, overwriting existing matrix
    fn svd_overwrite_s<L: Layout, Ls: Layout>(
        &self,
        a: &mut DSlice<T, 2, L>,
        s: &mut DSlice<T, 2, Ls>,
    ) -> Result<(), SVDError> {
        svd_faer::<T, L, Ls, Dense, Dense>(a, s, None, None)
    }
}