mdarray_linalg/testing/lu/mod.rs

use approx::assert_relative_eq;
use num_complex::ComplexFloat;

use super::common::{naive_matmul, random_matrix};
use crate::prelude::*;
use crate::{identity, pretty_print, transpose_in_place};
use mdarray::{DSlice, DTensor, Dense, tensor};

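/// Checks that `P * A == L * U` entrywise, using the naive matmul from the
/// shared test helpers. The comparison is done in `f64` via the `f64: From<T>`
/// bound so the same check works for every scalar type under test.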
pub fn test_lu_reconstruction<T>(
    a: &DTensor<T, 2>,
    l: &DTensor<T, 2>,
    u: &DTensor<T, 2>,
    p: &DTensor<T, 2>,
) where
    T: Default
        + ComplexFloat
        + std::fmt::Debug
        + Copy
        + std::ops::Mul<Output = T>
        + std::ops::Add<Output = T>
        + std::ops::Sub<Output = T>,
    f64: From<T>,
{
    let (n, m) = *a.shape();

    let pa = naive_matmul(p, a);
    let lu = naive_matmul(l, u);

    for i in 0..n {
        for j in 0..m {
            let diff = f64::from(pa[[i, j]]) - f64::from(lu[[i, j]]);
            assert_relative_eq!(diff, 0.0, epsilon = 1e-10);
        }
    }
}

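/// Decomposes a fixed 2 x 2 matrix and verifies that the returned factors
/// reconstruct the original input (the `println!`/`pretty_print` calls are
/// kept for debugging the backend under test).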
pub fn test_lu_decomposition(bd: &impl LU<f64>) {
    let mut a = tensor![
        [0.16931568150114162, 0.5524301997803323],
        [0.10477204466703971, 0.33895423448188766]
    ];

    let original_a = a.clone();

    let (l, u, p) = bd.lu(&mut a);

    println!("{a:?}");
    pretty_print(&a);
    pretty_print(&l);
    pretty_print(&u);

    test_lu_reconstruction(&original_a, &l, &u, &p);
}

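/// Same reconstruction check for a tall rectangular (5 x 3) random matrix.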
pub fn test_lu_decomposition_rectangular(bd: &impl LU<f64>) {
    let n = 5;
    let m = 3;
    let mut a = random_matrix(n, m);
    let original_a = a.clone();

    let (l, u, p) = bd.lu(&mut a);

    test_lu_reconstruction(&original_a, &l, &u, &p);
}

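/// Exercises the in-place variant: `L`, `U` and `P` are preallocated by the
/// caller and filled by `lu_overwrite`.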
pub fn test_lu_overwrite(bd: &impl LU<f64>) {
    let n = 4;
    let mut a = random_matrix(n, n);
    let original_a = a.clone();

    let mut l = DTensor::<f64, 2>::zeros([n, n]);
    let mut u = DTensor::<f64, 2>::zeros([n, n]);
    let mut p = DTensor::<f64, 2>::zeros([n, n]);

    bd.lu_overwrite(&mut a, &mut l, &mut u, &mut p);

    test_lu_reconstruction(&original_a, &l, &u, &p);
}

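/// In-place variant for a 5 x 3 matrix: `L` is n x min(n, m), `U` is
/// min(n, m) x m, and `P` stays n x n.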
pub fn test_lu_overwrite_rectangular(bd: &impl LU<f64>) {
    let n = 5;
    let m = 3;
    let mut a = random_matrix(n, m);
    let original_a = a.clone();

    let mut l = DTensor::<f64, 2>::zeros([n, std::cmp::min(n, m)]);
    let mut u = DTensor::<f64, 2>::zeros([std::cmp::min(n, m), m]);
    let mut p = DTensor::<f64, 2>::zeros([n, n]);

    bd.lu_overwrite(&mut a, &mut l, &mut u, &mut p);

    test_lu_reconstruction(&original_a, &l, &u, &p);
}

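/// Inverts a random 4 x 4 matrix and checks that `A * A^-1` is the identity.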
pub fn test_inverse(bd: &impl LU<f64>) {
    let n = 4;
    let a = random_matrix(n, n);
    let a_inv = bd.inv(&mut a.clone()).unwrap();

    let product = naive_matmul(&a, &a_inv);

    for i in 0..n {
        for j in 0..n {
            let expected = if i == j { 1.0 } else { 0.0 };
            assert_relative_eq!(product[[i, j]], expected, epsilon = 1e-10);
        }
    }
}

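/// In-place inversion: after `inv_overwrite`, `a` holds the inverse, so
/// multiplying it by the saved copy of the original must give the identity.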
pub fn test_inverse_overwrite(bd: &impl LU<f64>) {
    let n = 4;
    let mut a = random_matrix(n, n);
    let a_clone = a.clone();
    let _ = bd.inv_overwrite(&mut a);

    let product = naive_matmul(&a, &a_clone);

    for i in 0..n {
        for j in 0..n {
            let expected = if i == j { 1.0 } else { 0.0 };
            assert_relative_eq!(product[[i, j]], expected, epsilon = 1e-10);
        }
    }
}

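/// Attempts to invert the all-ones matrix, which is singular (rank 1); as the
/// name suggests, this is meant to be run under `#[should_panic]` by the
/// backend-specific tests.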
pub fn test_inverse_singular_should_panic(bd: &impl LU<f64>) {
    let n = 4;
    let mut a = DTensor::<f64, 2>::from_elem([n, n], 1.);
    let _ = bd.inv(&mut a);
}

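/// Compares the backend determinant against the brute-force Leibniz expansion
/// computed by `det_permutations`.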
pub fn test_determinant(bd: &impl LU<f64>) {
    let n = 4;
    let a = random_matrix(n, n);

    let d = bd.det(&mut a.clone());

    assert_relative_eq!(det_permutations(&a), d, epsilon = 1e-6);
}

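/// Sanity check: the determinant of the 3 x 3 identity matrix is 1.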
pub fn test_determinant_dummy(bd: &impl LU<f64>) {
    let a = identity(3);
    let d = bd.det(&mut a.clone());
    println!("{d}");
    assert_relative_eq!(1., d, epsilon = 1e-6);
}

use itertools::Itertools;

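/// Reference determinant computed with the Leibniz formula:
/// det(A) = Σ_σ sgn(σ) · Π_i A[i, σ(i)], summing over all permutations σ.
/// The sign of each permutation comes from counting its inversions, so this
/// runs in O(n! · n²) and is only intended for the tiny matrices used in
/// these tests.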
pub fn det_permutations<T>(a: &DSlice<T, 2, Dense>) -> T
where
    T: ComplexFloat
        + std::ops::Add<Output = T>
        + std::ops::Sub<Output = T>
        + std::ops::Mul<Output = T>
        + Copy
        + Default,
{
    let (n, m) = *a.shape();
    assert_eq!(n, m, "Matrix must be square");

    let mut det = T::default();

    for perm in (0..n).permutations(n) {
        // Sign of the permutation: flip once per inversion (i < j with perm[i] > perm[j]).
        let mut sign = 1;
        for i in 0..n {
            for j in i + 1..n {
                if perm[i] > perm[j] {
                    sign = -sign;
                }
            }
        }

        // Product of the entries selected by this permutation.
        let mut prod = T::one();
        for i in 0..n {
            prod = prod * a[[i, perm[i]]];
        }

        if sign == 1 {
            det = det + prod;
        } else {
            det = det - prod;
        }
    }
    det
}

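/// Builds the symmetric matrix `A + A^T` with `n` added to the diagonal; the
/// boosted diagonal makes it positive definite for the small random matrices
/// used in these tests, so it is safe to feed into the Cholesky checks.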
pub fn random_positive_definite_matrix(n: usize) -> DTensor<f64, 2> {
    let a = random_matrix(n, n);
    let mut a_t = a.clone();
    transpose_in_place(&mut a_t);
    let mut b = a + a_t;
    for i in 0..n {
        b[[i, i]] += n as f64;
    }
    b
}

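/// Checks that `L * L^T == A` entrywise. Only the lower triangle of the
/// returned factor is read, so backends that leave unspecified values above
/// the diagonal still pass.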
pub fn test_cholesky_reconstruction<T>(a: &DTensor<T, 2>, l: &DTensor<T, 2>)
where
    T: Default
        + ComplexFloat
        + std::fmt::Debug
        + Copy
        + std::ops::Mul<Output = T>
        + std::ops::Add<Output = T>
        + std::ops::Sub<Output = T>,
    f64: From<T>,
{
    let (n, m) = *a.shape();
    assert_eq!(n, m, "Matrix must be square");

    // Lower-triangular part of the returned factor.
    let mut ln = DTensor::<T, 2>::zeros([n, n]);
    for i in 0..n {
        for j in 0..n {
            if i >= j {
                ln[[i, j]] = l[[i, j]];
            }
        }
    }

    // Its transpose, built from the same lower triangle.
    let mut lt = DTensor::<T, 2>::zeros([n, m]);
    for i in 0..n {
        for j in 0..m {
            if i <= j {
                lt[[i, j]] = l[[j, i]];
            }
        }
    }

    let llt = naive_matmul(&ln, &lt);

    for i in 0..n {
        for j in 0..m {
            let diff = f64::from(a[[i, j]]) - f64::from(llt[[i, j]]);
            assert_relative_eq!(diff, 0.0, epsilon = 1e-10);
        }
    }
}

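/// Factors a random positive definite matrix and verifies the `L * L^T`
/// reconstruction.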
pub fn test_cholesky_decomposition(bd: &impl LU<f64>) {
    let n = 4;
    let mut a = random_positive_definite_matrix(n);
    let original_a = a.clone();

    let l = bd.choleski(&mut a).unwrap();

    println!("{l:?}");

    test_cholesky_reconstruction(&original_a, &l);
}

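/// In-place variant: `choleski_overwrite` stores the factor directly in the
/// input matrix, which must then pass the same reconstruction check.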
pub fn test_cholesky_overwrite(bd: &impl LU<f64>) {
    let n = 4;
    let a = random_positive_definite_matrix(n);
    let original_a = a.clone();
    let mut a_copy = a.clone();

    let l = bd.choleski(&mut a_copy.clone()).unwrap();
    bd.choleski_overwrite(&mut a_copy).unwrap();

    println!("{l:?}");
    println!("{a_copy:?}");

    test_cholesky_reconstruction(&original_a, &a_copy);
}

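/// A matrix with a negative diagonal cannot be positive definite, so the
/// factorization must return an error.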
pub fn test_cholesky_not_positive_definite(bd: &impl LU<f64>) {
    let n = 4;
    let mut a = DTensor::<f64, 2>::zeros([n, n]);

    for i in 0..n {
        for j in 0..n {
            if i == j {
                a[[i, j]] = -1.0;
            } else {
                a[[i, j]] = 0.1;
            }
        }
    }

    let result = bd.choleski(&mut a);
    assert!(result.is_err());
}

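/// The Cholesky factor of the identity matrix is the identity itself.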
pub fn test_cholesky_identity_matrix(bd: &impl LU<f64>) {
    let n = 4;
    let mut a = DTensor::<f64, 2>::zeros([n, n]);

    for i in 0..n {
        a[[i, i]] = 1.0;
    }

    let l = bd.choleski(&mut a).unwrap();

    for i in 0..n {
        for j in 0..n {
            let expected = if i == j { 1.0 } else { 0.0 };
            assert_relative_eq!(l[[i, j]], expected, epsilon = 1e-14);
        }
    }
}