// oxilean_std/linear_algebra/types.rs
use super::functions::*;
/// A heap-allocated dense vector of `f64` components.
///
/// The dimension is implicitly `data.len()`; the companion `impl`
/// treats the vector as an element of R^n.
#[derive(Clone, Debug)]
pub struct DenseVector {
    // The vector components, in order.
    pub data: Vec<f64>,
}
12impl DenseVector {
13 pub fn zero(n: usize) -> Self {
15 Self { data: vec![0.0; n] }
16 }
17 pub fn dim(&self) -> usize {
19 self.data.len()
20 }
21 pub fn dot(&self, other: &DenseVector) -> f64 {
23 self.data
24 .iter()
25 .zip(other.data.iter())
26 .map(|(a, b)| a * b)
27 .sum()
28 }
29 pub fn norm_sq(&self) -> f64 {
31 self.dot(self)
32 }
33 pub fn norm(&self) -> f64 {
35 self.norm_sq().sqrt()
36 }
37 pub fn normalize(&self) -> Option<DenseVector> {
39 let n = self.norm();
40 if n < 1e-12 {
41 return None;
42 }
43 Some(DenseVector {
44 data: self.data.iter().map(|x| x / n).collect(),
45 })
46 }
47 pub fn add(&self, other: &DenseVector) -> Option<DenseVector> {
49 if self.dim() != other.dim() {
50 return None;
51 }
52 Some(DenseVector {
53 data: self
54 .data
55 .iter()
56 .zip(other.data.iter())
57 .map(|(a, b)| a + b)
58 .collect(),
59 })
60 }
61 pub fn scale(&self, s: f64) -> DenseVector {
63 DenseVector {
64 data: self.data.iter().map(|x| x * s).collect(),
65 }
66 }
67 pub fn apply(mat: &DenseMatrix, v: &DenseVector) -> Option<DenseVector> {
69 if mat.cols != v.dim() {
70 return None;
71 }
72 let mut result = vec![0.0; mat.rows];
73 for i in 0..mat.rows {
74 for j in 0..mat.cols {
75 result[i] += mat.data[i][j] * v.data[j];
76 }
77 }
78 Some(DenseVector { data: result })
79 }
80}
/// A sparse matrix in compressed sparse row (CSR) format.
///
/// Row `r` owns the entries `values[row_ptr[r]..row_ptr[r + 1]]`, whose
/// column positions sit at the same indices of `col_indices`.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct SparseMatrix {
    // Number of rows.
    pub rows: usize,
    // Number of columns.
    pub cols: usize,
    // Per-row offsets into `col_indices`/`values`; length `rows + 1`.
    pub row_ptr: Vec<usize>,
    // Column index of each stored entry.
    pub col_indices: Vec<usize>,
    // Value of each stored entry.
    pub values: Vec<f64>,
}

#[allow(dead_code)]
impl SparseMatrix {
    /// Creates an empty (all-zero) `rows x cols` matrix.
    pub fn new(rows: usize, cols: usize) -> Self {
        Self {
            rows,
            cols,
            row_ptr: vec![0; rows + 1],
            col_indices: vec![],
            values: vec![],
        }
    }

    /// Builds a CSR matrix from coordinate-format `(row, col, value)` entries.
    ///
    /// Entries may arrive in any order: each one is scattered into the slot
    /// range its row owns, preserving the relative order of entries within a
    /// row. (The previous implementation copied `col_indices`/`values` in raw
    /// input order, which produced a corrupt CSR layout unless the entries
    /// were already sorted by row.)
    pub fn from_coo(rows: usize, cols: usize, entries: &[(usize, usize, f64)]) -> Self {
        // Count entries per row, then prefix-sum into row offsets.
        let mut row_ptr = vec![0usize; rows + 1];
        for &(r, _, _) in entries {
            row_ptr[r + 1] += 1;
        }
        for i in 0..rows {
            row_ptr[i + 1] += row_ptr[i];
        }
        // Scatter each entry into the next free slot of its row.
        let mut next = row_ptr.clone();
        let mut col_indices = vec![0usize; entries.len()];
        let mut values = vec![0.0f64; entries.len()];
        for &(r, c, v) in entries {
            let slot = next[r];
            col_indices[slot] = c;
            values[slot] = v;
            next[r] += 1;
        }
        Self {
            rows,
            cols,
            row_ptr,
            col_indices,
            values,
        }
    }

    /// Number of explicitly stored (possibly zero-valued) entries.
    pub fn nnz(&self) -> usize {
        self.values.len()
    }

    /// Value at `(r, c)`; returns `0.0` for positions with no stored entry.
    pub fn get(&self, r: usize, c: usize) -> f64 {
        let start = self.row_ptr[r];
        let end = self.row_ptr[r + 1];
        for idx in start..end {
            if self.col_indices[idx] == c {
                return self.values[idx];
            }
        }
        0.0
    }

    /// Matrix-vector product; `None` when `x` has the wrong length.
    pub fn matvec(&self, x: &[f64]) -> Option<Vec<f64>> {
        if x.len() != self.cols {
            return None;
        }
        let mut y = vec![0.0f64; self.rows];
        for r in 0..self.rows {
            for idx in self.row_ptr[r]..self.row_ptr[r + 1] {
                y[r] += self.values[idx] * x[self.col_indices[idx]];
            }
        }
        Some(y)
    }

    /// Transposed copy of `self`, with each row's entries sorted by column.
    pub fn transpose(&self) -> SparseMatrix {
        let mut entries: Vec<(usize, usize, f64)> = Vec::with_capacity(self.nnz());
        for r in 0..self.rows {
            for idx in self.row_ptr[r]..self.row_ptr[r + 1] {
                // Swap (row, col) so columns of self become rows of the result.
                entries.push((self.col_indices[idx], r, self.values[idx]));
            }
        }
        entries.sort_by_key(|&(r, c, _)| (r, c));
        SparseMatrix::from_coo(self.cols, self.rows, &entries)
    }

    /// Squared Frobenius norm (sum of squares of stored values).
    pub fn norm_sq(&self) -> f64 {
        self.values.iter().map(|v| v * v).sum()
    }
}
/// Thin (reduced) QR factorization `A = Q * R` of an m x n matrix with
/// `m >= n`, as produced by `QRDecomposition::compute`.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct QRDecomposition {
    // m x n factor built from Householder reflectors applied to the
    // leading columns of the identity.
    pub q: DenseMatrix,
    // n x n upper-triangular factor.
    pub r: DenseMatrix,
}
#[allow(dead_code)]
impl QRDecomposition {
    /// Computes the thin QR factorization of `a` via Householder reflections.
    ///
    /// Returns `None` when `a` has more columns than rows (`m < n`).
    /// A column whose remaining sub-column is numerically zero is skipped
    /// by recording a zero reflector, so rank-deficient inputs do not
    /// panic; `R` may then carry (near-)zeros on its diagonal.
    pub fn compute(a: &DenseMatrix) -> Option<QRDecomposition> {
        let m = a.rows;
        let n = a.cols;
        if m < n {
            return None;
        }
        // Working copy of A; reflectors are applied in place so its first
        // n rows end up upper-triangular.
        let mut mat = a.data.clone();
        // Householder vectors, one per column; vs[k] has length m - k.
        let mut vs: Vec<Vec<f64>> = Vec::new();
        for k in 0..n {
            // Sub-column of A at and below the diagonal.
            let mut x: Vec<f64> = (k..m).map(|i| mat[i][k]).collect();
            let norm: f64 = x.iter().map(|xi| xi * xi).sum::<f64>().sqrt();
            if norm < 1e-14 {
                // Column already annihilated: record an identity reflector.
                vs.push(vec![0.0; m - k]);
                continue;
            }
            // Shift by sign(x[0]) * ||x|| to avoid cancellation.
            x[0] += if x[0] >= 0.0 { norm } else { -norm };
            let v_norm: f64 = x.iter().map(|xi| xi * xi).sum::<f64>().sqrt();
            let v: Vec<f64> = x.iter().map(|xi| xi / v_norm).collect();
            // Apply H = I - 2 v v^T to the trailing columns k..n.
            for j in k..n {
                let dot: f64 = v.iter().enumerate().map(|(i, vi)| vi * mat[k + i][j]).sum();
                for i in 0..(m - k) {
                    mat[k + i][j] -= 2.0 * v[i] * dot;
                }
            }
            vs.push(v);
        }
        // R = upper-triangular part of the first n rows of the reduced matrix.
        let mut r_data = vec![vec![0.0; n]; n];
        for i in 0..n {
            for j in i..n {
                r_data[i][j] = mat[i][j];
            }
        }
        let r = DenseMatrix {
            rows: n,
            cols: n,
            data: r_data,
        };
        // Accumulate the thin Q by applying the reflectors in reverse order
        // to the first n columns of the m x m identity.
        let mut q_data = vec![vec![0.0; n]; m];
        for j in 0..n {
            q_data[j][j] = 1.0;
        }
        for k in (0..n).rev() {
            let v = &vs[k];
            let len = v.len();
            for j in 0..n {
                let dot: f64 = v
                    .iter()
                    .enumerate()
                    .map(|(i, vi)| vi * q_data[k + i][j])
                    .sum();
                for i in 0..len {
                    q_data[k + i][j] -= 2.0 * v[i] * dot;
                }
            }
        }
        let q = DenseMatrix {
            rows: m,
            cols: n,
            data: q_data,
        };
        Some(QRDecomposition { q, r })
    }

    /// Solves `R x = Q^T b` by back-substitution (the least-squares /
    /// linear solve associated with this factorization).
    ///
    /// Returns `None` when `b` has the wrong length or a diagonal entry
    /// of `R` is numerically zero (rank-deficient system).
    pub fn solve(&self, b: &[f64]) -> Option<Vec<f64>> {
        let m = self.q.rows;
        let n = self.q.cols;
        if b.len() != m {
            return None;
        }
        // qtb = Q^T b.
        let mut qtb = vec![0.0f64; n];
        for j in 0..n {
            qtb[j] = (0..m).map(|i| self.q.data[i][j] * b[i]).sum();
        }
        // Back-substitution on the upper-triangular R, bottom row first.
        let mut x = vec![0.0f64; n];
        for i in (0..n).rev() {
            let mut s = qtb[i];
            for j in (i + 1)..n {
                s -= self.r.data[i][j] * x[j];
            }
            if self.r.data[i][i].abs() < 1e-14 {
                return None;
            }
            x[i] = s / self.r.data[i][i];
        }
        Some(x)
    }
}
/// A dense row-major matrix of `f64` values (`data[r][c]`).
#[derive(Clone, Debug)]
pub struct DenseMatrix {
    // Number of rows.
    pub rows: usize,
    // Number of columns.
    pub cols: usize,
    // Row-major storage: data[r] is row r with `cols` entries.
    pub data: Vec<Vec<f64>>,
}

impl DenseMatrix {
    /// All-zero `rows x cols` matrix.
    pub fn zero(rows: usize, cols: usize) -> Self {
        DenseMatrix {
            rows,
            cols,
            data: vec![vec![0.0; cols]; rows],
        }
    }

    /// `n x n` identity matrix.
    pub fn identity(n: usize) -> Self {
        let mut out = Self::zero(n, n);
        for (i, row) in out.data.iter_mut().enumerate() {
            row[i] = 1.0;
        }
        out
    }

    /// Entry at row `r`, column `c` (panics when out of bounds).
    pub fn get(&self, r: usize, c: usize) -> f64 {
        self.data[r][c]
    }

    /// Overwrites the entry at row `r`, column `c`.
    pub fn set(&mut self, r: usize, c: usize, v: f64) {
        self.data[r][c] = v;
    }

    /// Entry-wise sum; `None` when the shapes differ.
    pub fn add(&self, other: &DenseMatrix) -> Option<DenseMatrix> {
        if self.rows != other.rows || self.cols != other.cols {
            return None;
        }
        let mut sum = Self::zero(self.rows, self.cols);
        for r in 0..self.rows {
            for c in 0..self.cols {
                sum.data[r][c] = self.data[r][c] + other.data[r][c];
            }
        }
        Some(sum)
    }

    /// Matrix product `self * other`; `None` when the inner dimensions
    /// do not line up.
    pub fn mul(&self, other: &DenseMatrix) -> Option<DenseMatrix> {
        if self.cols != other.rows {
            return None;
        }
        let mut prod = Self::zero(self.rows, other.cols);
        for r in 0..self.rows {
            for c in 0..other.cols {
                // Accumulate over k in ascending order (same summation
                // order as a k-outer loop, so results match exactly).
                let mut acc = 0.0;
                for k in 0..self.cols {
                    acc += self.data[r][k] * other.data[k][c];
                }
                prod.data[r][c] = acc;
            }
        }
        Some(prod)
    }

    /// Returns `self` with every entry multiplied by `s`.
    pub fn scale(&self, s: f64) -> DenseMatrix {
        let mut scaled = self.clone();
        for row in scaled.data.iter_mut() {
            for entry in row.iter_mut() {
                *entry *= s;
            }
        }
        scaled
    }

    /// Transposed copy of `self`.
    pub fn transpose(&self) -> DenseMatrix {
        let mut t = Self::zero(self.cols, self.rows);
        for (r, row) in self.data.iter().enumerate() {
            for (c, &val) in row.iter().enumerate() {
                t.data[c][r] = val;
            }
        }
        t
    }

    /// Sum of the diagonal entries (rectangular matrices use the
    /// shorter dimension).
    pub fn trace(&self) -> f64 {
        let mut acc = 0.0;
        for i in 0..self.rows.min(self.cols) {
            acc += self.data[i][i];
        }
        acc
    }

    /// Squared Frobenius norm (sum of squares of all entries).
    pub fn norm_sq(&self) -> f64 {
        let mut acc = 0.0;
        for row in &self.data {
            for x in row {
                acc += x * x;
            }
        }
        acc
    }

    /// Determinant via Gaussian elimination with partial pivoting;
    /// `None` for non-square matrices. A pivot smaller than `1e-12`
    /// in magnitude is treated as exact singularity (`Some(0.0)`).
    pub fn det(&self) -> Option<f64> {
        if self.rows != self.cols {
            return None;
        }
        let n = self.rows;
        // Closed forms for the trivial sizes.
        match n {
            0 => return Some(1.0),
            1 => return Some(self.data[0][0]),
            2 => {
                return Some(
                    self.data[0][0] * self.data[1][1] - self.data[0][1] * self.data[1][0],
                )
            }
            _ => {}
        }
        let mut work = self.data.clone();
        let mut sign = 1.0_f64;
        for col in 0..n {
            // Partial pivoting: the largest |entry| in this column.
            let mut best_row = col;
            let mut best_abs = work[col][col].abs();
            for row in (col + 1)..n {
                let a = work[row][col].abs();
                if a > best_abs {
                    best_abs = a;
                    best_row = row;
                }
            }
            if best_abs < 1e-12 {
                return Some(0.0);
            }
            if best_row != col {
                work.swap(col, best_row);
                sign = -sign; // each row swap flips the determinant's sign
            }
            // Eliminate below the pivot.
            let pivot = work[col][col];
            for row in (col + 1)..n {
                let factor = work[row][col] / pivot;
                for k in col..n {
                    let delta = work[col][k] * factor;
                    work[row][k] -= delta;
                }
            }
        }
        // Determinant of the triangular result, with the swap sign.
        let diag: f64 = (0..n).map(|i| work[i][i]).product();
        Some(sign * diag)
    }

    /// Reduced row-echelon form together with the matrix rank.
    ///
    /// Pivots are the first entries with magnitude at least `1e-12` in
    /// each column (no partial pivoting here).
    pub fn row_echelon(&self) -> (DenseMatrix, usize) {
        let mut work = self.data.clone();
        let mut rank = 0;
        let mut col = 0;
        while rank < self.rows && col < self.cols {
            // Find the first usable pivot at or below row `rank`.
            let mut prow = rank;
            while prow < self.rows && work[prow][col].abs() < 1e-12 {
                prow += 1;
            }
            if prow == self.rows {
                // Entire (sub-)column is zero: advance to the next column.
                col += 1;
                continue;
            }
            if prow != rank {
                work.swap(rank, prow);
            }
            // Scale the pivot row so the pivot becomes 1.
            let pivot = work[rank][col];
            for j in col..self.cols {
                work[rank][j] /= pivot;
            }
            // Clear the pivot column from every other row.
            for i in 0..self.rows {
                if i != rank && work[i][col].abs() > 1e-12 {
                    let factor = work[i][col];
                    for j in col..self.cols {
                        let delta = work[rank][j] * factor;
                        work[i][j] -= delta;
                    }
                }
            }
            rank += 1;
            col += 1;
        }
        (
            DenseMatrix {
                rows: self.rows,
                cols: self.cols,
                data: work,
            },
            rank,
        )
    }

    /// Rank of the matrix (pivot count of the reduced row-echelon form).
    pub fn rank(&self) -> usize {
        self.row_echelon().1
    }

    /// Whether the matrix equals its transpose within `1e-12` per entry.
    pub fn is_symmetric(&self) -> bool {
        if self.rows != self.cols {
            return false;
        }
        self.data.iter().enumerate().all(|(i, row)| {
            row.iter()
                .enumerate()
                .all(|(j, &v)| (v - self.data[j][i]).abs() <= 1e-12)
        })
    }

    /// Solves the square system `self * x = b` by Gauss-Jordan
    /// elimination with partial pivoting on an augmented matrix.
    ///
    /// Returns `None` for a non-square system, a mismatched `b` length,
    /// or a (numerically) singular matrix.
    pub fn solve(&self, b: &[f64]) -> Option<Vec<f64>> {
        if self.rows != self.cols || self.rows != b.len() {
            return None;
        }
        let n = self.rows;
        // Augment each row with the corresponding entry of b.
        let mut aug: Vec<Vec<f64>> = Vec::with_capacity(n);
        for (row, &bi) in self.data.iter().zip(b) {
            let mut extended = row.clone();
            extended.push(bi);
            aug.push(extended);
        }
        for col in 0..n {
            // Partial pivoting: the largest |entry| in the current column.
            let mut best_row = col;
            let mut best_abs = aug[col][col].abs();
            for row in (col + 1)..n {
                let a = aug[row][col].abs();
                if a > best_abs {
                    best_abs = a;
                    best_row = row;
                }
            }
            if best_abs < 1e-12 {
                return None; // singular, or numerically indistinguishable
            }
            if best_row != col {
                aug.swap(col, best_row);
            }
            // Normalize the pivot row, then clear the column elsewhere.
            let pivot = aug[col][col];
            for j in col..=n {
                aug[col][j] /= pivot;
            }
            for row in 0..n {
                if row != col && aug[row][col].abs() > 1e-12 {
                    let factor = aug[row][col];
                    for j in col..=n {
                        let delta = aug[col][j] * factor;
                        aug[row][j] -= delta;
                    }
                }
            }
        }
        // The solution sits in the augmented column.
        Some(aug.iter().map(|row| row[n]).collect())
    }
}
/// A banded matrix stored by diagonals.
///
/// `kl` sub-diagonals and `ku` super-diagonals are kept; entry `(r, c)`
/// lives at `band[(c - r) + kl][c]` whenever the offset `c - r` lies
/// inside `[-kl, ku]`.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct BandedMatrix {
    // Number of rows.
    pub rows: usize,
    // Number of columns.
    pub cols: usize,
    // Number of sub-diagonals (below the main diagonal).
    pub kl: usize,
    // Number of super-diagonals (above the main diagonal).
    pub ku: usize,
    // Diagonal storage: kl + ku + 1 rows of `cols` entries each.
    pub band: Vec<Vec<f64>>,
}

#[allow(dead_code)]
impl BandedMatrix {
    /// All-zero banded matrix with the given shape and bandwidths.
    pub fn zero(rows: usize, cols: usize, kl: usize, ku: usize) -> Self {
        BandedMatrix {
            rows,
            cols,
            kl,
            ku,
            band: vec![vec![0.0; cols]; kl + ku + 1],
        }
    }

    /// Entry at `(r, c)`; positions outside the band read as `0.0`.
    pub fn get(&self, r: usize, c: usize) -> f64 {
        let diag = c as isize - r as isize;
        if diag < -(self.kl as isize) || diag > self.ku as isize {
            0.0
        } else {
            self.band[(diag + self.kl as isize) as usize][c]
        }
    }

    /// Writes `v` at `(r, c)`; panics when the position lies outside the band.
    pub fn set(&mut self, r: usize, c: usize, v: f64) {
        let diag = c as isize - r as isize;
        assert!(
            diag >= -(self.kl as isize) && diag <= self.ku as isize,
            "Index outside band"
        );
        self.band[(diag + self.kl as isize) as usize][c] = v;
    }

    /// Matrix-vector product restricted to the band; `None` when `x`
    /// has the wrong length.
    pub fn matvec(&self, x: &[f64]) -> Option<Vec<f64>> {
        if x.len() != self.cols {
            return None;
        }
        let mut y = vec![0.0f64; self.rows];
        for (r, out) in y.iter_mut().enumerate() {
            // Only columns inside the band can contribute to this row.
            let lo = r.saturating_sub(self.kl);
            let hi = (r + self.ku + 1).min(self.cols);
            for c in lo..hi {
                *out += self.get(r, c) * x[c];
            }
        }
        Some(y)
    }

    /// Main-diagonal entries, up to the shorter dimension.
    pub fn diagonal(&self) -> Vec<f64> {
        let n = self.rows.min(self.cols);
        (0..n).map(|i| self.get(i, i)).collect()
    }
}
/// Output of the Lanczos iteration (`LanczosResult::compute`): a Krylov
/// basis plus the scalar recurrence coefficients of the projected
/// tridiagonal matrix.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct LanczosResult {
    // Lanczos basis vectors v_0 .. (unit vectors produced by the recurrence).
    pub basis: Vec<DenseVector>,
    // Diagonal coefficients: alpha_j = v_j . (A v_j).
    pub alpha: Vec<f64>,
    // Off-diagonal coefficients (residual norms); intended to hold one
    // fewer entry than `alpha` so `tridiagonal()` stays in bounds.
    pub beta: Vec<f64>,
}
602#[allow(dead_code)]
603impl LanczosResult {
604 pub fn compute(a: &DenseMatrix, v0: &DenseVector, k: usize) -> Option<LanczosResult> {
606 if a.rows != a.cols || a.rows != v0.dim() || k == 0 {
607 return None;
608 }
609 let n = a.rows;
610 let mut basis: Vec<DenseVector> = Vec::with_capacity(k + 1);
611 let mut alpha = Vec::with_capacity(k);
612 let mut beta: Vec<f64> = Vec::with_capacity(k);
613 let v0_norm = v0.norm();
614 if v0_norm < 1e-14 {
615 return None;
616 }
617 let v_init = v0.scale(1.0 / v0_norm);
618 basis.push(v_init);
619 let mut w_prev = DenseVector::zero(n);
620 for j in 0..k {
621 let vj = &basis[j];
622 let av = DenseVector::apply(a, vj)?;
623 let aj = vj.dot(&av);
624 alpha.push(aj);
625 let mut w_data = vec![0.0f64; n];
626 for i in 0..n {
627 w_data[i] = av.data[i] - aj * vj.data[i];
628 if j > 0 {
629 w_data[i] -= beta[j - 1] * w_prev.data[i];
630 }
631 }
632 let w = DenseVector { data: w_data };
633 let bj = w.norm();
634 if j + 1 < k {
635 beta.push(bj);
636 if bj < 1e-14 {
637 break;
638 }
639 let v_next = w.scale(1.0 / bj);
640 w_prev = basis[j].clone();
641 basis.push(v_next);
642 }
643 }
644 Some(LanczosResult { basis, alpha, beta })
645 }
646 pub fn tridiagonal(&self) -> DenseMatrix {
648 let n = self.alpha.len();
649 let mut t = DenseMatrix::zero(n, n);
650 for i in 0..n {
651 t.data[i][i] = self.alpha[i];
652 }
653 for i in 0..self.beta.len() {
654 t.data[i][i + 1] = self.beta[i];
655 t.data[i + 1][i] = self.beta[i];
656 }
657 t
658 }
659}