1use std::fmt::{Display, Formatter, Result as FmtResult};
2use std::ops::{Add, Div, Index, IndexMut, Mul, Sub};
/// A dynamically-sized, dense vector of `f64` components.
///
/// The single tuple field holds the components in order; all vector
/// operations in this module are defined on this wrapper.
#[derive(Debug, Clone)]
pub struct Vector(Vec<f64>);
12
13
/// Builds a [`Vector`] from a bracketed list of numeric literals,
/// e.g. `vector!([1, 2.5, 3])`. Each element is cast to `f64`.
#[macro_export]
macro_rules! vector {
    ([ $($x:expr),* $(,)* ]) => {{
        // `$crate` resolves to this crate both internally and from
        // downstream crates; the previous hard-coded `rs_sci::` path
        // failed to resolve inside the defining crate itself.
        $crate::linear::Vector::new(vec![ $($x as f64),* ])
    }};
}
21
22impl Vector {
23 pub fn new(data: Vec<f64>) -> Self {
24 Self(data)
25 }
26
27 pub fn zeros(size: usize) -> Self {
29 Self(vec![0.0; size])
30 }
31
32 pub fn ones(size: usize) -> Self {
34 Self(vec![1.0; size])
35 }
36
37 pub fn dimension(&self) -> usize {
39 self.0.len()
40 }
41
42 pub fn magnitude(&self) -> f64 {
44 self.0.iter().map(|x| x * x).sum::<f64>().sqrt()
45 }
46
47 pub fn normalize(&self) -> Self {
49 let mag = self.magnitude();
50 Self::new(self.0.iter().map(|x| x / mag).collect())
51 }
52
53 pub fn dot(&self, rhs: &Vector) -> Result<f64, &'static str> {
55 if self.dimension() != rhs.dimension() {
56 return Err("Vectors must have same dimension for dot product");
57 }
58 Ok(self.0.iter().zip(rhs.0.iter()).map(|(a, b)| a * b).sum())
59 }
60
61 pub fn cross(&self, other: &Vector) -> Result<Vector, &'static str> {
63 if self.dimension() != 3 || other.dimension() != 3 {
64 return Err("Cross product is only defined for 3D vectors");
65 }
66 Ok(Vector::new(vec![
67 self.0[1] * other.0[2] - self.0[2] * other.0[1],
68 self.0[2] * other.0[0] - self.0[0] * other.0[2],
69 self.0[0] * other.0[1] - self.0[1] * other.0[0],
70 ]))
71 }
72
73 pub fn transpose(&self) -> Matrix {
74 Matrix::new(vec![self.0.clone()]).unwrap()
75 }
76
77 pub fn as_column_matrix(&self) -> Matrix {
78 Matrix::new(self.0.iter().map(|&x| vec![x]).collect()).unwrap()
79 }
80}
81
82impl Add for &Vector {
83 type Output = Result<Vector, &'static str>;
84
85 fn add(self, other: &Vector) -> Self::Output {
86 if self.dimension() != other.dimension() {
87 return Err("Vectors must have same dimension for addition");
88 }
89 Ok(Vector::new(
90 self.0
91 .iter()
92 .zip(other.0.iter())
93 .map(|(a, b)| a + b)
94 .collect(),
95 ))
96 }
97}
98
99impl Sub for Vector {
100 type Output = Result<Self, &'static str>;
101
102 fn sub(self, other: Self) -> Self::Output {
103 if self.dimension() != other.dimension() {
104 return Err("Vectors must have same dimension for subtraction");
105 }
106 Ok(Self::new(
107 self.0
108 .iter()
109 .zip(other.0.iter())
110 .map(|(a, b)| a - b)
111 .collect(),
112 ))
113 }
114}
115
116impl Mul<f64> for Vector {
117 type Output = Self;
118
119 fn mul(self, scalar: f64) -> Self {
120 Self::new(self.0.iter().map(|x| x * scalar).collect())
121 }
122}
123
124impl Div<f64> for Vector {
125 type Output = Self;
126
127 fn div(self, scalar: f64) -> Self {
128 if scalar == 0.0 {
129 panic!("Division by zero");
130 }
131 Self::new(self.0.iter().map(|x| x / scalar).collect())
132 }
133}
134
impl Index<usize> for Vector {
    type Output = f64;

    /// Read access to component `index`; panics when out of bounds
    /// (delegates to the inner `Vec` indexing).
    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index]
    }
}
142
impl IndexMut<usize> for Vector {
    /// Mutable access to component `index`; panics when out of bounds.
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.0[index]
    }
}
148
149impl Display for Vector {
150 fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
151 write!(f, "[")?;
152 for (i, val) in self.0.iter().enumerate() {
153 if i > 0 {
154 write!(f, ", ")?;
155 }
156 write!(f, "{:.6}", val)?;
157 }
158 write!(f, "]")
159 }
160}
161
/// A dense, row-major matrix of `f64` values.
#[derive(Debug, Clone)]
pub struct Matrix {
    // Row-major storage: `data[i][j]` is the element at row `i`, column `j`.
    pub data: Vec<Vec<f64>>,
    // Cached row count; `Matrix::new` keeps this equal to `data.len()`.
    pub rows: usize,
    // Cached column count; `Matrix::new` enforces equal-length rows.
    pub cols: usize,
}
168
/// Builds a [`Matrix`] from bracketed rows, e.g. `matrix!([[1, 2], [3, 4]])`,
/// or a single row via `matrix!([1, 2, 3])`. Elements are cast to `f64`.
///
/// # Panics
/// Panics (via `unwrap`) when the rows have unequal lengths or the
/// literal is empty, since `Matrix::new` rejects such data.
#[macro_export]
macro_rules! matrix {
    ([ $( [ $($x:expr),* $(,)* ] ),* $(,)* ]) => {{
        let data = vec![ $( vec![ $($x as f64),* ] ),* ];
        // `$crate::…` keeps the expansion valid wherever the exported
        // macro is invoked; the previous bare `Matrix` only resolved
        // where `Matrix` happened to be in scope.
        $crate::linear::Matrix::new(data).unwrap()
    }};

    ([ $($x:expr),* $(,)* ]) => {{
        let data = vec![vec![ $($x as f64),* ]];
        $crate::linear::Matrix::new(data).unwrap()
    }};
}
181
182impl Matrix {
183 pub fn new(data: Vec<Vec<f64>>) -> Result<Self, &'static str> {
184 if data.is_empty() {
185 return Err("Matrix cannot be empty");
186 }
187 let rows = data.len();
188 let cols = data[0].len();
189 if !data.iter().all(|row| row.len() == cols) {
190 return Err("All rows must have same length");
191 }
192 Ok(Self { data, rows, cols })
193 }
194
195 pub fn zeros(rows: usize, cols: usize) -> Self {
196 Self {
197 data: vec![vec![0.0; cols]; rows],
198 rows,
199 cols,
200 }
201 }
202
203 pub fn identity(size: usize) -> Self {
204 let mut data = vec![vec![0.0; size]; size];
205 for i in 0..size {
206 data[i][i] = 1.0;
207 }
208 Self {
209 data,
210 rows: size,
211 cols: size,
212 }
213 }
214
215 pub fn transpose(&self) -> Self {
216 let mut result = vec![vec![0.0; self.rows]; self.cols];
217 for i in 0..self.rows {
218 for j in 0..self.cols {
219 result[j][i] = self.data[i][j];
220 }
221 }
222 Self {
223 data: result,
224 rows: self.cols,
225 cols: self.rows,
226 }
227 }
228
229 pub fn determinant(&self) -> Result<f64, &'static str> {
230 if self.rows != self.cols {
231 return Err("Determinant only defined for square matrices");
232 }
233 match self.rows {
234 1 => Ok(self.data[0][0]),
235 2 => Ok(self.data[0][0] * self.data[1][1] - self.data[0][1] * self.data[1][0]),
236 _ => {
237 let mut det = 0.0;
238 for j in 0..self.cols {
239 det += self.data[0][j] * self.cofactor(0, j)?;
240 }
241 Ok(det)
242 }
243 }
244 }
245
246 fn cofactor(&self, row: usize, col: usize) -> Result<f64, &'static str> {
247 let minor = self.minor(row, col)?;
248 Ok(minor * if (row + col) % 2 == 0 { 1.0 } else { -1.0 })
249 }
250
251 fn minor(&self, row: usize, col: usize) -> Result<f64, &'static str> {
252 let submatrix = self.submatrix(row, col)?;
253 submatrix.determinant()
254 }
255
256 fn submatrix(&self, row: usize, col: usize) -> Result<Matrix, &'static str> {
257 let mut result = Vec::new();
258 for i in 0..self.rows {
259 if i == row {
260 continue;
261 }
262 let mut new_row = Vec::new();
263 for j in 0..self.cols {
264 if j == col {
265 continue;
266 }
267 new_row.push(self.data[i][j]);
268 }
269 result.push(new_row);
270 }
271 Matrix::new(result)
272 }
273
274 pub fn inverse(&self) -> Result<Matrix, &'static str> {
275 let det = self.determinant()?;
276 if det == 0.0 {
277 return Err("Matrix is not invertible");
278 }
279
280 let mut result = vec![vec![0.0; self.cols]; self.rows];
281 for i in 0..self.rows {
282 for j in 0..self.cols {
283 result[j][i] = self.cofactor(i, j)? / det;
284 }
285 }
286
287 Ok(Self {
288 data: result,
289 rows: self.rows,
290 cols: self.cols,
291 })
292 }
293
294 pub fn svd(
295 &self,
296 max_iter: usize,
297 tolerance: f64,
298 ) -> Result<(Matrix, Vector, Matrix), &'static str> {
299 let ata = self.transpose() * self.clone();
301 let (v, s) = ata?.eigendecomposition(max_iter, tolerance)?;
302
303 let singular_values = s.iter().map(|&x| x.sqrt()).collect();
305 let sigma = Vector::new(singular_values);
306
307 let mut u_cols = Vec::new();
309 for i in 0..v.cols {
310 let v_i = v.get_column(i);
311 let u_i = (self * &v_i)? / sigma[i];
312 u_cols.push(u_i.0);
313 }
314
315 let u = Matrix::new(u_cols.into_iter().map(|col| col).collect())?;
316 Ok((u, sigma, v))
317 }
318
319 pub fn eigendecomposition(
320 &self,
321 max_iter: usize,
322 tolerance: f64,
323 ) -> Result<(Matrix, Vec<f64>), &'static str> {
324 if self.rows != self.cols {
325 return Err("Matrix must be square for eigendecomposition");
326 }
327
328 let n = self.rows;
329 let mut eigenvectors = Vec::new();
330 let mut eigenvalues = Vec::new();
331 let mut working_matrix = self.clone();
332
333 for _ in 0..n {
334 let (eigenval, eigenvec) = working_matrix.power_iteration(max_iter, tolerance)?;
335 eigenvalues.push(eigenval);
336 eigenvectors.push(eigenvec.clone().0);
337
338 working_matrix = self.deflate(&eigenvec, eigenval)?;
340 }
341
342 Ok((Matrix::new(eigenvectors)?, eigenvalues))
343 }
344
345 fn deflate(&self, eigenvector: &Vector, eigenvalue: f64) -> Result<Matrix, &'static str> {
346 let n = self.rows;
347 let mut result = self.clone();
348
349 for i in 0..n {
350 for j in 0..n {
351 result.data[i][j] -= eigenvalue * eigenvector[i] * eigenvector[j];
352 }
353 }
354
355 Ok(result)
356 }
357
358 pub fn cholesky(&self) -> Result<Matrix, &'static str> {
359 if self.rows != self.cols {
360 return Err("Matrix must be square for Cholesky decomposition");
361 }
362
363 let n = self.rows;
364 let mut l = Matrix::zeros(n, n);
365
366 for i in 0..n {
367 for j in 0..=i {
368 let mut sum = 0.0;
369
370 if j == i {
371 for k in 0..j {
372 sum += l.data[j][k] * l.data[j][k];
373 }
374 let val = self.data[j][j] - sum;
375 if val <= 0.0 {
376 return Err("Matrix is not positive definite");
377 }
378 l.data[j][j] = val.sqrt();
379 } else {
380 for k in 0..j {
381 sum += l.data[i][k] * l.data[j][k];
382 }
383 l.data[i][j] = (self.data[i][j] - sum) / l.data[j][j];
384 }
385 }
386 }
387
388 Ok(l)
389 }
390 pub fn is_symmetric(&self) -> bool {
391 if self.rows != self.cols {
392 return false;
393 }
394
395 for i in 0..self.rows {
396 for j in 0..i {
397 if (self.data[i][j] - self.data[j][i]).abs() > 1e-10 {
398 return false;
399 }
400 }
401 }
402 true
403 }
404
405 pub fn is_positive_definite(&self) -> bool {
406 self.cholesky().is_ok()
407 }
408
409 pub fn trace(&self) -> Result<f64, &'static str> {
410 if self.rows != self.cols {
411 return Err("Matrix must be square to compute trace");
412 }
413
414 Ok((0..self.rows).map(|i| self.data[i][i]).sum())
415 }
416
417 pub fn frobenius_norm(&self) -> f64 {
418 self.data
419 .iter()
420 .flat_map(|row| row.iter())
421 .map(|&x| x * x)
422 .sum::<f64>()
423 .sqrt()
424 }
425}
426
impl Index<(usize, usize)> for Matrix {
    type Output = f64;

    /// Read access by `(row, col)` pair; panics when out of bounds.
    fn index(&self, index: (usize, usize)) -> &Self::Output {
        &self.data[index.0][index.1]
    }
}
434
impl IndexMut<(usize, usize)> for Matrix {
    /// Mutable access by `(row, col)` pair; panics when out of bounds.
    fn index_mut(&mut self, index: (usize, usize)) -> &mut Self::Output {
        &mut self.data[index.0][index.1]
    }
}
440
impl Index<usize> for Matrix {
    type Output = Vec<f64>;

    /// Read access to an entire row; panics when out of bounds.
    fn index(&self, index: usize) -> &Self::Output {
        &self.data[index]
    }
}
448
impl IndexMut<usize> for Matrix {
    /// Mutable access to an entire row; panics when out of bounds.
    /// NOTE(review): callers can resize the returned row and desync
    /// `cols` — the cached dimensions are not re-validated here.
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.data[index]
    }
}
454
455impl Add for &Matrix {
456 type Output = Result<Matrix, &'static str>;
457
458 fn add(self, other: &Matrix) -> Self::Output {
459 if self.rows != other.rows || self.cols != other.cols {
460 return Err("Matrices must have same dimensions for addition");
461 }
462 let mut result = vec![vec![0.0; self.cols]; self.rows];
463 for i in 0..self.rows {
464 for j in 0..self.cols {
465 result[i][j] = self.data[i][j] + other.data[i][j];
466 }
467 }
468 Matrix::new(result)
469 }
470}
471
472impl Sub for Matrix {
473 type Output = Result<Self, &'static str>;
474
475 fn sub(self, other: Self) -> Self::Output {
476 if self.rows != other.rows || self.cols != other.cols {
477 return Err("Matrices must have same dimensions for subtraction");
478 }
479 let mut result = vec![vec![0.0; self.cols]; self.rows];
480 for i in 0..self.rows {
481 for j in 0..self.cols {
482 result[i][j] = self.data[i][j] - other.data[i][j];
483 }
484 }
485 Matrix::new(result)
486 }
487}
488
489impl Mul for Matrix {
490 type Output = Result<Self, &'static str>;
491
492 fn mul(self, other: Self) -> Self::Output {
493 if self.cols != other.rows {
494 return Err("Invalid matrix dimensions for multiplication");
495 }
496 let mut result = vec![vec![0.0; other.cols]; self.rows];
497 for i in 0..self.rows {
498 for j in 0..other.cols {
499 for k in 0..self.cols {
500 result[i][j] += self.data[i][k] * other.data[k][j];
501 }
502 }
503 }
504 Matrix::new(result)
505 }
506}
507
508impl Mul<f64> for Matrix {
509 type Output = Self;
510
511 fn mul(self, scalar: f64) -> Self {
512 Self {
513 data: self
514 .data
515 .iter()
516 .map(|row| row.iter().map(|&x| x * scalar).collect())
517 .collect(),
518 rows: self.rows,
519 cols: self.cols,
520 }
521 }
522}
523
524impl Div<f64> for Matrix {
525 type Output = Self;
526
527 fn div(self, scalar: f64) -> Self {
528 if scalar == 0.0 {
529 panic!("Division by zero");
530 }
531 Self {
532 data: self
533 .data
534 .iter()
535 .map(|row| row.iter().map(|&x| x / scalar).collect())
536 .collect(),
537 rows: self.rows,
538 cols: self.cols,
539 }
540 }
541}
542
543impl Mul<&Vector> for &Matrix {
544 type Output = Result<Vector, &'static str>;
545
546 fn mul(self, vector: &Vector) -> Self::Output {
547 if self.cols != vector.dimension() {
548 return Err("Matrix columns must match vector dimension");
549 }
550 let mut result = vec![0.0; self.rows];
551 for i in 0..self.rows {
552 for j in 0..self.cols {
553 result[i] += self.data[i][j] * vector.0[j];
554 }
555 }
556 Ok(Vector::new(result))
557 }
558}
559
560impl Display for Matrix {
561 fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
562 for i in 0..self.rows {
563 write!(f, "[")?;
564 for j in 0..self.cols {
565 if j > 0 {
566 write!(f, ", ")?;
567 }
568 write!(f, "{:.6}", self.data[i][j])?;
569 }
570 writeln!(f, "]")?;
571 }
572 Ok(())
573 }
574}
575
576impl Matrix {
577 pub fn lu_decomposition(&self) -> Result<(Matrix, Matrix), &'static str> {
579 if self.rows != self.cols {
580 return Err("Matrix must be square for LU decomposition");
581 }
582
583 let n = self.rows;
584 let mut l = vec![vec![0.0; n]; n];
585 let mut u = vec![vec![0.0; n]; n];
586
587 for i in 0..n {
588 for k in i..n {
590 let mut sum = 0.0;
591 for j in 0..i {
592 sum += l[i][j] * u[j][k];
593 }
594 u[i][k] = self.data[i][k] - sum;
595 }
596
597 for k in i..n {
599 if i == k {
600 l[i][i] = 1.0;
601 } else {
602 let mut sum = 0.0;
603 for j in 0..i {
604 sum += l[k][j] * u[j][i];
605 }
606 l[k][i] = (self.data[k][i] - sum) / u[i][i];
607 }
608 }
609 }
610
611 Ok((Matrix::new(l)?, Matrix::new(u)?))
612 }
613
614 pub fn qr_decomposition(&self) -> Result<(Matrix, Matrix), &'static str> {
616 if self.rows < self.cols {
617 return Err("Matrix must have at least as many rows as columns");
618 }
619
620 let n = self.cols;
621 let mut q = vec![vec![0.0; n]; self.rows];
622 let mut r = vec![vec![0.0; n]; n];
623
624 for j in 0..n {
625 let mut v = self.get_column(j);
626
627 for i in 0..j {
628 let q_col = Matrix::from_column(&q, i);
629 r[i][j] = q_col.dot(&self.get_column(j))?;
630 let m = v - q_col * r[i][j];
631 v = m?
632 }
633
634 r[j][j] = v.magnitude();
635 if r[j][j] != 0.0 {
636 let q_col = v / r[j][j];
637 for i in 0..self.rows {
638 q[i][j] = q_col[i];
639 }
640 }
641 }
642
643 Ok((Matrix::new(q)?, Matrix::new(r)?))
644 }
645
646 pub fn rank(&self) -> Result<usize, &'static str> {
648 let tolerance = 1e-10;
649 let (_, r) = self.qr_decomposition()?;
650
651 let mut rank = 0;
652 for i in 0..std::cmp::min(r.rows, r.cols) {
653 if r.data[i][i].abs() > tolerance {
654 rank += 1;
655 }
656 }
657
658 Ok(rank)
659 }
660
661 pub fn power_iteration(
662 &self,
663 max_iter: usize,
664 tolerance: f64,
665 ) -> Result<(f64, Vector), &'static str> {
666 if self.rows != self.cols {
667 return Err("Matrix must be square for eigenvalue calculation");
668 }
669
670 let n = self.rows;
671 let mut v = Vector::ones(n).normalize();
673 let mut lambda_old = 0.0;
674
675 for _ in 0..max_iter {
676 let av = self.multiply_vector(&v)?;
678
679 let lambda = v.dot(&av)?;
681
682 if (lambda - lambda_old).abs() < tolerance {
683 return Ok((lambda, v));
684 }
685
686 lambda_old = lambda;
687 v = av.normalize();
688 }
689
690 Err("Power iteration did not converge")
691 }
692
693 pub fn multiply_vector(&self, v: &Vector) -> Result<Vector, &'static str> {
694 if self.cols != v.dimension() {
695 return Err("Matrix columns must match vector dimension");
696 }
697
698 let mut result = vec![0.0; self.rows];
699 for i in 0..self.rows {
700 for j in 0..self.cols {
701 result[i] += self.data[i][j] * v[j];
702 }
703 }
704 Ok(Vector::new(result))
705 }
706
707 fn get_column(&self, j: usize) -> Vector {
709 Vector::new((0..self.rows).map(|i| self.data[i][j]).collect())
710 }
711
712 fn from_column(data: &[Vec<f64>], j: usize) -> Vector {
713 Vector::new((0..data.len()).map(|i| data[i][j]).collect())
714 }
715}
716
// NOTE(review): these methods duplicate the inherent `Vector` methods of
// the same names; inherent methods take precedence, so the trait impl is
// only reachable through a `VectorOps` trait bound.
#[allow(unused)]
trait VectorOps {
    /// Dot product; errors when dimensions differ.
    fn dot(&self, other: &Self) -> Result<f64, &'static str>;
    /// Euclidean (L2) norm.
    fn magnitude(&self) -> f64;
    /// Unit vector in the same direction.
    fn normalize(&self) -> Self;
}
723
724impl VectorOps for Vector {
725 fn dot(&self, other: &Self) -> Result<f64, &'static str> {
726 if self.0.len() != other.0.len() {
727 return Err("Vectors must have same dimension for dot product");
728 }
729 Ok(self.0.iter().zip(other.0.iter()).map(|(a, b)| a * b).sum())
730 }
731
732 fn magnitude(&self) -> f64 {
733 self.0.iter().map(|x| x * x).sum::<f64>().sqrt()
734 }
735
736 fn normalize(&self) -> Self {
737 let mag = self.magnitude();
738 Self::new(self.0.iter().map(|x| x / mag).collect())
739 }
740}