// matrixlab/matrix/dense.rs

//
//    matrixlab, a library for working with sparse matrices
//    Copyright (C) 2019 Waylon Cude
//
//    This program is free software: you can redistribute it and/or modify
//    it under the terms of the GNU General Public License as published by
//    the Free Software Foundation, either version 3 of the License, or
//    (at your option) any later version.
//
//    This program is distributed in the hope that it will be useful,
//    but WITHOUT ANY WARRANTY; without even the implied warranty of
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//    GNU General Public License for more details.
//
//    You should have received a copy of the GNU General Public License
//    along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

19use std::ops::{Mul,Add,Sub};
20use crate::matrix::MatrixElement;
21use crate::vector::{Vector,VectorTrait,FloatVectorTrait};
22use crate::error::Error;
23use rayon::prelude::*;
24
/// A dense matrix stored column-major: `columns[j]` is the j-th column
/// of the matrix. The matrix is assumed well formed, i.e. every column
/// has the same length (see `num_rows`, which only inspects the first
/// column).
#[derive(PartialEq,Debug)]
pub struct DenseMatrix<A> {
    // Column-major storage; all columns are assumed to be equal length.
    columns: Vec<Vector<A>>
}
29impl<A> DenseMatrix<A> {
30    /// Creates a matrix from a vector of columns
31    pub fn new(columns: Vec<Vector<A>>) -> DenseMatrix<A> {
32        DenseMatrix {
33            columns
34        }
35    }
36    pub fn add_column(&mut self, column: Vector<A>) {
37        self.columns.push(column);
38    }
39    pub fn num_rows(&self) -> usize {
40        // We assume the matrix is well formed so we just look
41        // at the number of rows in the first column
42        match self.columns.get(0) {
43            Some(column) => column.len(),
44            None => 0
45        }
46    }
47    pub fn num_columns(&self) -> usize {
48        self.columns.len()
49    }
50
51}
52//Maybe todo: should this be copy?
53impl<A: Clone> DenseMatrix<A> {
54    pub fn transpose(&self) -> DenseMatrix<A> {
55        // Set up the columns for our new matrix
56        let mut columns = Vec::with_capacity(self.num_rows());
57        for _ in 0 .. self.num_rows() {
58            columns.push(Vec::with_capacity(self.num_columns()));
59        }
60
61        // Set up the elements of the columns of our new array
62        for column in self.columns.iter() {
63            for (i,entry) in column.iter().enumerate() {
64                columns[i].push(entry.clone());
65            }
66        }
67
68        DenseMatrix::new(columns)
69    }
70    /// Creates a matrix from a vector of rows
71    pub fn from_rows(rows: Vec<Vector<A>>) -> DenseMatrix<A> {
72        DenseMatrix {
73            columns: rows
74        }.transpose()
75    }
76}
77
78//TODO: Make this generic?
79impl<A: MatrixElement + Mul<Output=A> + Add<Output=A> + Sub<Output=A> + Default> DenseMatrix<A> {
80    pub fn scale(&self, other: &A) -> DenseMatrix<A> {
81        let columns = self.columns.par_iter()
82            .map_with(other, 
83                      |&mut o, column| column.iter()
84                                             .map(|e| *o**e)
85                                             .collect())
86            .collect();
87        DenseMatrix::new(columns)
88    }
89    pub fn vec_mul(&self, other: &Vector<A>) -> Result<Vector<A>,Error> {
90        // If the size of the vector doesn't match the number of
91        // columns then error out
92        if other.len() != self.columns.len() {
93            return Err(Error::SizeMismatch);
94        }
95        Ok(self.columns.par_iter()
96            .zip(other.par_iter())
97            // Is there any way to make this a normal iterator
98            // and still be able to flatten?
99            // Is collecting it slow?
100            .map(|(column,scale)| column.iter()
101                                        .map(|x| *x**scale)
102                                        .collect::<Vec<A>>())
103            .reduce(
104                || [Default::default()]
105                   .into_iter()
106                   .cycle()
107                   .take(self.num_rows())
108                   .cloned()
109                   .collect(),
110                |x,y| x.add(&y)))
111            //.collect()
112    }
113    pub fn safe_mul(&self, other: &DenseMatrix<A>) -> Result<DenseMatrix<A>,Error> {
114        if self.num_columns() != other.num_rows() {
115            return Err(Error::SizeMismatch);
116        }
117
118        let new_cols = other.columns.par_iter()
119            .map_with(self, |&mut s, col| s.vec_mul(col))
120            .collect::<Result<Vec<Vec<A>>,Error>>()?;
121
122        Ok(DenseMatrix::new(new_cols))
123        
124    }
125}
126impl DenseMatrix<f64> {
127    /// This takes an upper triangular matrix, and solves it to
128    /// equal b
129    pub fn backsolve(&self, b: &Vector<f64>) -> Vector<f64> {
130        // Start off with a copy of b, to modify to create our solutions
131        let mut solutions: Vec<f64> = b.clone();
132        // Start with the last column
133        for (i,column) in self.columns.iter().rev().enumerate() {
134            //Normalize our last element
135            let last_element = b.len()-1-i;
136            solutions[last_element] /= column[last_element];
137            //And skip i elements because they're all zero
138            //But we have to reverse the list first
139            for (j,element) in column.iter().rev().skip(1+i).enumerate() {
140                //Move up b as we iterate
141                let last_element = b.len()-1-i;
142                //And move up b as we go up each column
143                //This probably won't overflow
144                //TODO ^ figure out if this is exploitable
145                let current_element = last_element - 1-j;
146                solutions[current_element] -= solutions[last_element] * element;                                            
147            }
148        }
149        solutions
150    }
151    /// This solves for B*y = r
152    pub fn least_squares(&self, r: &Vector<f64>) -> Result<Vector<f64>,Error> {
153        //Solve for Q, for our QR factorization
154        let q = self.factor_q();
155        let q_transpose = q.transpose();
156        let rhs = q_transpose.vec_mul(&r)?;
157        let r = q_transpose.safe_mul(self).expect("Error in least squares, multiplication failed");;
158
159        //Now solve for Ra = rhs, then return a
160        Ok(r.backsolve(&rhs))
161    }
162    pub fn factor_q(&self) -> DenseMatrix<f64> {
163        let mut q_vectors: Vec<Vector<f64>> = Vec::with_capacity(self.num_columns());
164        for column in self.columns.iter() {
165            let mut maybe_q = column.clone();
166            for orthogonal in q_vectors.iter() {
167                let c = column.inner(orthogonal);
168                // subtract c_n * q_n
169                maybe_q = maybe_q.sub(&orthogonal.scale(c));
170            }
171            let q = maybe_q.normalize();
172            q_vectors.push(q);
173        }
174
175        DenseMatrix::new(q_vectors)
176    }
177    /// Returns a new vector, orthogonal to all vectors currently in the
178    /// array and to the other vector
179    pub fn orthogonal_to(&self,other: &Vector<f64>) -> Vector<f64> {
180        let mut final_vec = other.clone();
181        for column in self.columns.iter() {
182            final_vec = final_vec.sub(&column.scale(other.inner(column)));
183        }
184        final_vec
185    }
186}