use std::ops::{Add, Sub, Mul, Neg, Div};
pub mod iterators;
pub mod slices;
use crate::matrices::iterators::{
ColumnIterator, RowIterator, ColumnMajorIterator,
ColumnReferenceIterator, RowReferenceIterator, ColumnMajorReferenceIterator};
use crate::matrices::slices::Slice2D;
use crate::numeric::{Numeric, NumericRef};
use crate::linear_algebra;
/// A dense matrix of some type `T`.
///
/// The constructors and mutating methods maintain the invariant that the
/// matrix always has at least one row and one column, and that every row
/// has the same length.
#[derive(Debug)]
pub struct Matrix<T> {
    // Row major storage: data[row][column].
    data: Vec<Vec<T>>
}
/// A row index into a matrix.
pub type Row = usize;
/// A column index into a matrix.
pub type Column = usize;
impl <T> Matrix<T> {
    /// Creates a 1x1 matrix holding a single value.
    pub fn unit(value: T) -> Matrix<T> {
        Matrix {
            data: vec![vec![value]]
        }
    }

    /// Creates a 1xN row matrix from a list of values.
    pub fn row(values: Vec<T>) -> Matrix<T> {
        Matrix {
            data: vec![values]
        }
    }

    /// Creates an Nx1 column matrix from a list of values.
    pub fn column(values: Vec<T>) -> Matrix<T> {
        Matrix {
            data: values.into_iter().map(|x| vec![x]).collect()
        }
    }

    /// Creates a matrix from a list of rows of values.
    ///
    /// # Panics
    ///
    /// Panics if there are no rows, the first row has no columns, or the
    /// rows are not all the same length.
    pub fn from(values: Vec<Vec<T>>) -> Matrix<T> {
        assert!(!values.is_empty(), "No rows defined");
        assert!(!values[0].is_empty(), "No column defined");
        assert!(values.iter().map(|x| x.len()).all(|x| x == values[0].len()), "Inconsistent size");
        Matrix {
            data: values
        }
    }

    /// Returns the size of this matrix as (rows, columns).
    pub fn size(&self) -> (Row, Column) {
        (self.data.len(), self.data[0].len())
    }

    /// Returns the number of rows in this matrix.
    pub fn rows(&self) -> Row {
        self.data.len()
    }

    /// Returns the number of columns in this matrix.
    ///
    /// Only the first row is inspected; the rectangular invariant guarantees
    /// all rows share its length.
    pub fn columns(&self) -> Column {
        self.data[0].len()
    }

    /// Returns a reference to the element at this row and column.
    ///
    /// # Panics
    ///
    /// Panics if either index is out of bounds.
    pub fn get_reference(&self, row: Row, column: Column) -> &T {
        assert!(row < self.rows(), "Row out of index");
        assert!(column < self.columns(), "Column out of index");
        &self.data[row][column]
    }

    /// Overwrites the element at this row and column with the given value.
    ///
    /// # Panics
    ///
    /// Panics if either index is out of bounds.
    pub fn set(&mut self, row: Row, column: Column, value: T) {
        assert!(row < self.rows(), "Row out of index");
        assert!(column < self.columns(), "Column out of index");
        self.data[row][column] = value;
    }

    /// Removes the given row, shrinking the matrix by one row.
    ///
    /// # Panics
    ///
    /// Panics if this matrix has only one row (the matrix must stay
    /// non-empty), or if `row` is out of bounds (via `Vec::remove`).
    pub fn remove_row(&mut self, row: Row) {
        assert!(self.rows() > 1);
        self.data.remove(row);
    }

    /// Removes the given column from every row, shrinking the matrix by
    /// one column.
    ///
    /// # Panics
    ///
    /// Panics if this matrix has only one column (the matrix must stay
    /// non-empty), or if `column` is out of bounds (via `Vec::remove`).
    pub fn remove_column(&mut self, column: Column) {
        assert!(self.columns() > 1);
        for row in 0..self.rows() {
            self.data[row].remove(column);
        }
    }

    /// Returns an iterator over references to the elements in the given column.
    pub fn column_reference_iter(&self, column: Column) -> ColumnReferenceIterator<T> {
        ColumnReferenceIterator::new(self, column)
    }

    /// Returns an iterator over references to the elements in the given row.
    pub fn row_reference_iter(&self, row: Row) -> RowReferenceIterator<T> {
        RowReferenceIterator::new(self, row)
    }

    /// Returns an iterator over references to every element, in column major
    /// order.
    pub fn column_major_reference_iter(&self) -> ColumnMajorReferenceIterator<T> {
        ColumnMajorReferenceIterator::new(self)
    }

    /// Retains in place only the elements accepted by the slice, removing
    /// every other element.
    ///
    /// Rows and columns are walked in reverse so that removals do not shift
    /// the indices of elements still to be visited; a row is dropped entirely
    /// once all of its columns have been removed.
    ///
    /// NOTE(review): if `slice.accepts` keeps different column sets for
    /// different rows the retained rows can end up with different lengths,
    /// breaking the rectangular invariant — presumably `Slice2D` only
    /// describes rectangular selections; verify against its constructors.
    ///
    /// # Panics
    ///
    /// Panics if the slice would remove every row or every column.
    pub fn retain_mut(&mut self, slice: Slice2D) {
        for row in (0..self.rows()).rev() {
            for column in (0..self.columns()).rev() {
                if !slice.accepts(row, column) {
                    self.data[row].remove(column);
                }
            }
            if self.data[row].is_empty() {
                self.data.remove(row);
            }
        }
        assert!(
            !self.data.is_empty(),
            "Provided slice must leave at least 1 row in the retained matrix");
        assert!(
            !self.data[0].is_empty(),
            "Provided slice must leave at least 1 column in the retained matrix");
    }
}
impl <T: Clone> Matrix<T> {
    /// Returns a new matrix which is the transpose of this one: element
    /// (i, j) of the result is element (j, i) of this matrix.
    pub fn transpose(&self) -> Matrix<T> {
        let mut result = Matrix::empty(self.get(0, 0), (self.columns(), self.rows()));
        for i in 0..result.rows() {
            for j in 0..result.columns() {
                // get already returns a clone, no extra clone is needed
                result.set(i, j, self.get(j, i));
            }
        }
        result
    }

    /// Transposes this matrix in place.
    ///
    /// Square matrices are transposed by swapping each element above the
    /// diagonal with its mirror image below, without reallocating. Non
    /// square matrices change dimensions under transposition, so their
    /// storage is rebuilt instead (the previous implementation panicked
    /// with an out of bounds index on non square input).
    pub fn transpose_mut(&mut self) {
        if self.rows() == self.columns() {
            for i in 0..self.rows() {
                // Start at i + 1 so each off diagonal pair is swapped exactly
                // once and the diagonal is left untouched.
                for j in (i + 1)..self.columns() {
                    let temp = self.get(i, j);
                    self.set(i, j, self.get(j, i));
                    self.set(j, i, temp);
                }
            }
        } else {
            self.data = self.transpose().data;
        }
    }

    /// Returns an iterator over the given column, cloning each element.
    pub fn column_iter(&self, column: Column) -> ColumnIterator<T> {
        ColumnIterator::new(self, column)
    }

    /// Returns an iterator over the given row, cloning each element.
    pub fn row_iter(&self, row: Row) -> RowIterator<T> {
        RowIterator::new(self, row)
    }

    /// Returns an iterator over every element in column major order,
    /// cloning each element.
    pub fn column_major_iter(&self) -> ColumnMajorIterator<T> {
        ColumnMajorIterator::new(self)
    }

    /// Creates a matrix of the given size with every element set to a clone
    /// of `value`.
    pub fn empty(value: T, size: (Row, Column)) -> Matrix<T> {
        Matrix {
            data: vec![vec![value; size.1]; size.0]
        }
    }

    /// Returns a clone of the element at this row and column.
    ///
    /// # Panics
    ///
    /// Panics if either index is out of bounds.
    pub fn get(&self, row: Row, column: Column) -> T {
        assert!(row < self.rows(), "Row out of index");
        assert!(column < self.columns(), "Column out of index");
        self.data[row][column].clone()
    }

    /// Returns the sole element of a 1x1 matrix.
    ///
    /// # Panics
    ///
    /// Panics if the matrix is not 1x1.
    pub fn scalar(&self) -> T {
        assert!(self.rows() == 1, "Cannot treat matrix as scalar as it has more than one row");
        assert!(self.columns() == 1, "Cannot treat matrix as scalar as it has more than one column");
        self.get(0, 0)
    }

    /// Applies a function to every element of this matrix in place.
    pub fn map_mut(&mut self, mapping_function: impl Fn(T) -> T) {
        for i in 0..self.rows() {
            for j in 0..self.columns() {
                // get already returns a clone, no extra clone is needed
                self.set(i, j, mapping_function(self.get(i, j)));
            }
        }
    }

    /// Applies a function to every element and its (row, column) index,
    /// updating this matrix in place.
    pub fn map_mut_with_index(&mut self, mapping_function: impl Fn(T, Row, Column) -> T) {
        for i in 0..self.rows() {
            for j in 0..self.columns() {
                self.set(i, j, mapping_function(self.get(i, j), i, j));
            }
        }
    }

    /// Creates a new matrix by applying a function to every element of
    /// this one.
    pub fn map<U>(&self, mapping_function: impl Fn(T) -> U) -> Matrix<U>
    where U: Clone {
        // Map the first element eagerly so the result can be allocated at
        // full size before the remaining elements are filled in.
        let first_value: U = mapping_function(self.get(0, 0));
        let mut mapped = Matrix::empty(first_value, self.size());
        for i in 0..self.rows() {
            for j in 0..self.columns() {
                mapped.set(i, j, mapping_function(self.get(i, j)));
            }
        }
        mapped
    }

    /// Creates a new matrix by applying a function to every element of this
    /// one along with its (row, column) index.
    pub fn map_with_index<U>(&self, mapping_function: impl Fn(T, Row, Column) -> U) -> Matrix<U>
    where U: Clone {
        let first_value: U = mapping_function(self.get(0, 0), 0, 0);
        let mut mapped = Matrix::empty(first_value, self.size());
        for i in 0..self.rows() {
            for j in 0..self.columns() {
                mapped.set(i, j, mapping_function(self.get(i, j), i, j));
            }
        }
        mapped
    }

    /// Inserts a new row at the given index, with every element set to a
    /// clone of `value`, shifting the existing rows down.
    ///
    /// # Panics
    ///
    /// Panics if `row > rows()` (via `Vec::insert`).
    pub fn insert_row(&mut self, row: Row, value: T) {
        let new_row = vec![value; self.columns()];
        self.data.insert(row, new_row);
    }

    /// Inserts a new row at the given index, filled with the first
    /// `columns()` values yielded by the iterator.
    ///
    /// # Panics
    ///
    /// Panics if the iterator yields fewer than `columns()` values, which
    /// would otherwise silently insert a short row and corrupt the matrix,
    /// or if `row > rows()` (via `Vec::insert`).
    pub fn insert_row_with<I>(&mut self, row: Row, values: I)
    where I: Iterator<Item = T> {
        let new_row: Vec<T> = values.take(self.columns()).collect();
        assert!(
            new_row.len() == self.columns(),
            "Iterator must yield at least {} values to fill the new row", self.columns());
        self.data.insert(row, new_row);
    }

    /// Inserts a new column at the given index, with every element set to a
    /// clone of `value`, shifting the existing columns right.
    ///
    /// # Panics
    ///
    /// Panics if `column > columns()` (via `Vec::insert`).
    pub fn insert_column(&mut self, column: Column, value: T) {
        for row in 0..self.rows() {
            self.data[row].insert(column, value.clone());
        }
    }

    /// Inserts a new column at the given index, filled with the first
    /// `rows()` values yielded by the iterator.
    ///
    /// # Panics
    ///
    /// Panics if the iterator yields fewer than `rows()` values, or if
    /// `column > columns()` (via `Vec::insert`).
    pub fn insert_column_with<I>(&mut self, column: Column, mut values: I)
    where I: Iterator<Item = T> {
        for row in 0..self.rows() {
            self.data[row].insert(
                column,
                values.next().expect("Iterator must yield a value for every row"));
        }
    }

    /// Returns a new matrix containing only the elements accepted by the
    /// slice.
    ///
    /// # Panics
    ///
    /// Panics if the slice would retain no rows or no columns.
    pub fn retain(&self, slice: Slice2D) -> Matrix<T> {
        let mut retained = self.clone();
        retained.retain_mut(slice);
        retained
    }
}
impl <T: Clone> Clone for Matrix<T> {
fn clone(&self) -> Self {
self.map(|element| element)
}
}
impl <T: Numeric> Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    /// Computes the determinant of this matrix by delegating to
    /// [`linear_algebra::determinant`]. Presumably `None` is returned when
    /// no determinant is defined (eg for a non square matrix) — see that
    /// function for the exact contract.
    pub fn determinant(&self) -> Option<T> {
        linear_algebra::determinant(self)
    }

    /// Computes the inverse of this matrix by delegating to
    /// [`linear_algebra::inverse`], returning `None` when no inverse
    /// exists — see that function for the exact contract.
    pub fn inverse(&self) -> Option<Matrix<T>>
    where T: Add<Output = T> + Mul<Output = T> + Sub<Output = T> + Div<Output = T> {
        linear_algebra::inverse(self)
    }

    /// Computes the covariance matrix of this matrix, treating each column
    /// as a feature, by delegating to
    /// [`linear_algebra::covariance_column_features`].
    pub fn covariance_column_features(&self) -> Matrix<T> {
        linear_algebra::covariance_column_features(self)
    }

    /// Computes the covariance matrix of this matrix, treating each row
    /// as a feature, by delegating to
    /// [`linear_algebra::covariance_row_features`].
    pub fn covariance_row_features(&self) -> Matrix<T> {
        linear_algebra::covariance_row_features(self)
    }
}
impl <T: Numeric> Matrix<T> {
    /// Creates a diagonal matrix of the given size, with clones of `value`
    /// on the diagonal and zero everywhere else.
    ///
    /// # Panics
    ///
    /// Panics if the requested size is not square (previously this assert
    /// carried no message, unlike the rest of the file's asserts).
    pub fn diagonal(value: T, size: (Row, Column)) -> Matrix<T> {
        assert!(
            size.0 == size.1,
            "Diagonal matrices must be square, requested size was {}x{}", size.0, size.1);
        let mut matrix = Matrix {
            data: vec![vec![T::zero(); size.1]; size.0]
        };
        for i in 0..size.0 {
            matrix.set(i, i, value.clone());
        }
        matrix
    }
}
impl <T: PartialEq> PartialEq for Matrix<T> {
    /// Two matrices are equal when they have the same size and every pair
    /// of corresponding elements compares equal.
    fn eq(&self, other: &Self) -> bool {
        // Vec's own PartialEq compares lengths before elements, so differing
        // row counts or row lengths short-circuit to false without inspecting
        // any element data.
        self.size() == other.size() && self.data == other.data
    }
}
impl <T: Numeric> Mul for &Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Matrix multiplication of two referenced matrices: an MxN matrix
    /// multiplied by an NxL matrix yields an MxL matrix.
    ///
    /// # Panics
    ///
    /// Panics if the left hand side's column count does not match the right
    /// hand side's row count.
    fn mul(self, rhs: Self) -> Self::Output {
        assert!(self.columns() == rhs.rows(),
            "Mismatched Matrices, left is {}x{}, right is {}x{}, * is only defined for MxN * NxL",
            self.rows(), self.columns(), rhs.rows(), rhs.columns());
        let mut product = Matrix::empty(self.get(0, 0), (self.rows(), rhs.columns()));
        for row in 0..product.rows() {
            for column in 0..product.columns() {
                // Each output element is the dot product of a row of the left
                // matrix with a column of the right matrix.
                let entry = self.row_reference_iter(row)
                    .zip(rhs.column_reference_iter(column))
                    .map(|(x, y)| x * y)
                    .sum();
                product.set(row, column, entry);
            }
        }
        product
    }
}
impl <T: Numeric> Mul for Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Matrix multiplication of two owned matrices, delegating to the
    /// reference implementation.
    fn mul(self, rhs: Self) -> Self::Output {
        &self * &rhs
    }
}
impl <T: Numeric> Mul<&Matrix<T>> for Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Matrix multiplication of an owned matrix by a referenced one,
    /// delegating to the reference implementation.
    fn mul(self, rhs: &Self) -> Self::Output {
        &self * rhs
    }
}
impl <T: Numeric> Mul<Matrix<T>> for &Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Matrix multiplication of a referenced matrix by an owned one,
    /// delegating to the reference implementation.
    fn mul(self, rhs: Matrix<T>) -> Self::Output {
        self * &rhs
    }
}
impl <T: Numeric> Add for &Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Elementwise addition of two referenced, equally sized matrices.
    ///
    /// # Panics
    ///
    /// Panics if the two matrices are not the same size.
    fn add(self, rhs: Self) -> Self::Output {
        assert!(self.size() == rhs.size(),
            "Mismatched Matrices, left is {}x{}, right is {}x{}, + is only defined for MxN + MxN",
            self.rows(), self.columns(), rhs.rows(), rhs.columns());
        let mut sum = Matrix::empty(self.get(0, 0), self.size());
        for row in 0..sum.rows() {
            for column in 0..sum.columns() {
                let element = self.get_reference(row, column) + rhs.get_reference(row, column);
                sum.set(row, column, element);
            }
        }
        sum
    }
}
impl <T: Numeric> Add for Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Elementwise addition of two owned matrices, delegating to the
    /// reference implementation.
    fn add(self, rhs: Self) -> Self::Output {
        &self + &rhs
    }
}
impl <T: Numeric> Add<&Matrix<T>> for Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Elementwise addition of an owned matrix and a referenced one,
    /// delegating to the reference implementation.
    fn add(self, rhs: &Self) -> Self::Output {
        &self + rhs
    }
}
impl <T: Numeric> Add<Matrix<T>> for &Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Elementwise addition of a referenced matrix and an owned one,
    /// delegating to the reference implementation.
    fn add(self, rhs: Matrix<T>) -> Self::Output {
        self + &rhs
    }
}
impl <T: Numeric> Sub for &Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Elementwise subtraction of two referenced, equally sized matrices.
    ///
    /// # Panics
    ///
    /// Panics if the two matrices are not the same size.
    fn sub(self, rhs: Self) -> Self::Output {
        assert!(self.size() == rhs.size(),
            "Mismatched Matrices, left is {}x{}, right is {}x{}, - is only defined for MxN - MxN",
            self.rows(), self.columns(), rhs.rows(), rhs.columns());
        let mut difference = Matrix::empty(self.get(0, 0), self.size());
        for row in 0..difference.rows() {
            for column in 0..difference.columns() {
                let element = self.get_reference(row, column) - rhs.get_reference(row, column);
                difference.set(row, column, element);
            }
        }
        difference
    }
}
impl <T: Numeric> Sub for Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Elementwise subtraction of two owned matrices, delegating to the
    /// reference implementation.
    fn sub(self, rhs: Self) -> Self::Output {
        &self - &rhs
    }
}
impl <T: Numeric> Sub<&Matrix<T>> for Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Elementwise subtraction of a referenced matrix from an owned one,
    /// delegating to the reference implementation.
    fn sub(self, rhs: &Self) -> Self::Output {
        &self - rhs
    }
}
impl <T: Numeric> Sub<Matrix<T>> for &Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Elementwise subtraction of an owned matrix from a referenced one,
    /// delegating to the reference implementation.
    fn sub(self, rhs: Matrix<T>) -> Self::Output {
        self - &rhs
    }
}
impl <T: Numeric> Neg for &Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Negates every element of the referenced matrix, producing a new
    /// matrix.
    fn neg(self) -> Self::Output {
        self.map(|v| -v)
    }
}
impl <T: Numeric> Neg for Matrix<T>
where for<'a> &'a T: NumericRef<T> {
    type Output = Matrix<T>;
    /// Negates every element of an owned matrix, delegating to the
    /// reference implementation.
    fn neg(self) -> Self::Output {
        - &self
    }
}