use ndarray::ArrayView2;
use num::{Float, Zero};
use std::iter::Sum;
use std::ops::Mul;
/// Dot product of two slices of equal length.
///
/// # Panics
/// Panics if `a` and `b` differ in length.
#[inline(always)]
pub fn inner_product<T>(a: &[T], b: &[T]) -> T
where
    T: Float + Sum<T> + Mul<T, Output = T>,
{
    assert!(a.len() == b.len());
    a.iter().zip(b.iter()).map(|(&x, &y)| x * y).sum()
}
/// The 1-norm (sum of absolute values) of `a`.
#[inline(always)]
pub fn norm1<T>(a: &[T]) -> T
where
    T: Float + Sum<T>,
{
    a.iter().map(|xi| xi.abs()).sum()
}
/// The Euclidean (2-)norm of `a`, computed as the square root of
/// [`norm2_squared`].
#[inline(always)]
pub fn norm2<T>(a: &[T]) -> T
where
    T: Float + Sum<T> + Mul<T, Output = T>,
{
    norm2_squared(a).sqrt()
}
/// Squared Euclidean norm of the difference `a - b`, i.e. `Σ (aᵢ - bᵢ)²`.
///
/// # Panics
/// Panics if `a` and `b` differ in length (consistent with
/// [`inner_product`] and [`norm_inf_diff`]; previously a length mismatch
/// was silently truncated by `zip`).
#[inline(always)]
pub fn norm2_squared_diff<T>(a: &[T], b: &[T]) -> T
where
    T: Float + Sum<T> + Mul<T, Output = T>,
{
    assert_eq!(a.len(), b.len());
    // map + sum matches the style of the sibling norms and removes the
    // need for the extra `AddAssign` bound (relaxing a bound is
    // backward-compatible for existing callers).
    a.iter()
        .zip(b.iter())
        .map(|(&x, &y)| (x - y).powi(2))
        .sum()
}
/// Squared Euclidean norm of `a`, i.e. `Σ aᵢ²`.
#[inline(always)]
pub fn norm2_squared<T>(a: &[T]) -> T
where
    T: Float + Sum<T> + Mul<T, Output = T>,
{
    a.iter().map(|&x| x * x).sum()
}
/// Sum of the elements of `a`.
#[inline(always)]
pub fn sum<T>(a: &[T]) -> T
where
    T: Float + Sum<T> + Mul<T, Output = T>,
{
    a.iter().copied().sum()
}
/// The infinity norm (largest absolute value) of `a`; zero for an
/// empty slice.
#[inline(always)]
pub fn norm_inf<T>(a: &[T]) -> T
where
    T: Float + Zero,
{
    // NOTE: `x.abs().max(acc)` (not `acc.max(x.abs())`) preserves the
    // original operand order of `Float::max`.
    a.iter()
        .map(|xi| xi.abs())
        .fold(T::zero(), |acc, x| x.max(acc))
}
/// The infinity norm of the difference `a - b`; zero for empty slices.
///
/// # Panics
/// Panics if `a` and `b` differ in length.
#[inline(always)]
pub fn norm_inf_diff<T>(a: &[T], b: &[T]) -> T
where
    T: Float + Zero,
{
    assert_eq!(a.len(), b.len());
    a.iter()
        .zip(b.iter())
        .map(|(&x, &y)| (x - y).abs())
        .fold(T::zero(), |acc, d| d.max(acc))
}
/// Returns `true` iff every element of `a` is finite (no NaN or ±∞).
/// An empty slice is considered finite.
#[inline(always)]
pub fn is_finite<T>(a: &[T]) -> bool
where
    T: Float,
{
    a.iter().all(|xi| xi.is_finite())
}
/// Errors returned by matrix operations in this module.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MatrixError {
    /// The provided data length does not match the declared dimensions.
    DimensionMismatch,
}
/// Computes the product `A · Aᵀ` for a `rows × cols` matrix `A` given in
/// row-major order, returning the `rows × rows` result in row-major order.
///
/// # Errors
/// Returns [`MatrixError::DimensionMismatch`] if `a.len() != rows * cols`.
pub fn mul_a_at<T: Float + 'static>(
    a: &[T],
    rows: usize,
    cols: usize,
) -> Result<Vec<T>, MatrixError> {
    if a.len() != rows * cols {
        return Err(MatrixError::DimensionMismatch);
    }
    let a_mat =
        ArrayView2::from_shape((rows, cols), a).map_err(|_| MatrixError::DimensionMismatch)?;
    let out = a_mat.dot(&a_mat.t());
    let (vec, offset) = out.into_raw_vec_and_offset();
    // `dot` yields a freshly-allocated standard-layout array, so the offset
    // is Some(0) — except for an empty result (rows == 0), where ndarray
    // reports None. The previous `debug_assert_eq!(offset, Some(0))` fired
    // in debug builds for that empty case.
    debug_assert!(matches!(offset, Some(0) | None));
    Ok(vec)
}
#[cfg(test)]
mod tests {
    use crate::*;

    #[test]
    fn t_inner_product_test() {
        // <1,2,3> · <1,2,3> = 1 + 4 + 9 = 14
        let ip = matrix_operations::inner_product(&[1.0, 2.0, 3.0], &[1.0, 2.0, 3.0]);
        unit_test_utils::assert_nearly_equal(14.0, ip, 1e-10, 1e-16, "inner product");
    }

    #[test]
    #[should_panic]
    fn t_inner_product_test_panic() {
        // Mismatched lengths must panic.
        matrix_operations::inner_product(&[2.0, 3.0], &[1.0, 2.0, 3.0]);
    }

    #[test]
    fn t_norm1_test() {
        // |1| + |-2| + |-3| = 6
        let n1 = matrix_operations::norm1(&[1.0, -2.0, -3.0]);
        unit_test_utils::assert_nearly_equal(6.0, n1, 1e-10, 1e-16, "norm1");
    }

    #[test]
    fn t_norm2_test() {
        // 3-4-5 right triangle.
        let n2 = matrix_operations::norm2(&[3.0, 4.0]);
        unit_test_utils::assert_nearly_equal(5.0, n2, 1e-10, 1e-16, "norm2");
    }

    #[test]
    fn t_norm_inf_test() {
        let ninf = matrix_operations::norm_inf(&[1.0, -2.0, -3.0]);
        unit_test_utils::assert_nearly_equal(3.0, ninf, 1e-10, 1e-16, "norm infinity of vector");
        let ninf2 = matrix_operations::norm_inf(&[1.0, -8.0, -3.0, 0.0]);
        unit_test_utils::assert_nearly_equal(8.0, ninf2, 1e-10, 1e-16, "infinity norm");
    }

    #[test]
    fn t_norm_inf_diff() {
        let x = [1.0, 2.0, 1.0];
        let y = [-4.0, 0.0, 3.0];
        // max(|1-(-4)|, |2-0|, |1-3|) = 5
        let norm_diff = matrix_operations::norm_inf_diff(&x, &y);
        unit_test_utils::assert_nearly_equal(5.0f64, norm_diff, 1e-10, 1e-9, "norm of difference");
        let self_diff = matrix_operations::norm_inf_diff(&x, &x);
        unit_test_utils::assert_nearly_equal(
            0.0f64,
            self_diff,
            1e-10,
            1e-16,
            "difference of same vector",
        );
        let empty_diff = matrix_operations::norm_inf_diff(&[], &[]);
        unit_test_utils::assert_nearly_equal(
            0.0f64,
            empty_diff,
            1e-10,
            1e-16,
            "difference of empty vectors",
        );
    }

    #[test]
    #[should_panic]
    fn t_norm_inf_diff_panic() {
        // Mismatched lengths must panic.
        let x = [1.0, 2.0, 3.0];
        let y = [0.0, 3.0];
        let _ = matrix_operations::norm_inf_diff(&x, &y);
    }

    #[test]
    fn t_norm2_squared_diff_test() {
        let x = [2.0, 5.0, 7.0, -1.0];
        let y = [4.0, 1.0, 0.0, 10.0];
        // 4 + 16 + 49 + 121 = 190
        let norm2sq = matrix_operations::norm2_squared_diff(&x, &y);
        unit_test_utils::assert_nearly_equal(190., norm2sq, 1e-10, 1e-12, "norm sq diff");
    }

    #[test]
    fn t_matmul_a_at_tall() {
        // A is 3x2 (tall); A·Aᵀ is 3x3.
        let a = vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0];
        let aat = matrix_operations::mul_a_at(&a, 3, 2).unwrap();
        let expected = vec![5.0_f64, 11., 17., 11., 25., 39., 17., 39., 61.];
        unit_test_utils::nearly_equal_array(&expected, &aat, 1e-10, 1e-12);
    }

    #[test]
    fn t_matmul_a_at_fat() {
        // A is 2x3 (fat); A·Aᵀ is 2x2.
        let a = vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0];
        let aat = matrix_operations::mul_a_at(&a, 2, 3).unwrap();
        let expected = vec![14.0_f64, 32., 32., 77.];
        unit_test_utils::nearly_equal_array(&expected, &aat, 1e-10, 1e-12);
    }

    #[test]
    fn t_matmul_a_at_column_vec() {
        // Column vector: A·Aᵀ is the 3x3 outer product.
        let a = vec![1.0_f64, 2.0, 3.0];
        let aat = matrix_operations::mul_a_at(&a, 3, 1).unwrap();
        let expected = vec![1.0_f64, 2., 3., 2., 4., 6., 3., 6., 9.];
        unit_test_utils::nearly_equal_array(&expected, &aat, 1e-10, 1e-12);
    }

    #[test]
    fn t_matmul_a_at_column_rowvec() {
        // Row vector: A·Aᵀ is the 1x1 inner product.
        let a = vec![1.0_f64, 2.0, 3.0];
        let aat = matrix_operations::mul_a_at(&a, 1, 3).unwrap();
        unit_test_utils::nearly_equal(14.0, aat[0], 1e-10, 1e-12);
    }

    #[test]
    fn t_matmul_a_at_wrong_dimension() {
        // 6 elements cannot form a 2x2 matrix.
        let a = vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0];
        let result = matrix_operations::mul_a_at(&a, 2, 2);
        assert_eq!(
            result,
            Err(matrix_operations::MatrixError::DimensionMismatch)
        );
    }
}