use crate::matrix::vector::Vector;
use crate::traits::{FloatScalar, LinalgScalar, Scalar};
use num_traits::{Float, One, Zero};
use crate::Matrix;
impl<T: Scalar, const N: usize> Vector<T, N> {
    /// Squared Euclidean norm: the dot product of the vector with itself.
    ///
    /// No square root is taken, so this is cheaper than a full norm and
    /// also works for integer scalar types.
    pub fn norm_squared(&self) -> T {
        Self::dot(self, self)
    }
}
impl<T: LinalgScalar, const N: usize> Vector<T, N> {
    /// Euclidean (L2) norm of the vector.
    ///
    /// Accumulates the squared component moduli with Kahan (compensated)
    /// summation to limit floating-point rounding error, then takes the
    /// square root of the total.
    ///
    /// NOTE(review): the moduli are squared directly, with no hypot-style
    /// rescaling, so very large components can overflow — confirm this is
    /// acceptable for expected inputs.
    pub fn norm(&self) -> T::Real {
        let mut total = <T::Real as Zero>::zero();
        let mut carry = <T::Real as Zero>::zero();
        for idx in 0..N {
            let modulus = self[idx].modulus();
            let square = modulus * modulus;
            // Kahan step: fold in the term minus the running compensation,
            // then recover the low-order bits lost by the addition.
            let adjusted = square - carry;
            let next = total + adjusted;
            carry = (next - total) - adjusted;
            total = next;
        }
        total.sqrt()
    }

    /// L1 (taxicab) norm: the sum of the component moduli, accumulated
    /// with Kahan summation for accuracy.
    pub fn norm_l1(&self) -> T::Real {
        let mut total = <T::Real as Zero>::zero();
        let mut carry = <T::Real as Zero>::zero();
        for idx in 0..N {
            let adjusted = self[idx].modulus() - carry;
            let next = total + adjusted;
            carry = (next - total) - adjusted;
            total = next;
        }
        total
    }

    /// Returns a copy of this vector scaled by the reciprocal of its norm,
    /// i.e. a unit-length vector in the same direction.
    ///
    /// NOTE(review): a zero vector divides by zero here, producing
    /// inf/NaN components — confirm callers never pass one.
    pub fn normalize(&self) -> Self {
        let len = self.norm();
        *self * T::from_real(<T::Real as One>::one() / len)
    }
}
impl<T: FloatScalar, const M: usize, const N: usize> Matrix<T, M, N> {
    /// Frobenius norm divided by `sqrt(M * N)` — the root-mean-square of
    /// the entry magnitudes.
    ///
    /// # Panics
    /// Panics if `M * N` cannot be converted to `T` (the `unwrap` on the
    /// numeric cast).
    pub fn scaled_norm(&self) -> T {
        let entry_count = T::from(M * N).unwrap();
        self.frobenius_norm() / entry_count.sqrt()
    }
}
impl<T: Scalar, const M: usize, const N: usize> Matrix<T, M, N> {
    /// Sum of the squares of every entry (the squared Frobenius norm).
    ///
    /// No square root is taken, so this also works for integer scalars.
    pub fn frobenius_norm_squared(&self) -> T {
        let mut acc = T::zero();
        for row in 0..M {
            for col in 0..N {
                let entry = self[(row, col)];
                acc = acc + entry * entry;
            }
        }
        acc
    }
}
impl<T: LinalgScalar, const M: usize, const N: usize> Matrix<T, M, N> {
    /// Frobenius norm: square root of the sum of squared entry moduli,
    /// accumulated row-by-row with Kahan (compensated) summation to limit
    /// floating-point rounding error.
    pub fn frobenius_norm(&self) -> T::Real {
        let mut total = <T::Real as Zero>::zero();
        let mut carry = <T::Real as Zero>::zero();
        for row in 0..M {
            for col in 0..N {
                let modulus = self[(row, col)].modulus();
                let square = modulus * modulus;
                // Kahan step: add the compensated term, then capture the
                // low-order bits the addition dropped.
                let adjusted = square - carry;
                let next = total + adjusted;
                carry = (next - total) - adjusted;
                total = next;
            }
        }
        total.sqrt()
    }

    /// Infinity norm: the maximum absolute row sum.
    pub fn norm_inf(&self) -> T::Real {
        let mut best = <T::Real as Zero>::zero();
        for row in 0..M {
            let mut row_total = <T::Real as Zero>::zero();
            for col in 0..N {
                row_total = row_total + self[(row, col)].modulus();
            }
            if row_total > best {
                best = row_total;
            }
        }
        best
    }

    /// One norm: the maximum absolute column sum.
    pub fn norm_one(&self) -> T::Real {
        let mut best = <T::Real as Zero>::zero();
        for col in 0..N {
            let mut col_total = <T::Real as Zero>::zero();
            for row in 0..M {
                col_total = col_total + self[(row, col)].modulus();
            }
            if col_total > best {
                best = col_total;
            }
        }
        best
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn vector_norm_squared() {
        // 3-4-5 triangle: 3^2 + 4^2 = 25.
        let vec = Vector::from_array([3.0, 4.0]);
        assert_eq!(vec.norm_squared(), 25.0);
    }

    #[test]
    fn vector_norm_squared_integer() {
        // norm_squared needs no sqrt, so it works on integer scalars.
        let vec = Vector::from_array([3, 4]);
        assert_eq!(vec.norm_squared(), 25);
    }

    #[test]
    fn vector_norm() {
        let vec = Vector::from_array([3.0_f64, 4.0]);
        assert!((vec.norm() - 5.0).abs() < 1e-12);
    }

    #[test]
    fn vector_norm_l1() {
        // L1 norm sums absolute values: 1 + 2 + 3 = 6.
        let vec = Vector::from_array([1.0_f64, -2.0, 3.0]);
        assert!((vec.norm_l1() - 6.0).abs() < 1e-12);
    }

    #[test]
    fn vector_normalize() {
        let vec = Vector::from_array([3.0_f64, 4.0]);
        let unit = vec.normalize();
        // Unit length, pointing in the original direction (3/5, 4/5).
        assert!((unit.norm() - 1.0).abs() < 1e-12);
        assert!((unit[0] - 0.6).abs() < 1e-12);
        assert!((unit[1] - 0.8).abs() < 1e-12);
    }

    #[test]
    fn vector_normalize_3d() {
        let vec = Vector::from_array([1.0_f64, 1.0, 1.0]);
        let unit = vec.normalize();
        assert!((unit.norm() - 1.0).abs() < 1e-12);
    }

    #[test]
    fn frobenius_norm() {
        // sqrt(1 + 4 + 9 + 16) = sqrt(30).
        let mat = Matrix::new([[1.0_f64, 2.0], [3.0, 4.0]]);
        assert!((mat.frobenius_norm() - 30.0_f64.sqrt()).abs() < 1e-12);
    }

    #[test]
    fn frobenius_norm_squared_integer() {
        let mat = Matrix::new([[1, 2], [3, 4]]);
        assert_eq!(mat.frobenius_norm_squared(), 30);
    }

    #[test]
    fn norm_inf() {
        // Max absolute row sum: |3| + |4| = 7.
        let mat = Matrix::new([[1.0_f64, -2.0], [3.0, 4.0]]);
        assert!((mat.norm_inf() - 7.0).abs() < 1e-12);
    }

    #[test]
    fn norm_one() {
        // Max absolute column sum: |-2| + |4| = 6.
        let mat = Matrix::new([[1.0_f64, -2.0], [3.0, 4.0]]);
        assert!((mat.norm_one() - 6.0).abs() < 1e-12);
    }
}