//! numeris 0.5.1
//!
//! Pure-Rust numerical algorithms library — high performance with SIMD
//! support, while also supporting `no_std` for embedded and WASM targets.
//!
//! Documentation: vector and matrix norms (L1, L2, infinity, Frobenius).
use crate::matrix::vector::Vector;
use crate::traits::{FloatScalar, LinalgScalar, Scalar};
use num_traits::{Float, One, Zero};
use crate::Matrix;

// ── Vector norms ────────────────────────────────────────────────────

impl<T: Scalar, const N: usize> Vector<T, N> {
    /// Squared L2 norm, i.e. the dot product of the vector with itself.
    ///
    /// No square root is taken, so this works for integer element types
    /// as well as floating point.
    ///
    /// ```
    /// use numeris::Vector;
    /// let v = Vector::from_array([3.0, 4.0]);
    /// assert_eq!(v.norm_squared(), 25.0);
    ///
    /// let vi = Vector::from_array([3, 4]);
    /// assert_eq!(vi.norm_squared(), 25);
    /// ```
    pub fn norm_squared(&self) -> T {
        Self::dot(self, self)
    }
}

impl<T: LinalgScalar, const N: usize> Vector<T, N> {
    /// L2 (Euclidean) norm.
    ///
    /// For complex vectors, this is `sqrt(sum(|x_i|^2))`.
    ///
    /// Accumulates with Kahan (compensated) summation to limit rounding
    /// error over the `N` squared terms.
    ///
    /// ```
    /// use numeris::Vector;
    /// let v = Vector::from_array([3.0_f64, 4.0]);
    /// assert!((v.norm() - 5.0).abs() < 1e-12);
    /// ```
    pub fn norm(&self) -> T::Real {
        let mut acc = <T::Real as Zero>::zero();
        let mut err = <T::Real as Zero>::zero();
        for idx in 0..N {
            let modulus = self[idx].modulus();
            let term = modulus * modulus;
            // Kahan step: subtract the running error before adding,
            // then recover the new rounding error from the update.
            let adjusted = term - err;
            let next = acc + adjusted;
            err = (next - acc) - adjusted;
            acc = next;
        }
        acc.sqrt()
    }

    /// L1 norm (sum of absolute values / moduli).
    ///
    /// Uses the same compensated summation scheme as [`Vector::norm`].
    ///
    /// ```
    /// use numeris::Vector;
    /// let v = Vector::from_array([1.0_f64, -2.0, 3.0]);
    /// assert!((v.norm_l1() - 6.0).abs() < 1e-12);
    /// ```
    pub fn norm_l1(&self) -> T::Real {
        let mut acc = <T::Real as Zero>::zero();
        let mut err = <T::Real as Zero>::zero();
        for idx in 0..N {
            // Kahan step over the element moduli.
            let adjusted = self[idx].modulus() - err;
            let next = acc + adjusted;
            err = (next - acc) - adjusted;
            acc = next;
        }
        acc
    }

    /// Return a unit vector in the same direction.
    ///
    /// Panics if the norm is zero.
    ///
    /// NOTE(review): for IEEE float scalars, dividing by a zero norm
    /// produces non-finite components rather than panicking — confirm
    /// the intended contract for the zero vector.
    ///
    /// ```
    /// use numeris::Vector;
    /// let v = Vector::from_array([3.0_f64, 4.0]);
    /// let u = v.normalize();
    /// assert!((u.norm() - 1.0).abs() < 1e-12);
    /// assert!((u[0] - 0.6).abs() < 1e-12);
    /// ```
    pub fn normalize(&self) -> Self {
        // Scale by the reciprocal of the norm in one multiply.
        let inv = <T::Real as One>::one() / self.norm();
        *self * T::from_real(inv)
    }
}

impl<T: FloatScalar, const M: usize, const N: usize> Matrix<T, M, N> {
    /// Scaled Frobenius norm: `frobenius_norm() / sqrt(M * N)`.
    ///
    /// Makes the error metric independent of state dimension,
    /// used by ODE solvers for step size control.
    ///
    /// ```
    /// use numeris::Vector;
    /// let v = Vector::from_array([3.0_f64, 4.0]);
    /// let sn = v.scaled_norm();
    /// assert!((sn - 5.0 / 2.0_f64.sqrt()).abs() < 1e-12);
    /// ```
    pub fn scaled_norm(&self) -> T {
        // Divide by sqrt(element count) to normalize by dimension.
        let count = T::from(M * N).unwrap();
        self.frobenius_norm() / count.sqrt()
    }
}

// ── Matrix norms ────────────────────────────────────────────────────

impl<T: Scalar, const M: usize, const N: usize> Matrix<T, M, N> {
    /// Squared Frobenius norm (sum of all elements squared).
    ///
    /// No square root is taken, so this works for integer element types.
    pub fn frobenius_norm_squared(&self) -> T {
        let mut total = T::zero();
        for row in 0..M {
            for col in 0..N {
                let e = self[(row, col)];
                total = total + e * e;
            }
        }
        total
    }
}

impl<T: LinalgScalar, const M: usize, const N: usize> Matrix<T, M, N> {
    /// Frobenius norm (square root of sum of squared moduli).
    ///
    /// Accumulates with Kahan (compensated) summation to limit rounding
    /// error over the `M * N` squared terms.
    ///
    /// ```
    /// use numeris::Matrix;
    /// let m = Matrix::new([[1.0_f64, 2.0], [3.0, 4.0]]);
    /// assert!((m.frobenius_norm() - 30.0_f64.sqrt()).abs() < 1e-12);
    /// ```
    pub fn frobenius_norm(&self) -> T::Real {
        let mut acc = <T::Real as Zero>::zero();
        let mut err = <T::Real as Zero>::zero();
        for row in 0..M {
            for col in 0..N {
                let modulus = self[(row, col)].modulus();
                let term = modulus * modulus;
                // Kahan step: fold in the term while carrying the
                // rounding error of the previous additions.
                let adjusted = term - err;
                let next = acc + adjusted;
                err = (next - acc) - adjusted;
                acc = next;
            }
        }
        acc.sqrt()
    }

    /// Infinity norm (maximum row sum of moduli).
    ///
    /// ```
    /// use numeris::Matrix;
    /// let m = Matrix::new([[1.0_f64, -2.0], [3.0, 4.0]]);
    /// // row sums: |1|+|-2| = 3, |3|+|4| = 7
    /// assert!((m.norm_inf() - 7.0).abs() < 1e-12);
    /// ```
    pub fn norm_inf(&self) -> T::Real {
        let mut best = <T::Real as Zero>::zero();
        for row in 0..M {
            // Sum of moduli across this row.
            let mut total = <T::Real as Zero>::zero();
            for col in 0..N {
                total = total + self[(row, col)].modulus();
            }
            if total > best {
                best = total;
            }
        }
        best
    }

    /// One norm (maximum column sum of moduli).
    ///
    /// ```
    /// use numeris::Matrix;
    /// let m = Matrix::new([[1.0_f64, -2.0], [3.0, 4.0]]);
    /// // col sums: |1|+|3| = 4, |-2|+|4| = 6
    /// assert!((m.norm_one() - 6.0).abs() < 1e-12);
    /// ```
    pub fn norm_one(&self) -> T::Real {
        let mut best = <T::Real as Zero>::zero();
        for col in 0..N {
            // Sum of moduli down this column.
            let mut total = <T::Real as Zero>::zero();
            for row in 0..M {
                total = total + self[(row, col)].modulus();
            }
            if total > best {
                best = total;
            }
        }
        best
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // ── Vector norm tests ───────────────────────────────────────

    #[test]
    fn vector_norm_squared() {
        // 3^2 + 4^2 = 25
        assert_eq!(Vector::from_array([3.0, 4.0]).norm_squared(), 25.0);
    }

    #[test]
    fn vector_norm_squared_integer() {
        // Integer elements: no sqrt required.
        assert_eq!(Vector::from_array([3, 4]).norm_squared(), 25);
    }

    #[test]
    fn vector_norm() {
        // Classic 3-4-5 right triangle.
        let v = Vector::from_array([3.0_f64, 4.0]);
        assert!((v.norm() - 5.0).abs() < 1e-12);
    }

    #[test]
    fn vector_norm_l1() {
        // |1| + |-2| + |3| = 6
        let v = Vector::from_array([1.0_f64, -2.0, 3.0]);
        assert!((v.norm_l1() - 6.0).abs() < 1e-12);
    }

    #[test]
    fn vector_normalize() {
        let unit = Vector::from_array([3.0_f64, 4.0]).normalize();
        assert!((unit.norm() - 1.0).abs() < 1e-12);
        assert!((unit[0] - 0.6).abs() < 1e-12);
        assert!((unit[1] - 0.8).abs() < 1e-12);
    }

    #[test]
    fn vector_normalize_3d() {
        let unit = Vector::from_array([1.0_f64, 1.0, 1.0]).normalize();
        assert!((unit.norm() - 1.0).abs() < 1e-12);
    }

    // ── Matrix norm tests ───────────────────────────────────────

    #[test]
    fn frobenius_norm() {
        // sqrt(1 + 4 + 9 + 16) = sqrt(30)
        let m = Matrix::new([[1.0_f64, 2.0], [3.0, 4.0]]);
        assert!((m.frobenius_norm() - 30.0_f64.sqrt()).abs() < 1e-12);
    }

    #[test]
    fn frobenius_norm_squared_integer() {
        // 1 + 4 + 9 + 16 = 30, exact in integers.
        assert_eq!(Matrix::new([[1, 2], [3, 4]]).frobenius_norm_squared(), 30);
    }

    #[test]
    fn norm_inf() {
        // row sums: |1|+|-2| = 3, |3|+|4| = 7
        let m = Matrix::new([[1.0_f64, -2.0], [3.0, 4.0]]);
        assert!((m.norm_inf() - 7.0).abs() < 1e-12);
    }

    #[test]
    fn norm_one() {
        // col sums: |1|+|3| = 4, |-2|+|4| = 6
        let m = Matrix::new([[1.0_f64, -2.0], [3.0, 4.0]]);
        assert!((m.norm_one() - 6.0).abs() < 1e-12);
    }
}