use crate::backend::Backend;
use crate::tensor::{BasicOps, Tensor};
use crate::{ElementConversion, Numeric};
/// The vector norm to compute.
#[derive(Debug, Clone, Copy)]
pub enum Norm {
    /// L0 "norm": the number of non-zero elements.
    L0,
    /// L1 norm: the sum of absolute values.
    L1,
    /// L2 (Euclidean) norm: the square root of the sum of squares.
    L2,
    /// L-infinity norm: the maximum absolute value.
    LInf,
    /// L-negative-infinity norm: the minimum absolute value.
    LNegInf,
    /// General Lp norm with exponent `p`.
    Lp(f64),
}
impl From<i32> for Norm {
    /// Converts an integer order into a [`Norm`].
    ///
    /// `0`, `1`, and `2` map to their dedicated variants; any other
    /// value becomes a general [`Norm::Lp`] with `p = value as f64`.
    fn from(value: i32) -> Self {
        if value == 0 {
            Norm::L0
        } else if value == 1 {
            Norm::L1
        } else if value == 2 {
            Norm::L2
        } else {
            // i32 -> f64 is lossless, so `From` is available.
            Norm::Lp(f64::from(value))
        }
    }
}
impl From<f32> for Norm {
    /// Converts a floating-point order into a [`Norm`].
    ///
    /// Recognizes `0.0`, `1.0`, `2.0`, and the two infinities; every
    /// other value (including NaN) becomes a general [`Norm::Lp`].
    fn from(value: f32) -> Self {
        // Comparison chain instead of float-literal match patterns;
        // NaN fails every `==` and falls through to `Lp`, exactly as
        // the wildcard arm would.
        if value == 0.0 {
            Norm::L0
        } else if value == 1.0 {
            Norm::L1
        } else if value == 2.0 {
            Norm::L2
        } else if value == f32::INFINITY {
            Norm::LInf
        } else if value == f32::NEG_INFINITY {
            Norm::LNegInf
        } else {
            Norm::Lp(f64::from(value))
        }
    }
}
impl From<f64> for Norm {
    /// Converts a floating-point order into a [`Norm`].
    ///
    /// Recognizes `0.0`, `1.0`, `2.0`, and the two infinities; every
    /// other value (including NaN) becomes a general [`Norm::Lp`].
    fn from(value: f64) -> Self {
        // Comparison chain instead of float-literal match patterns;
        // NaN fails every `==` and falls through to `Lp`, exactly as
        // the wildcard arm would.
        if value == 0.0 {
            Norm::L0
        } else if value == 1.0 {
            Norm::L1
        } else if value == 2.0 {
            Norm::L2
        } else if value == f64::INFINITY {
            Norm::LInf
        } else if value == f64::NEG_INFINITY {
            Norm::LNegInf
        } else {
            Norm::Lp(value)
        }
    }
}
/// Computes the vector norm of `x` along dimension `dim`.
///
/// Dispatches to the specialized implementation for the requested
/// [`Norm`] variant; the reduced dimension is kept (size 1).
pub fn vector_norm<B: Backend, const D: usize>(
    x: Tensor<B, D>,
    norm: impl Into<Norm>,
    dim: usize,
) -> Tensor<B, D> {
    match norm.into() {
        Norm::L0 => l0_norm(x, dim),
        Norm::L1 => l1_norm(x, dim),
        Norm::L2 => l2_norm(x, dim),
        Norm::LInf => max_abs_norm(x, dim),
        Norm::LNegInf => min_abs_norm(x, dim),
        Norm::Lp(p) => lp_norm(x, p, dim),
    }
}
/// Normalizes `x` along dimension `dim` using the requested norm.
///
/// The norm is clamped from below by `eps` before dividing, so inputs
/// with (near-)zero norm do not produce infinities or NaNs.
pub fn vector_normalize<B: Backend, const D: usize, E: ElementConversion>(
    x: Tensor<B, D>,
    norm: impl Into<Norm>,
    dim: usize,
    eps: E,
) -> Tensor<B, D> {
    let magnitude = vector_norm(x.clone(), norm, dim);
    // The kept reduced dimension (size 1) broadcasts over `x`.
    x.div(magnitude.clamp_min(eps))
}
/// Computes the L0 "norm" along `dim`: the count of non-zero elements.
pub fn l0_norm<B: Backend, const D: usize, K>(x: Tensor<B, D, K>, dim: usize) -> Tensor<B, D, K>
where
    K: BasicOps<B> + Numeric<B>,
{
    // Indicator tensor: 1 where x != 0, 0 elsewhere; summing it counts
    // the non-zero entries.
    let indicator = x.zeros_like().mask_fill(x.not_equal_elem(0), 1);
    indicator.sum_dim(dim)
}
/// Computes the L1 norm along `dim`: the sum of absolute values.
pub fn l1_norm<B: Backend, const D: usize, K>(x: Tensor<B, D, K>, dim: usize) -> Tensor<B, D, K>
where
    K: BasicOps<B> + Numeric<B>,
{
    let magnitudes = x.abs();
    magnitudes.sum_dim(dim)
}
/// Computes the L2 (Euclidean) norm along `dim`.
pub fn l2_norm<B: Backend, const D: usize>(x: Tensor<B, D>, dim: usize) -> Tensor<B, D> {
    // No `abs()` before squaring: x^2 == |x|^2 for every real input
    // (including -0.0 and NaN), and d/dx of both is 2x, so dropping the
    // extra kernel changes neither values nor gradients.
    x.square().sum_dim(dim).sqrt()
}
/// Computes the general Lp norm along `dim`: `(sum |x|^p)^(1/p)`.
///
/// NOTE(review): `p == 0` would produce an infinite exponent here;
/// callers going through `vector_norm` never hit that case because
/// `Norm::L0` is dispatched separately.
pub fn lp_norm<B: Backend, const D: usize>(x: Tensor<B, D>, p: f64, dim: usize) -> Tensor<B, D> {
    let summed = x.abs().powf_scalar(p).sum_dim(dim);
    // `recip()` computes exactly 1.0 / p.
    summed.powf_scalar(p.recip())
}
/// Computes the L-infinity norm along `dim`: the largest absolute value.
pub fn max_abs_norm<B: Backend, const D: usize, K>(
    x: Tensor<B, D, K>,
    dim: usize,
) -> Tensor<B, D, K>
where
    K: BasicOps<B> + Numeric<B>,
{
    // Delegates to the tensor's combined max-abs reduction — presumably a
    // fused (and cheaper) form of `abs()` + `max_dim()`; `min_abs_norm`
    // below spells the two steps out because no `min_abs_dim` is used here.
    x.max_abs_dim(dim)
}
/// Computes the L-negative-infinity norm along `dim`: the smallest
/// absolute value.
pub fn min_abs_norm<B: Backend, const D: usize, K>(
    x: Tensor<B, D, K>,
    dim: usize,
) -> Tensor<B, D, K>
where
    K: BasicOps<B> + Numeric<B>,
{
    let magnitudes = x.abs();
    magnitudes.min_dim(dim)
}