use scirs2_core::ndarray::{ArrayView1, ArrayView2, ScalarOperand};
use scirs2_core::numeric::{Float, NumAssign};
use scirs2_core::simd_ops::{AutoOptimizer, SimdUnifiedOps};
use std::iter::Sum;
use crate::decomposition::svd;
use crate::error::{LinalgError, LinalgResult};
use crate::validation::{
validate_finite_vector, validate_finitematrix, validate_not_empty_vector,
validate_not_emptymatrix,
};
/// Compute a matrix norm of `a`.
///
/// Supported `ord` values:
/// * `"fro"` / `"f"` / `"frobenius"` — Frobenius norm (sqrt of sum of squares)
/// * `"1"` — maximum absolute column sum
/// * `"inf"` — maximum absolute row sum
/// * `"2"` — spectral norm (largest singular value; `workers` is forwarded
///   to the SVD)
///
/// # Errors
/// Returns `LinalgError::InvalidInputError` when the matrix is empty,
/// contains non-finite values, or `ord` is not one of the supported names.
#[allow(dead_code)]
pub fn matrix_norm<F>(a: &ArrayView2<F>, ord: &str, workers: Option<usize>) -> LinalgResult<F>
where
    F: Float + NumAssign + Sum + scirs2_core::ndarray::ScalarOperand + Send + Sync + 'static,
{
    validate_not_emptymatrix(a, "Matrix norm computation")?;
    validate_finitematrix(a, "Matrix norm computation")?;
    match ord {
        "fro" | "f" | "frobenius" => {
            // Sum of squared entries, then square root.
            let sum_sq = a.iter().fold(F::zero(), |acc, &v| acc + v * v);
            Ok(sum_sq.sqrt())
        }
        "1" => {
            // Largest absolute column sum.
            let max_col_sum = (0..a.ncols())
                .map(|j| a.column(j).fold(F::zero(), |acc, &v| acc + v.abs()))
                .fold(F::zero(), |best, s| if s > best { s } else { best });
            Ok(max_col_sum)
        }
        "inf" => {
            // Largest absolute row sum.
            let max_row_sum = (0..a.nrows())
                .map(|i| a.row(i).fold(F::zero(), |acc, &v| acc + v.abs()))
                .fold(F::zero(), |best, s| if s > best { s } else { best });
            Ok(max_row_sum)
        }
        "2" => {
            // Spectral norm: the leading singular value (zero for an empty
            // spectrum).
            let (_u, s, _vt) = svd(a, false, workers)?;
            Ok(if s.is_empty() { F::zero() } else { s[0] })
        }
        _ => Err(LinalgError::InvalidInputError(format!(
            "Matrix norm computation failed: Invalid norm order '{ord}'\nSupported norms: 'fro', 'f', 'frobenius', '1', 'inf', '2'"
        ))),
    }
}
/// Compute a vector norm of `x`.
///
/// Supported `ord` values: `1` (L1 / sum of absolute values), `2`
/// (L2 / Euclidean), and `usize::MAX` (infinity norm, i.e. the largest
/// absolute entry).
///
/// # Errors
/// Returns `LinalgError::InvalidInputError` when the vector is empty,
/// contains non-finite values, or `ord` is unsupported.
#[allow(dead_code)]
pub fn vector_norm<F>(x: &ArrayView1<F>, ord: usize) -> LinalgResult<F>
where
    F: Float + NumAssign + Sum + Send + Sync + scirs2_core::ndarray::ScalarOperand + 'static,
{
    validate_not_empty_vector(x, "Vector norm computation")?;
    validate_finite_vector(x, "Vector norm computation")?;
    match ord {
        // L1: sum of |x_i|.
        1 => Ok(x.fold(F::zero(), |acc, &v| acc + v.abs())),
        // L2: sqrt of sum of x_i^2.
        2 => Ok(x.fold(F::zero(), |acc, &v| acc + v * v).sqrt()),
        // Infinity norm: max |x_i|. Inputs are validated finite, so
        // Float::max has no NaN subtleties here.
        usize::MAX => Ok(x.fold(F::zero(), |best, &v| best.max(v.abs()))),
        _ => Err(LinalgError::InvalidInputError(format!(
            "Vector norm computation failed: Invalid norm order {}\nSupported norms: 1 (L1), 2 (L2/Euclidean), {} (infinity)",
            ord, usize::MAX
        ))),
    }
}
#[allow(dead_code)]
pub fn vector_norm_simd<F>(x: &ArrayView1<F>, ord: usize) -> LinalgResult<F>
where
F: Float
+ NumAssign
+ Sum
+ Send
+ Sync
+ scirs2_core::ndarray::ScalarOperand
+ SimdUnifiedOps
+ 'static,
{
validate_not_empty_vector(x, "Vector norm computation")?;
validate_finite_vector(x, "Vector norm computation")?;
let optimizer = AutoOptimizer::new();
if optimizer.should_use_simd(x.len()) {
match ord {
1 => {
return Ok(F::simd_norm_l1(x));
}
2 => {
return Ok(F::simd_norm(x));
}
usize::MAX => {
return Ok(F::simd_norm_linf(x));
}
_ => {
}
}
}
match ord {
1 => {
let sum_abs = x.fold(F::zero(), |acc, &val| acc + val.abs());
Ok(sum_abs)
}
2 => {
let sum_sq = x.fold(F::zero(), |acc, &val| acc + val * val);
Ok(sum_sq.sqrt())
}
usize::MAX => {
let max_abs = x.fold(F::zero(), |acc, &val| {
let abs_val = val.abs();
if abs_val > acc {
abs_val
} else {
acc
}
});
Ok(max_abs)
}
_ => Err(LinalgError::InvalidInputError(format!(
"Vector norm computation failed: Invalid norm order {}\nSupported norms: 1 (L1), 2 (L2/Euclidean), {} (infinity)",
ord, usize::MAX
))),
}
}
/// SIMD-accelerated matrix norm.
///
/// Same contract as [`matrix_norm`]: `ord` selects `"fro"`/`"f"`/
/// `"frobenius"`, `"1"`, or `"inf"`; any other value (including `"2"`)
/// is delegated to [`matrix_norm`], which also produces the error for
/// unsupported names. SIMD kernels are used only when the `AutoOptimizer`
/// judges the problem large enough.
///
/// # Errors
/// Returns `LinalgError::InvalidInputError` when the matrix is empty,
/// contains non-finite values, or `ord` is unsupported.
#[allow(dead_code)]
pub fn matrix_norm_simd<F>(a: &ArrayView2<F>, ord: &str, workers: Option<usize>) -> LinalgResult<F>
where
    F: Float + NumAssign + Sum + ScalarOperand + Send + Sync + SimdUnifiedOps + 'static,
{
    validate_not_emptymatrix(a, "Matrix norm computation")?;
    validate_finitematrix(a, "Matrix norm computation")?;
    let optimizer = AutoOptimizer::new();
    match ord {
        "fro" | "f" | "frobenius" => {
            let n_elements = a.nrows() * a.ncols();
            if optimizer.should_use_simd(n_elements) {
                // The SIMD kernel needs a contiguous buffer. Previously a
                // non-contiguous view (transpose, slice) aborted with a
                // ComputationError here; now it falls through to the scalar
                // loop below instead.
                if let Some(flat) = a.as_slice() {
                    let flat_view = scirs2_core::ndarray::ArrayView1::from(flat);
                    return Ok(F::simd_sum_squares(&flat_view).sqrt());
                }
            }
            // Scalar fallback: sum of squares over all entries.
            let mut sum_sq = F::zero();
            for i in 0..a.nrows() {
                for j in 0..a.ncols() {
                    sum_sq += a[[i, j]] * a[[i, j]];
                }
            }
            Ok(sum_sq.sqrt())
        }
        "1" => {
            // Maximum absolute column sum; decide once whether columns are
            // long enough to benefit from the SIMD L1 kernel.
            let use_simd = optimizer.should_use_simd(a.nrows());
            let mut max_col_sum = F::zero();
            for j in 0..a.ncols() {
                let col = a.column(j);
                let col_sum = if use_simd {
                    F::simd_norm_l1(&col)
                } else {
                    col.fold(F::zero(), |acc, &x| acc + x.abs())
                };
                if col_sum > max_col_sum {
                    max_col_sum = col_sum;
                }
            }
            Ok(max_col_sum)
        }
        "inf" => {
            // Maximum absolute row sum; same SIMD decision per row length.
            let use_simd = optimizer.should_use_simd(a.ncols());
            let mut max_row_sum = F::zero();
            for i in 0..a.nrows() {
                let row = a.row(i);
                let row_sum = if use_simd {
                    F::simd_norm_l1(&row)
                } else {
                    row.fold(F::zero(), |acc, &x| acc + x.abs())
                };
                if row_sum > max_row_sum {
                    max_row_sum = row_sum;
                }
            }
            Ok(max_row_sum)
        }
        // "2" (SVD-based) and invalid orders share matrix_norm's handling.
        _ => matrix_norm(a, ord, workers),
    }
}
/// Parallel vector norm using the configured worker pool.
///
/// Same contract as [`vector_norm`] (`ord` of `1`, `2`, or `usize::MAX`).
/// Inputs shorter than an internal threshold are handled serially, since
/// parallel scheduling overhead would dominate.
///
/// Note: the `'static` bound was added because the serial fallback calls
/// [`vector_norm`], whose bounds require it — without it this function
/// did not compile.
///
/// # Errors
/// Returns `LinalgError::InvalidInputError` when the vector is empty,
/// contains non-finite values, or `ord` is unsupported.
#[allow(dead_code)]
pub fn vector_norm_parallel<F>(
    x: &ArrayView1<F>,
    ord: usize,
    workers: Option<usize>,
) -> LinalgResult<F>
where
    F: Float + NumAssign + Sum + Send + Sync + ScalarOperand + 'static,
{
    use crate::parallel;
    use scirs2_core::parallel_ops::*;
    parallel::configure_workers(workers);
    validate_not_empty_vector(x, "Vector norm computation")?;
    validate_finite_vector(x, "Vector norm computation")?;
    // Below this length, parallel overhead outweighs the arithmetic.
    const PARALLEL_THRESHOLD: usize = 1000;
    if x.len() < PARALLEL_THRESHOLD {
        return vector_norm(x, ord);
    }
    match ord {
        1 => {
            // L1: parallel sum of absolute values.
            let sum_abs: F = (0..x.len()).into_par_iter().map(|i| x[i].abs()).sum();
            Ok(sum_abs)
        }
        2 => {
            // L2: parallel sum of squares, then square root.
            let sum_sq: F = (0..x.len()).into_par_iter().map(|i| x[i] * x[i]).sum();
            Ok(sum_sq.sqrt())
        }
        usize::MAX => {
            // Infinity norm: parallel max-reduce of absolute values.
            let max_abs = (0..x.len())
                .into_par_iter()
                .map(|i| x[i].abs())
                .reduce(F::zero, |a, b| if a > b { a } else { b });
            Ok(max_abs)
        }
        _ => Err(LinalgError::InvalidInputError(format!(
            "Vector norm computation failed: Invalid norm order {}\nSupported norms: 1 (L1), 2 (L2/Euclidean), {} (infinity)",
            ord, usize::MAX
        ))),
    }
}
/// Compute the condition number of a square matrix.
///
/// `p` selects the norm type (defaults to `"2"`). All currently supported
/// types — `"1"`, `"2"`, `"fro"`/`"f"`/`"frobenius"`, `"inf"` — are
/// estimated identically here via the SVD, as the ratio of the largest
/// singular value to the smallest one above a numerical-noise cutoff.
/// (The two previous match arms were byte-for-byte duplicates and have
/// been merged.) Returns infinity for singular matrices.
///
/// # Errors
/// Returns `LinalgError::InvalidInputError` when the matrix is empty,
/// non-square, non-finite, or `p` names an unsupported norm.
#[allow(dead_code)]
pub fn cond<F>(a: &ArrayView2<F>, p: Option<&str>, workers: Option<usize>) -> LinalgResult<F>
where
    F: Float + NumAssign + Sum + scirs2_core::ndarray::ScalarOperand + Send + Sync + 'static,
{
    use crate::validation::validate_squarematrix;
    validate_not_emptymatrix(a, "Condition number computation")?;
    validate_squarematrix(a, "Condition number computation")?;
    validate_finitematrix(a, "Condition number computation")?;
    let norm_type = p.unwrap_or("2");
    match norm_type {
        "2" | "fro" | "f" | "frobenius" | "1" | "inf" => {
            let (_u, s, _vt) = svd(a, false, workers)?;
            if s.is_empty() {
                return Ok(F::infinity());
            }
            let sigma_max = s[0];
            // Singular values at or below eps * 100 * sigma_max are treated
            // as numerical zeros; find the smallest one above that cutoff.
            let cutoff = F::epsilon() * F::from(100).expect("100 converts to any float") * sigma_max;
            let sigma_min = s
                .iter()
                .rev()
                .copied()
                .find(|&v| v > cutoff)
                .unwrap_or_else(F::zero);
            if sigma_min <= F::zero() {
                // Effectively singular: condition number is infinite.
                Ok(F::infinity())
            } else {
                Ok(sigma_max / sigma_min)
            }
        }
        _ => Err(LinalgError::InvalidInputError(format!(
            "Condition number computation failed: Invalid norm type '{norm_type}'\nSupported norms: '1', '2' (default), 'fro', 'f', 'frobenius', 'inf'"
        ))),
    }
}
/// Compute the numerical rank of a matrix via its singular values.
///
/// The rank is the count of singular values above `tol`. When `tol` is
/// `None`, the NumPy-style default `max(nrows, ncols) * eps * sigma_max`
/// is used. An empty matrix has rank zero.
///
/// # Errors
/// Returns `LinalgError::InvalidInputError` when the matrix contains
/// non-finite values or `tol` is negative.
#[allow(dead_code)]
pub fn matrix_rank<F>(
    a: &ArrayView2<F>,
    tol: Option<F>,
    workers: Option<usize>,
) -> LinalgResult<usize>
where
    F: Float + NumAssign + Sum + scirs2_core::ndarray::ScalarOperand + Send + Sync + 'static,
{
    // Empty matrix: rank zero by convention.
    if a.is_empty() {
        return Ok(0);
    }
    validate_finitematrix(a, "Matrix rank computation")?;
    // Reject a negative user-supplied tolerance up front.
    if matches!(tol, Some(t) if t < F::zero()) {
        return Err(LinalgError::InvalidInputError(
            "Matrix rank computation failed: Tolerance must be non-negative".to_string(),
        ));
    }
    let (_u, s, _vt) = svd(a, false, workers)?;
    if s.is_empty() {
        return Ok(0);
    }
    // Default threshold scales machine epsilon by the matrix size and the
    // leading singular value.
    let threshold = tol.unwrap_or_else(|| {
        let max_dim = std::cmp::max(a.nrows(), a.ncols());
        F::from(max_dim).expect("Operation failed") * F::epsilon() * s[0]
    });
    Ok(s.iter().filter(|&&sigma| sigma > threshold).count())
}
/// Deprecated convenience wrapper: [`matrix_norm`] with `workers = None`
/// (no explicit worker-pool configuration).
#[deprecated(
    since = "0.1.0",
    note = "Use matrix_norm with workers parameter instead"
)]
#[allow(dead_code)]
pub fn matrix_norm_default<F>(a: &ArrayView2<F>, ord: &str) -> LinalgResult<F>
where
    F: Float + NumAssign + Sum + scirs2_core::ndarray::ScalarOperand + Send + Sync + 'static,
{
    matrix_norm(a, ord, None)
}
/// Deprecated convenience wrapper: [`cond`] with `workers = None`.
#[deprecated(since = "0.1.0", note = "Use cond with workers parameter instead")]
#[allow(dead_code)]
pub fn cond_default<F>(a: &ArrayView2<F>, p: Option<&str>) -> LinalgResult<F>
where
    F: Float + NumAssign + Sum + scirs2_core::ndarray::ScalarOperand + Send + Sync + 'static,
{
    cond(a, p, None)
}
/// Deprecated convenience wrapper: [`matrix_rank`] with `workers = None`.
#[deprecated(
    since = "0.1.0",
    note = "Use matrix_rank with workers parameter instead"
)]
#[allow(dead_code)]
pub fn matrix_rank_default<F>(a: &ArrayView2<F>, tol: Option<F>) -> LinalgResult<usize>
where
    F: Float + NumAssign + Sum + scirs2_core::ndarray::ScalarOperand + Send + Sync + 'static,
{
    matrix_rank(a, tol, None)
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use scirs2_core::ndarray::array;
    // Frobenius norm of [[1,2],[3,4]] is sqrt(1+4+9+16) = sqrt(30) ≈ 5.4772.
    #[test]
    fn testmatrix_norm_frobenius() {
        let a = array![[1.0, 2.0], [3.0, 4.0]];
        let norm = matrix_norm(&a.view(), "fro", None).expect("Operation failed");
        assert_relative_eq!(norm, 5.477225575051661, epsilon = 1e-10);
    }
    // 1-norm is the max absolute column sum: max(1+3, 2+4) = 6.
    #[test]
    fn testmatrix_norm_1() {
        let a = array![[1.0, 2.0], [3.0, 4.0]];
        let norm = matrix_norm(&a.view(), "1", None).expect("Operation failed");
        assert_relative_eq!(norm, 6.0);
    }
    // inf-norm is the max absolute row sum: max(1+2, 3+4) = 7.
    #[test]
    fn testmatrix_norm_inf() {
        let a = array![[1.0, 2.0], [3.0, 4.0]];
        let norm = matrix_norm(&a.view(), "inf", None).expect("Operation failed");
        assert_relative_eq!(norm, 7.0);
    }
    // L1 vector norm: |1| + |-2| + |3| = 6.
    #[test]
    fn test_vector_norm_1() {
        let x = array![1.0, -2.0, 3.0];
        let norm = vector_norm(&x.view(), 1).expect("Operation failed");
        assert_relative_eq!(norm, 6.0);
    }
    // Euclidean norm of the classic 3-4-5 triangle.
    #[test]
    fn test_vector_norm_2() {
        let x = array![3.0, 4.0];
        let norm = vector_norm(&x.view(), 2).expect("Operation failed");
        assert_relative_eq!(norm, 5.0);
    }
    // Infinity norm (ord = usize::MAX): largest absolute entry, |-5| = 5.
    #[test]
    fn test_vector_norm_inf() {
        let x = array![1.0, -5.0, 3.0];
        let norm = vector_norm(&x.view(), usize::MAX).expect("Operation failed");
        assert_relative_eq!(norm, 5.0);
    }
}