use crate::csr::CsrMatrix;
use crate::error::SparseResult;
use crate::linalg::interface::LinearOperator;
use crate::sparray::SparseArray;
use scirs2_core::ndarray::{Array1, ArrayView1};
use scirs2_core::numeric::{Float, NumAssign};
use scirs2_core::random::{Rng, RngExt};
use scirs2_core::SparseElement;
use std::fmt::Debug;
use std::iter::Sum;
use std::ops::{Add, Div, Mul, Sub};
#[allow(dead_code)]
/// Computes `exp(t * A) * v` via an Arnoldi (Krylov subspace) approximation
/// without ever forming `exp(t * A)` explicitly.
///
/// Builds an orthonormal basis `V` of the Krylov subspace together with the
/// projected upper-Hessenberg matrix `H`, then approximates
/// `exp(t*A) v ≈ ||v|| * V * exp(t*H) * e1`.
///
/// # Arguments
/// * `a` - square linear operator
/// * `v` - input vector; its length must equal the operator dimension
/// * `t` - scalar time/scale factor
/// * `m` - Krylov subspace dimension; defaults to `min(30, n - 1)`
/// * `tol` - breakdown tolerance; defaults to `1e-7`
///
/// # Errors
/// * `ValueError` if `a` is not square
/// * `DimensionMismatch` if `v.len()` differs from the operator's column count
pub fn expm_multiply<F>(
    a: &dyn LinearOperator<F>,
    v: &[F],
    t: F,
    m: Option<usize>,
    tol: Option<F>,
) -> SparseResult<Vec<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(crate::error::SparseError::ValueError(
            "Matrix must be square for expm_multiply".to_string(),
        ));
    }
    if v.len() != cols {
        return Err(crate::error::SparseError::DimensionMismatch {
            expected: cols,
            found: v.len(),
        });
    }
    let n = rows;
    // Krylov dimension capped at n - 1 so the basis cannot exceed the space.
    // NOTE(review): n == 0 (empty square operator with empty v) would
    // underflow `n - 1` here — confirm callers guarantee non-empty operators.
    let m = m.unwrap_or(30.min(n - 1));
    let tol = tol.unwrap_or(F::from(1e-7).expect("Failed to convert constant to float"));
    // exp(0 * A) * v == v.
    if t == F::sparse_zero() {
        return Ok(v.to_vec());
    }
    let v_norm = norm2(v);
    // The zero vector stays zero under any linear map.
    if v_norm == F::sparse_zero() {
        return Ok(vec![F::sparse_zero(); n]);
    }
    let v_normalized: Vec<F> = v.iter().map(|&vi| vi / v_norm).collect();
    // `v` holds the orthonormal Krylov basis vectors; `h` is the
    // (m + 1) x m upper-Hessenberg projection of A onto that basis.
    let mut v = vec![v_normalized.clone()]; let mut h = vec![vec![F::sparse_zero(); m]; m + 1];
    // Arnoldi iteration with modified Gram-Schmidt orthogonalization.
    for j in 0..m {
        let mut w = a.matvec(&v[j])?;
        for i in 0..=j {
            let h_ij = dot(&v[i], &w);
            h[i][j] = h_ij;
            for (k, w_val) in w.iter_mut().enumerate().take(n) {
                *w_val -= h_ij * v[i][k];
            }
        }
        let h_next = norm2(&w);
        h[j + 1][j] = h_next;
        // Happy breakdown: the Krylov subspace is (numerically) invariant,
        // so the approximation is already as good as it will get.
        if h_next.abs() < tol * F::from(100).expect("Failed to convert constant to float") {
            break;
        }
        let w_normalized: Vec<F> = w.iter().map(|&wi| wi / h_next).collect();
        v.push(w_normalized);
    }
    let actual_m = v.len() - 1;
    // One-dimensional subspace: A v = lambda v, so the result reduces to a
    // scalar exponential times the normalized start vector.
    if actual_m == 0 {
        let lambda = h[0][0];
        let exp_t_lambda = (t * lambda).exp();
        let mut y = vec![F::sparse_zero(); n];
        for (j, y_val) in y.iter_mut().enumerate().take(n) {
            *y_val = v_norm * exp_t_lambda * v[0][j];
        }
        return Ok(y);
    }
    // Extract the leading actual_m x actual_m square part of H.
    let mut h_square = vec![vec![F::sparse_zero(); actual_m]; actual_m];
    for (i, h_row) in h.iter().enumerate().take(actual_m) {
        for (j, &h_val) in h_row.iter().enumerate().take(actual_m) {
            h_square[i][j] = h_val;
        }
    }
    let exp_t_h = matrix_exponential_dense(&h_square, t)?;
    // y = ||v|| * V * exp(tH) * e1 (first column of exp(tH)).
    let mut y = vec![F::sparse_zero(); n];
    for (i, v_row) in v.iter().enumerate().take(actual_m) {
        let coeff = v_norm * exp_t_h[i][0];
        for (j, y_val) in y.iter_mut().enumerate().take(n) {
            *y_val += coeff * v_row[j];
        }
    }
    Ok(y)
}
#[allow(dead_code)]
/// Computes `exp(t * H)` for a small dense matrix by scaling-and-squaring:
/// scale `t` down until `||t*H||_inf <= 1`, apply a Pade approximation at the
/// scaled step, then undo the scaling with repeated squaring.
///
/// Cleanup: removed the unused local `_n` and hoisted the repeated
/// `F::from(2)` conversions out of the scaling loop.
///
/// # Errors
/// Propagates failures from the Pade solve (e.g. a singular denominator).
fn matrix_exponential_dense<F>(h: &[Vec<F>], t: F) -> SparseResult<Vec<Vec<F>>>
where
    F: Float + NumAssign + SparseElement + 'static,
{
    let two = F::from(2).expect("Failed to convert constant to float");
    // Choose s so that ||t * H / 2^s||_inf <= 1.
    let mut squarings = 0usize;
    let mut scale = F::sparse_one();
    let mut scaled_norm = (t * matrix_norm_inf(h)).abs();
    while scaled_norm > F::sparse_one() {
        squarings += 1;
        scale *= two;
        scaled_norm /= two;
    }
    // Pade approximation at the scaled time step.
    let mut exp_h = pade_approximation(h, t / scale, 6)?;
    // Undo the scaling: exp(tH) = (exp(tH / 2^s))^(2^s).
    for _ in 0..squarings {
        exp_h = matrix_multiply_dense(&exp_h, &exp_h)?;
    }
    Ok(exp_h)
}
#[allow(dead_code)]
/// Evaluates a Pade-style rational approximation of `exp(t * A)` for a small
/// dense matrix, from precomputed powers `(tA)^0 .. (tA)^order`.
///
/// The result is `num(tA) * den(tA)^{-1}`, obtained by solving
/// `den * X = num` rather than forming an explicit inverse.
///
/// NOTE(review): the coefficient tables below do not match the standard
/// diagonal Pade(6,6) coefficients of the exponential
/// (1, 1/2, 5/44, 1/66, 1/792, 1/15840, 1/665280) — confirm these values
/// against the intended reference before relying on high accuracy.
fn pade_approximation<F>(a: &[Vec<F>], t: F, order: usize) -> SparseResult<Vec<Vec<F>>>
where
    F: Float + NumAssign + SparseElement + 'static,
{
    let n = a.len();
    // t_a = t * A.
    let mut t_a = vec![vec![F::sparse_zero(); n]; n];
    for i in 0..n {
        for j in 0..n {
            t_a[i][j] = t * a[i][j];
        }
    }
    // powers[p] = (tA)^p for p = 0..=order.
    let mut powers = vec![identity_matrix(n)];
    powers.push(t_a.clone());
    for p in 2..=order {
        let prev = &powers[p - 1];
        let next = matrix_multiply_dense(&t_a, prev)?;
        powers.push(next);
    }
    // Numerator polynomial coefficients.
    let num_coeffs = [
        F::sparse_one(),
        F::from(0.5).expect("Failed to convert constant to float"),
        F::from(3.0 / 26.0).expect("Failed to convert to float"),
        F::from(1.0 / 312.0).expect("Failed to convert to float"),
        F::from(1.0 / 11232.0).expect("Failed to convert to float"),
        F::from(1.0 / 506880.0).expect("Failed to convert to float"),
        F::from(1.0 / 18811680.0).expect("Failed to convert to float"),
    ];
    // Denominator coefficients: same magnitudes with alternating signs.
    let den_coeffs = [
        F::sparse_one(),
        F::from(-0.5).expect("Failed to convert constant to float"),
        F::from(3.0 / 26.0).expect("Failed to convert to float"),
        F::from(-1.0 / 312.0).expect("Failed to convert to float"),
        F::from(1.0 / 11232.0).expect("Failed to convert to float"),
        F::from(-1.0 / 506880.0).expect("Failed to convert to float"),
        F::from(1.0 / 18811680.0).expect("Failed to convert to float"),
    ];
    // num = sum_k num_coeffs[k] * (tA)^k, and likewise for den.
    let mut num = zero_matrix(n);
    let mut den = zero_matrix(n);
    for (i, coeff) in num_coeffs.iter().enumerate().take(order + 1) {
        add_scaled_matrix(&mut num, &powers[i], *coeff);
    }
    for (i, coeff) in den_coeffs.iter().enumerate().take(order + 1) {
        add_scaled_matrix(&mut den, &powers[i], *coeff);
    }
    // Solve den * X = num for X ≈ exp(tA).
    solve_matrix_equation(&den, &num)
}
#[allow(dead_code)]
/// Estimates the 2-norm (largest singular value) of a CSR matrix by power
/// iteration on `A^T A`: the iterate `lambda` converges towards
/// `sigma_max(A)^2`, and `sqrt(lambda)` is returned.
///
/// Fix: `a.transpose()` was rebuilt inside the power-iteration loop on every
/// step; it is loop-invariant and is now computed once up front.
///
/// # Arguments
/// * `tol` - relative-change convergence tolerance (default `1e-6`)
/// * `maxiter` - maximum power-iteration steps (default 100)
pub fn twonormest<F>(a: &CsrMatrix<F>, tol: Option<F>, maxiter: Option<usize>) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let (m, n) = (a.rows(), a.cols());
    let tol = tol.unwrap_or_else(|| F::from(1e-6).expect("Failed to convert constant to float"));
    let maxiter = maxiter.unwrap_or(100);
    if m == 0 || n == 0 {
        return Ok(F::sparse_zero());
    }
    // Tiny matrices: compute the norm (near-)exactly instead.
    if n <= 4 && m <= 4 {
        return exact_twonorm(a);
    }
    // Random start vector, normalized to unit length.
    let mut rng = scirs2_core::random::rng();
    let mut v: Vec<F> = (0..n)
        .map(|_| F::from(rng.random::<f64>() - 0.5).expect("Operation failed"))
        .collect();
    let v_norm = norm2(&v);
    if v_norm == F::sparse_zero() {
        return Ok(F::sparse_zero());
    }
    for v_elem in v.iter_mut() {
        *v_elem /= v_norm;
    }
    // Loop-invariant: build the transpose once instead of once per iteration.
    let a_t = a.transpose();
    let mut lambda = F::sparse_zero();
    let mut lambda_old = F::sparse_zero();
    for iter in 0..maxiter {
        // w = A v, computed row-by-row from the CSR layout.
        let mut w = vec![F::sparse_zero(); m];
        for (row, w_val) in w.iter_mut().enumerate().take(m) {
            let row_range = a.row_range(row);
            let row_indices = &a.indices[row_range.clone()];
            let row_data = &a.data[row_range];
            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * v[col];
            }
            *w_val = sum;
        }
        // u = A^T w = (A^T A) v.
        let mut u = vec![F::sparse_zero(); n];
        for (row, u_val) in u.iter_mut().enumerate().take(a_t.rows()) {
            let row_range = a_t.row_range(row);
            let row_indices = &a_t.indices[row_range.clone()];
            let row_data = &a_t.data[row_range];
            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * w[col];
            }
            *u_val = sum;
        }
        // Rayleigh quotient estimate of sigma_max^2.
        lambda = dot(&v, &u);
        let u_norm = norm2(&u);
        if u_norm == F::sparse_zero() {
            break;
        }
        for (i, u_val) in u.iter().enumerate() {
            v[i] = *u_val / u_norm;
        }
        // Relative-change stopping test (skipped on the first iteration).
        if iter > 0 {
            let rel_change = if lambda_old != F::sparse_zero() {
                ((lambda - lambda_old) / lambda_old).abs()
            } else {
                lambda.abs()
            };
            if rel_change < tol {
                break;
            }
        }
        lambda_old = lambda;
    }
    Ok(lambda.sqrt())
}
#[allow(dead_code)]
/// Estimates the condition number of a square CSR matrix in the requested
/// norm as `||A|| * ||A^{-1}||`.
///
/// # Arguments
/// * `norm_type` - `"1"` or `"2"` (default `"2"`)
/// * `tol`, `maxiter` - forwarded to the iterative norm estimators
///
/// Returns infinity when the estimated norm of `A` is zero.
///
/// # Errors
/// `ValueError` for a non-square matrix or an unrecognized `norm_type`.
pub fn condest<F>(
    a: &CsrMatrix<F>,
    norm_type: Option<&str>,
    tol: Option<F>,
    maxiter: Option<usize>,
) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let (rows, cols) = (a.rows(), a.cols());
    if rows != cols {
        return Err(crate::error::SparseError::ValueError(
            "Condition number estimation requires a square matrix".to_string(),
        ));
    }
    let norm_kind = norm_type.unwrap_or("2");
    let tolerance =
        tol.unwrap_or_else(|| F::from(1e-6).expect("Failed to convert constant to float"));
    let iter_limit = maxiter.unwrap_or(100);
    // ||A|| in the requested norm.
    let norm_a = match norm_kind {
        "2" => twonormest(a, Some(tolerance), Some(iter_limit))?,
        "1" => onenormest(a, None, None)?,
        _ => {
            return Err(crate::error::SparseError::ValueError(
                "norm_type must be '1' or '2'".to_string(),
            ))
        }
    };
    if norm_a == F::sparse_zero() {
        return Ok(F::infinity());
    }
    // kappa = ||A|| * ||A^{-1}||.
    estimate_inverse_norm(a, norm_kind, tolerance, iter_limit).map(|inv_norm| norm_a * inv_norm)
}
#[allow(dead_code)]
/// Estimates `||A^{-1}||` as `1 / sigma_min(A)`, returning infinity when the
/// smallest singular value estimate is zero (singular matrix).
///
/// Cleanup: the previous `norm_type == "1"` branch and the fall-through path
/// were byte-identical, so the dead duplication was collapsed; the parameter
/// is kept (renamed `_norm_type`) for signature compatibility.
fn estimate_inverse_norm<F>(
    a: &CsrMatrix<F>,
    _norm_type: &str,
    tol: F,
    maxiter: usize,
) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    estimate_smallest_singular_value(a, tol, maxiter).map(|sigma_min| {
        if sigma_min == F::sparse_zero() {
            F::infinity()
        } else {
            F::sparse_one() / sigma_min
        }
    })
}
#[allow(dead_code)]
/// Estimates `sigma_min(A)`: alternates approximate `(A^T A)`-solve steps
/// with Rayleigh-quotient evaluations; the square root of the converged
/// quotient approximates the smallest singular value.
fn estimate_smallest_singular_value<F>(a: &CsrMatrix<F>, tol: F, maxiter: usize) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let n = a.cols();
    // Random start direction, normalized to unit length.
    let mut rng = scirs2_core::random::rng();
    let mut direction: Vec<F> = (0..n)
        .map(|_| F::from(rng.random::<f64>() - 0.5).expect("Operation failed"))
        .collect();
    let start_norm = norm2(&direction);
    if start_norm == F::sparse_zero() {
        return Ok(F::sparse_zero());
    }
    direction.iter_mut().for_each(|d| *d = *d / start_norm);
    let mut rayleigh = F::sparse_zero();
    let mut rayleigh_prev = F::infinity();
    for step in 0..maxiter {
        // One approximate inverse-iteration step on A^T A.
        let next = solve_ata_approximately(a, &direction, 5)?;
        let next_norm = norm2(&next);
        if next_norm == F::sparse_zero() {
            break;
        }
        for (slot, &value) in direction.iter_mut().zip(next.iter()) {
            *slot = value / next_norm;
        }
        rayleigh = estimate_rayleigh_quotient(a, &direction)?;
        // Relative-change stopping test, skipped on the first step.
        if step > 0 {
            let change = if rayleigh != F::sparse_zero() {
                ((rayleigh - rayleigh_prev) / rayleigh).abs()
            } else {
                F::infinity()
            };
            if change < tol {
                break;
            }
        }
        rayleigh_prev = rayleigh;
    }
    Ok(rayleigh.sqrt())
}
#[allow(dead_code)]
/// Approximately solves `(A^T A) x = b` with a fixed-step gradient /
/// Richardson iteration: `x <- x - alpha * (A^T A x - b)`, starting at
/// `x = b`.
///
/// Fixes: `a.transpose()` was rebuilt and `alpha` re-converted on every
/// iteration; both are loop-invariant and are now hoisted. Also split the
/// former two-statements-on-one-line formatting.
///
/// NOTE(review): the fixed step `alpha = 0.1` only converges when the
/// spectrum of `A^T A` is suitably bounded — confirm against expected inputs.
fn solve_ata_approximately<F>(
    a: &CsrMatrix<F>,
    b: &[F],
    num_iterations: usize,
) -> SparseResult<Vec<F>>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let n = a.cols();
    let mut x = b.to_vec();
    // Loop invariants hoisted out of the iteration.
    let a_t = a.transpose();
    let alpha = F::from(0.1).expect("Failed to convert constant to float");
    for _ in 0..num_iterations {
        // ax = A x.
        let mut ax = vec![F::sparse_zero(); a.rows()];
        for (row, ax_val) in ax.iter_mut().enumerate().take(a.rows()) {
            let row_range = a.row_range(row);
            let row_indices = &a.indices[row_range.clone()];
            let row_data = &a.data[row_range];
            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * x[col];
            }
            *ax_val = sum;
        }
        // ata_x = A^T (A x).
        let mut ata_x = vec![F::sparse_zero(); n];
        for (row, ata_x_val) in ata_x.iter_mut().enumerate().take(a_t.rows()) {
            let row_range = a_t.row_range(row);
            let row_indices = &a_t.indices[row_range.clone()];
            let row_data = &a_t.data[row_range];
            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * ax[col];
            }
            *ata_x_val = sum;
        }
        // Gradient step towards the normal-equations solution.
        for i in 0..n {
            x[i] -= alpha * (ata_x[i] - b[i]);
        }
    }
    Ok(x)
}
#[allow(dead_code)]
/// Returns `v^T (A^T A) v`, the Rayleigh quotient of `A^T A` at `v`
/// (meaningful as a quotient when `v` is already normalized).
fn estimate_rayleigh_quotient<F>(a: &CsrMatrix<F>, v: &[F]) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    // av = A v, one CSR row at a time.
    let m = a.rows();
    let mut av = vec![F::sparse_zero(); m];
    for row in 0..m {
        let range = a.row_range(row);
        let cols = &a.indices[range.clone()];
        let vals = &a.data[range];
        let mut acc = F::sparse_zero();
        for (&col, &val) in cols.iter().zip(vals.iter()) {
            acc += val * v[col];
        }
        av[row] = acc;
    }
    // ata_v = A^T (A v).
    let a_t = a.transpose();
    let mut ata_v = vec![F::sparse_zero(); a.cols()];
    for row in 0..a_t.rows() {
        let range = a_t.row_range(row);
        let cols = &a_t.indices[range.clone()];
        let vals = &a_t.data[range];
        let mut acc = F::sparse_zero();
        for (&col, &val) in cols.iter().zip(vals.iter()) {
            acc += val * av[col];
        }
        ata_v[row] = acc;
    }
    Ok(dot(v, &ata_v))
}
#[allow(dead_code)]
/// Computes the 2-norm (largest singular value) of a small matrix.
///
/// * Empty matrix: zero.
/// * Single row or column: the matrix has rank at most one, so its spectral
///   norm equals the Euclidean norm of its entries (`A^T A = v v^T` has top
///   eigenvalue `||v||^2`). Previously this returned the largest absolute
///   entry, which underestimates the norm whenever more than one entry is
///   nonzero.
/// * Otherwise: delegates to `twonormest` with a tight tolerance.
fn exact_twonorm<F>(a: &CsrMatrix<F>) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let (m, n) = (a.rows(), a.cols());
    let min_dim = m.min(n);
    if min_dim == 0 {
        return Ok(F::sparse_zero());
    }
    if min_dim == 1 {
        // Rank-one case: ||A||_2 = sqrt(sum of squared entries).
        let mut sum_sq = F::sparse_zero();
        for i in 0..m {
            for j in 0..n {
                let val = a.get(i, j);
                sum_sq += val * val;
            }
        }
        return Ok(sum_sq.sqrt());
    }
    twonormest(
        a,
        Some(F::from(1e-12).expect("Failed to convert constant to float")),
        Some(1000),
    )
}
#[allow(dead_code)]
/// Estimates `||A||_1` with a block power-iteration scheme in the spirit of
/// Higham & Tisseur's `onenormest`.
///
/// Fix: `a.transpose()` was rebuilt on every outer iteration; it is
/// loop-invariant and is now computed once up front.
///
/// # Arguments
/// * `t` - number of probe vectors (default 2)
/// * `itmax` - maximum outer iterations (default 5)
pub fn onenormest<F>(a: &CsrMatrix<F>, t: Option<usize>, itmax: Option<usize>) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let n = a.cols();
    let t = t.unwrap_or(2);
    let itmax = itmax.unwrap_or(5);
    if n == 0 {
        return Ok(F::sparse_zero());
    }
    // Tiny matrices: compute the exact maximum column sum directly.
    if n <= 4 {
        return exact_onenorm(a);
    }
    // Probe block: random +-1 vectors, first column forced to all ones.
    let mut rng = scirs2_core::random::rng();
    let mut x = vec![vec![F::sparse_zero(); n]; t];
    for x_j in x.iter_mut().take(t) {
        for x_elem in x_j.iter_mut().take(n) {
            *x_elem = if rng.random::<bool>() {
                F::sparse_one()
            } else {
                -F::sparse_one()
            };
        }
    }
    for i in 0..n {
        x[0][i] = F::sparse_one();
    }
    let mut est = F::sparse_zero();
    let mut est_old = F::sparse_zero();
    let mut ind_best = vec![0; n];
    // `s` marks indices already used as probes across iterations.
    let mut s = vec![false; n];
    // Loop-invariant: build the transpose once instead of per iteration.
    let a_t = a.transpose();
    for _ in 0..itmax {
        // y_j = A^T x_j for each probe vector.
        let mut y = vec![vec![F::sparse_zero(); n]; t];
        for j in 0..t {
            let mut y_j = vec![F::sparse_zero(); n];
            for (row, y_val) in y_j.iter_mut().enumerate().take(a_t.rows()) {
                let row_range = a_t.row_range(row);
                let row_indices = &a_t.indices[row_range.clone()];
                let row_data = &a_t.data[row_range];
                let mut sum = F::sparse_zero();
                for (col_idx, &col) in row_indices.iter().enumerate() {
                    sum += row_data[col_idx] * x[j][col];
                }
                *y_val = sum;
            }
            y[j] = y_j;
        }
        // Current estimate: the largest 1-norm among the y_j.
        let mut max_norm = F::sparse_zero();
        let mut max_col = 0;
        for (j, y_vec) in y.iter().enumerate().take(t) {
            let norm = onenorm_vec(y_vec);
            if norm > max_norm {
                max_norm = norm;
                max_col = j;
            }
        }
        est = max_norm;
        // No improvement over the previous iteration: stop.
        if est <= est_old {
            break;
        }
        est_old = est;
        // Pick the t largest |y| entries not yet used as probe indices.
        let mut abs_y: Vec<(F, usize)> = y[max_col]
            .iter()
            .enumerate()
            .map(|(i, &y_val)| (y_val.abs(), i))
            .collect();
        abs_y.sort_by(|a, b| b.0.partial_cmp(&a.0).expect("Operation failed"));
        let mut count = 0;
        for (_, idx) in abs_y {
            if !s[idx] {
                ind_best[count] = idx;
                s[idx] = true;
                count += 1;
                if count >= t {
                    break;
                }
            }
        }
        // Fewer than t unused indices remain: nothing left to explore.
        if count < t {
            break;
        }
        // z = sign(y); w = A z refines the estimate from the row side.
        let z: Vec<F> = y[max_col]
            .iter()
            .map(|&y_val| {
                if y_val >= F::sparse_zero() {
                    F::sparse_one()
                } else {
                    -F::sparse_one()
                }
            })
            .collect();
        let mut w = vec![F::sparse_zero(); a.rows()];
        for (row, w_val) in w.iter_mut().enumerate().take(a.rows()) {
            let row_range = a.row_range(row);
            let row_indices = &a.indices[row_range.clone()];
            let row_data = &a.data[row_range];
            let mut sum = F::sparse_zero();
            for (col_idx, &col) in row_indices.iter().enumerate() {
                sum += row_data[col_idx] * z[col];
            }
            *w_val = sum;
        }
        // Next probes: unit vectors at the selected indices.
        for j in 0..t {
            for i in 0..n {
                x[j][i] = F::sparse_zero();
            }
            x[j][ind_best[j]] = F::sparse_one();
        }
        let w_norm = onenorm_vec(&w);
        if w_norm > est {
            est = w_norm;
        }
    }
    Ok(est)
}
#[allow(dead_code)]
/// Computes `||A||_1` exactly: the maximum absolute column sum.
///
/// Performance fix: instead of probing every `(i, j)` position through
/// `a.get` (O(rows * cols)), accumulate the per-column sums in a single pass
/// over the stored CSR entries (O(nnz)). Explicitly stored zeros contribute
/// zero either way, so the result is unchanged.
fn exact_onenorm<F>(a: &CsrMatrix<F>) -> SparseResult<F>
where
    F: Float + NumAssign + Sum + SparseElement + 'static + Debug,
{
    let n = a.cols();
    let mut col_sums = vec![F::sparse_zero(); n];
    for row in 0..a.rows() {
        let row_range = a.row_range(row);
        let row_indices = &a.indices[row_range.clone()];
        let row_data = &a.data[row_range];
        for (col_idx, &col) in row_indices.iter().enumerate() {
            col_sums[col] += row_data[col_idx].abs();
        }
    }
    let mut max_norm = F::sparse_zero();
    for &col_sum in &col_sums {
        if col_sum > max_norm {
            max_norm = col_sum;
        }
    }
    Ok(max_norm)
}
#[allow(dead_code)]
/// Vector 1-norm: the sum of absolute values.
fn onenorm_vec<F: Float + Sum>(x: &[F]) -> F {
    x.iter().fold(F::zero(), |acc, &xi| acc + xi.abs())
}
#[allow(dead_code)]
/// Dot product of two equal-length slices.
fn dot<F: Float + Sum>(x: &[F], y: &[F]) -> F {
    let mut acc = F::zero();
    for (&xi, &yi) in x.iter().zip(y.iter()) {
        acc = acc + xi * yi;
    }
    acc
}
#[allow(dead_code)]
fn norm2<F: Float + Sum>(x: &[F]) -> F {
dot(x, x).sqrt()
}
#[allow(dead_code)]
/// Infinity norm of a dense matrix: the maximum absolute row sum.
fn matrix_norm_inf<F: Float + SparseElement>(a: &[Vec<F>]) -> F {
    a.iter()
        .map(|row| row.iter().fold(F::sparse_zero(), |acc, &x| acc + x.abs()))
        .fold(F::sparse_zero(), |best, row_sum| {
            if row_sum > best {
                row_sum
            } else {
                best
            }
        })
}
#[allow(dead_code)]
/// Builds the n x n identity matrix as nested vectors.
fn identity_matrix<F: Float + SparseElement>(n: usize) -> Vec<Vec<F>> {
    (0..n)
        .map(|i| {
            let mut row = vec![F::sparse_zero(); n];
            row[i] = F::sparse_one();
            row
        })
        .collect()
}
#[allow(dead_code)]
/// Builds an n x n matrix of zeros as nested vectors.
fn zero_matrix<F: Float + SparseElement>(n: usize) -> Vec<Vec<F>> {
    (0..n).map(|_| vec![F::sparse_zero(); n]).collect()
}
#[allow(dead_code)]
/// In-place `a += alpha * b`, elementwise; assumes matching square shapes
/// (all callers pass n x n matrices of equal size).
fn add_scaled_matrix<F: Float + NumAssign>(a: &mut [Vec<F>], b: &[Vec<F>], alpha: F) {
    for (a_row, b_row) in a.iter_mut().zip(b.iter()) {
        for (a_val, &b_val) in a_row.iter_mut().zip(b_row.iter()) {
            *a_val += alpha * b_val;
        }
    }
}
#[allow(dead_code)]
/// Dense n x n matrix product `C = A * B` (naive triple loop; the inner `k`
/// accumulation order matches the original implementation exactly).
fn matrix_multiply_dense<F: Float + NumAssign + SparseElement>(
    a: &[Vec<F>],
    b: &[Vec<F>],
) -> SparseResult<Vec<Vec<F>>> {
    let n = a.len();
    let mut product = vec![vec![F::sparse_zero(); n]; n];
    for i in 0..n {
        for j in 0..n {
            let mut acc = F::sparse_zero();
            for k in 0..n {
                acc += a[i][k] * b[k][j];
            }
            product[i][j] = acc;
        }
    }
    Ok(product)
}
#[allow(dead_code)]
/// Solves the matrix equation `A X = B` for `X` via LU factorization.
///
/// Performs Doolittle LU factorization (`A = L U` with unit-diagonal `L`),
/// then forward substitution (`L Y = B`) and back substitution (`U X = Y`)
/// one right-hand-side column at a time.
///
/// NOTE(review): no row pivoting is performed, so a well-conditioned matrix
/// with a (near-)zero leading pivot is reported as singular — confirm this is
/// acceptable for the intended (Pade denominator) inputs.
///
/// # Errors
/// `SingularMatrix` when a pivot magnitude falls below `F::epsilon()`.
fn solve_matrix_equation<F: Float + NumAssign + SparseElement>(
    a: &[Vec<F>],
    b: &[Vec<F>],
) -> SparseResult<Vec<Vec<F>>> {
    let n = a.len();
    // L starts as the identity; U starts as a copy of A.
    let mut l = vec![vec![F::sparse_zero(); n]; n];
    let mut u = a.to_vec();
    for (i, l_row) in l.iter_mut().enumerate().take(n) {
        l_row[i] = F::sparse_one();
    }
    // Gaussian elimination without pivoting.
    if n > 1 {
        for k in 0..n - 1 {
            for i in k + 1..n {
                if u[k][k].abs() < F::epsilon() {
                    return Err(crate::error::SparseError::SingularMatrix(
                        "Matrix is singular".to_string(),
                    ));
                }
                let factor = u[i][k] / u[k][k];
                l[i][k] = factor;
                for j in k..n {
                    u[i][j] = u[i][j] - factor * u[k][j];
                }
            }
        }
    }
    // Forward substitution: L Y = B (L has unit diagonal, so no division).
    let mut y = vec![vec![F::sparse_zero(); n]; n];
    for j in 0..n {
        for i in 0..n {
            let mut sum = b[i][j];
            for (k, &l_val) in l[i].iter().enumerate().take(i) {
                sum -= l_val * y[k][j];
            }
            y[i][j] = sum;
        }
    }
    // Back substitution: U X = Y.
    let mut x = vec![vec![F::sparse_zero(); n]; n];
    for j in 0..n {
        for i in (0..n).rev() {
            let mut sum = y[i][j];
            for (k, &u_val) in u[i].iter().enumerate().skip(i + 1).take(n - i - 1) {
                sum -= u_val * x[k][j];
            }
            if u[i][i].abs() < F::epsilon() {
                return Err(crate::error::SparseError::SingularMatrix(
                    "Matrix is singular".to_string(),
                ));
            }
            x[i][j] = sum / u[i][i];
        }
    }
    Ok(x)
}
#[allow(dead_code)]
/// Estimates `||A||_2` for a sparse array by power iteration on `A^T A`,
/// optionally starting from a caller-supplied vector.
///
/// Cleanup: removed a `_converged` flag that was written but never read, and
/// renamed the misleading `_guess` binding (it is used).
///
/// # Arguments
/// * `tol` - relative-change convergence tolerance (default `1e-8`)
/// * `maxiter` - maximum power-iteration steps (default 100)
/// * `initial_guess` - optional start vector of length `n`
///
/// # Errors
/// `DimensionMismatch` when `initial_guess` has the wrong length.
pub fn twonormest_enhanced<T, S>(
    a: &S,
    tol: Option<T>,
    maxiter: Option<usize>,
    initial_guess: Option<ArrayView1<T>>,
) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (m, n) = a.shape();
    let tol = tol.unwrap_or_else(|| T::from(1e-8).expect("Operation failed"));
    let maxiter = maxiter.unwrap_or(100);
    if m == 0 || n == 0 {
        return Ok(T::sparse_zero());
    }
    // Tiny matrices: compute the norm (near-)exactly instead.
    if n <= 4 && m <= 4 {
        return exact_twonorm_enhanced(a);
    }
    // Start vector: caller-supplied (validated) or random.
    let mut v = match initial_guess {
        Some(guess) => {
            if guess.len() != n {
                return Err(crate::error::SparseError::DimensionMismatch {
                    expected: n,
                    found: guess.len(),
                });
            }
            guess.to_owned()
        }
        None => {
            let mut rng = scirs2_core::random::rng();
            let mut v_arr = Array1::zeros(n);
            for i in 0..n {
                v_arr[i] = T::from(rng.random::<f64>() - 0.5).expect("Operation failed");
            }
            v_arr
        }
    };
    // Normalize the start vector.
    let v_norm = (v.iter().map(|&x| x * x).sum::<T>()).sqrt();
    if v_norm == T::sparse_zero() {
        return Ok(T::sparse_zero());
    }
    for i in 0..n {
        v[i] /= v_norm;
    }
    let mut lambda = T::sparse_zero();
    let mut lambda_old = T::sparse_zero();
    // Power iteration: lambda converges towards sigma_max(A)^2.
    for iter in 0..maxiter {
        let w = sparse_matvec(a, &v.view())?;
        let u = sparse_matvec_transpose(a, &w.view())?;
        lambda = v.iter().zip(u.iter()).map(|(&vi, &ui)| vi * ui).sum();
        let u_norm = (u.iter().map(|&x| x * x).sum::<T>()).sqrt();
        if u_norm == T::sparse_zero() {
            break;
        }
        for i in 0..n {
            v[i] = u[i] / u_norm;
        }
        // Relative-change stopping test (skipped on the first iteration).
        if iter > 0 {
            let rel_change = if lambda_old != T::sparse_zero() {
                ((lambda - lambda_old) / lambda_old).abs()
            } else {
                lambda.abs()
            };
            if rel_change < tol {
                break;
            }
        }
        lambda_old = lambda;
    }
    Ok(lambda.sqrt())
}
#[allow(dead_code)]
/// Estimates the condition number of a square sparse array in the requested
/// norm ("1" or "2", default "2") as `||A|| * ||A^{-1}||`.
///
/// Returns infinity when the estimated norm of `A` is zero.
///
/// # Errors
/// `ValueError` for a non-square array or an unrecognized `norm_type`.
pub fn condest_enhanced<T, S>(
    a: &S,
    norm_type: Option<&str>,
    tol: Option<T>,
    maxiter: Option<usize>,
) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (rows, cols) = a.shape();
    if rows != cols {
        return Err(crate::error::SparseError::ValueError(
            "Condition number estimation requires a square matrix".to_string(),
        ));
    }
    let norm_kind = norm_type.unwrap_or("2");
    let tolerance = tol.unwrap_or_else(|| T::from(1e-8).expect("Operation failed"));
    let iter_limit = maxiter.unwrap_or(100);
    // ||A|| in the requested norm.
    let norm_a = match norm_kind {
        "1" => onenormest_enhanced(a, None, None)?,
        "2" => twonormest_enhanced(a, Some(tolerance), Some(iter_limit), None)?,
        _ => {
            return Err(crate::error::SparseError::ValueError(
                "norm_type must be '1' or '2'".to_string(),
            ))
        }
    };
    if norm_a == T::sparse_zero() {
        return Ok(T::infinity());
    }
    // kappa = ||A|| * ||A^{-1}||.
    estimate_inverse_norm_enhanced(a, norm_kind, tolerance, iter_limit)
        .map(|inv_norm| norm_a * inv_norm)
}
#[allow(dead_code)]
/// Estimates `||A||_1` for a sparse array with a randomized block iteration.
///
/// Each round applies `A^T` to `t` probe vectors of +-1 entries and takes the
/// largest resulting 1-norm as the estimate, stopping when it no longer
/// improves.
///
/// NOTE(review): after the first round, the non-leading probes are
/// re-randomized rather than set to unit vectors at the best indices as in
/// the classic Higham–Tisseur scheme — confirm this simplification is
/// intentional.
///
/// # Arguments
/// * `t` - number of probe vectors (default 2)
/// * `itmax` - maximum iterations (default 5)
pub fn onenormest_enhanced<T, S>(a: &S, t: Option<usize>, itmax: Option<usize>) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static,
    S: SparseArray<T>,
{
    let (_m, n) = a.shape();
    let t = t.unwrap_or(2);
    let itmax = itmax.unwrap_or(5);
    if n == 0 {
        return Ok(T::sparse_zero());
    }
    // Tiny matrices: compute the exact maximum column sum directly.
    if n <= 4 {
        return exact_onenorm_enhanced(a);
    }
    // Probe block: random +-1 vectors, first vector forced to all ones.
    let mut rng = scirs2_core::random::rng();
    let mut x_vectors = Vec::with_capacity(t);
    for _ in 0..t {
        let mut x = Array1::zeros(n);
        for i in 0..n {
            x[i] = if rng.random::<bool>() {
                T::sparse_one()
            } else {
                -T::sparse_one()
            };
        }
        x_vectors.push(x);
    }
    if !x_vectors.is_empty() {
        for i in 0..n {
            x_vectors[0][i] = T::sparse_one();
        }
    }
    let mut est = T::sparse_zero();
    let mut est_old = T::sparse_zero();
    for _iter in 0..itmax {
        // y_j = A^T x_j for each probe vector.
        let mut y_vectors = Vec::with_capacity(t);
        for x in &x_vectors {
            let y = sparse_matvec_transpose(a, &x.view())?;
            y_vectors.push(y);
        }
        // Current estimate: the largest 1-norm among the y_j.
        let mut max_norm = T::sparse_zero();
        let mut max_col = 0;
        for (j, y) in y_vectors.iter().enumerate() {
            let norm = y.iter().map(|&val| val.abs()).sum();
            if norm > max_norm {
                max_norm = norm;
                max_col = j;
            }
        }
        est = max_norm;
        // No improvement over the previous round: stop.
        if est <= est_old {
            break;
        }
        est_old = est;
        // Next round: first probe is sign(y_best), the rest are re-randomized.
        x_vectors.clear();
        let mut x = Array1::zeros(n);
        for i in 0..n {
            x[i] = if y_vectors[max_col][i] >= T::sparse_zero() {
                T::sparse_one()
            } else {
                -T::sparse_one()
            };
        }
        x_vectors.push(x);
        for _ in 1..t {
            let mut x = Array1::zeros(n);
            for i in 0..n {
                x[i] = if rng.random::<bool>() {
                    T::sparse_one()
                } else {
                    -T::sparse_one()
                };
            }
            x_vectors.push(x);
        }
    }
    Ok(est)
}
#[allow(dead_code)]
/// Estimates `||A^{-1}||` as `1 / sigma_min(A)`; infinity when the smallest
/// singular value estimate is zero. The `norm_type` argument is currently
/// ignored (both norms use the same sigma_min-based estimate).
fn estimate_inverse_norm_enhanced<T, S>(
    a: &S,
    norm_type: &str,
    tol: T,
    maxiter: usize,
) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let _ = norm_type;
    match estimate_smallest_singular_value_enhanced(a, tol, maxiter)? {
        sigma if sigma == T::sparse_zero() => Ok(T::infinity()),
        sigma => Ok(T::sparse_one() / sigma),
    }
}
#[allow(dead_code)]
/// Estimates `sigma_min(A)` for a sparse array: alternates approximate
/// `(A^T A)`-solve steps with Rayleigh-quotient evaluations and returns the
/// square root of the converged quotient.
fn estimate_smallest_singular_value_enhanced<T, S>(a: &S, tol: T, maxiter: usize) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (_, n) = a.shape();
    // Random start direction, normalized to unit length.
    let mut rng = scirs2_core::random::rng();
    let mut v = Array1::zeros(n);
    for slot in v.iter_mut() {
        *slot = T::from(rng.random::<f64>() - 0.5).expect("Operation failed");
    }
    let start_norm = v
        .iter()
        .fold(T::sparse_zero(), |acc, &x| acc + x * x)
        .sqrt();
    if start_norm == T::sparse_zero() {
        return Ok(T::sparse_zero());
    }
    v.iter_mut().for_each(|x| *x = *x / start_norm);
    let mut quotient = T::sparse_zero();
    let mut quotient_prev = T::infinity();
    for step in 0..maxiter {
        // One approximate inverse-iteration step on A^T A.
        let u = solve_ata_system_enhanced(a, &v, 10)?;
        let u_norm = u
            .iter()
            .fold(T::sparse_zero(), |acc, &x| acc + x * x)
            .sqrt();
        if u_norm == T::sparse_zero() {
            break;
        }
        for (slot, &val) in v.iter_mut().zip(u.iter()) {
            *slot = val / u_norm;
        }
        quotient = estimate_rayleigh_quotient_enhanced(a, &v)?;
        // Relative-change stopping test, skipped on the first step.
        if step > 0 {
            let change = if quotient != T::sparse_zero() {
                ((quotient - quotient_prev) / quotient).abs()
            } else {
                T::infinity()
            };
            if change < tol {
                break;
            }
        }
        quotient_prev = quotient;
    }
    Ok(quotient.sqrt())
}
#[allow(dead_code)]
/// Computes `y = A x` from the COO triplets returned by `a.find()`.
///
/// # Errors
/// `DimensionMismatch` when `x.len()` differs from the column count.
fn sparse_matvec<T, S>(a: &S, x: &ArrayView1<T>) -> SparseResult<Array1<T>>
where
    T: Float + SparseElement + Debug + Copy + Add<Output = T> + Mul<Output = T> + 'static,
    S: SparseArray<T>,
{
    let (m, n) = a.shape();
    if x.len() != n {
        return Err(crate::error::SparseError::DimensionMismatch {
            expected: n,
            found: x.len(),
        });
    }
    let (rows, cols, vals) = a.find();
    let mut y = Array1::zeros(m);
    // Accumulate each stored entry's contribution: y[i] += a_ij * x[j].
    for ((&i, &j), &a_ij) in rows.iter().zip(cols.iter()).zip(vals.iter()) {
        y[i] = y[i] + a_ij * x[j];
    }
    Ok(y)
}
#[allow(dead_code)]
/// Computes `y = A^T x` from the COO triplets returned by `a.find()`.
///
/// # Errors
/// `DimensionMismatch` when `x.len()` differs from the row count.
fn sparse_matvec_transpose<T, S>(a: &S, x: &ArrayView1<T>) -> SparseResult<Array1<T>>
where
    T: Float + SparseElement + Debug + Copy + Add<Output = T> + Mul<Output = T> + 'static,
    S: SparseArray<T>,
{
    let (m, n) = a.shape();
    if x.len() != m {
        return Err(crate::error::SparseError::DimensionMismatch {
            expected: m,
            found: x.len(),
        });
    }
    let (rows, cols, vals) = a.find();
    let mut y = Array1::zeros(n);
    // Each stored entry a_ij contributes to the transposed product: y[j] += a_ij * x[i].
    for ((&i, &j), &a_ij) in rows.iter().zip(cols.iter()).zip(vals.iter()) {
        y[j] = y[j] + a_ij * x[i];
    }
    Ok(y)
}
#[allow(dead_code)]
/// Approximately solves `(A^T A) x = b` with a fixed-step gradient iteration
/// `x <- x - alpha * (A^T A x - b)`, starting from `x = b`.
///
/// Fix: the constant step `alpha` was re-converted from `0.1` on every
/// iteration (and shared a line with the loop header); it is now hoisted.
///
/// NOTE(review): convergence of the fixed step depends on the spectrum of
/// `A^T A` — confirm against the matrices this is used for.
fn solve_ata_system_enhanced<T, S>(
    a: &S,
    b: &Array1<T>,
    num_iterations: usize,
) -> SparseResult<Array1<T>>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (_, n) = a.shape();
    let mut x = b.clone();
    // Loop-invariant step size, hoisted out of the iteration.
    let alpha = T::from(0.1).expect("Operation failed");
    for _ in 0..num_iterations {
        let ax = sparse_matvec(a, &x.view())?;
        let ata_x = sparse_matvec_transpose(a, &ax.view())?;
        // Gradient step towards the normal-equations solution.
        for i in 0..n {
            x[i] = x[i] - alpha * (ata_x[i] - b[i]);
        }
    }
    Ok(x)
}
#[allow(dead_code)]
/// Returns `v^T (A^T A) v` for a sparse array (Rayleigh quotient of `A^T A`
/// when `v` is normalized).
fn estimate_rayleigh_quotient_enhanced<T, S>(a: &S, v: &Array1<T>) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + Debug
        + Copy
        + Add<Output = T>
        + Mul<Output = T>
        + 'static
        + std::iter::Sum,
    S: SparseArray<T>,
{
    let av = sparse_matvec(a, &v.view())?;
    let ata_v = sparse_matvec_transpose(a, &av.view())?;
    let quotient = v
        .iter()
        .zip(ata_v.iter())
        .fold(T::sparse_zero(), |acc, (&vi, &wi)| acc + vi * wi);
    Ok(quotient)
}
#[allow(dead_code)]
/// Computes the 2-norm of a small sparse array.
///
/// * Empty array: zero.
/// * Single row or column: the array has rank at most one, so its spectral
///   norm is the Euclidean norm of its stored entries. Previously the
///   largest |entry| was returned, which underestimates the norm whenever
///   more than one entry is nonzero.
/// * Otherwise: delegates to `twonormest_enhanced` with a tight tolerance.
fn exact_twonorm_enhanced<T, S>(a: &S) -> SparseResult<T>
where
    T: Float
        + SparseElement
        + NumAssign
        + Sum
        + Debug
        + Copy
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + 'static
        + scirs2_core::simd_ops::SimdUnifiedOps
        + Send
        + Sync,
    S: SparseArray<T>,
{
    let (m, n) = a.shape();
    let min_dim = m.min(n);
    if min_dim == 0 {
        return Ok(T::sparse_zero());
    }
    if min_dim == 1 {
        // Rank-one case: ||A||_2 = sqrt(sum of squared stored entries).
        let (_, _, values) = a.find();
        let sum_sq = values
            .iter()
            .fold(T::sparse_zero(), |acc, &v| acc + v * v);
        return Ok(sum_sq.sqrt());
    }
    twonormest_enhanced(
        a,
        Some(T::from(1e-12).expect("Operation failed")),
        Some(1000),
        None,
    )
}
#[allow(dead_code)]
/// Computes `||A||_1` (maximum absolute column sum) for a sparse array.
///
/// Performance fix: `a.find()` was previously re-run and fully scanned once
/// per column (O(n * nnz) plus repeated triplet materialization); the column
/// sums are now accumulated in a single pass over the triplets (O(nnz)).
fn exact_onenorm_enhanced<T, S>(a: &S) -> SparseResult<T>
where
    T: Float + SparseElement + Debug + Copy + Add<Output = T> + 'static,
    S: SparseArray<T>,
{
    let (_, n) = a.shape();
    let (_row_indices, col_indices, values) = a.find();
    // One pass: fold |a_ij| into its column's running sum.
    let mut col_sums = vec![T::sparse_zero(); n];
    for (&col, &val) in col_indices.iter().zip(values.iter()) {
        col_sums[col] = col_sums[col] + val.abs();
    }
    let mut max_col_sum = T::sparse_zero();
    for &col_sum in &col_sums {
        if col_sum > max_col_sum {
            max_col_sum = col_sum;
        }
    }
    Ok(max_col_sum)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::linalg::interface::{IdentityOperator, ScaledIdentityOperator};

    // exp(t * I) v == e^t * v: the Krylov approximation should match the
    // closed form to tight tolerance for the identity operator.
    #[test]
    fn test_expm_multiply_identity() {
        let identity = IdentityOperator::<f64>::new(3);
        let v = vec![1.0, 2.0, 3.0];
        let t = 1.0;
        let result = expm_multiply(&identity, &v, t, None, None).expect("Operation failed");
        let exp_t = t.exp();
        let expected: Vec<f64> = v.iter().map(|&vi| exp_t * vi).collect();
        println!("Identity test - result: {:?}", result);
        println!("Identity test - expected: {:?}", expected);
        println!("Identity test - exp(t): {}", exp_t);
        for (ri, ei) in result.iter().zip(&expected) {
            assert!((ri - ei).abs() < 1e-10);
        }
    }

    // exp(t * alpha * I) v == e^{t * alpha} * v for a scaled identity
    // (looser tolerance: goes through the dense Pade path).
    #[test]
    fn test_expm_multiply_scaled_identity() {
        let alpha = 2.0;
        let scaled_identity = ScaledIdentityOperator::new(3, alpha);
        let v = vec![1.0, 2.0, 3.0];
        let t = 0.5;
        let result = expm_multiply(&scaled_identity, &v, t, None, None).expect("Operation failed");
        let exp_t_alpha = (t * alpha).exp();
        let expected: Vec<f64> = v.iter().map(|&vi| exp_t_alpha * vi).collect();
        for (ri, ei) in result.iter().zip(&expected) {
            assert!((ri - ei).abs() < 1e-6);
        }
    }

    // t == 0 must return the input vector unchanged (early-exit path).
    #[test]
    fn test_expm_multiply_zero_time() {
        let identity = IdentityOperator::<f64>::new(3);
        let v = vec![1.0, 2.0, 3.0];
        let t = 0.0;
        let result = expm_multiply(&identity, &v, t, None, None).expect("Operation failed");
        for (ri, vi) in result.iter().zip(&v) {
            assert!((ri - vi).abs() < 1e-10);
        }
    }

    // Diagonal 3x3 matrix: the 1-norm is the largest column sum, here 4.0.
    // n <= 4 takes the exact_onenorm path inside onenormest.
    #[test]
    fn test_onenormest_small_matrix() {
        let rows = vec![0, 1, 2];
        let cols = vec![0, 1, 2];
        let data = vec![2.0, 3.0, 4.0];
        let shape = (3, 3);
        let matrix = CsrMatrix::new(data, rows, cols, shape).expect("Operation failed");
        let estimate = onenormest(&matrix, None, None).expect("Operation failed");
        assert!((estimate - 4.0).abs() < 1e-10);
    }
}