use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2, Axis};
use std::collections::HashMap;
use std::fmt::Debug;
use std::ops::{Add, Mul, Sub};
use scirs2_core::numeric::{Float, NumAssign, NumCast, Zero};
use crate::error::{LinalgError, LinalgResult};
/// A sparse matrix in CSR (compressed sparse row) layout: the non-zeros of
/// row `i` live in `data[indptr[i]..indptr[i + 1]]`, with their column
/// positions at the same offsets in `indices`.
#[derive(Debug, Clone)]
pub struct SparseMatrixView<T> {
/// Number of rows.
pub rows: usize,
/// Number of columns.
pub cols: usize,
/// Row pointers; length `rows + 1`, non-decreasing, `indptr[rows] == data.len()` (enforced by `new`).
pub indptr: Vec<usize>,
/// Column index of each stored value; same length as `data`.
pub indices: Vec<usize>,
/// Stored (non-zero) values.
pub data: Vec<T>,
}
impl<T> SparseMatrixView<T>
where
    T: Clone + Copy + Debug + Zero,
{
    /// Construct a CSR view from raw arrays, validating every CSR invariant.
    ///
    /// # Errors
    /// Returns a `DimensionError` or `ValueError` when the arrays are
    /// mutually inconsistent or reference out-of-bounds columns.
    pub fn new(
        data: Vec<T>,
        indptr: Vec<usize>,
        indices: Vec<usize>,
        shape: (usize, usize),
    ) -> LinalgResult<Self> {
        let (rows, cols) = shape;
        // One row-pointer entry per row, plus a terminating entry.
        if indptr.len() != rows + 1 {
            return Err(LinalgError::DimensionError(format!(
                "Row pointer array length must be rows + 1, got {} for {} rows",
                indptr.len(),
                rows
            )));
        }
        // Values and their column positions must pair up one-to-one.
        if data.len() != indices.len() {
            return Err(LinalgError::DimensionError(format!(
                "Data and indices must have the same length, got {} and {}",
                data.len(),
                indices.len()
            )));
        }
        // Row pointers must never decrease.
        if indptr.windows(2).any(|pair| pair[1] < pair[0]) {
            return Err(LinalgError::ValueError(
                "Row pointer array must be monotonically increasing".to_string(),
            ));
        }
        // The final pointer must cover exactly all stored values.
        if indptr[rows] != data.len() {
            return Err(LinalgError::ValueError(format!(
                "Last row pointer entry must match data length, got {} and {}",
                indptr[rows],
                data.len()
            )));
        }
        // Every column index must be within bounds.
        if let Some(&max_index) = indices.iter().max() {
            if max_index >= cols {
                return Err(LinalgError::ValueError(format!(
                    "Column index out of bounds: {max_index} for a matrix with {cols} columns"
                )));
            }
        }
        Ok(SparseMatrixView {
            rows,
            cols,
            indptr,
            indices,
            data,
        })
    }

    /// `(rows, cols)` of the matrix.
    pub fn shape(&self) -> (usize, usize) {
        (self.rows, self.cols)
    }

    /// Number of explicitly stored (non-zero) entries.
    pub fn nnz(&self) -> usize {
        self.data.len()
    }

    /// Number of rows.
    pub fn nrows(&self) -> usize {
        self.rows
    }

    /// Number of columns.
    pub fn ncols(&self) -> usize {
        self.cols
    }

    /// `true` when no entries are stored.
    pub fn is_empty(&self) -> bool {
        self.nnz() == 0
    }

    /// Materialize the full dense matrix, filling unstored positions with zero.
    pub fn to_dense(&self) -> Array2<T> {
        let mut dense = Array2::zeros((self.rows, self.cols));
        for (row, bounds) in self.indptr.windows(2).enumerate() {
            for idx in bounds[0]..bounds[1] {
                dense[[row, self.indices[idx]]] = self.data[idx];
            }
        }
        dense
    }
}
/// Convert a dense array into CSR form, keeping entries whose magnitude
/// exceeds `|threshold|`. Values that cannot be cast to `F` are treated as
/// zero-magnitude and dropped.
#[allow(dead_code)]
pub fn sparse_from_ndarray<T, F>(
    array: &ArrayView2<T>,
    threshold: F,
) -> LinalgResult<SparseMatrixView<T>>
where
    T: Clone + Copy + Debug + PartialOrd + Zero + NumCast,
    F: Float + NumCast,
{
    let (rows, cols) = array.dim();
    let cutoff = threshold.abs();
    let mut data = Vec::new();
    let mut indices = Vec::new();
    // Build row pointers incrementally: after each row, record the running nnz.
    let mut indptr = Vec::with_capacity(rows + 1);
    indptr.push(0);
    for row in array.axis_iter(Axis(0)) {
        for (j, &val) in row.iter().enumerate() {
            let magnitude = NumCast::from(val).map_or_else(F::zero, |v: F| v.abs());
            if magnitude > cutoff {
                data.push(val);
                indices.push(j);
            }
        }
        indptr.push(data.len());
    }
    SparseMatrixView::new(data, indptr, indices, (rows, cols))
}
/// Multiply a sparse (CSR) matrix by a dense matrix: `sparse * dense`.
///
/// # Errors
/// Returns `DimensionError` when the inner dimensions disagree.
#[allow(dead_code)]
pub fn sparse_dense_matmul<T>(
    sparse: &SparseMatrixView<T>,
    dense: &ArrayView2<T>,
) -> LinalgResult<Array2<T>>
where
    T: Clone + Copy + Debug + Zero + Add<Output = T> + Mul<Output = T>,
{
    if sparse.cols != dense.dim().0 {
        return Err(LinalgError::DimensionError(format!(
            "Matrix dimensions incompatible for multiplication: {}x{} and {}x{}",
            sparse.rows,
            sparse.cols,
            dense.dim().0,
            dense.dim().1
        )));
    }
    let result_rows = sparse.rows;
    let result_cols = dense.dim().1;
    let mut result = Array2::zeros((result_rows, result_cols));
    // Walk stored entries once per row; each entry scales a full dense row
    // into the output row (terms are added in the same k-order as before).
    for i in 0..sparse.rows {
        for k in sparse.indptr[i]..sparse.indptr[i + 1] {
            let col = sparse.indices[k];
            let entry = sparse.data[k];
            for j in 0..result_cols {
                result[[i, j]] = result[[i, j]] + entry * dense[[col, j]];
            }
        }
    }
    Ok(result)
}
/// Multiply a dense matrix by a sparse (CSR) matrix: `dense * sparse`.
///
/// Walks each stored entry `sparse[i, col] = val` once and adds
/// `dense[k, i] * val` into `result[k, col]` for every output row `k`.
/// This replaces the previous `HashMap`-backed accumulation, which paid
/// two hash lookups per dense row per nonzero; the accumulation order is
/// unchanged, so floating-point results are identical.
///
/// # Errors
/// Returns `DimensionError` when `dense.ncols() != sparse.nrows()`.
#[allow(dead_code)]
pub fn dense_sparse_matmul<T>(
    dense: &ArrayView2<T>,
    sparse: &SparseMatrixView<T>,
) -> LinalgResult<Array2<T>>
where
    T: Clone + Copy + Debug + Zero + Add<Output = T> + Mul<Output = T>,
{
    if dense.dim().1 != sparse.rows {
        return Err(LinalgError::DimensionError(format!(
            "Matrix dimensions incompatible for multiplication: {}x{} and {}x{}",
            dense.dim().0,
            dense.dim().1,
            sparse.rows,
            sparse.cols
        )));
    }
    let result_rows = dense.dim().0;
    let result_cols = sparse.cols;
    let mut result = Array2::zeros((result_rows, result_cols));
    for i in 0..sparse.rows {
        for j in sparse.indptr[i]..sparse.indptr[i + 1] {
            let col = sparse.indices[j];
            let val = sparse.data[j];
            // result[:, col] += dense[:, i] * sparse[i, col]
            for k in 0..result_rows {
                result[[k, col]] = result[[k, col]] + dense[[k, i]] * val;
            }
        }
    }
    Ok(result)
}
/// Sparse (CSR) matrix times dense vector: `sparse * vector`.
///
/// # Errors
/// Returns `DimensionError` when `sparse.ncols() != vector.len()`.
#[allow(dead_code)]
pub fn sparse_dense_matvec<T>(
    sparse: &SparseMatrixView<T>,
    vector: &ArrayView1<T>,
) -> LinalgResult<Array1<T>>
where
    T: Clone + Copy + Debug + Zero + Add<Output = T> + Mul<Output = T>,
{
    if sparse.cols != vector.len() {
        return Err(LinalgError::DimensionError(format!(
            "Matrix-vector dimensions incompatible: {}x{} and {}",
            sparse.rows,
            sparse.cols,
            vector.len()
        )));
    }
    let mut result = Array1::zeros(sparse.rows);
    // Each output element is the dot product of one sparse row with the vector.
    for (i, out) in result.iter_mut().enumerate() {
        *out = (sparse.indptr[i]..sparse.indptr[i + 1]).fold(T::zero(), |acc, j| {
            acc + sparse.data[j] * vector[sparse.indices[j]]
        });
    }
    Ok(result)
}
/// Dense matrix times a sparse vector (represented as a 1-row CSR matrix).
///
/// # Errors
/// Returns `DimensionError` when the sparse operand has more than one row
/// or when `dense.ncols() != sparse.ncols()`.
#[allow(dead_code)]
pub fn dense_sparse_matvec<T>(
    dense: &ArrayView2<T>,
    sparse: &SparseMatrixView<T>,
) -> LinalgResult<Array1<T>>
where
    T: Clone + Copy + Debug + Zero + Add<Output = T> + Mul<Output = T>,
{
    // The sparse operand must be a single-row matrix (a row vector).
    if sparse.rows != 1 {
        return Err(LinalgError::DimensionError(format!(
            "Expected a sparse vector (single row), got {} rows",
            sparse.rows
        )));
    }
    let (m, n) = dense.dim();
    if n != sparse.cols {
        return Err(LinalgError::DimensionError(format!(
            "Matrix-vector dimensions incompatible: {}x{} and {}",
            m, n, sparse.cols
        )));
    }
    let mut result = Array1::zeros(m);
    // For every stored entry, add the scaled dense column into the result.
    for j in sparse.indptr[0]..sparse.indptr[1] {
        let col = sparse.indices[j];
        let val = sparse.data[j];
        for (i, acc) in result.iter_mut().enumerate() {
            *acc = *acc + dense[[i, col]] * val;
        }
    }
    Ok(result)
}
/// Element-wise sum `sparse + dense`, returned as a dense matrix.
///
/// # Errors
/// Returns `DimensionError` when the shapes disagree.
#[allow(dead_code)]
pub fn sparse_dense_add<T>(
    sparse: &SparseMatrixView<T>,
    dense: &ArrayView2<T>,
) -> LinalgResult<Array2<T>>
where
    T: Clone + Copy + Debug + Zero + Add<Output = T>,
{
    if sparse.shape() != dense.dim() {
        return Err(LinalgError::DimensionError(format!(
            "Matrix dimensions incompatible for addition: {}x{} and {}x{}",
            sparse.rows,
            sparse.cols,
            dense.dim().0,
            dense.dim().1
        )));
    }
    // Copy the dense operand, then fold each stored sparse entry into it.
    let mut result = dense.to_owned();
    for (row, bounds) in sparse.indptr.windows(2).enumerate() {
        for j in bounds[0]..bounds[1] {
            let col = sparse.indices[j];
            result[[row, col]] = result[[row, col]] + sparse.data[j];
        }
    }
    Ok(result)
}
/// Element-wise difference `sparse - dense`, returned as a dense matrix.
///
/// # Errors
/// Returns `DimensionError` when the shapes disagree.
#[allow(dead_code)]
pub fn sparse_dense_sub<T>(
    sparse: &SparseMatrixView<T>,
    dense: &ArrayView2<T>,
) -> LinalgResult<Array2<T>>
where
    T: Clone + Copy + Debug + Zero + Sub<Output = T> + Neg<Output = T>,
{
    if sparse.shape() != dense.dim() {
        return Err(LinalgError::DimensionError(format!(
            "Matrix dimensions incompatible for subtraction: {}x{} and {}x{}",
            sparse.rows,
            sparse.cols,
            dense.dim().0,
            dense.dim().1
        )));
    }
    // Start from -dense, then add each stored sparse entry: (-dense) + sparse.
    let mut result = dense.mapv(|x| -x);
    for (row, bounds) in sparse.indptr.windows(2).enumerate() {
        for j in bounds[0]..bounds[1] {
            let col = sparse.indices[j];
            result[[row, col]] = result[[row, col]] + sparse.data[j];
        }
    }
    Ok(result)
}
/// Element-wise (Hadamard) product of a CSR matrix and a dense matrix.
/// Products that come out exactly zero are not stored in the result.
///
/// # Errors
/// Returns `DimensionError` when the shapes disagree.
#[allow(dead_code)]
pub fn sparse_dense_elementwise_mul<T>(
    sparse: &SparseMatrixView<T>,
    dense: &ArrayView2<T>,
) -> LinalgResult<SparseMatrixView<T>>
where
    T: Clone + Copy + Debug + Zero + Mul<Output = T> + PartialEq,
{
    if sparse.shape() != dense.dim() {
        return Err(LinalgError::DimensionError(format!(
            "Matrix dimensions incompatible for element-wise multiplication: {}x{} and {}x{}",
            sparse.rows,
            sparse.cols,
            dense.dim().0,
            dense.dim().1
        )));
    }
    let mut data = Vec::new();
    let mut indices = Vec::new();
    // Row pointers recorded incrementally as the running nnz after each row.
    let mut indptr = Vec::with_capacity(sparse.rows + 1);
    indptr.push(0);
    for i in 0..sparse.rows {
        for j in sparse.indptr[i]..sparse.indptr[i + 1] {
            let col = sparse.indices[j];
            let product = sparse.data[j] * dense[[i, col]];
            if product != T::zero() {
                data.push(product);
                indices.push(col);
            }
        }
        indptr.push(data.len());
    }
    SparseMatrixView::new(data, indptr, indices, sparse.shape())
}
/// Transpose a CSR matrix, producing the transpose in CSR layout
/// (a two-pass counting transpose: count per-column entries, prefix-sum
/// into row pointers, then scatter entries into place).
#[allow(dead_code)]
pub fn sparse_transpose<T>(sparse: &SparseMatrixView<T>) -> LinalgResult<SparseMatrixView<T>>
where
    T: Clone + Copy + Debug + Zero,
{
    // Pass 1: count entries per column, accumulated directly into the
    // pointer array, then turn counts into offsets via a prefix sum.
    let mut col_ptrs = vec![0usize; sparse.cols + 1];
    for &col in &sparse.indices {
        col_ptrs[col + 1] += 1;
    }
    for i in 0..sparse.cols {
        col_ptrs[i + 1] += col_ptrs[i];
    }
    // Pass 2: scatter each entry to its slot; `next` tracks the insertion
    // cursor for every output row.
    let nnz = sparse.nnz();
    let mut indices_t = vec![0usize; nnz];
    let mut data_t = vec![T::zero(); nnz];
    let mut next = col_ptrs.clone();
    for row in 0..sparse.rows {
        for j in sparse.indptr[row]..sparse.indptr[row + 1] {
            let col = sparse.indices[j];
            let dest = next[col];
            indices_t[dest] = row;
            data_t[dest] = sparse.data[j];
            next[col] += 1;
        }
    }
    SparseMatrixView::new(data_t, col_ptrs, indices_t, (sparse.cols, sparse.rows))
}
pub mod advanced {
use super::*;
/// Solve `sparse * x = rhs`, picking a strategy from the sparsity profile:
/// plain CG for very sparse matrices, Jacobi-preconditioned CG for
/// moderately sparse ones, and a dense direct solve otherwise.
pub fn adaptive_sparse_dense_solve<T>(
    sparse: &SparseMatrixView<T>,
    rhs: &ArrayView1<T>,
    tolerance: T,
    max_iterations: usize,
) -> LinalgResult<Array1<T>>
where
    T: Float
        + Clone
        + Copy
        + Debug
        + Zero
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + PartialOrd
        + std::iter::Sum
        + scirs2_core::ndarray::ScalarOperand
        + NumAssign
        + Send
        + Sync,
{
    let density = sparse.nnz() as f64 / (sparse.nrows() * sparse.ncols()) as f64;
    let row_fill = sparse.nnz() as f64 / sparse.nrows() as f64;
    if density < 0.1 && row_fill < 50.0 {
        return sparse_conjugate_gradient(sparse, rhs, tolerance, max_iterations);
    }
    if density < 0.3 {
        return sparse_preconditioned_cg(sparse, rhs, tolerance, max_iterations);
    }
    // Dense enough that iterative sparse methods lose their advantage.
    let dense = sparse.to_dense();
    crate::solve::solve(&dense.view(), rhs, None)
}
/// Unpreconditioned conjugate gradient solver for `a * x = b`, where `a` is
/// square (and assumed symmetric positive definite — TODO confirm callers
/// guarantee this). Converges when `||r|| < tolerance * ||b||` or after
/// `max_iterations`, whichever comes first.
///
/// # Errors
/// Returns `DimensionError` for non-square `a`, or `ComputationError` when
/// a non-positive curvature `p^T A p` is encountered.
pub fn sparse_conjugate_gradient<T>(
    a: &SparseMatrixView<T>,
    b: &ArrayView1<T>,
    tolerance: T,
    max_iterations: usize,
) -> LinalgResult<Array1<T>>
where
    T: Float
        + Clone
        + Copy
        + Debug
        + Zero
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + PartialOrd
        + scirs2_core::ndarray::ScalarOperand,
{
    let n = a.nrows();
    if a.ncols() != n {
        return Err(LinalgError::DimensionError(
            "Matrix must be square for conjugate gradient".to_string(),
        ));
    }
    let mut x = Array1::zeros(n);
    let mut residual = b.to_owned();
    let mut direction = residual.clone();
    let mut rs_prev = residual.dot(&residual);
    let b_norm = b.dot(b).sqrt();
    // A (near-)zero right-hand side has the trivial solution x = 0.
    if b_norm < T::epsilon() {
        return Ok(x);
    }
    for _ in 0..max_iterations {
        let a_dir = sparse_dense_matvec(a, &direction.view())?;
        let curvature = direction.dot(&a_dir);
        if curvature <= T::zero() {
            return Err(LinalgError::ComputationError(
                "Matrix is not positive definite".to_string(),
            ));
        }
        let step = rs_prev / curvature;
        x = x + &direction * step;
        residual = residual - &a_dir * step;
        let rs_next = residual.dot(&residual);
        // Relative residual test.
        if rs_next.sqrt() < tolerance * b_norm {
            return Ok(x);
        }
        direction = &residual + &direction * (rs_next / rs_prev);
        rs_prev = rs_next;
    }
    Ok(x)
}
/// Jacobi-preconditioned conjugate gradient solver for `a * x = b`.
///
/// Uses the (guarded) inverse diagonal of `a` as the preconditioner:
/// diagonal entries with magnitude below machine epsilon are replaced by
/// one so the preconditioner stays finite.
///
/// # Errors
/// Returns `ComputationError` when a non-positive curvature `p^T A p` is
/// encountered (matrix not positive definite).
pub fn sparse_preconditioned_cg<T>(
    a: &SparseMatrixView<T>,
    b: &ArrayView1<T>,
    tolerance: T,
    max_iterations: usize,
) -> LinalgResult<Array1<T>>
where
    T: Float
        + Clone
        + Copy
        + Debug
        + Zero
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + PartialOrd
        + scirs2_core::ndarray::ScalarOperand,
{
    // Extract the diagonal for the Jacobi preconditioner.
    let mut diag = Array1::zeros(a.nrows());
    for i in 0..a.nrows() {
        for j in a.indptr[i]..a.indptr[i + 1] {
            if a.indices[j] == i {
                diag[i] = a.data[j];
                break;
            }
        }
        // Guard against (near-)zero pivots so 1/diag stays finite.
        if diag[i].abs() < T::epsilon() {
            diag[i] = T::one();
        }
    }
    let m_inv = diag.mapv(|x| T::one() / x);
    let n = a.nrows();
    let mut x = Array1::zeros(n);
    let mut r = b.to_owned();
    let b_norm = b.dot(b).sqrt();
    // Fix: a zero right-hand side previously fell through to the iteration,
    // hit `pap == 0`, and was misreported as "not positive definite".
    // Mirror the early return used by `sparse_conjugate_gradient`.
    if b_norm < T::epsilon() {
        return Ok(x);
    }
    let mut z = &r * &m_inv;
    let mut p = z.clone();
    let mut rzold = r.dot(&z);
    for _ in 0..max_iterations {
        let ap = sparse_dense_matvec(a, &p.view())?;
        let pap = p.dot(&ap);
        if pap <= T::zero() {
            return Err(LinalgError::ComputationError(
                "Matrix is not positive definite".to_string(),
            ));
        }
        let alpha = rzold / pap;
        x = x + &p * alpha;
        r = r - &ap * alpha;
        let r_norm = r.dot(&r).sqrt();
        // Relative residual convergence test.
        if r_norm < tolerance * b_norm {
            return Ok(x);
        }
        z = &r * &m_inv;
        let rznew = r.dot(&z);
        let beta = rznew / rzold;
        p = &z + &p * beta;
        rzold = rznew;
    }
    Ok(x)
}
/// Structural summary of a sparse matrix, used to pick a solver strategy.
#[derive(Debug, Clone)]
pub struct SparseMatrixStats {
/// Fraction of stored entries: nnz / (rows * cols).
pub sparsity_ratio: f64,
/// Mean number of stored entries per row.
pub avg_nnz_per_row: f64,
/// Largest number of stored entries in any single row.
pub max_nnz_per_row: usize,
/// Maximum |row - col| over all stored entries.
pub bandwidth: usize,
/// Numerically symmetric within tolerance (always false for non-square).
pub is_symmetric: bool,
/// Every row strictly diagonally dominant (|a_ii| > sum of |off-diagonals|).
pub is_diagonal_dominant: bool,
}
/// Gather the structural statistics of a CSR matrix.
pub fn analyze_sparse_structure<T>(sparse: &SparseMatrixView<T>) -> SparseMatrixStats
where
    T: Float + Clone + Copy + Debug + PartialOrd,
{
    let total_elements = sparse.nrows() * sparse.ncols();
    let sparsity_ratio = sparse.nnz() as f64 / total_elements as f64;
    let avg_nnz_per_row = sparse.nnz() as f64 / sparse.nrows() as f64;
    // Widest row = largest gap between consecutive row pointers.
    let max_nnz_per_row = sparse
        .indptr
        .windows(2)
        .map(|pair| pair[1] - pair[0])
        .max()
        .unwrap_or(0);
    // Bandwidth = greatest |row - col| over stored entries.
    let mut bandwidth = 0;
    for (row, bounds) in sparse.indptr.windows(2).enumerate() {
        for j in bounds[0]..bounds[1] {
            bandwidth = bandwidth.max(row.abs_diff(sparse.indices[j]));
        }
    }
    // Symmetry only makes sense for square matrices.
    let is_symmetric = sparse.nrows() == sparse.ncols() && check_sparse_symmetry(sparse);
    SparseMatrixStats {
        sparsity_ratio,
        avg_nnz_per_row,
        max_nnz_per_row,
        bandwidth,
        is_symmetric,
        is_diagonal_dominant: check_diagonal_dominance(sparse),
    }
}
/// Numeric symmetry test within `100 * epsilon`: every stored entry must
/// match its mirror, and entries without a stored mirror must be negligible.
fn check_sparse_symmetry<T>(sparse: &SparseMatrixView<T>) -> bool
where
    T: Float + Clone + Copy + Debug + PartialOrd,
{
    let tolerance = T::epsilon() * T::from(100.0).expect("Operation failed");
    // Index every stored entry by (row, col) for O(1) mirror lookups.
    let mut elements = HashMap::new();
    for (row, bounds) in sparse.indptr.windows(2).enumerate() {
        for j in bounds[0]..bounds[1] {
            elements.insert((row, sparse.indices[j]), sparse.data[j]);
        }
    }
    for (row, bounds) in sparse.indptr.windows(2).enumerate() {
        for j in bounds[0]..bounds[1] {
            let col = sparse.indices[j];
            let val_ij = sparse.data[j];
            match elements.get(&(col, row)) {
                // Mirror entry stored: the two values must agree.
                Some(&val_ji) => {
                    if (val_ij - val_ji).abs() > tolerance {
                        return false;
                    }
                }
                // Mirror entry missing: this entry must itself be negligible.
                None => {
                    if val_ij.abs() > tolerance {
                        return false;
                    }
                }
            }
        }
    }
    true
}
/// Strict diagonal dominance: in every row, |a_ii| must exceed the sum of
/// the magnitudes of the off-diagonal entries.
fn check_diagonal_dominance<T>(sparse: &SparseMatrixView<T>) -> bool
where
    T: Float + Clone + Copy + Debug + PartialOrd,
{
    (0..sparse.nrows()).all(|i| {
        let mut diagonal = T::zero();
        let mut off_diagonal = T::zero();
        for j in sparse.indptr[i]..sparse.indptr[i + 1] {
            let magnitude = sparse.data[j].abs();
            if sparse.indices[j] == i {
                diagonal = magnitude;
            } else {
                off_diagonal = off_diagonal + magnitude;
            }
        }
        diagonal > off_diagonal
    })
}
}
/// Common interface for sparse-matrix types: shape/nnz queries, CSR
/// conversion, matrix-vector products, linear solves, and structure stats.
pub trait SparseLinalg<T> {
/// `(rows, cols)` of the matrix.
fn shape(&self) -> (usize, usize);
/// Number of explicitly stored entries.
fn nnz(&self) -> usize;
/// Convert to (or clone as) a CSR representation.
fn to_csr(&self) -> LinalgResult<SparseMatrixView<T>>;
/// Matrix-vector product `self * x`.
fn matvec(&self, x: &ArrayView1<T>) -> LinalgResult<Array1<T>>;
/// Solve `self * x = b` for `x`.
fn solve(&self, b: &ArrayView1<T>) -> LinalgResult<Array1<T>>;
/// Structural statistics (sparsity, bandwidth, symmetry, ...).
fn stats(&self) -> advanced::SparseMatrixStats;
}
// `SparseMatrixView` satisfies `SparseLinalg` by delegating to the free
// functions and the adaptive solver defined in this module.
impl<T> SparseLinalg<T> for SparseMatrixView<T>
where
T: Float
+ Clone
+ Copy
+ Debug
+ Zero
+ Add<Output = T>
+ Sub<Output = T>
+ Mul<Output = T>
+ PartialOrd
+ std::iter::Sum
+ scirs2_core::ndarray::ScalarOperand
+ NumAssign
+ Send
+ Sync,
{
fn shape(&self) -> (usize, usize) {
(self.rows, self.cols)
}
fn nnz(&self) -> usize {
self.data.len()
}
// Already stored in CSR form, so conversion is just a clone.
fn to_csr(&self) -> LinalgResult<SparseMatrixView<T>> {
Ok(self.clone())
}
fn matvec(&self, x: &ArrayView1<T>) -> LinalgResult<Array1<T>> {
sparse_dense_matvec(self, x)
}
// Solves with tolerance = 1000 * machine epsilon and up to 1000 iterations.
fn solve(&self, b: &ArrayView1<T>) -> LinalgResult<Array1<T>> {
advanced::adaptive_sparse_dense_solve(
self,
b,
T::epsilon() * T::from(1000.0).expect("Operation failed"),
1000,
)
}
fn stats(&self) -> advanced::SparseMatrixStats {
advanced::analyze_sparse_structure(self)
}
}
pub mod utils {
use super::*;
/// Solve `matrix * x = rhs`, automatically choosing sparse or dense machinery
/// based on the fraction of near-zero entries.
pub fn auto_solve<T>(
    matrix: &ArrayView2<T>,
    rhs: &ArrayView1<T>,
    sparsity_threshold: f64,
) -> LinalgResult<Array1<T>>
where
    T: Float
        + Clone
        + Copy
        + Debug
        + Zero
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + PartialOrd
        + NumCast
        + NumAssign
        + std::iter::Sum
        + scirs2_core::ndarray::ScalarOperand
        + Send
        + Sync,
{
    // Entries with magnitude below this are treated as zeros.
    let threshold = T::epsilon() * T::from(1000.0).expect("Operation failed");
    let zero_count = matrix.iter().filter(|&&v| v.abs() < threshold).count();
    let sparsity_ratio = zero_count as f64 / matrix.len() as f64;
    if sparsity_ratio > sparsity_threshold {
        // Sparse enough: convert and hand off to the adaptive sparse solver.
        sparse_from_ndarray(matrix, threshold)?.solve(rhs)
    } else {
        crate::solve::solve(matrix, rhs, None)
    }
}
/// Convert a CSR matrix view to the requested storage format.
///
/// Supported format strings:
/// - `"csr"`: returns a clone of the input unchanged.
/// - `"csc"`: returns the transpose in CSR layout — its arrays are the CSC
///   arrays of the original matrix, but note the returned view's `shape()`
///   is swapped (cols x rows). NOTE(review): verify callers expect the
///   swapped shape rather than a same-shape CSC view.
///
/// # Errors
/// Returns `LinalgError::ValueError` for any other format string.
pub fn convert_sparse_format<T>(
sparse: &SparseMatrixView<T>,
format: &str,
) -> LinalgResult<SparseMatrixView<T>>
where
T: Clone + Copy + Debug + Zero,
{
match format {
"csr" => Ok(sparse.clone()),
"csc" => sparse_transpose(sparse),
_ => Err(LinalgError::ValueError(format!(
"Unsupported sparse format: {format}"
))),
}
}
/// Estimate `(dense_bytes, sparse_bytes)` for a matrix of the given shape
/// with `nnz` stored entries of element type `T`. The sparse estimate
/// assumes CSR layout: values + column indices + `rows + 1` row pointers.
pub fn estimate_memory_usage<T>(shape: (usize, usize), nnz: usize) -> (usize, usize) {
    let (rows, cols) = shape;
    let elem = std::mem::size_of::<T>();
    let idx = std::mem::size_of::<usize>();
    let dense_memory = rows * cols * elem;
    let sparse_memory = nnz * (elem + idx) + (rows + 1) * idx;
    (dense_memory, sparse_memory)
}
}
use std::ops::Neg;
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_abs_diff_eq;
use scirs2_core::ndarray::array;
// Round-trip: dense -> CSR -> dense preserves shape, nnz count, and values.
#[test]
fn test_sparse_from_ndarray() {
let dense = array![[1.0, 0.0, 2.0], [0.0, 0.0, 3.0], [4.0, 5.0, 0.0]];
let sparse = sparse_from_ndarray(&dense.view(), 1e-10).expect("Operation failed");
assert_eq!(sparse.shape(), (3, 3));
assert_eq!(sparse.nnz(), 5);
let dense_again = sparse.to_dense();
for i in 0..3 {
for j in 0..3 {
assert_abs_diff_eq!(dense[[i, j]], dense_again[[i, j]], epsilon = 1e-10);
}
}
}
// sparse * dense product against a hand-computed 3x2 expected matrix.
#[test]
fn test_sparse_dense_matmul() {
let dense_a = array![[1.0, 0.0, 2.0], [0.0, 0.0, 3.0], [4.0, 5.0, 0.0]];
let dense_b = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
let sparse_a = sparse_from_ndarray(&dense_a.view(), 1e-10).expect("Operation failed");
let result = sparse_dense_matmul(&sparse_a, &dense_b.view()).expect("Operation failed");
let expected = array![[11.0, 14.0], [15.0, 18.0], [19.0, 28.0]];
for i in 0..3 {
for j in 0..2 {
assert_abs_diff_eq!(result[[i, j]], expected[[i, j]], epsilon = 1e-10);
}
}
}
// dense * sparse product cross-checked against ndarray's dense `dot`.
#[test]
fn test_dense_sparse_matmul() {
let dense_a = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]];
let dense_b = array![[1.0, 0.0, 2.0], [0.0, 0.0, 3.0], [4.0, 5.0, 0.0]];
let sparse_b = sparse_from_ndarray(&dense_b.view(), 1e-10).expect("Operation failed");
let result = dense_sparse_matmul(&dense_a.view(), &sparse_b).expect("Operation failed");
let expected = dense_a.dot(&dense_b);
for i in 0..2 {
for j in 0..3 {
assert_abs_diff_eq!(result[[i, j]], expected[[i, j]], epsilon = 1e-10);
}
}
}
// sparse matrix-vector product against a hand-computed result.
#[test]
fn test_sparse_dense_matvec() {
let dense_a = array![[1.0, 0.0, 2.0], [0.0, 0.0, 3.0], [4.0, 5.0, 0.0]];
let vec_b = array![1.0, 2.0, 3.0];
let sparse_a = sparse_from_ndarray(&dense_a.view(), 1e-10).expect("Operation failed");
let result = sparse_dense_matvec(&sparse_a, &vec_b.view()).expect("Operation failed");
let expected = array![7.0, 9.0, 14.0];
for i in 0..3 {
assert_abs_diff_eq!(result[i], expected[i], epsilon = 1e-10);
}
}
// Element-wise sparse + dense addition against a hand-computed result.
#[test]
fn test_sparse_dense_add() {
let dense_a = array![[1.0, 0.0, 2.0], [0.0, 0.0, 3.0], [4.0, 5.0, 0.0]];
let dense_b = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]];
let sparse_a = sparse_from_ndarray(&dense_a.view(), 1e-10).expect("Operation failed");
let result = sparse_dense_add(&sparse_a, &dense_b.view()).expect("Operation failed");
let expected = array![[2.0, 2.0, 5.0], [4.0, 5.0, 9.0], [11.0, 13.0, 9.0]];
for i in 0..3 {
for j in 0..3 {
assert_abs_diff_eq!(result[[i, j]], expected[[i, j]], epsilon = 1e-10);
}
}
}
// Hadamard product keeps the sparse pattern; zeros in the sparse operand stay zero.
#[test]
fn test_sparse_dense_elementwise_mul() {
let dense_a = array![[1.0, 0.0, 2.0], [0.0, 0.0, 3.0], [4.0, 5.0, 0.0]];
let dense_b = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]];
let sparse_a = sparse_from_ndarray(&dense_a.view(), 1e-10).expect("Operation failed");
let result =
sparse_dense_elementwise_mul(&sparse_a, &dense_b.view()).expect("Operation failed");
let result_dense = result.to_dense();
let expected = array![[1.0, 0.0, 6.0], [0.0, 0.0, 18.0], [28.0, 40.0, 0.0]];
for i in 0..3 {
for j in 0..3 {
assert_abs_diff_eq!(result_dense[[i, j]], expected[[i, j]], epsilon = 1e-10);
}
}
}
// CSR transpose checked element-by-element via densification.
#[test]
fn test_sparse_transpose() {
let dense_a = array![[1.0, 0.0, 2.0], [0.0, 0.0, 3.0], [4.0, 5.0, 0.0]];
let sparse_a = sparse_from_ndarray(&dense_a.view(), 1e-10).expect("Operation failed");
let transposed = sparse_transpose(&sparse_a).expect("Operation failed");
let transposed_dense = transposed.to_dense();
let expected = array![[1.0, 0.0, 4.0], [0.0, 0.0, 5.0], [2.0, 3.0, 0.0]];
for i in 0..3 {
for j in 0..3 {
assert_abs_diff_eq!(transposed_dense[[i, j]], expected[[i, j]], epsilon = 1e-10);
}
}
}
}