pub mod arithmetic;
pub mod convolution;
pub mod core;
pub mod format;
pub mod linalg;
pub mod reduction;
pub use core::{sparse_coo_tensor, SparseTensor};
pub use arithmetic::{sparse_add, sparse_mul};
pub use linalg::{sparse_eye, sparse_mm, sparse_transpose};
pub use format::{csr_to_sparse, sparse_to_csr};
pub use convolution::{sparse_conv1d, sparse_conv2d};
pub use reduction::{sparse_max, sparse_mean, sparse_min, sparse_sum};
#[cfg(test)]
mod integration_tests {
    use super::*;
    use torsh_tensor::Tensor;

    /// End-to-end exercise of the sparse-tensor API on one 3x3 matrix:
    /// COO construction, dense round-trip, scalar multiplication,
    /// identity matmul, full-sum reduction, and CSR round-trip.
    #[test]
    fn test_sparse_workflow_integration() -> torsh_core::Result<()> {
        let vals = Tensor::from_data(
            vec![1.0, 2.0, 3.0, 4.0, 5.0],
            vec![5],
            torsh_core::DeviceType::Cpu,
        )?;
        // Five coordinate pairs stored as a 2x5 index tensor
        // (COO convention expected by `sparse_coo_tensor`).
        let idx = Tensor::from_data(
            vec![0.0, 0.0, 1.0, 2.0, 2.0, 0.0, 2.0, 1.0, 0.0, 2.0],
            vec![2, 5],
            torsh_core::DeviceType::Cpu,
        )?;
        let sparse_a = sparse_coo_tensor(&idx, &vals, &[3, 3])?;

        // Structural invariants of the freshly built COO tensor.
        assert_eq!(sparse_a.nnz(), 5);
        assert_eq!(sparse_a.shape(), &[3, 3]);
        assert_eq!(sparse_a.ndim(), 2);

        // Sparse -> dense -> sparse -> dense must reproduce every entry.
        let dense = sparse_a.to_dense()?;
        let sparse_b = SparseTensor::from_dense(&dense)?;
        let lhs = sparse_a.to_dense()?.to_vec()?;
        let rhs = sparse_b.to_dense()?.to_vec()?;
        for (a, b) in lhs.iter().zip(rhs.iter()) {
            assert!(
                (a - b).abs() < 1e-6,
                "Round-trip conversion mismatch: {} vs {}",
                a,
                b
            );
        }

        // Scalar multiplication scales every stored value by the factor.
        let doubled = sparse_mul(&sparse_a, 2.0)?;
        let doubled_vals = doubled.values.to_vec()?;
        let base_vals = sparse_a.values.to_vec()?;
        for (s, o) in doubled_vals.iter().zip(base_vals.iter()) {
            assert!((s - o * 2.0).abs() < 1e-6);
        }

        // Multiplying by a dense identity must leave the matrix unchanged.
        let identity = sparse_eye(3)?;
        let product = sparse_mm(&sparse_a, &identity.to_dense()?)?;
        let before = sparse_a.to_dense()?.to_vec()?;
        let after = product.to_vec()?;
        for (x, y) in before.iter().zip(after.iter()) {
            assert!((x - y).abs() < 1e-6);
        }

        // Reducing over all axes yields the sum of the stored values
        // (1 + 2 + 3 + 4 + 5 = 15).
        let total = sparse_sum(&sparse_a, None)?;
        assert!((total.to_vec()?[0] - 15.0).abs() < 1e-6);

        // COO -> CSR -> COO round trip must also be lossless.
        let (csr_vals, cols, row_ptrs) = sparse_to_csr(&sparse_a)?;
        let rebuilt = csr_to_sparse(&csr_vals, &cols, &row_ptrs, &[3, 3])?;
        let want = sparse_a.to_dense()?.to_vec()?;
        let got = rebuilt.to_dense()?.to_vec()?;
        for (w, g) in want.iter().zip(got.iter()) {
            assert!((w - g).abs() < 1e-6);
        }
        Ok(())
    }

    /// Sparsity-preserving operations must not densify the tensor:
    /// nnz stays constant through scaling and transposition.
    #[test]
    fn test_sparsity_preservation() -> torsh_core::Result<()> {
        let vals = Tensor::from_data(vec![1.0, 2.0], vec![2], torsh_core::DeviceType::Cpu)?;
        // Two entries at opposite corners of a 10x10 grid.
        let idx = Tensor::from_data(
            vec![0.0, 9.0, 0.0, 9.0],
            vec![2, 2],
            torsh_core::DeviceType::Cpu,
        )?;
        let sparse = sparse_coo_tensor(&idx, &vals, &[10, 10])?;
        assert_eq!(sparse.nnz(), 2);

        let tripled = sparse_mul(&sparse, 3.0)?;
        assert_eq!(tripled.nnz(), 2);

        let flipped = sparse_transpose(&sparse)?;
        assert_eq!(flipped.nnz(), 2);

        // Full reduction: 1.0 + 2.0 = 3.0.
        let reduced = sparse_sum(&sparse, None)?;
        assert!((reduced.to_vec()?[0] - 3.0).abs() < 1e-6);
        Ok(())
    }
}