use crate::{core_ops::Tensor, TensorElement};
use scirs2_core::parallel_ops::*;
use std::collections::HashMap;
use torsh_core::{
device::DeviceType,
error::{Result, TorshError},
};
/// Sparse tensor stored in COO (coordinate) format: parallel lists of
/// N-dimensional coordinates and their corresponding non-zero values.
/// Entries are not required to be sorted or unique (see `coalesce`).
#[derive(Debug, Clone)]
pub struct SparseTensor<T: TensorElement> {
    // Coordinate of each stored element; every inner Vec has `shape.len()` entries.
    indices: Vec<Vec<usize>>,
    // Value at `indices[i]`; always the same length as `indices`.
    values: Vec<T>,
    // Dense shape of the tensor.
    shape: Vec<usize>,
    // Device this tensor lives on (constructors currently always use Cpu).
    device: DeviceType,
    // Number of stored entries, kept in sync with `indices.len()`.
    nnz: usize,
}
impl<T: TensorElement> SparseTensor<T> {
    /// Builds a COO sparse tensor from explicit coordinates and values.
    ///
    /// Validates that `indices` and `values` have the same length, that every
    /// coordinate has one entry per dimension, and that each index is within
    /// the corresponding bound of `shape`. Duplicate coordinates are accepted
    /// as-is (use `coalesce` to merge them).
    ///
    /// # Errors
    /// Returns `InvalidArgument` on any of the validation failures above.
    pub fn from_coo(indices: Vec<Vec<usize>>, values: Vec<T>, shape: Vec<usize>) -> Result<Self> {
        if indices.len() != values.len() {
            return Err(TorshError::InvalidArgument(format!(
                "Indices length ({}) must match values length ({})",
                indices.len(),
                values.len()
            )));
        }
        let ndim = shape.len();
        for (pos, coord) in indices.iter().enumerate() {
            if coord.len() != ndim {
                return Err(TorshError::InvalidArgument(format!(
                    "Index {} has {} dimensions, expected {}",
                    pos,
                    coord.len(),
                    ndim
                )));
            }
            for (axis, (&idx, &bound)) in coord.iter().zip(shape.iter()).enumerate() {
                if idx >= bound {
                    return Err(TorshError::InvalidArgument(format!(
                        "Index {} at dimension {} is out of bounds ({})",
                        idx, axis, bound
                    )));
                }
            }
        }
        let nnz = indices.len();
        Ok(Self {
            indices,
            values,
            shape,
            device: DeviceType::Cpu,
            nnz,
        })
    }
pub fn from_dense(dense: &Tensor<T>, tolerance: T) -> Result<Self>
where
T: Copy + PartialOrd + num_traits::Zero + num_traits::Signed,
{
let data = dense.data()?;
let shape = dense.shape().dims().to_vec();
let mut indices = Vec::new();
let mut values = Vec::new();
for flat_idx in 0..data.len() {
let value = data[flat_idx];
let abs_value = value.abs();
if abs_value > tolerance {
let coords = Self::flat_to_coords(flat_idx, &shape);
indices.push(coords);
values.push(value);
}
}
Self::from_coo(indices, values, shape)
}
pub fn to_dense(&self) -> Result<Tensor<T>>
where
T: Copy + num_traits::Zero,
{
let total_elements: usize = self.shape.iter().product();
let mut data = vec![<T as num_traits::Zero>::zero(); total_elements];
for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
let flat_idx = Self::coords_to_flat(coords, &self.shape);
data[flat_idx] = value;
}
Tensor::from_data(data, self.shape.clone(), self.device)
}
    /// Number of stored (non-zero) entries.
    pub fn nnz(&self) -> usize {
        self.nnz
    }
    /// Dense shape of the tensor.
    pub fn shape(&self) -> &[usize] {
        &self.shape
    }
    /// Device this tensor lives on.
    pub fn device(&self) -> DeviceType {
        self.device
    }
    /// Stored coordinates, parallel to `values()`.
    pub fn indices(&self) -> &[Vec<usize>] {
        &self.indices
    }
    /// Stored values, parallel to `indices()`.
    pub fn values(&self) -> &[T] {
        &self.values
    }
    /// Fraction of elements that are zero (1.0 = all zero, 0.0 = fully dense).
    /// NOTE(review): divides by the element count — a zero-sized shape yields
    /// NaN/инf-style results; callers appear to use non-empty shapes only.
    pub fn sparsity(&self) -> f64 {
        let total_elements: usize = self.shape.iter().product();
        1.0 - (self.nnz as f64 / total_elements as f64)
    }
    /// Approximate bytes used by this representation. Counts the payload of
    /// the index/value/shape buffers plus the struct header; Vec allocation
    /// overhead (capacity slack, per-Vec headers inside `indices`) is ignored.
    pub fn memory_usage(&self) -> usize {
        let indices_size = self.indices.len() * self.shape.len() * std::mem::size_of::<usize>();
        let values_size = self.values.len() * std::mem::size_of::<T>();
        let shape_size = self.shape.len() * std::mem::size_of::<usize>();
        indices_size + values_size + shape_size + std::mem::size_of::<Self>()
    }
    /// Memory saved relative to a dense representation (1.0 = maximal saving).
    /// Can be negative when the tensor is dense enough that COO overhead
    /// exceeds the dense buffer size.
    pub fn memory_efficiency(&self) -> f64 {
        let total_elements: usize = self.shape.iter().product();
        let dense_size = total_elements * std::mem::size_of::<T>();
        let sparse_size = self.memory_usage();
        1.0 - (sparse_size as f64 / dense_size as f64)
    }
pub fn add(&self, other: &Self) -> Result<Self>
where
T: Copy + std::ops::Add<Output = T> + num_traits::Zero + PartialEq,
{
if self.shape != other.shape {
return Err(TorshError::InvalidArgument(format!(
"Shape mismatch: {:?} vs {:?}",
self.shape, other.shape
)));
}
let mut result_map: HashMap<Vec<usize>, T> = HashMap::new();
for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
result_map.insert(coords.clone(), value);
}
for (coords, &value) in other.indices.iter().zip(other.values.iter()) {
match result_map.get_mut(coords) {
Some(existing_value) => {
*existing_value = *existing_value + value;
}
None => {
result_map.insert(coords.clone(), value);
}
}
}
let zero = <T as num_traits::Zero>::zero();
let mut indices = Vec::new();
let mut values = Vec::new();
for (coords, value) in result_map {
if value != zero {
indices.push(coords);
values.push(value);
}
}
Self::from_coo(indices, values, self.shape.clone())
}
pub fn mul(&self, other: &Self) -> Result<Self>
where
T: Copy + std::ops::Mul<Output = T> + num_traits::Zero + PartialEq,
{
if self.shape != other.shape {
return Err(TorshError::InvalidArgument(format!(
"Shape mismatch: {:?} vs {:?}",
self.shape, other.shape
)));
}
let other_map: HashMap<Vec<usize>, T> = other
.indices
.iter()
.zip(other.values.iter())
.map(|(coords, &value)| (coords.clone(), value))
.collect();
let mut indices = Vec::new();
let mut values = Vec::new();
let zero = <T as num_traits::Zero>::zero();
for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
if let Some(&other_value) = other_map.get(coords) {
let result = value * other_value;
if result != zero {
indices.push(coords.clone());
values.push(result);
}
}
}
Self::from_coo(indices, values, self.shape.clone())
}
pub fn mul_scalar(&self, scalar: T) -> Result<Self>
where
T: Copy + std::ops::Mul<Output = T> + num_traits::Zero + PartialEq,
{
let zero = <T as num_traits::Zero>::zero();
if scalar == zero {
return Self::from_coo(Vec::new(), Vec::new(), self.shape.clone());
}
let new_values: Vec<T> = self.values.iter().map(|&v| v * scalar).collect();
Self::from_coo(self.indices.clone(), new_values, self.shape.clone())
}
pub fn matmul(&self, other: &Self) -> Result<Self>
where
T: Copy
+ std::ops::Add<Output = T>
+ std::ops::Mul<Output = T>
+ num_traits::Zero
+ PartialEq,
{
if self.shape.len() != 2 || other.shape.len() != 2 {
return Err(TorshError::InvalidArgument(
"Matrix multiplication requires 2D tensors".to_string(),
));
}
if self.shape[1] != other.shape[0] {
return Err(TorshError::InvalidArgument(format!(
"Incompatible shapes for matmul: {:?} x {:?}",
self.shape, other.shape
)));
}
let m = self.shape[0];
let n = other.shape[1];
let k = self.shape[1];
let _ = (m, k, n);
let mut left_rows: HashMap<usize, Vec<(usize, T)>> = HashMap::new();
let mut right_cols: HashMap<usize, Vec<(usize, T)>> = HashMap::new();
for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
let row = coords[0];
let col = coords[1];
left_rows
.entry(row)
.or_insert_with(Vec::new)
.push((col, value));
}
for (coords, &value) in other.indices.iter().zip(other.values.iter()) {
let row = coords[0];
let col = coords[1];
right_cols
.entry(col)
.or_insert_with(Vec::new)
.push((row, value));
}
let mut result_map: HashMap<Vec<usize>, T> = HashMap::new();
let zero = <T as num_traits::Zero>::zero();
for (&row, left_row_data) in left_rows.iter() {
for (&col, right_col_data) in right_cols.iter() {
let mut sum = zero;
let mut left_iter = left_row_data.iter().peekable();
let mut right_iter = right_col_data.iter().peekable();
while let (Some(&(left_col, left_val)), Some(&(right_row, right_val))) =
(left_iter.peek(), right_iter.peek())
{
match left_col.cmp(&right_row) {
std::cmp::Ordering::Equal => {
sum = sum + (*left_val) * (*right_val);
left_iter.next();
right_iter.next();
}
std::cmp::Ordering::Less => {
left_iter.next();
}
std::cmp::Ordering::Greater => {
right_iter.next();
}
}
}
if sum != zero {
result_map.insert(vec![row, col], sum);
}
}
}
let mut indices = Vec::new();
let mut values = Vec::new();
for (coords, value) in result_map {
indices.push(coords);
values.push(value);
}
Self::from_coo(indices, values, vec![m, n])
}
fn flat_to_coords(flat_idx: usize, shape: &[usize]) -> Vec<usize> {
let mut coords = vec![0; shape.len()];
let mut remaining = flat_idx;
for i in 0..shape.len() {
let stride: usize = shape[i + 1..].iter().product();
coords[i] = remaining / stride;
remaining %= stride;
}
coords
}
fn coords_to_flat(coords: &[usize], shape: &[usize]) -> usize {
let mut flat_idx = 0;
let mut stride = 1;
for i in (0..coords.len()).rev() {
flat_idx += coords[i] * stride;
stride *= shape[i];
}
flat_idx
}
pub fn transpose(&self) -> Result<Self>
where
T: Copy,
{
if self.shape.len() != 2 {
return Err(TorshError::InvalidArgument(
"Transpose is only supported for 2D tensors".to_string(),
));
}
let new_shape = vec![self.shape[1], self.shape[0]];
let new_indices: Vec<Vec<usize>> = self
.indices
.iter()
.map(|coords| vec![coords[1], coords[0]])
.collect();
Self::from_coo(new_indices, self.values.clone(), new_shape)
}
pub fn map<F>(&self, f: F) -> Result<Self>
where
F: Fn(T) -> T,
T: Copy + num_traits::Zero + PartialEq,
{
let new_values: Vec<T> = self.values.iter().map(|&v| f(v)).collect();
let zero = <T as num_traits::Zero>::zero();
let mut filtered_indices = Vec::new();
let mut filtered_values = Vec::new();
for (coords, &value) in self.indices.iter().zip(new_values.iter()) {
if value != zero {
filtered_indices.push(coords.clone());
filtered_values.push(value);
}
}
Self::from_coo(filtered_indices, filtered_values, self.shape.clone())
}
pub fn is_valid(&self) -> bool {
if self.indices.len() != self.values.len() {
return false;
}
if self.nnz != self.indices.len() {
return false;
}
let ndim = self.shape.len();
for coords in &self.indices {
if coords.len() != ndim {
return false;
}
for (dim, &idx) in coords.iter().enumerate() {
if idx >= self.shape[dim] {
return false;
}
}
}
true
}
pub fn coalesce(&mut self) -> Result<()>
where
T: Copy + std::ops::AddAssign + num_traits::Zero + PartialEq,
{
if self.indices.is_empty() {
return Ok(());
}
let mut coord_map: HashMap<Vec<usize>, T> = HashMap::new();
for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
match coord_map.get_mut(coords) {
Some(existing_value) => {
*existing_value += value;
}
None => {
coord_map.insert(coords.clone(), value);
}
}
}
let zero = <T as num_traits::Zero>::zero();
let mut new_indices = Vec::new();
let mut new_values = Vec::new();
for (coords, value) in coord_map {
if value != zero {
new_indices.push(coords);
new_values.push(value);
}
}
self.indices = new_indices;
self.values = new_values;
self.nnz = self.indices.len();
Ok(())
}
}
/// Convenience constructors.
impl<T: TensorElement> SparseTensor<T> {
    /// Sparse identity matrix of dimension `size x size`.
    pub fn eye(size: usize) -> Result<Self>
    where
        T: Copy + num_traits::One,
    {
        let indices: Vec<Vec<usize>> = (0..size).map(|i| vec![i, i]).collect();
        let values = vec![<T as num_traits::One>::one(); size];
        Self::from_coo(indices, values, vec![size, size])
    }
    /// Builds a 2-D sparse tensor from parallel (row, col, value) triplets.
    ///
    /// # Errors
    /// Returns `InvalidArgument` if the three input vectors differ in length,
    /// or if `from_coo` rejects the assembled coordinates.
    pub fn from_triplets(
        rows: Vec<usize>,
        cols: Vec<usize>,
        vals: Vec<T>,
        shape: Vec<usize>,
    ) -> Result<Self> {
        if rows.len() != cols.len() || cols.len() != vals.len() {
            return Err(TorshError::InvalidArgument(
                "Rows, cols, and values must have the same length".to_string(),
            ));
        }
        let indices: Vec<Vec<usize>> = rows
            .into_iter()
            .zip(cols)
            .map(|(r, c)| vec![r, c])
            .collect();
        Self::from_coo(indices, vals, shape)
    }
}
/// Sparse matrix in Compressed Sparse Row (CSR) format. 2-D only.
#[derive(Debug, Clone)]
pub struct SparseCSR<T: TensorElement> {
    // Row offsets: entries of row r live at values[row_ptr[r]..row_ptr[r + 1]].
    // Length is shape[0] + 1; last element equals nnz.
    row_ptr: Vec<usize>,
    // Column index of each stored value; same length as `values`.
    col_indices: Vec<usize>,
    // Non-zero values, grouped by row.
    values: Vec<T>,
    // [num_rows, num_cols].
    shape: Vec<usize>,
    // Device this matrix lives on (constructors currently always use Cpu).
    device: DeviceType,
    // Number of stored values; equals values.len().
    nnz: usize,
}
impl<T: TensorElement> SparseCSR<T> {
    /// Builds a CSR matrix from raw components, validating the structural
    /// invariants: 2-D shape, parallel column/value arrays, a row-pointer
    /// array of length `num_rows + 1` that is non-decreasing and ends at
    /// `nnz`, and in-bounds column indices.
    ///
    /// # Errors
    /// Returns `InvalidArgument` describing the first violated invariant.
    pub fn new(
        row_ptr: Vec<usize>,
        col_indices: Vec<usize>,
        values: Vec<T>,
        shape: Vec<usize>,
    ) -> Result<Self> {
        if shape.len() != 2 {
            return Err(TorshError::InvalidArgument(
                "CSR format only supports 2D tensors".to_string(),
            ));
        }
        if col_indices.len() != values.len() {
            return Err(TorshError::InvalidArgument(format!(
                "Column indices length ({}) must match values length ({})",
                col_indices.len(),
                values.len()
            )));
        }
        if row_ptr.len() != shape[0] + 1 {
            return Err(TorshError::InvalidArgument(format!(
                "Row pointer length ({}) must be num_rows + 1 ({})",
                row_ptr.len(),
                shape[0] + 1
            )));
        }
        if row_ptr.windows(2).any(|pair| pair[1] < pair[0]) {
            return Err(TorshError::InvalidArgument(
                "Row pointers must be monotonically increasing".to_string(),
            ));
        }
        if let Some(&col_idx) = col_indices.iter().find(|&&c| c >= shape[1]) {
            return Err(TorshError::InvalidArgument(format!(
                "Column index {} out of bounds for shape {:?}",
                col_idx, shape
            )));
        }
        let nnz = values.len();
        if row_ptr.last().copied().unwrap_or(0) != nnz {
            return Err(TorshError::InvalidArgument(
                "Last row pointer must equal number of non-zero values".to_string(),
            ));
        }
        Ok(Self {
            row_ptr,
            col_indices,
            values,
            shape,
            device: DeviceType::Cpu,
            nnz,
        })
    }
pub fn from_coo(coo: &SparseTensor<T>) -> Result<Self>
where
T: Copy,
{
if coo.shape().len() != 2 {
return Err(TorshError::InvalidArgument(
"CSR format only supports 2D tensors".to_string(),
));
}
let num_rows = coo.shape()[0];
let num_cols = coo.shape()[1];
let mut entries: Vec<(usize, usize, T)> = coo
.indices()
.iter()
.zip(coo.values())
.map(|(coords, &val)| (coords[0], coords[1], val))
.collect();
entries.sort_by(|a, b| {
if a.0 == b.0 {
a.1.cmp(&b.1)
} else {
a.0.cmp(&b.0)
}
});
let mut row_ptr = vec![0; num_rows + 1];
let mut col_indices = Vec::with_capacity(entries.len());
let mut values = Vec::with_capacity(entries.len());
for (row, col, val) in entries {
col_indices.push(col);
values.push(val);
row_ptr[row + 1] += 1;
}
for i in 1..=num_rows {
row_ptr[i] += row_ptr[i - 1];
}
Self::new(row_ptr, col_indices, values, vec![num_rows, num_cols])
}
pub fn to_coo(&self) -> Result<SparseTensor<T>>
where
T: Copy,
{
let mut indices = Vec::new();
let mut values = Vec::new();
for row in 0..self.shape[0] {
let start = self.row_ptr[row];
let end = self.row_ptr[row + 1];
for idx in start..end {
indices.push(vec![row, self.col_indices[idx]]);
values.push(self.values[idx]);
}
}
SparseTensor::from_coo(indices, values, self.shape.clone())
}
pub fn to_dense(&self) -> Result<Tensor<T>>
where
T: Copy + num_traits::Zero,
{
let total_elements = self.shape[0] * self.shape[1];
let mut data = vec![<T as num_traits::Zero>::zero(); total_elements];
for row in 0..self.shape[0] {
let start = self.row_ptr[row];
let end = self.row_ptr[row + 1];
for idx in start..end {
let col = self.col_indices[idx];
let flat_idx = row * self.shape[1] + col;
data[flat_idx] = self.values[idx];
}
}
Tensor::from_data(data, self.shape.clone(), self.device)
}
pub fn matvec(&self, vec: &[T]) -> Result<Vec<T>>
where
T: Copy + std::ops::Add<Output = T> + std::ops::Mul<Output = T> + num_traits::Zero,
{
if vec.len() != self.shape[1] {
return Err(TorshError::InvalidArgument(format!(
"Vector length ({}) must match number of columns ({})",
vec.len(),
self.shape[1]
)));
}
let mut result = vec![<T as num_traits::Zero>::zero(); self.shape[0]];
result
.par_iter_mut()
.enumerate()
.for_each(|(row, result_val)| {
let start = self.row_ptr[row];
let end = self.row_ptr[row + 1];
let mut sum = <T as num_traits::Zero>::zero();
for idx in start..end {
let col = self.col_indices[idx];
sum = sum + self.values[idx] * vec[col];
}
*result_val = sum;
});
Ok(result)
}
pub fn get_row(&self, row: usize) -> Result<(Vec<usize>, Vec<T>)>
where
T: Copy,
{
if row >= self.shape[0] {
return Err(TorshError::InvalidArgument(format!(
"Row {} out of bounds for shape {:?}",
row, self.shape
)));
}
let start = self.row_ptr[row];
let end = self.row_ptr[row + 1];
let col_indices = self.col_indices[start..end].to_vec();
let values = self.values[start..end].to_vec();
Ok((col_indices, values))
}
    /// Number of stored non-zero values.
    pub fn nnz(&self) -> usize {
        self.nnz
    }
    /// Shape as [num_rows, num_cols].
    pub fn shape(&self) -> &[usize] {
        &self.shape
    }
    /// Device this matrix lives on.
    pub fn device(&self) -> DeviceType {
        self.device
    }
    /// Row-offset array (length num_rows + 1).
    pub fn row_ptr(&self) -> &[usize] {
        &self.row_ptr
    }
    /// Column index of each stored value, parallel to `values()`.
    pub fn col_indices(&self) -> &[usize] {
        &self.col_indices
    }
    /// Stored values, grouped by row.
    pub fn values(&self) -> &[T] {
        &self.values
    }
}
/// Sparse matrix in Compressed Sparse Column (CSC) format. 2-D only.
#[derive(Debug, Clone)]
pub struct SparseCSC<T: TensorElement> {
    // Column offsets: entries of column c live at values[col_ptr[c]..col_ptr[c + 1]].
    // Length is shape[1] + 1; last element equals nnz.
    col_ptr: Vec<usize>,
    // Row index of each stored value; same length as `values`.
    row_indices: Vec<usize>,
    // Non-zero values, grouped by column.
    values: Vec<T>,
    // [num_rows, num_cols].
    shape: Vec<usize>,
    // Device this matrix lives on (constructors currently always use Cpu).
    device: DeviceType,
    // Number of stored values; equals values.len().
    nnz: usize,
}
impl<T: TensorElement> SparseCSC<T> {
    /// Builds a CSC matrix from raw components, validating the structural
    /// invariants: 2-D shape, parallel row/value arrays, a column-pointer
    /// array of length `num_cols + 1` that is non-decreasing and ends at
    /// `nnz`, and in-bounds row indices.
    ///
    /// # Errors
    /// Returns `InvalidArgument` describing the first violated invariant.
    pub fn new(
        col_ptr: Vec<usize>,
        row_indices: Vec<usize>,
        values: Vec<T>,
        shape: Vec<usize>,
    ) -> Result<Self> {
        if shape.len() != 2 {
            return Err(TorshError::InvalidArgument(
                "CSC format only supports 2D tensors".to_string(),
            ));
        }
        if row_indices.len() != values.len() {
            return Err(TorshError::InvalidArgument(format!(
                "Row indices length ({}) must match values length ({})",
                row_indices.len(),
                values.len()
            )));
        }
        if col_ptr.len() != shape[1] + 1 {
            return Err(TorshError::InvalidArgument(format!(
                "Column pointer length ({}) must be num_cols + 1 ({})",
                col_ptr.len(),
                shape[1] + 1
            )));
        }
        if col_ptr.windows(2).any(|pair| pair[1] < pair[0]) {
            return Err(TorshError::InvalidArgument(
                "Column pointers must be monotonically increasing".to_string(),
            ));
        }
        if let Some(&row_idx) = row_indices.iter().find(|&&r| r >= shape[0]) {
            return Err(TorshError::InvalidArgument(format!(
                "Row index {} out of bounds for shape {:?}",
                row_idx, shape
            )));
        }
        let nnz = values.len();
        if col_ptr.last().copied().unwrap_or(0) != nnz {
            return Err(TorshError::InvalidArgument(
                "Last column pointer must equal number of non-zero values".to_string(),
            ));
        }
        Ok(Self {
            col_ptr,
            row_indices,
            values,
            shape,
            device: DeviceType::Cpu,
            nnz,
        })
    }
pub fn from_coo(coo: &SparseTensor<T>) -> Result<Self>
where
T: Copy,
{
if coo.shape().len() != 2 {
return Err(TorshError::InvalidArgument(
"CSC format only supports 2D tensors".to_string(),
));
}
let num_rows = coo.shape()[0];
let num_cols = coo.shape()[1];
let mut entries: Vec<(usize, usize, T)> = coo
.indices()
.iter()
.zip(coo.values())
.map(|(coords, &val)| (coords[0], coords[1], val))
.collect();
entries.sort_by(|a, b| {
if a.1 == b.1 {
a.0.cmp(&b.0)
} else {
a.1.cmp(&b.1)
}
});
let mut col_ptr = vec![0; num_cols + 1];
let mut row_indices = Vec::with_capacity(entries.len());
let mut values = Vec::with_capacity(entries.len());
for (row, col, val) in entries {
row_indices.push(row);
values.push(val);
col_ptr[col + 1] += 1;
}
for i in 1..=num_cols {
col_ptr[i] += col_ptr[i - 1];
}
Self::new(col_ptr, row_indices, values, vec![num_rows, num_cols])
}
pub fn to_coo(&self) -> Result<SparseTensor<T>>
where
T: Copy,
{
let mut indices = Vec::new();
let mut values = Vec::new();
for col in 0..self.shape[1] {
let start = self.col_ptr[col];
let end = self.col_ptr[col + 1];
for idx in start..end {
indices.push(vec![self.row_indices[idx], col]);
values.push(self.values[idx]);
}
}
SparseTensor::from_coo(indices, values, self.shape.clone())
}
pub fn to_dense(&self) -> Result<Tensor<T>>
where
T: Copy + num_traits::Zero,
{
let total_elements = self.shape[0] * self.shape[1];
let mut data = vec![<T as num_traits::Zero>::zero(); total_elements];
for col in 0..self.shape[1] {
let start = self.col_ptr[col];
let end = self.col_ptr[col + 1];
for idx in start..end {
let row = self.row_indices[idx];
let flat_idx = row * self.shape[1] + col;
data[flat_idx] = self.values[idx];
}
}
Tensor::from_data(data, self.shape.clone(), self.device)
}
pub fn transpose_matvec(&self, vec: &[T]) -> Result<Vec<T>>
where
T: Copy + std::ops::Add<Output = T> + std::ops::Mul<Output = T> + num_traits::Zero,
{
if vec.len() != self.shape[0] {
return Err(TorshError::InvalidArgument(format!(
"Vector length ({}) must match number of rows ({})",
vec.len(),
self.shape[0]
)));
}
let mut result = vec![<T as num_traits::Zero>::zero(); self.shape[1]];
result
.par_iter_mut()
.enumerate()
.for_each(|(col, result_val)| {
let start = self.col_ptr[col];
let end = self.col_ptr[col + 1];
let mut sum = <T as num_traits::Zero>::zero();
for idx in start..end {
let row = self.row_indices[idx];
sum = sum + self.values[idx] * vec[row];
}
*result_val = sum;
});
Ok(result)
}
pub fn get_col(&self, col: usize) -> Result<(Vec<usize>, Vec<T>)>
where
T: Copy,
{
if col >= self.shape[1] {
return Err(TorshError::InvalidArgument(format!(
"Column {} out of bounds for shape {:?}",
col, self.shape
)));
}
let start = self.col_ptr[col];
let end = self.col_ptr[col + 1];
let row_indices = self.row_indices[start..end].to_vec();
let values = self.values[start..end].to_vec();
Ok((row_indices, values))
}
    /// Number of stored non-zero values.
    pub fn nnz(&self) -> usize {
        self.nnz
    }
    /// Shape as [num_rows, num_cols].
    pub fn shape(&self) -> &[usize] {
        &self.shape
    }
    /// Device this matrix lives on.
    pub fn device(&self) -> DeviceType {
        self.device
    }
    /// Column-offset array (length num_cols + 1).
    pub fn col_ptr(&self) -> &[usize] {
        &self.col_ptr
    }
    /// Row index of each stored value, parallel to `values()`.
    pub fn row_indices(&self) -> &[usize] {
        &self.row_indices
    }
    /// Stored values, grouped by column.
    pub fn values(&self) -> &[T] {
        &self.values
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests covering COO construction/conversion/arithmetic and the
    //! CSR/CSC formats, including validation error paths.
    use super::*;
    use torsh_core::device::DeviceType;
    #[test]
    fn test_sparse_tensor_creation() {
        let indices = vec![vec![0, 0], vec![1, 2], vec![2, 1]];
        let values = vec![1.0, 2.0, 3.0];
        let shape = vec![3, 3];
        let sparse = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        assert_eq!(sparse.nnz(), 3);
        assert_eq!(sparse.shape(), &[3, 3]);
        // 3 of 9 elements stored -> sparsity = 2/3.
        assert!(sparse.sparsity() > 0.6);
    }
    #[test]
    fn test_sparse_to_dense_conversion() {
        let indices = vec![vec![0, 0], vec![1, 1], vec![2, 2]];
        let values = vec![1.0, 2.0, 3.0];
        let shape = vec![3, 3];
        let sparse = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        let dense = sparse
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected_data = vec![1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 3.0];
        assert_eq!(
            dense.data().expect("data access should succeed"),
            expected_data
        );
    }
    #[test]
    fn test_sparse_addition() {
        let indices1 = vec![vec![0, 0], vec![1, 1]];
        let values1 = vec![1.0, 2.0];
        let shape = vec![3, 3];
        let sparse1 = SparseTensor::from_coo(indices1, values1, shape.clone())
            .expect("COO sparse tensor creation should succeed");
        let indices2 = vec![vec![0, 0], vec![2, 2]];
        let values2 = vec![3.0, 4.0];
        let sparse2 = SparseTensor::from_coo(indices2, values2, shape)
            .expect("COO sparse tensor creation should succeed");
        let result = sparse1
            .add(&sparse2)
            .expect("sparse addition should succeed");
        // (0,0) overlaps and sums to 4.0; (1,1) and (2,2) pass through.
        assert_eq!(result.nnz(), 3);
        let dense_result = result
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![4.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 4.0];
        assert_eq!(
            dense_result.data().expect("data access should succeed"),
            expected
        );
    }
    #[test]
    fn test_sparse_multiplication() {
        let indices1 = vec![vec![0, 0], vec![1, 1], vec![2, 2]];
        let values1 = vec![2.0, 3.0, 4.0];
        let shape = vec![3, 3];
        let sparse1 = SparseTensor::from_coo(indices1, values1, shape.clone())
            .expect("COO sparse tensor creation should succeed");
        let indices2 = vec![vec![0, 0], vec![1, 1]];
        let values2 = vec![5.0, 6.0];
        let sparse2 = SparseTensor::from_coo(indices2, values2, shape)
            .expect("COO sparse tensor creation should succeed");
        let result = sparse1
            .mul(&sparse2)
            .expect("sparse element-wise multiplication should succeed");
        // Only coordinates present in both operands survive.
        assert_eq!(result.nnz(), 2);
        let dense_result = result
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![10.0, 0.0, 0.0, 0.0, 18.0, 0.0, 0.0, 0.0, 0.0];
        assert_eq!(
            dense_result.data().expect("data access should succeed"),
            expected
        );
    }
    #[test]
    fn test_sparse_matmul() {
        // Diagonal x diagonal: the product is the element-wise diagonal product.
        let indices1 = vec![vec![0, 0], vec![1, 1]];
        let values1 = vec![1.0, 2.0];
        let shape1 = vec![2, 2];
        let sparse1 = SparseTensor::from_coo(indices1, values1, shape1)
            .expect("COO sparse tensor creation should succeed");
        let indices2 = vec![vec![0, 0], vec![1, 1]];
        let values2 = vec![3.0, 4.0];
        let shape2 = vec![2, 2];
        let sparse2 = SparseTensor::from_coo(indices2, values2, shape2)
            .expect("COO sparse tensor creation should succeed");
        let result = sparse1
            .matmul(&sparse2)
            .expect("sparse matrix multiplication should succeed");
        assert_eq!(result.nnz(), 2);
        let dense_result = result
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![3.0, 0.0, 0.0, 8.0];
        assert_eq!(
            dense_result.data().expect("data access should succeed"),
            expected
        );
    }
    #[test]
    fn test_sparse_transpose() {
        let indices = vec![vec![0, 1], vec![1, 0], vec![2, 1]];
        let values = vec![1.0, 2.0, 3.0];
        let shape = vec![3, 2];
        let sparse = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        let transposed = sparse.transpose().expect("sparse transpose should succeed");
        assert_eq!(transposed.shape(), &[2, 3]);
        let dense_transposed = transposed
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![0.0, 2.0, 0.0, 1.0, 0.0, 3.0];
        assert_eq!(
            dense_transposed.data().expect("data access should succeed"),
            expected
        );
    }
    #[test]
    fn test_sparse_identity() {
        let eye = SparseTensor::<f32>::eye(3).expect("sparse identity creation should succeed");
        assert_eq!(eye.nnz(), 3);
        assert_eq!(eye.shape(), &[3, 3]);
        let dense_eye = eye
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0];
        assert_eq!(
            dense_eye.data().expect("data access should succeed"),
            expected
        );
    }
    #[test]
    fn test_memory_efficiency() {
        // A single stored entry in a 1000x1000 matrix is extremely sparse.
        let indices = vec![vec![0, 0]];
        let values = vec![1.0];
        let shape = vec![1000, 1000];
        let sparse = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        assert!(sparse.sparsity() > 0.999);
        assert!(sparse.memory_efficiency() > 0.9);
    }
    #[test]
    fn test_from_dense() {
        let data = vec![1.0, 0.0, 0.0, 0.0, 2.0, 0.0];
        let dense = Tensor::from_data(data, vec![2, 3], DeviceType::Cpu)
            .expect("tensor creation should succeed");
        let sparse =
            SparseTensor::from_dense(&dense, 1e-6).expect("from_dense conversion should succeed");
        assert_eq!(sparse.nnz(), 2);
        // Round-trip dense -> sparse -> dense must be lossless.
        let back_to_dense = sparse
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        assert_eq!(
            dense.data().expect("data access should succeed"),
            back_to_dense.data().expect("data access should succeed")
        );
    }
    #[test]
    fn test_coalesce() {
        // Duplicate (0,0) entries should be summed by coalesce.
        let indices = vec![vec![0, 0], vec![1, 1], vec![0, 0]];
        let values = vec![1.0, 2.0, 3.0];
        let shape = vec![2, 2];
        let mut sparse = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        assert_eq!(sparse.nnz(), 3);
        sparse.coalesce().expect("coalesce should succeed");
        assert_eq!(sparse.nnz(), 2);
        let dense = sparse
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![4.0, 0.0, 0.0, 2.0];
        assert_eq!(dense.data().expect("data access should succeed"), expected);
    }
    #[test]
    fn test_scalar_multiplication() {
        let indices = vec![vec![0, 0], vec![1, 1]];
        let values = vec![2.0, 3.0];
        let shape = vec![2, 2];
        let sparse = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        let result = sparse
            .mul_scalar(2.0)
            .expect("scalar multiplication should succeed");
        assert_eq!(result.nnz(), 2);
        let dense_result = result
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![4.0, 0.0, 0.0, 6.0];
        assert_eq!(
            dense_result.data().expect("data access should succeed"),
            expected
        );
    }
    #[test]
    fn test_map_function() {
        let indices = vec![vec![0, 0], vec![1, 1]];
        let values = vec![2.0, 3.0];
        let shape = vec![2, 2];
        let sparse = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        let result = sparse.map(|x| x * x).expect("map operation should succeed");
        assert_eq!(result.nnz(), 2);
        let dense_result = result
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![4.0, 0.0, 0.0, 9.0];
        assert_eq!(
            dense_result.data().expect("data access should succeed"),
            expected
        );
    }
    #[test]
    fn test_error_cases() {
        // Mismatched indices/values lengths.
        let indices = vec![vec![0, 0]];
        let values = vec![1.0, 2.0];
        let shape = vec![2, 2];
        assert!(SparseTensor::from_coo(indices, values, shape).is_err());
        // Out-of-bounds coordinate.
        let indices = vec![vec![2, 0]];
        let values = vec![1.0];
        let shape = vec![2, 2];
        assert!(SparseTensor::from_coo(indices, values, shape).is_err());
    }
    #[test]
    fn test_csr_creation() {
        let row_ptr = vec![0, 2, 3, 5];
        let col_indices = vec![0, 2, 1, 0, 2];
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let shape = vec![3, 3];
        let sparse = SparseCSR::new(row_ptr, col_indices, values, shape)
            .expect("CSR creation should succeed");
        assert_eq!(sparse.nnz(), 5);
        assert_eq!(sparse.shape(), &[3, 3]);
    }
    #[test]
    fn test_csr_to_dense() {
        let row_ptr = vec![0, 2, 3, 5];
        let col_indices = vec![0, 2, 1, 0, 2];
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let shape = vec![3, 3];
        let sparse = SparseCSR::new(row_ptr, col_indices, values, shape)
            .expect("CSR creation should succeed");
        let dense = sparse
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 4.0, 0.0, 5.0];
        assert_eq!(dense.data().expect("data access should succeed"), expected);
    }
    #[test]
    fn test_csr_from_coo() {
        let indices = vec![vec![0, 0], vec![0, 2], vec![1, 1], vec![2, 0], vec![2, 2]];
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let shape = vec![3, 3];
        let coo = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        let csr = SparseCSR::from_coo(&coo).expect("COO sparse tensor creation should succeed");
        assert_eq!(csr.nnz(), 5);
        assert_eq!(csr.shape(), &[3, 3]);
        assert_eq!(csr.row_ptr(), &[0, 2, 3, 5]);
        assert_eq!(csr.col_indices(), &[0, 2, 1, 0, 2]);
    }
    #[test]
    fn test_csr_matvec() {
        let row_ptr = vec![0, 1, 2];
        let col_indices = vec![0, 1];
        let values = vec![1.0, 2.0];
        let shape = vec![2, 2];
        let sparse = SparseCSR::new(row_ptr, col_indices, values, shape)
            .expect("CSR creation should succeed");
        let vec = vec![3.0, 4.0];
        let result = sparse
            .matvec(&vec)
            .expect("matrix-vector multiplication should succeed");
        assert_eq!(result, vec![3.0, 8.0]);
    }
    #[test]
    fn test_csr_get_row() {
        let row_ptr = vec![0, 2, 3, 5];
        let col_indices = vec![0, 2, 1, 0, 2];
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let shape = vec![3, 3];
        let sparse = SparseCSR::new(row_ptr, col_indices, values, shape)
            .expect("CSR creation should succeed");
        let (cols, vals) = sparse.get_row(0).expect("row access should succeed");
        assert_eq!(cols, vec![0, 2]);
        assert_eq!(vals, vec![1.0, 2.0]);
        let (cols, vals) = sparse.get_row(1).expect("row access should succeed");
        assert_eq!(cols, vec![1]);
        assert_eq!(vals, vec![3.0]);
    }
    #[test]
    fn test_csr_to_coo() {
        let row_ptr = vec![0, 2, 3, 5];
        let col_indices = vec![0, 2, 1, 0, 2];
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let shape = vec![3, 3];
        let csr = SparseCSR::new(row_ptr, col_indices, values, shape)
            .expect("CSR creation should succeed");
        let coo = csr.to_coo().expect("to COO conversion should succeed");
        assert_eq!(coo.nnz(), 5);
        // CSR -> COO must preserve the dense representation.
        let dense_coo = coo
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let dense_csr = csr
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        assert_eq!(
            dense_coo.data().expect("data access should succeed"),
            dense_csr.data().expect("data access should succeed")
        );
    }
    #[test]
    fn test_csc_creation() {
        let col_ptr = vec![0, 2, 3, 5];
        let row_indices = vec![0, 2, 1, 0, 2];
        let values = vec![1.0, 4.0, 3.0, 2.0, 5.0];
        let shape = vec![3, 3];
        let sparse = SparseCSC::new(col_ptr, row_indices, values, shape)
            .expect("CSC creation should succeed");
        assert_eq!(sparse.nnz(), 5);
        assert_eq!(sparse.shape(), &[3, 3]);
    }
    #[test]
    fn test_csc_to_dense() {
        let col_ptr = vec![0, 2, 3, 5];
        let row_indices = vec![0, 2, 1, 0, 2];
        let values = vec![1.0, 4.0, 3.0, 2.0, 5.0];
        let shape = vec![3, 3];
        let sparse = SparseCSC::new(col_ptr, row_indices, values, shape)
            .expect("CSC creation should succeed");
        let dense = sparse
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let expected = vec![1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 4.0, 0.0, 5.0];
        assert_eq!(dense.data().expect("data access should succeed"), expected);
    }
    #[test]
    fn test_csc_from_coo() {
        let indices = vec![vec![0, 0], vec![0, 2], vec![1, 1], vec![2, 0], vec![2, 2]];
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let shape = vec![3, 3];
        let coo = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        let csc = SparseCSC::from_coo(&coo).expect("COO sparse tensor creation should succeed");
        assert_eq!(csc.nnz(), 5);
        assert_eq!(csc.shape(), &[3, 3]);
        assert_eq!(csc.col_ptr(), &[0, 2, 3, 5]);
        assert_eq!(csc.row_indices(), &[0, 2, 1, 0, 2]);
    }
    #[test]
    fn test_csc_transpose_matvec() {
        let col_ptr = vec![0, 1, 2];
        let row_indices = vec![0, 1];
        let values = vec![1.0, 2.0];
        let shape = vec![2, 2];
        let sparse = SparseCSC::new(col_ptr, row_indices, values, shape)
            .expect("CSC creation should succeed");
        let vec = vec![3.0, 4.0];
        let result = sparse
            .transpose_matvec(&vec)
            .expect("transpose matrix-vector multiplication should succeed");
        assert_eq!(result, vec![3.0, 8.0]);
    }
    #[test]
    fn test_csc_get_col() {
        let col_ptr = vec![0, 2, 3, 5];
        let row_indices = vec![0, 2, 1, 0, 2];
        let values = vec![1.0, 4.0, 3.0, 2.0, 5.0];
        let shape = vec![3, 3];
        let sparse = SparseCSC::new(col_ptr, row_indices, values, shape)
            .expect("CSC creation should succeed");
        let (rows, vals) = sparse.get_col(0).expect("column access should succeed");
        assert_eq!(rows, vec![0, 2]);
        assert_eq!(vals, vec![1.0, 4.0]);
        let (rows, vals) = sparse.get_col(1).expect("column access should succeed");
        assert_eq!(rows, vec![1]);
        assert_eq!(vals, vec![3.0]);
    }
    #[test]
    fn test_csc_to_coo() {
        let col_ptr = vec![0, 2, 3, 5];
        let row_indices = vec![0, 2, 1, 0, 2];
        let values = vec![1.0, 4.0, 3.0, 2.0, 5.0];
        let shape = vec![3, 3];
        let csc = SparseCSC::new(col_ptr, row_indices, values, shape)
            .expect("CSC creation should succeed");
        let coo = csc.to_coo().expect("to COO conversion should succeed");
        assert_eq!(coo.nnz(), 5);
        // CSC -> COO must preserve the dense representation.
        let dense_coo = coo
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let dense_csc = csc
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        assert_eq!(
            dense_coo.data().expect("data access should succeed"),
            dense_csc.data().expect("data access should succeed")
        );
    }
    #[test]
    fn test_format_conversions() {
        // Round trip COO -> CSR -> COO -> CSC -> COO; dense views must agree.
        let indices = vec![vec![0, 0], vec![0, 2], vec![1, 1], vec![2, 0], vec![2, 2]];
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let shape = vec![3, 3];
        let coo1 = SparseTensor::from_coo(indices, values, shape)
            .expect("COO sparse tensor creation should succeed");
        let csr = SparseCSR::from_coo(&coo1).expect("COO sparse tensor creation should succeed");
        let coo2 = csr.to_coo().expect("to COO conversion should succeed");
        let csc = SparseCSC::from_coo(&coo2).expect("COO sparse tensor creation should succeed");
        let coo3 = csc.to_coo().expect("to COO conversion should succeed");
        let dense1 = coo1
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let dense2 = coo2
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        let dense3 = coo3
            .to_dense()
            .expect("sparse to dense conversion should succeed");
        assert_eq!(
            dense1.data().expect("data access should succeed"),
            dense2.data().expect("data access should succeed")
        );
        assert_eq!(
            dense2.data().expect("data access should succeed"),
            dense3.data().expect("data access should succeed")
        );
    }
    #[test]
    fn test_csr_error_cases() {
        // Non-2D shape.
        let row_ptr = vec![0, 1];
        let col_indices = vec![0];
        let values = vec![1.0];
        let shape = vec![1];
        assert!(SparseCSR::new(row_ptr, col_indices, values, shape).is_err());
        // col_indices/values length mismatch.
        let row_ptr = vec![0, 2];
        let col_indices = vec![0];
        let values = vec![1.0, 2.0];
        let shape = vec![1, 2];
        assert!(SparseCSR::new(row_ptr, col_indices, values, shape).is_err());
        // Non-monotonic row pointers.
        let row_ptr = vec![0, 2, 1];
        let col_indices = vec![0, 1];
        let values = vec![1.0, 2.0];
        let shape = vec![2, 2];
        assert!(SparseCSR::new(row_ptr, col_indices, values, shape).is_err());
        // Out-of-bounds column index.
        let row_ptr = vec![0, 1];
        let col_indices = vec![5];
        let values = vec![1.0];
        let shape = vec![1, 2];
        assert!(SparseCSR::new(row_ptr, col_indices, values, shape).is_err());
    }
    #[test]
    fn test_csc_error_cases() {
        // Non-2D shape.
        let col_ptr = vec![0, 1];
        let row_indices = vec![0];
        let values = vec![1.0];
        let shape = vec![1];
        assert!(SparseCSC::new(col_ptr, row_indices, values, shape).is_err());
        // row_indices/values length mismatch.
        let col_ptr = vec![0, 2];
        let row_indices = vec![0];
        let values = vec![1.0, 2.0];
        let shape = vec![2, 1];
        assert!(SparseCSC::new(col_ptr, row_indices, values, shape).is_err());
        // Non-monotonic column pointers.
        let col_ptr = vec![0, 2, 1];
        let row_indices = vec![0, 1];
        let values = vec![1.0, 2.0];
        let shape = vec![2, 2];
        assert!(SparseCSC::new(col_ptr, row_indices, values, shape).is_err());
        // Out-of-bounds row index.
        let col_ptr = vec![0, 1];
        let row_indices = vec![5];
        let values = vec![1.0];
        let shape = vec![2, 1];
        assert!(SparseCSC::new(col_ptr, row_indices, values, shape).is_err());
    }
}