1use crate::{core_ops::Tensor, TensorElement};
15use scirs2_core::parallel_ops::*;
16use std::collections::HashMap;
17use torsh_core::{
18 device::DeviceType,
19 error::{Result, TorshError},
20}; #[derive(Debug, Clone)]
pub struct SparseTensor<T: TensorElement> {
    /// Coordinate list: `indices[i]` holds one index per dimension for `values[i]`.
    indices: Vec<Vec<usize>>,
    /// Stored (non-zero) values, parallel to `indices`.
    values: Vec<T>,
    /// Logical dense shape of the tensor.
    shape: Vec<usize>,
    /// Device the tensor is associated with (`from_coo` always sets CPU).
    device: DeviceType,
    /// Number of stored entries; kept equal to `indices.len()`.
    nnz: usize,
}
56
57impl<T: TensorElement> SparseTensor<T> {
58 pub fn from_coo(indices: Vec<Vec<usize>>, values: Vec<T>, shape: Vec<usize>) -> Result<Self> {
71 if indices.len() != values.len() {
72 return Err(TorshError::InvalidArgument(format!(
73 "Indices length ({}) must match values length ({})",
74 indices.len(),
75 values.len()
76 )));
77 }
78
79 let ndim = shape.len();
80 for (i, coord) in indices.iter().enumerate() {
81 if coord.len() != ndim {
82 return Err(TorshError::InvalidArgument(format!(
83 "Index {} has {} dimensions, expected {}",
84 i,
85 coord.len(),
86 ndim
87 )));
88 }
89
90 for (dim, &idx) in coord.iter().enumerate() {
91 if idx >= shape[dim] {
92 return Err(TorshError::InvalidArgument(format!(
93 "Index {} at dimension {} is out of bounds ({})",
94 idx, dim, shape[dim]
95 )));
96 }
97 }
98 }
99
100 Ok(Self {
101 nnz: indices.len(),
102 indices,
103 values,
104 shape,
105 device: DeviceType::Cpu,
106 })
107 }
108
109 pub fn from_dense(dense: &Tensor<T>, tolerance: T) -> Result<Self>
118 where
119 T: Copy + PartialOrd + num_traits::Zero + num_traits::Signed,
120 {
121 let data = dense.data()?;
122 let shape = dense.shape().dims().to_vec();
123
124 let mut indices = Vec::new();
125 let mut values = Vec::new();
126
127 for flat_idx in 0..data.len() {
129 let value = data[flat_idx];
130
131 let abs_value = value.abs();
133
134 if abs_value > tolerance {
136 let coords = Self::flat_to_coords(flat_idx, &shape);
138 indices.push(coords);
139 values.push(value);
140 }
141 }
142
143 Self::from_coo(indices, values, shape)
144 }
145
146 pub fn to_dense(&self) -> Result<Tensor<T>>
151 where
152 T: Copy + num_traits::Zero,
153 {
154 let total_elements: usize = self.shape.iter().product();
155 let mut data = vec![<T as num_traits::Zero>::zero(); total_elements];
156
157 for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
159 let flat_idx = Self::coords_to_flat(coords, &self.shape);
160 data[flat_idx] = value;
161 }
162
163 Tensor::from_data(data, self.shape.clone(), self.device)
164 }
165
    /// Number of stored (non-zero) entries.
    pub fn nnz(&self) -> usize {
        self.nnz
    }

    /// Logical dense shape of the tensor.
    pub fn shape(&self) -> &[usize] {
        &self.shape
    }

    /// Device this tensor is associated with.
    pub fn device(&self) -> DeviceType {
        self.device
    }

    /// Per-entry coordinate lists, parallel to `values()`.
    pub fn indices(&self) -> &[Vec<usize>] {
        &self.indices
    }

    /// Stored values, parallel to `indices()`.
    pub fn values(&self) -> &[T] {
        &self.values
    }
190
191 pub fn sparsity(&self) -> f64 {
193 let total_elements: usize = self.shape.iter().product();
194 1.0 - (self.nnz as f64 / total_elements as f64)
195 }
196
    /// Approximate heap footprint of this sparse tensor, in bytes.
    ///
    /// Sums the coordinate data (`nnz * ndim` indices), the value buffer,
    /// the shape vector, and the struct itself.
    /// NOTE(review): this is an estimate — it ignores the per-`Vec` header
    /// (pointer/len/capacity) of each inner coordinate vector and any
    /// unused `Vec` capacity.
    pub fn memory_usage(&self) -> usize {
        let indices_size = self.indices.len() * self.shape.len() * std::mem::size_of::<usize>();
        let values_size = self.values.len() * std::mem::size_of::<T>();
        let shape_size = self.shape.len() * std::mem::size_of::<usize>();

        indices_size + values_size + shape_size + std::mem::size_of::<Self>()
    }
205
206 pub fn memory_efficiency(&self) -> f64 {
208 let total_elements: usize = self.shape.iter().product();
209 let dense_size = total_elements * std::mem::size_of::<T>();
210 let sparse_size = self.memory_usage();
211
212 1.0 - (sparse_size as f64 / dense_size as f64)
213 }
214
215 pub fn add(&self, other: &Self) -> Result<Self>
223 where
224 T: Copy + std::ops::Add<Output = T> + num_traits::Zero + PartialEq,
225 {
226 if self.shape != other.shape {
227 return Err(TorshError::InvalidArgument(format!(
228 "Shape mismatch: {:?} vs {:?}",
229 self.shape, other.shape
230 )));
231 }
232
233 let mut result_map: HashMap<Vec<usize>, T> = HashMap::new();
235
236 for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
238 result_map.insert(coords.clone(), value);
239 }
240
241 for (coords, &value) in other.indices.iter().zip(other.values.iter()) {
243 match result_map.get_mut(coords) {
244 Some(existing_value) => {
245 *existing_value = *existing_value + value;
246 }
247 None => {
248 result_map.insert(coords.clone(), value);
249 }
250 }
251 }
252
253 let zero = <T as num_traits::Zero>::zero();
255 let mut indices = Vec::new();
256 let mut values = Vec::new();
257
258 for (coords, value) in result_map {
259 if value != zero {
260 indices.push(coords);
261 values.push(value);
262 }
263 }
264
265 Self::from_coo(indices, values, self.shape.clone())
266 }
267
268 pub fn mul(&self, other: &Self) -> Result<Self>
273 where
274 T: Copy + std::ops::Mul<Output = T> + num_traits::Zero + PartialEq,
275 {
276 if self.shape != other.shape {
277 return Err(TorshError::InvalidArgument(format!(
278 "Shape mismatch: {:?} vs {:?}",
279 self.shape, other.shape
280 )));
281 }
282
283 let other_map: HashMap<Vec<usize>, T> = other
285 .indices
286 .iter()
287 .zip(other.values.iter())
288 .map(|(coords, &value)| (coords.clone(), value))
289 .collect();
290
291 let mut indices = Vec::new();
292 let mut values = Vec::new();
293 let zero = <T as num_traits::Zero>::zero();
294
295 for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
297 if let Some(&other_value) = other_map.get(coords) {
298 let result = value * other_value;
299 if result != zero {
300 indices.push(coords.clone());
301 values.push(result);
302 }
303 }
304 }
305
306 Self::from_coo(indices, values, self.shape.clone())
307 }
308
309 pub fn mul_scalar(&self, scalar: T) -> Result<Self>
311 where
312 T: Copy + std::ops::Mul<Output = T> + num_traits::Zero + PartialEq,
313 {
314 let zero = <T as num_traits::Zero>::zero();
315 if scalar == zero {
316 return Self::from_coo(Vec::new(), Vec::new(), self.shape.clone());
318 }
319
320 let new_values: Vec<T> = self.values.iter().map(|&v| v * scalar).collect();
321
322 Self::from_coo(self.indices.clone(), new_values, self.shape.clone())
323 }
324
325 pub fn matmul(&self, other: &Self) -> Result<Self>
333 where
334 T: Copy
335 + std::ops::Add<Output = T>
336 + std::ops::Mul<Output = T>
337 + num_traits::Zero
338 + PartialEq,
339 {
340 if self.shape.len() != 2 || other.shape.len() != 2 {
341 return Err(TorshError::InvalidArgument(
342 "Matrix multiplication requires 2D tensors".to_string(),
343 ));
344 }
345
346 if self.shape[1] != other.shape[0] {
347 return Err(TorshError::InvalidArgument(format!(
348 "Incompatible shapes for matmul: {:?} x {:?}",
349 self.shape, other.shape
350 )));
351 }
352
353 let m = self.shape[0];
354 let n = other.shape[1];
355 let k = self.shape[1];
356
357 let _ = (m, k, n); let mut left_rows: HashMap<usize, Vec<(usize, T)>> = HashMap::new();
363 let mut right_cols: HashMap<usize, Vec<(usize, T)>> = HashMap::new();
364
365 for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
367 let row = coords[0];
368 let col = coords[1];
369 left_rows
370 .entry(row)
371 .or_insert_with(Vec::new)
372 .push((col, value));
373 }
374
375 for (coords, &value) in other.indices.iter().zip(other.values.iter()) {
377 let row = coords[0];
378 let col = coords[1];
379 right_cols
380 .entry(col)
381 .or_insert_with(Vec::new)
382 .push((row, value));
383 }
384
385 let mut result_map: HashMap<Vec<usize>, T> = HashMap::new();
386 let zero = <T as num_traits::Zero>::zero();
387
388 for (&row, left_row_data) in left_rows.iter() {
390 for (&col, right_col_data) in right_cols.iter() {
391 let mut sum = zero;
392
393 let mut left_iter = left_row_data.iter().peekable();
395 let mut right_iter = right_col_data.iter().peekable();
396
397 while let (Some(&(left_col, left_val)), Some(&(right_row, right_val))) =
398 (left_iter.peek(), right_iter.peek())
399 {
400 match left_col.cmp(&right_row) {
401 std::cmp::Ordering::Equal => {
402 sum = sum + (*left_val) * (*right_val);
403 left_iter.next();
404 right_iter.next();
405 }
406 std::cmp::Ordering::Less => {
407 left_iter.next();
408 }
409 std::cmp::Ordering::Greater => {
410 right_iter.next();
411 }
412 }
413 }
414
415 if sum != zero {
416 result_map.insert(vec![row, col], sum);
417 }
418 }
419 }
420
421 let mut indices = Vec::new();
423 let mut values = Vec::new();
424
425 for (coords, value) in result_map {
426 indices.push(coords);
427 values.push(value);
428 }
429
430 Self::from_coo(indices, values, vec![m, n])
431 }
432
433 fn flat_to_coords(flat_idx: usize, shape: &[usize]) -> Vec<usize> {
435 let mut coords = vec![0; shape.len()];
436 let mut remaining = flat_idx;
437
438 for i in 0..shape.len() {
439 let stride: usize = shape[i + 1..].iter().product();
440 coords[i] = remaining / stride;
441 remaining %= stride;
442 }
443
444 coords
445 }
446
447 fn coords_to_flat(coords: &[usize], shape: &[usize]) -> usize {
449 let mut flat_idx = 0;
450 let mut stride = 1;
451
452 for i in (0..coords.len()).rev() {
453 flat_idx += coords[i] * stride;
454 stride *= shape[i];
455 }
456
457 flat_idx
458 }
459
460 pub fn transpose(&self) -> Result<Self>
462 where
463 T: Copy,
464 {
465 if self.shape.len() != 2 {
466 return Err(TorshError::InvalidArgument(
467 "Transpose is only supported for 2D tensors".to_string(),
468 ));
469 }
470
471 let new_shape = vec![self.shape[1], self.shape[0]];
472 let new_indices: Vec<Vec<usize>> = self
473 .indices
474 .iter()
475 .map(|coords| vec![coords[1], coords[0]])
476 .collect();
477
478 Self::from_coo(new_indices, self.values.clone(), new_shape)
479 }
480
481 pub fn map<F>(&self, f: F) -> Result<Self>
483 where
484 F: Fn(T) -> T,
485 T: Copy + num_traits::Zero + PartialEq,
486 {
487 let new_values: Vec<T> = self.values.iter().map(|&v| f(v)).collect();
488
489 let zero = <T as num_traits::Zero>::zero();
491 let mut filtered_indices = Vec::new();
492 let mut filtered_values = Vec::new();
493
494 for (coords, &value) in self.indices.iter().zip(new_values.iter()) {
495 if value != zero {
496 filtered_indices.push(coords.clone());
497 filtered_values.push(value);
498 }
499 }
500
501 Self::from_coo(filtered_indices, filtered_values, self.shape.clone())
502 }
503
504 pub fn is_valid(&self) -> bool {
506 if self.indices.len() != self.values.len() {
508 return false;
509 }
510
511 if self.nnz != self.indices.len() {
513 return false;
514 }
515
516 let ndim = self.shape.len();
518 for coords in &self.indices {
519 if coords.len() != ndim {
520 return false;
521 }
522
523 for (dim, &idx) in coords.iter().enumerate() {
524 if idx >= self.shape[dim] {
525 return false;
526 }
527 }
528 }
529
530 true
531 }
532
533 pub fn coalesce(&mut self) -> Result<()>
535 where
536 T: Copy + std::ops::AddAssign + num_traits::Zero + PartialEq,
537 {
538 if self.indices.is_empty() {
539 return Ok(());
540 }
541
542 let mut coord_map: HashMap<Vec<usize>, T> = HashMap::new();
543
544 for (coords, &value) in self.indices.iter().zip(self.values.iter()) {
546 match coord_map.get_mut(coords) {
547 Some(existing_value) => {
548 *existing_value += value;
549 }
550 None => {
551 coord_map.insert(coords.clone(), value);
552 }
553 }
554 }
555
556 let zero = <T as num_traits::Zero>::zero();
558 let mut new_indices = Vec::new();
559 let mut new_values = Vec::new();
560
561 for (coords, value) in coord_map {
562 if value != zero {
563 new_indices.push(coords);
564 new_values.push(value);
565 }
566 }
567
568 self.indices = new_indices;
569 self.values = new_values;
570 self.nnz = self.indices.len();
571
572 Ok(())
573 }
574}
575
576impl<T: TensorElement> SparseTensor<T> {
578 pub fn eye(size: usize) -> Result<Self>
580 where
581 T: Copy + num_traits::One,
582 {
583 let mut indices = Vec::new();
584 let mut values = Vec::new();
585 let one = <T as num_traits::One>::one();
586
587 for i in 0..size {
588 indices.push(vec![i, i]);
589 values.push(one);
590 }
591
592 Self::from_coo(indices, values, vec![size, size])
593 }
594
595 pub fn from_triplets(
597 rows: Vec<usize>,
598 cols: Vec<usize>,
599 vals: Vec<T>,
600 shape: Vec<usize>,
601 ) -> Result<Self> {
602 if rows.len() != cols.len() || cols.len() != vals.len() {
603 return Err(TorshError::InvalidArgument(
604 "Rows, cols, and values must have the same length".to_string(),
605 ));
606 }
607
608 let indices: Vec<Vec<usize>> = rows
609 .into_iter()
610 .zip(cols.into_iter())
611 .map(|(r, c)| vec![r, c])
612 .collect();
613
614 Self::from_coo(indices, vals, shape)
615 }
616}
617
618#[derive(Debug, Clone)]
pub struct SparseCSR<T: TensorElement> {
    /// `row_ptr[r]..row_ptr[r + 1]` bounds row r's entries in
    /// `col_indices`/`values`; length is num_rows + 1.
    row_ptr: Vec<usize>,
    /// Column index of each stored value, grouped by row.
    col_indices: Vec<usize>,
    /// Stored (non-zero) values, parallel to `col_indices`.
    values: Vec<T>,
    /// Dense shape `[num_rows, num_cols]` (always 2-D).
    shape: Vec<usize>,
    /// Device the matrix is associated with (`new` always sets CPU).
    device: DeviceType,
    /// Number of stored values; equals `values.len()`.
    nnz: usize,
}
656
657impl<T: TensorElement> SparseCSR<T> {
658 pub fn new(
666 row_ptr: Vec<usize>,
667 col_indices: Vec<usize>,
668 values: Vec<T>,
669 shape: Vec<usize>,
670 ) -> Result<Self> {
671 if shape.len() != 2 {
672 return Err(TorshError::InvalidArgument(
673 "CSR format only supports 2D tensors".to_string(),
674 ));
675 }
676
677 if col_indices.len() != values.len() {
678 return Err(TorshError::InvalidArgument(format!(
679 "Column indices length ({}) must match values length ({})",
680 col_indices.len(),
681 values.len()
682 )));
683 }
684
685 if row_ptr.len() != shape[0] + 1 {
686 return Err(TorshError::InvalidArgument(format!(
687 "Row pointer length ({}) must be num_rows + 1 ({})",
688 row_ptr.len(),
689 shape[0] + 1
690 )));
691 }
692
693 for i in 1..row_ptr.len() {
695 if row_ptr[i] < row_ptr[i - 1] {
696 return Err(TorshError::InvalidArgument(
697 "Row pointers must be monotonically increasing".to_string(),
698 ));
699 }
700 }
701
702 for &col_idx in &col_indices {
704 if col_idx >= shape[1] {
705 return Err(TorshError::InvalidArgument(format!(
706 "Column index {} out of bounds for shape {:?}",
707 col_idx, shape
708 )));
709 }
710 }
711
712 let nnz = values.len();
713 if row_ptr.last().copied().unwrap_or(0) != nnz {
714 return Err(TorshError::InvalidArgument(
715 "Last row pointer must equal number of non-zero values".to_string(),
716 ));
717 }
718
719 Ok(Self {
720 row_ptr,
721 col_indices,
722 values,
723 shape,
724 device: DeviceType::Cpu,
725 nnz,
726 })
727 }
728
729 pub fn from_coo(coo: &SparseTensor<T>) -> Result<Self>
731 where
732 T: Copy,
733 {
734 if coo.shape().len() != 2 {
735 return Err(TorshError::InvalidArgument(
736 "CSR format only supports 2D tensors".to_string(),
737 ));
738 }
739
740 let num_rows = coo.shape()[0];
741 let num_cols = coo.shape()[1];
742
743 let mut entries: Vec<(usize, usize, T)> = coo
745 .indices()
746 .iter()
747 .zip(coo.values())
748 .map(|(coords, &val)| (coords[0], coords[1], val))
749 .collect();
750
751 entries.sort_by(|a, b| {
752 if a.0 == b.0 {
753 a.1.cmp(&b.1)
754 } else {
755 a.0.cmp(&b.0)
756 }
757 });
758
759 let mut row_ptr = vec![0; num_rows + 1];
761 let mut col_indices = Vec::with_capacity(entries.len());
762 let mut values = Vec::with_capacity(entries.len());
763
764 for (row, col, val) in entries {
765 col_indices.push(col);
766 values.push(val);
767 row_ptr[row + 1] += 1;
768 }
769
770 for i in 1..=num_rows {
772 row_ptr[i] += row_ptr[i - 1];
773 }
774
775 Self::new(row_ptr, col_indices, values, vec![num_rows, num_cols])
776 }
777
778 pub fn to_coo(&self) -> Result<SparseTensor<T>>
780 where
781 T: Copy,
782 {
783 let mut indices = Vec::new();
784 let mut values = Vec::new();
785
786 for row in 0..self.shape[0] {
787 let start = self.row_ptr[row];
788 let end = self.row_ptr[row + 1];
789
790 for idx in start..end {
791 indices.push(vec![row, self.col_indices[idx]]);
792 values.push(self.values[idx]);
793 }
794 }
795
796 SparseTensor::from_coo(indices, values, self.shape.clone())
797 }
798
799 pub fn to_dense(&self) -> Result<Tensor<T>>
801 where
802 T: Copy + num_traits::Zero,
803 {
804 let total_elements = self.shape[0] * self.shape[1];
805 let mut data = vec![<T as num_traits::Zero>::zero(); total_elements];
806
807 for row in 0..self.shape[0] {
808 let start = self.row_ptr[row];
809 let end = self.row_ptr[row + 1];
810
811 for idx in start..end {
812 let col = self.col_indices[idx];
813 let flat_idx = row * self.shape[1] + col;
814 data[flat_idx] = self.values[idx];
815 }
816 }
817
818 Tensor::from_data(data, self.shape.clone(), self.device)
819 }
820
    /// Sparse matrix–vector product: returns `y` where `y = A · x`.
    ///
    /// Rows are processed in parallel via `par_iter_mut` (brought in through
    /// `scirs2_core::parallel_ops`); each row writes only its own output
    /// slot and reads only its own `row_ptr` span, so no synchronization is
    /// required.
    ///
    /// # Errors
    /// Returns `InvalidArgument` if `vec.len()` differs from the number of
    /// columns (`shape[1]`).
    pub fn matvec(&self, vec: &[T]) -> Result<Vec<T>>
    where
        T: Copy + std::ops::Add<Output = T> + std::ops::Mul<Output = T> + num_traits::Zero,
    {
        if vec.len() != self.shape[1] {
            return Err(TorshError::InvalidArgument(format!(
                "Vector length ({}) must match number of columns ({})",
                vec.len(),
                self.shape[1]
            )));
        }

        let mut result = vec![<T as num_traits::Zero>::zero(); self.shape[0]];

        result
            .par_iter_mut()
            .enumerate()
            .for_each(|(row, result_val)| {
                // Row `row`'s non-zeros live in row_ptr[row]..row_ptr[row + 1].
                let start = self.row_ptr[row];
                let end = self.row_ptr[row + 1];
                let mut sum = <T as num_traits::Zero>::zero();

                for idx in start..end {
                    let col = self.col_indices[idx];
                    sum = sum + self.values[idx] * vec[col];
                }

                *result_val = sum;
            });

        Ok(result)
    }
855
856 pub fn get_row(&self, row: usize) -> Result<(Vec<usize>, Vec<T>)>
858 where
859 T: Copy,
860 {
861 if row >= self.shape[0] {
862 return Err(TorshError::InvalidArgument(format!(
863 "Row {} out of bounds for shape {:?}",
864 row, self.shape
865 )));
866 }
867
868 let start = self.row_ptr[row];
869 let end = self.row_ptr[row + 1];
870
871 let col_indices = self.col_indices[start..end].to_vec();
872 let values = self.values[start..end].to_vec();
873
874 Ok((col_indices, values))
875 }
876
    /// Number of stored (non-zero) values.
    pub fn nnz(&self) -> usize {
        self.nnz
    }

    /// Dense shape `[num_rows, num_cols]`.
    pub fn shape(&self) -> &[usize] {
        &self.shape
    }

    /// Device this matrix is associated with.
    pub fn device(&self) -> DeviceType {
        self.device
    }

    /// Row-offset array (length num_rows + 1).
    pub fn row_ptr(&self) -> &[usize] {
        &self.row_ptr
    }

    /// Column index of each stored value, parallel to `values()`.
    pub fn col_indices(&self) -> &[usize] {
        &self.col_indices
    }

    /// Stored values, parallel to `col_indices()`.
    pub fn values(&self) -> &[T] {
        &self.values
    }
901}
902
903#[derive(Debug, Clone)]
pub struct SparseCSC<T: TensorElement> {
    /// `col_ptr[c]..col_ptr[c + 1]` bounds column c's entries in
    /// `row_indices`/`values`; length is num_cols + 1.
    col_ptr: Vec<usize>,
    /// Row index of each stored value, grouped by column.
    row_indices: Vec<usize>,
    /// Stored (non-zero) values, parallel to `row_indices`.
    values: Vec<T>,
    /// Dense shape `[num_rows, num_cols]` (always 2-D).
    shape: Vec<usize>,
    /// Device the matrix is associated with (`new` always sets CPU).
    device: DeviceType,
    /// Number of stored values; equals `values.len()`.
    nnz: usize,
}
941
942impl<T: TensorElement> SparseCSC<T> {
943 pub fn new(
951 col_ptr: Vec<usize>,
952 row_indices: Vec<usize>,
953 values: Vec<T>,
954 shape: Vec<usize>,
955 ) -> Result<Self> {
956 if shape.len() != 2 {
957 return Err(TorshError::InvalidArgument(
958 "CSC format only supports 2D tensors".to_string(),
959 ));
960 }
961
962 if row_indices.len() != values.len() {
963 return Err(TorshError::InvalidArgument(format!(
964 "Row indices length ({}) must match values length ({})",
965 row_indices.len(),
966 values.len()
967 )));
968 }
969
970 if col_ptr.len() != shape[1] + 1 {
971 return Err(TorshError::InvalidArgument(format!(
972 "Column pointer length ({}) must be num_cols + 1 ({})",
973 col_ptr.len(),
974 shape[1] + 1
975 )));
976 }
977
978 for i in 1..col_ptr.len() {
980 if col_ptr[i] < col_ptr[i - 1] {
981 return Err(TorshError::InvalidArgument(
982 "Column pointers must be monotonically increasing".to_string(),
983 ));
984 }
985 }
986
987 for &row_idx in &row_indices {
989 if row_idx >= shape[0] {
990 return Err(TorshError::InvalidArgument(format!(
991 "Row index {} out of bounds for shape {:?}",
992 row_idx, shape
993 )));
994 }
995 }
996
997 let nnz = values.len();
998 if col_ptr.last().copied().unwrap_or(0) != nnz {
999 return Err(TorshError::InvalidArgument(
1000 "Last column pointer must equal number of non-zero values".to_string(),
1001 ));
1002 }
1003
1004 Ok(Self {
1005 col_ptr,
1006 row_indices,
1007 values,
1008 shape,
1009 device: DeviceType::Cpu,
1010 nnz,
1011 })
1012 }
1013
1014 pub fn from_coo(coo: &SparseTensor<T>) -> Result<Self>
1016 where
1017 T: Copy,
1018 {
1019 if coo.shape().len() != 2 {
1020 return Err(TorshError::InvalidArgument(
1021 "CSC format only supports 2D tensors".to_string(),
1022 ));
1023 }
1024
1025 let num_rows = coo.shape()[0];
1026 let num_cols = coo.shape()[1];
1027
1028 let mut entries: Vec<(usize, usize, T)> = coo
1030 .indices()
1031 .iter()
1032 .zip(coo.values())
1033 .map(|(coords, &val)| (coords[0], coords[1], val))
1034 .collect();
1035
1036 entries.sort_by(|a, b| {
1037 if a.1 == b.1 {
1038 a.0.cmp(&b.0)
1039 } else {
1040 a.1.cmp(&b.1)
1041 }
1042 });
1043
1044 let mut col_ptr = vec![0; num_cols + 1];
1046 let mut row_indices = Vec::with_capacity(entries.len());
1047 let mut values = Vec::with_capacity(entries.len());
1048
1049 for (row, col, val) in entries {
1050 row_indices.push(row);
1051 values.push(val);
1052 col_ptr[col + 1] += 1;
1053 }
1054
1055 for i in 1..=num_cols {
1057 col_ptr[i] += col_ptr[i - 1];
1058 }
1059
1060 Self::new(col_ptr, row_indices, values, vec![num_rows, num_cols])
1061 }
1062
1063 pub fn to_coo(&self) -> Result<SparseTensor<T>>
1065 where
1066 T: Copy,
1067 {
1068 let mut indices = Vec::new();
1069 let mut values = Vec::new();
1070
1071 for col in 0..self.shape[1] {
1072 let start = self.col_ptr[col];
1073 let end = self.col_ptr[col + 1];
1074
1075 for idx in start..end {
1076 indices.push(vec![self.row_indices[idx], col]);
1077 values.push(self.values[idx]);
1078 }
1079 }
1080
1081 SparseTensor::from_coo(indices, values, self.shape.clone())
1082 }
1083
1084 pub fn to_dense(&self) -> Result<Tensor<T>>
1086 where
1087 T: Copy + num_traits::Zero,
1088 {
1089 let total_elements = self.shape[0] * self.shape[1];
1090 let mut data = vec![<T as num_traits::Zero>::zero(); total_elements];
1091
1092 for col in 0..self.shape[1] {
1093 let start = self.col_ptr[col];
1094 let end = self.col_ptr[col + 1];
1095
1096 for idx in start..end {
1097 let row = self.row_indices[idx];
1098 let flat_idx = row * self.shape[1] + col;
1099 data[flat_idx] = self.values[idx];
1100 }
1101 }
1102
1103 Tensor::from_data(data, self.shape.clone(), self.device)
1104 }
1105
    /// Computes `y = Aᵀ · x` without materializing the transpose: each
    /// stored column of a CSC matrix is exactly one row of `Aᵀ`.
    ///
    /// Output columns are processed in parallel via `par_iter_mut` (brought
    /// in through `scirs2_core::parallel_ops`); each column writes only its
    /// own output slot, so no synchronization is required.
    ///
    /// # Errors
    /// Returns `InvalidArgument` if `vec.len()` differs from the number of
    /// rows (`shape[0]`).
    pub fn transpose_matvec(&self, vec: &[T]) -> Result<Vec<T>>
    where
        T: Copy + std::ops::Add<Output = T> + std::ops::Mul<Output = T> + num_traits::Zero,
    {
        if vec.len() != self.shape[0] {
            return Err(TorshError::InvalidArgument(format!(
                "Vector length ({}) must match number of rows ({})",
                vec.len(),
                self.shape[0]
            )));
        }

        let mut result = vec![<T as num_traits::Zero>::zero(); self.shape[1]];

        result
            .par_iter_mut()
            .enumerate()
            .for_each(|(col, result_val)| {
                // Column `col`'s non-zeros live in col_ptr[col]..col_ptr[col + 1].
                let start = self.col_ptr[col];
                let end = self.col_ptr[col + 1];
                let mut sum = <T as num_traits::Zero>::zero();

                for idx in start..end {
                    let row = self.row_indices[idx];
                    sum = sum + self.values[idx] * vec[row];
                }

                *result_val = sum;
            });

        Ok(result)
    }
1140
1141 pub fn get_col(&self, col: usize) -> Result<(Vec<usize>, Vec<T>)>
1143 where
1144 T: Copy,
1145 {
1146 if col >= self.shape[1] {
1147 return Err(TorshError::InvalidArgument(format!(
1148 "Column {} out of bounds for shape {:?}",
1149 col, self.shape
1150 )));
1151 }
1152
1153 let start = self.col_ptr[col];
1154 let end = self.col_ptr[col + 1];
1155
1156 let row_indices = self.row_indices[start..end].to_vec();
1157 let values = self.values[start..end].to_vec();
1158
1159 Ok((row_indices, values))
1160 }
1161
    /// Number of stored (non-zero) values.
    pub fn nnz(&self) -> usize {
        self.nnz
    }

    /// Dense shape `[num_rows, num_cols]`.
    pub fn shape(&self) -> &[usize] {
        &self.shape
    }

    /// Device this matrix is associated with.
    pub fn device(&self) -> DeviceType {
        self.device
    }

    /// Column-offset array (length num_cols + 1).
    pub fn col_ptr(&self) -> &[usize] {
        &self.col_ptr
    }

    /// Row index of each stored value, parallel to `values()`.
    pub fn row_indices(&self) -> &[usize] {
        &self.row_indices
    }

    /// Stored values, parallel to `row_indices()`.
    pub fn values(&self) -> &[T] {
        &self.values
    }
1186}
1187
1188#[cfg(test)]
1189mod tests {
1190 use super::*;
1191 use torsh_core::device::DeviceType;
1192
1193 #[test]
1194 fn test_sparse_tensor_creation() {
1195 let indices = vec![vec![0, 0], vec![1, 2], vec![2, 1]];
1196 let values = vec![1.0, 2.0, 3.0];
1197 let shape = vec![3, 3];
1198
1199 let sparse = SparseTensor::from_coo(indices, values, shape)
1200 .expect("COO sparse tensor creation should succeed");
1201 assert_eq!(sparse.nnz(), 3);
1202 assert_eq!(sparse.shape(), &[3, 3]);
1203 assert!(sparse.sparsity() > 0.6); }
1205
1206 #[test]
1207 fn test_sparse_to_dense_conversion() {
1208 let indices = vec![vec![0, 0], vec![1, 1], vec![2, 2]];
1209 let values = vec![1.0, 2.0, 3.0];
1210 let shape = vec![3, 3];
1211
1212 let sparse = SparseTensor::from_coo(indices, values, shape)
1213 .expect("COO sparse tensor creation should succeed");
1214 let dense = sparse
1215 .to_dense()
1216 .expect("sparse to dense conversion should succeed");
1217
1218 let expected_data = vec![1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 3.0];
1219
1220 assert_eq!(
1221 dense.data().expect("data access should succeed"),
1222 expected_data
1223 );
1224 }
1225
1226 #[test]
1227 fn test_sparse_addition() {
1228 let indices1 = vec![vec![0, 0], vec![1, 1]];
1229 let values1 = vec![1.0, 2.0];
1230 let shape = vec![3, 3];
1231 let sparse1 = SparseTensor::from_coo(indices1, values1, shape.clone())
1232 .expect("COO sparse tensor creation should succeed");
1233
1234 let indices2 = vec![vec![0, 0], vec![2, 2]];
1235 let values2 = vec![3.0, 4.0];
1236 let sparse2 = SparseTensor::from_coo(indices2, values2, shape)
1237 .expect("COO sparse tensor creation should succeed");
1238
1239 let result = sparse1
1240 .add(&sparse2)
1241 .expect("sparse addition should succeed");
1242
1243 assert_eq!(result.nnz(), 3);
1245
1246 let dense_result = result
1247 .to_dense()
1248 .expect("sparse to dense conversion should succeed");
1249 let expected = vec![4.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, 4.0];
1250 assert_eq!(
1251 dense_result.data().expect("data access should succeed"),
1252 expected
1253 );
1254 }
1255
1256 #[test]
1257 fn test_sparse_multiplication() {
1258 let indices1 = vec![vec![0, 0], vec![1, 1], vec![2, 2]];
1259 let values1 = vec![2.0, 3.0, 4.0];
1260 let shape = vec![3, 3];
1261 let sparse1 = SparseTensor::from_coo(indices1, values1, shape.clone())
1262 .expect("COO sparse tensor creation should succeed");
1263
1264 let indices2 = vec![vec![0, 0], vec![1, 1]];
1265 let values2 = vec![5.0, 6.0];
1266 let sparse2 = SparseTensor::from_coo(indices2, values2, shape)
1267 .expect("COO sparse tensor creation should succeed");
1268
1269 let result = sparse1
1270 .mul(&sparse2)
1271 .expect("sparse element-wise multiplication should succeed");
1272
1273 assert_eq!(result.nnz(), 2);
1275
1276 let dense_result = result
1277 .to_dense()
1278 .expect("sparse to dense conversion should succeed");
1279 let expected = vec![10.0, 0.0, 0.0, 0.0, 18.0, 0.0, 0.0, 0.0, 0.0];
1280 assert_eq!(
1281 dense_result.data().expect("data access should succeed"),
1282 expected
1283 );
1284 }
1285
1286 #[test]
1287 fn test_sparse_matmul() {
1288 let indices1 = vec![vec![0, 0], vec![1, 1]];
1290 let values1 = vec![1.0, 2.0];
1291 let shape1 = vec![2, 2];
1292 let sparse1 = SparseTensor::from_coo(indices1, values1, shape1)
1293 .expect("COO sparse tensor creation should succeed");
1294
1295 let indices2 = vec![vec![0, 0], vec![1, 1]];
1297 let values2 = vec![3.0, 4.0];
1298 let shape2 = vec![2, 2];
1299 let sparse2 = SparseTensor::from_coo(indices2, values2, shape2)
1300 .expect("COO sparse tensor creation should succeed");
1301
1302 let result = sparse1
1303 .matmul(&sparse2)
1304 .expect("sparse matrix multiplication should succeed");
1305
1306 assert_eq!(result.nnz(), 2);
1308
1309 let dense_result = result
1310 .to_dense()
1311 .expect("sparse to dense conversion should succeed");
1312 let expected = vec![3.0, 0.0, 0.0, 8.0];
1313 assert_eq!(
1314 dense_result.data().expect("data access should succeed"),
1315 expected
1316 );
1317 }
1318
1319 #[test]
1320 fn test_sparse_transpose() {
1321 let indices = vec![vec![0, 1], vec![1, 0], vec![2, 1]];
1322 let values = vec![1.0, 2.0, 3.0];
1323 let shape = vec![3, 2];
1324 let sparse = SparseTensor::from_coo(indices, values, shape)
1325 .expect("COO sparse tensor creation should succeed");
1326
1327 let transposed = sparse.transpose().expect("sparse transpose should succeed");
1328 assert_eq!(transposed.shape(), &[2, 3]);
1329
1330 let dense_transposed = transposed
1331 .to_dense()
1332 .expect("sparse to dense conversion should succeed");
1333 let expected = vec![0.0, 2.0, 0.0, 1.0, 0.0, 3.0];
1334 assert_eq!(
1335 dense_transposed.data().expect("data access should succeed"),
1336 expected
1337 );
1338 }
1339
1340 #[test]
1341 fn test_sparse_identity() {
1342 let eye = SparseTensor::<f32>::eye(3).expect("sparse identity creation should succeed");
1343 assert_eq!(eye.nnz(), 3);
1344 assert_eq!(eye.shape(), &[3, 3]);
1345
1346 let dense_eye = eye
1347 .to_dense()
1348 .expect("sparse to dense conversion should succeed");
1349 let expected = vec![1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0];
1350 assert_eq!(
1351 dense_eye.data().expect("data access should succeed"),
1352 expected
1353 );
1354 }
1355
1356 #[test]
1357 fn test_memory_efficiency() {
1358 let indices = vec![vec![0, 0]]; let values = vec![1.0];
1360 let shape = vec![1000, 1000]; let sparse = SparseTensor::from_coo(indices, values, shape)
1362 .expect("COO sparse tensor creation should succeed");
1363
1364 assert!(sparse.sparsity() > 0.999); assert!(sparse.memory_efficiency() > 0.9); }
1367
1368 #[test]
1369 fn test_from_dense() {
1370 let data = vec![1.0, 0.0, 0.0, 0.0, 2.0, 0.0];
1371 let dense = Tensor::from_data(data, vec![2, 3], DeviceType::Cpu)
1372 .expect("tensor creation should succeed");
1373
1374 let sparse =
1375 SparseTensor::from_dense(&dense, 1e-6).expect("from_dense conversion should succeed");
1376 assert_eq!(sparse.nnz(), 2);
1377
1378 let back_to_dense = sparse
1379 .to_dense()
1380 .expect("sparse to dense conversion should succeed");
1381 assert_eq!(
1382 dense.data().expect("data access should succeed"),
1383 back_to_dense.data().expect("data access should succeed")
1384 );
1385 }
1386
1387 #[test]
1388 fn test_coalesce() {
1389 let indices = vec![vec![0, 0], vec![1, 1], vec![0, 0]]; let values = vec![1.0, 2.0, 3.0];
1392 let shape = vec![2, 2];
1393
1394 let mut sparse = SparseTensor::from_coo(indices, values, shape)
1395 .expect("COO sparse tensor creation should succeed");
1396 assert_eq!(sparse.nnz(), 3);
1397
1398 sparse.coalesce().expect("coalesce should succeed");
1399 assert_eq!(sparse.nnz(), 2); let dense = sparse
1402 .to_dense()
1403 .expect("sparse to dense conversion should succeed");
1404 let expected = vec![4.0, 0.0, 0.0, 2.0]; assert_eq!(dense.data().expect("data access should succeed"), expected);
1406 }
1407
1408 #[test]
1409 fn test_scalar_multiplication() {
1410 let indices = vec![vec![0, 0], vec![1, 1]];
1411 let values = vec![2.0, 3.0];
1412 let shape = vec![2, 2];
1413 let sparse = SparseTensor::from_coo(indices, values, shape)
1414 .expect("COO sparse tensor creation should succeed");
1415
1416 let result = sparse
1417 .mul_scalar(2.0)
1418 .expect("scalar multiplication should succeed");
1419 assert_eq!(result.nnz(), 2);
1420
1421 let dense_result = result
1422 .to_dense()
1423 .expect("sparse to dense conversion should succeed");
1424 let expected = vec![4.0, 0.0, 0.0, 6.0];
1425 assert_eq!(
1426 dense_result.data().expect("data access should succeed"),
1427 expected
1428 );
1429 }
1430
1431 #[test]
1432 fn test_map_function() {
1433 let indices = vec![vec![0, 0], vec![1, 1]];
1434 let values = vec![2.0, 3.0];
1435 let shape = vec![2, 2];
1436 let sparse = SparseTensor::from_coo(indices, values, shape)
1437 .expect("COO sparse tensor creation should succeed");
1438
1439 let result = sparse.map(|x| x * x).expect("map operation should succeed"); assert_eq!(result.nnz(), 2);
1441
1442 let dense_result = result
1443 .to_dense()
1444 .expect("sparse to dense conversion should succeed");
1445 let expected = vec![4.0, 0.0, 0.0, 9.0];
1446 assert_eq!(
1447 dense_result.data().expect("data access should succeed"),
1448 expected
1449 );
1450 }
1451
1452 #[test]
1453 fn test_error_cases() {
1454 let indices = vec![vec![0, 0]];
1456 let values = vec![1.0, 2.0]; let shape = vec![2, 2];
1458 assert!(SparseTensor::from_coo(indices, values, shape).is_err());
1459
1460 let indices = vec![vec![2, 0]]; let values = vec![1.0];
1463 let shape = vec![2, 2];
1464 assert!(SparseTensor::from_coo(indices, values, shape).is_err());
1465 }
1466
1467 #[test]
1469 fn test_csr_creation() {
1470 let row_ptr = vec![0, 2, 3, 5];
1475 let col_indices = vec![0, 2, 1, 0, 2];
1476 let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
1477 let shape = vec![3, 3];
1478
1479 let sparse = SparseCSR::new(row_ptr, col_indices, values, shape)
1480 .expect("CSR creation should succeed");
1481 assert_eq!(sparse.nnz(), 5);
1482 assert_eq!(sparse.shape(), &[3, 3]);
1483 }
1484
1485 #[test]
1486 fn test_csr_to_dense() {
1487 let row_ptr = vec![0, 2, 3, 5];
1488 let col_indices = vec![0, 2, 1, 0, 2];
1489 let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
1490 let shape = vec![3, 3];
1491
1492 let sparse = SparseCSR::new(row_ptr, col_indices, values, shape)
1493 .expect("CSR creation should succeed");
1494 let dense = sparse
1495 .to_dense()
1496 .expect("sparse to dense conversion should succeed");
1497
1498 let expected = vec![1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 4.0, 0.0, 5.0];
1499 assert_eq!(dense.data().expect("data access should succeed"), expected);
1500 }
1501
1502 #[test]
1503 fn test_csr_from_coo() {
1504 let indices = vec![vec![0, 0], vec![0, 2], vec![1, 1], vec![2, 0], vec![2, 2]];
1505 let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
1506 let shape = vec![3, 3];
1507
1508 let coo = SparseTensor::from_coo(indices, values, shape)
1509 .expect("COO sparse tensor creation should succeed");
1510 let csr = SparseCSR::from_coo(&coo).expect("COO sparse tensor creation should succeed");
1511
1512 assert_eq!(csr.nnz(), 5);
1513 assert_eq!(csr.shape(), &[3, 3]);
1514
1515 assert_eq!(csr.row_ptr(), &[0, 2, 3, 5]);
1517 assert_eq!(csr.col_indices(), &[0, 2, 1, 0, 2]);
1518 }
1519
1520 #[test]
1521 fn test_csr_matvec() {
1522 let row_ptr = vec![0, 1, 2];
1524 let col_indices = vec![0, 1];
1525 let values = vec![1.0, 2.0];
1526 let shape = vec![2, 2];
1527
1528 let sparse = SparseCSR::new(row_ptr, col_indices, values, shape)
1529 .expect("CSR creation should succeed");
1530 let vec = vec![3.0, 4.0];
1531 let result = sparse
1532 .matvec(&vec)
1533 .expect("matrix-vector multiplication should succeed");
1534
1535 assert_eq!(result, vec![3.0, 8.0]);
1537 }
1538
1539 #[test]
1540 fn test_csr_get_row() {
1541 let row_ptr = vec![0, 2, 3, 5];
1542 let col_indices = vec![0, 2, 1, 0, 2];
1543 let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
1544 let shape = vec![3, 3];
1545
1546 let sparse = SparseCSR::new(row_ptr, col_indices, values, shape)
1547 .expect("CSR creation should succeed");
1548
1549 let (cols, vals) = sparse.get_row(0).expect("row access should succeed");
1551 assert_eq!(cols, vec![0, 2]);
1552 assert_eq!(vals, vec![1.0, 2.0]);
1553
1554 let (cols, vals) = sparse.get_row(1).expect("row access should succeed");
1556 assert_eq!(cols, vec![1]);
1557 assert_eq!(vals, vec![3.0]);
1558 }
1559
1560 #[test]
1561 fn test_csr_to_coo() {
1562 let row_ptr = vec![0, 2, 3, 5];
1563 let col_indices = vec![0, 2, 1, 0, 2];
1564 let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
1565 let shape = vec![3, 3];
1566
1567 let csr = SparseCSR::new(row_ptr, col_indices, values, shape)
1568 .expect("CSR creation should succeed");
1569 let coo = csr.to_coo().expect("to COO conversion should succeed");
1570
1571 assert_eq!(coo.nnz(), 5);
1572 let dense_coo = coo
1573 .to_dense()
1574 .expect("sparse to dense conversion should succeed");
1575 let dense_csr = csr
1576 .to_dense()
1577 .expect("sparse to dense conversion should succeed");
1578 assert_eq!(
1579 dense_coo.data().expect("data access should succeed"),
1580 dense_csr.data().expect("data access should succeed")
1581 );
1582 }
1583
1584 #[test]
1586 fn test_csc_creation() {
1587 let col_ptr = vec![0, 2, 3, 5];
1592 let row_indices = vec![0, 2, 1, 0, 2];
1593 let values = vec![1.0, 4.0, 3.0, 2.0, 5.0];
1594 let shape = vec![3, 3];
1595
1596 let sparse = SparseCSC::new(col_ptr, row_indices, values, shape)
1597 .expect("CSC creation should succeed");
1598 assert_eq!(sparse.nnz(), 5);
1599 assert_eq!(sparse.shape(), &[3, 3]);
1600 }
1601
1602 #[test]
1603 fn test_csc_to_dense() {
1604 let col_ptr = vec![0, 2, 3, 5];
1605 let row_indices = vec![0, 2, 1, 0, 2];
1606 let values = vec![1.0, 4.0, 3.0, 2.0, 5.0];
1607 let shape = vec![3, 3];
1608
1609 let sparse = SparseCSC::new(col_ptr, row_indices, values, shape)
1610 .expect("CSC creation should succeed");
1611 let dense = sparse
1612 .to_dense()
1613 .expect("sparse to dense conversion should succeed");
1614
1615 let expected = vec![1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 4.0, 0.0, 5.0];
1616 assert_eq!(dense.data().expect("data access should succeed"), expected);
1617 }
1618
1619 #[test]
1620 fn test_csc_from_coo() {
1621 let indices = vec![vec![0, 0], vec![0, 2], vec![1, 1], vec![2, 0], vec![2, 2]];
1622 let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
1623 let shape = vec![3, 3];
1624
1625 let coo = SparseTensor::from_coo(indices, values, shape)
1626 .expect("COO sparse tensor creation should succeed");
1627 let csc = SparseCSC::from_coo(&coo).expect("COO sparse tensor creation should succeed");
1628
1629 assert_eq!(csc.nnz(), 5);
1630 assert_eq!(csc.shape(), &[3, 3]);
1631
1632 assert_eq!(csc.col_ptr(), &[0, 2, 3, 5]);
1634 assert_eq!(csc.row_indices(), &[0, 2, 1, 0, 2]);
1635 }
1636
1637 #[test]
1638 fn test_csc_transpose_matvec() {
1639 let col_ptr = vec![0, 1, 2];
1642 let row_indices = vec![0, 1];
1643 let values = vec![1.0, 2.0];
1644 let shape = vec![2, 2];
1645
1646 let sparse = SparseCSC::new(col_ptr, row_indices, values, shape)
1647 .expect("CSC creation should succeed");
1648 let vec = vec![3.0, 4.0];
1649 let result = sparse
1650 .transpose_matvec(&vec)
1651 .expect("transpose matrix-vector multiplication should succeed");
1652
1653 assert_eq!(result, vec![3.0, 8.0]);
1655 }
1656
1657 #[test]
1658 fn test_csc_get_col() {
1659 let col_ptr = vec![0, 2, 3, 5];
1660 let row_indices = vec![0, 2, 1, 0, 2];
1661 let values = vec![1.0, 4.0, 3.0, 2.0, 5.0];
1662 let shape = vec![3, 3];
1663
1664 let sparse = SparseCSC::new(col_ptr, row_indices, values, shape)
1665 .expect("CSC creation should succeed");
1666
1667 let (rows, vals) = sparse.get_col(0).expect("column access should succeed");
1669 assert_eq!(rows, vec![0, 2]);
1670 assert_eq!(vals, vec![1.0, 4.0]);
1671
1672 let (rows, vals) = sparse.get_col(1).expect("column access should succeed");
1674 assert_eq!(rows, vec![1]);
1675 assert_eq!(vals, vec![3.0]);
1676 }
1677
1678 #[test]
1679 fn test_csc_to_coo() {
1680 let col_ptr = vec![0, 2, 3, 5];
1681 let row_indices = vec![0, 2, 1, 0, 2];
1682 let values = vec![1.0, 4.0, 3.0, 2.0, 5.0];
1683 let shape = vec![3, 3];
1684
1685 let csc = SparseCSC::new(col_ptr, row_indices, values, shape)
1686 .expect("CSC creation should succeed");
1687 let coo = csc.to_coo().expect("to COO conversion should succeed");
1688
1689 assert_eq!(coo.nnz(), 5);
1690 let dense_coo = coo
1691 .to_dense()
1692 .expect("sparse to dense conversion should succeed");
1693 let dense_csc = csc
1694 .to_dense()
1695 .expect("sparse to dense conversion should succeed");
1696 assert_eq!(
1697 dense_coo.data().expect("data access should succeed"),
1698 dense_csc.data().expect("data access should succeed")
1699 );
1700 }
1701
1702 #[test]
1703 fn test_format_conversions() {
1704 let indices = vec![vec![0, 0], vec![0, 2], vec![1, 1], vec![2, 0], vec![2, 2]];
1706 let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
1707 let shape = vec![3, 3];
1708
1709 let coo1 = SparseTensor::from_coo(indices, values, shape)
1710 .expect("COO sparse tensor creation should succeed");
1711 let csr = SparseCSR::from_coo(&coo1).expect("COO sparse tensor creation should succeed");
1712 let coo2 = csr.to_coo().expect("to COO conversion should succeed");
1713 let csc = SparseCSC::from_coo(&coo2).expect("COO sparse tensor creation should succeed");
1714 let coo3 = csc.to_coo().expect("to COO conversion should succeed");
1715
1716 let dense1 = coo1
1718 .to_dense()
1719 .expect("sparse to dense conversion should succeed");
1720 let dense2 = coo2
1721 .to_dense()
1722 .expect("sparse to dense conversion should succeed");
1723 let dense3 = coo3
1724 .to_dense()
1725 .expect("sparse to dense conversion should succeed");
1726
1727 assert_eq!(
1728 dense1.data().expect("data access should succeed"),
1729 dense2.data().expect("data access should succeed")
1730 );
1731 assert_eq!(
1732 dense2.data().expect("data access should succeed"),
1733 dense3.data().expect("data access should succeed")
1734 );
1735 }
1736
1737 #[test]
1738 fn test_csr_error_cases() {
1739 let row_ptr = vec![0, 1];
1741 let col_indices = vec![0];
1742 let values = vec![1.0];
1743 let shape = vec![1]; assert!(SparseCSR::new(row_ptr, col_indices, values, shape).is_err());
1745
1746 let row_ptr = vec![0, 2];
1748 let col_indices = vec![0];
1749 let values = vec![1.0, 2.0]; let shape = vec![1, 2];
1751 assert!(SparseCSR::new(row_ptr, col_indices, values, shape).is_err());
1752
1753 let row_ptr = vec![0, 2, 1]; let col_indices = vec![0, 1];
1756 let values = vec![1.0, 2.0];
1757 let shape = vec![2, 2];
1758 assert!(SparseCSR::new(row_ptr, col_indices, values, shape).is_err());
1759
1760 let row_ptr = vec![0, 1];
1762 let col_indices = vec![5]; let values = vec![1.0];
1764 let shape = vec![1, 2];
1765 assert!(SparseCSR::new(row_ptr, col_indices, values, shape).is_err());
1766 }
1767
1768 #[test]
1769 fn test_csc_error_cases() {
1770 let col_ptr = vec![0, 1];
1772 let row_indices = vec![0];
1773 let values = vec![1.0];
1774 let shape = vec![1]; assert!(SparseCSC::new(col_ptr, row_indices, values, shape).is_err());
1776
1777 let col_ptr = vec![0, 2];
1779 let row_indices = vec![0];
1780 let values = vec![1.0, 2.0]; let shape = vec![2, 1];
1782 assert!(SparseCSC::new(col_ptr, row_indices, values, shape).is_err());
1783
1784 let col_ptr = vec![0, 2, 1]; let row_indices = vec![0, 1];
1787 let values = vec![1.0, 2.0];
1788 let shape = vec![2, 2];
1789 assert!(SparseCSC::new(col_ptr, row_indices, values, shape).is_err());
1790
1791 let col_ptr = vec![0, 1];
1793 let row_indices = vec![5]; let values = vec![1.0];
1795 let shape = vec![2, 1];
1796 assert!(SparseCSC::new(col_ptr, row_indices, values, shape).is_err());
1797 }
1798}