1pub mod bsc;
12pub mod bsr;
13pub mod csf;
14pub mod csr5;
15pub mod poly_precond;
16pub mod sell;
17
18use crate::csr::CsrMatrix;
19use crate::error::{SparseError, SparseResult};
20use scirs2_core::numeric::{Float, SparseElement, Zero};
21use std::fmt::Debug;
22
/// ELLPACK sparse matrix storage.
///
/// Every row owns exactly `max_nnz_per_row` (column, value) slots in a dense
/// row-major `nrows x max_nnz_per_row` table. Rows with fewer stored entries
/// are left-justified and padded at the tail with [`ELLPACK_PADDING_COL`] /
/// zero, giving SpMV a regular, branch-light inner loop.
#[derive(Debug, Clone)]
pub struct EllpackMatrix<T> {
    /// Number of rows of the logical matrix.
    pub nrows: usize,
    /// Number of columns of the logical matrix.
    pub ncols: usize,
    /// Slot-table width: the maximum number of stored entries in any row.
    pub max_nnz_per_row: usize,
    /// Column index per slot (`nrows * max_nnz_per_row` entries, row-major);
    /// padding slots (tail of each row) hold [`ELLPACK_PADDING_COL`].
    pub col_indices: Vec<usize>,
    /// Value per slot, aligned with `col_indices`; padding slots hold zero.
    pub values: Vec<T>,
}
55
/// Sentinel column index marking an unused (padding) ELL/HYB slot.
/// `usize::MAX` cannot collide with a real column index in practice.
pub const ELLPACK_PADDING_COL: usize = usize::MAX;
58
59impl<T> EllpackMatrix<T>
60where
61 T: Clone + Copy + Zero + SparseElement + Debug,
62{
63 pub fn from_csr(csr: &CsrMatrix<T>) -> SparseResult<Self> {
68 let (nrows, ncols) = csr.shape();
69 let max_nnz_per_row = (0..nrows)
71 .map(|r| csr.indptr[r + 1] - csr.indptr[r])
72 .max()
73 .unwrap_or(0);
74
75 let total = nrows * max_nnz_per_row;
76 let mut col_indices = vec![ELLPACK_PADDING_COL; total];
77 let mut values = vec![T::sparse_zero(); total];
78
79 for row in 0..nrows {
80 let row_start = csr.indptr[row];
81 let row_end = csr.indptr[row + 1];
82 for (k, j) in (row_start..row_end).enumerate() {
83 let pos = row * max_nnz_per_row + k;
84 col_indices[pos] = csr.indices[j];
85 values[pos] = csr.data[j];
86 }
87 }
88
89 Ok(Self {
90 nrows,
91 ncols,
92 max_nnz_per_row,
93 col_indices,
94 values,
95 })
96 }
97
98 pub fn spmv(&self, x: &[T]) -> SparseResult<Vec<T>>
100 where
101 T: std::ops::Add<Output = T> + std::ops::Mul<Output = T>,
102 {
103 if x.len() != self.ncols {
104 return Err(SparseError::DimensionMismatch {
105 expected: self.ncols,
106 found: x.len(),
107 });
108 }
109 let mut y = vec![T::sparse_zero(); self.nrows];
110 for row in 0..self.nrows {
111 let base = row * self.max_nnz_per_row;
112 for k in 0..self.max_nnz_per_row {
113 let col = self.col_indices[base + k];
114 if col == ELLPACK_PADDING_COL {
115 break; }
117 y[row] = y[row] + self.values[base + k] * x[col];
118 }
119 }
120 Ok(y)
121 }
122
123 pub fn to_csr(&self) -> SparseResult<CsrMatrix<T>>
125 where
126 T: std::cmp::PartialEq,
127 {
128 let mut row_indices: Vec<usize> = Vec::new();
129 let mut col_indices: Vec<usize> = Vec::new();
130 let mut data: Vec<T> = Vec::new();
131
132 for row in 0..self.nrows {
133 let base = row * self.max_nnz_per_row;
134 for k in 0..self.max_nnz_per_row {
135 let col = self.col_indices[base + k];
136 if col == ELLPACK_PADDING_COL {
137 break;
138 }
139 let val = self.values[base + k];
140 if val != T::sparse_zero() || col != ELLPACK_PADDING_COL {
141 row_indices.push(row);
142 col_indices.push(col);
143 data.push(val);
144 }
145 }
146 }
147 CsrMatrix::new(data, row_indices, col_indices, (self.nrows, self.ncols))
148 }
149
150 pub fn nnz(&self) -> usize {
152 self.col_indices
153 .iter()
154 .filter(|&&c| c != ELLPACK_PADDING_COL)
155 .count()
156 }
157}
158
/// Diagonal (DIA) sparse matrix storage.
///
/// The matrix is stored as a set of diagonals identified by their offset
/// `k = col - row` (`k >= 0`: super-diagonal, `k < 0`: sub-diagonal).
/// Efficient for banded matrices with few distinct offsets.
#[derive(Debug, Clone)]
pub struct DiaMatrix<T> {
    /// Number of rows of the logical matrix.
    pub nrows: usize,
    /// Number of columns of the logical matrix.
    pub ncols: usize,
    /// Offset (`col - row`) of each stored diagonal; `from_csr` produces
    /// them in ascending order.
    pub offsets: Vec<i64>,
    /// One value vector per offset, each exactly the in-bounds length of its
    /// diagonal (see `diagonal_length`). Super-diagonals are indexed by row,
    /// sub-diagonals by column.
    pub diags: Vec<Vec<T>>,
}
183
184impl<T> DiaMatrix<T>
185where
186 T: Clone + Copy + Zero + SparseElement + Debug,
187{
188 pub fn new(
197 nrows: usize,
198 ncols: usize,
199 offsets: Vec<i64>,
200 diags: Vec<Vec<T>>,
201 ) -> SparseResult<Self> {
202 if offsets.len() != diags.len() {
203 return Err(SparseError::DimensionMismatch {
204 expected: offsets.len(),
205 found: diags.len(),
206 });
207 }
208 for (i, &k) in offsets.iter().enumerate() {
210 let expected_len = diagonal_length(nrows, ncols, k);
211 if diags[i].len() != expected_len {
212 return Err(SparseError::ValueError(format!(
213 "Diagonal at offset {} has length {} but expected {}",
214 k,
215 diags[i].len(),
216 expected_len
217 )));
218 }
219 }
220 Ok(Self {
221 nrows,
222 ncols,
223 offsets,
224 diags,
225 })
226 }
227
228 pub fn from_csr(csr: &CsrMatrix<T>) -> SparseResult<Self> {
230 let (nrows, ncols) = csr.shape();
231 let mut offset_set: std::collections::BTreeSet<i64> = std::collections::BTreeSet::new();
233 for row in 0..nrows {
234 for j in csr.indptr[row]..csr.indptr[row + 1] {
235 let col = csr.indices[j];
236 offset_set.insert(col as i64 - row as i64);
237 }
238 }
239 let offsets: Vec<i64> = offset_set.into_iter().collect();
240 let mut diag_map: std::collections::HashMap<i64, Vec<T>> = std::collections::HashMap::new();
241 for &k in &offsets {
242 let len = diagonal_length(nrows, ncols, k);
243 diag_map.insert(k, vec![T::sparse_zero(); len]);
244 }
245
246 for row in 0..nrows {
247 for j in csr.indptr[row]..csr.indptr[row + 1] {
248 let col = csr.indices[j];
249 let k = col as i64 - row as i64;
250 let diag_idx = if k >= 0 {
251 row } else {
253 col
254 };
255 if let Some(d) = diag_map.get_mut(&k) {
256 if diag_idx < d.len() {
257 d[diag_idx] = csr.data[j];
258 }
259 }
260 }
261 }
262
263 let diags: Vec<Vec<T>> = offsets.iter().map(|k| diag_map[k].clone()).collect();
264 Ok(Self {
265 nrows,
266 ncols,
267 offsets,
268 diags,
269 })
270 }
271
272 pub fn spmv(&self, x: &[T]) -> SparseResult<Vec<T>>
274 where
275 T: std::ops::Add<Output = T> + std::ops::Mul<Output = T>,
276 {
277 if x.len() != self.ncols {
278 return Err(SparseError::DimensionMismatch {
279 expected: self.ncols,
280 found: x.len(),
281 });
282 }
283 let mut y = vec![T::sparse_zero(); self.nrows];
284 for (idx, &k) in self.offsets.iter().enumerate() {
285 let d = &self.diags[idx];
286 if k >= 0 {
289 let ku = k as usize;
290 for (i, &v) in d.iter().enumerate() {
291 let col = i + ku;
292 if col < self.ncols {
293 y[i] = y[i] + v * x[col];
294 }
295 }
296 } else {
297 let km = (-k) as usize;
298 for (i, &v) in d.iter().enumerate() {
299 let row = i + km;
300 if row < self.nrows {
301 y[row] = y[row] + v * x[i];
302 }
303 }
304 }
305 }
306 Ok(y)
307 }
308
309 pub fn to_csr(&self) -> SparseResult<CsrMatrix<T>> {
311 let mut row_idx: Vec<usize> = Vec::new();
312 let mut col_idx: Vec<usize> = Vec::new();
313 let mut data: Vec<T> = Vec::new();
314
315 for (idx, &k) in self.offsets.iter().enumerate() {
316 let d = &self.diags[idx];
317 if k >= 0 {
318 let ku = k as usize;
319 for (i, &v) in d.iter().enumerate() {
320 let col = i + ku;
321 if col < self.ncols && v != T::sparse_zero() {
322 row_idx.push(i);
323 col_idx.push(col);
324 data.push(v);
325 }
326 }
327 } else {
328 let km = (-k) as usize;
329 for (i, &v) in d.iter().enumerate() {
330 let row = i + km;
331 if row < self.nrows && v != T::sparse_zero() {
332 row_idx.push(row);
333 col_idx.push(i);
334 data.push(v);
335 }
336 }
337 }
338 }
339
340 CsrMatrix::new(data, row_idx, col_idx, (self.nrows, self.ncols))
341 }
342
343 pub fn nnz(&self) -> usize {
345 self.diags.iter().map(|d| d.len()).sum()
346 }
347}
348
/// Length of the in-bounds portion of diagonal `k` (offset `col - row`) of
/// an `nrows x ncols` matrix; 0 when the offset lies entirely outside.
fn diagonal_length(nrows: usize, ncols: usize, k: i64) -> usize {
    if k >= 0 {
        // Super-diagonal: starts at column k, limited by rows and columns.
        let off = k as usize;
        match ncols.checked_sub(off) {
            Some(span) => span.min(nrows),
            None => 0,
        }
    } else {
        // Sub-diagonal: starts at row |k|, limited by rows and columns.
        let off = (-k) as usize;
        match nrows.checked_sub(off) {
            Some(span) => span.min(ncols),
            None => 0,
        }
    }
}
367
/// Block CSR (BCSR) storage: CSR structure over dense
/// `block_rows x block_cols` tiles.
#[derive(Debug, Clone)]
pub struct BlockCSR<T> {
    /// Number of rows of the logical (element-level) matrix.
    pub nrows: usize,
    /// Number of columns of the logical matrix.
    pub ncols: usize,
    /// Rows per dense block.
    pub block_rows: usize,
    /// Columns per dense block.
    pub block_cols: usize,
    /// Number of block rows: `ceil(nrows / block_rows)`.
    pub num_block_rows: usize,
    /// Number of block columns: `ceil(ncols / block_cols)`.
    pub num_block_cols: usize,
    /// CSR-style row pointer over blocks: block-row `b` owns storage slots
    /// `indptr[b]..indptr[b + 1]` of `block_col_indices`.
    pub indptr: Vec<usize>,
    /// Block-column index of each stored block.
    pub block_col_indices: Vec<usize>,
    /// Dense block payloads: `block_rows * block_cols` row-major values per
    /// block, concatenated in storage order.
    pub data: Vec<T>,
}
405
406impl<T> BlockCSR<T>
407where
408 T: Clone + Copy + Zero + SparseElement + Debug + std::cmp::PartialEq,
409{
410 pub fn from_csr(
418 csr: &CsrMatrix<T>,
419 block_rows: usize,
420 block_cols: usize,
421 ) -> SparseResult<Self> {
422 if block_rows == 0 || block_cols == 0 {
423 return Err(SparseError::ValueError(
424 "Block dimensions must be at least 1".to_string(),
425 ));
426 }
427 let (nrows, ncols) = csr.shape();
428 let num_block_rows = nrows.div_ceil(block_rows);
429 let num_block_cols = ncols.div_ceil(block_cols);
430 let block_size = block_rows * block_cols;
431
432 let mut block_map: std::collections::BTreeMap<(usize, usize), Vec<T>> =
435 std::collections::BTreeMap::new();
436
437 for row in 0..nrows {
438 let brow = row / block_rows;
439 let local_row = row % block_rows;
440 for j in csr.indptr[row]..csr.indptr[row + 1] {
441 let col = csr.indices[j];
442 let bcol = col / block_cols;
443 let local_col = col % block_cols;
444 let block = block_map
445 .entry((brow, bcol))
446 .or_insert_with(|| vec![T::sparse_zero(); block_size]);
447 block[local_row * block_cols + local_col] = csr.data[j];
448 }
449 }
450
451 let mut indptr = vec![0usize; num_block_rows + 1];
453 let mut block_col_indices: Vec<usize> = Vec::new();
454 let mut data: Vec<T> = Vec::new();
455
456 let mut current_brow = 0usize;
458 for (&(brow, bcol), block_data) in &block_map {
459 while current_brow < brow {
460 indptr[current_brow + 1] = block_col_indices.len();
461 current_brow += 1;
462 }
463 block_col_indices.push(bcol);
464 data.extend_from_slice(block_data);
465 }
466 while current_brow < num_block_rows {
468 indptr[current_brow + 1] = block_col_indices.len();
469 current_brow += 1;
470 }
471 indptr[num_block_rows] = block_col_indices.len();
472
473 Ok(Self {
474 nrows,
475 ncols,
476 block_rows,
477 block_cols,
478 num_block_rows,
479 num_block_cols,
480 indptr,
481 block_col_indices,
482 data,
483 })
484 }
485
486 pub fn spmv(&self, x: &[T]) -> SparseResult<Vec<T>>
488 where
489 T: std::ops::Add<Output = T> + std::ops::Mul<Output = T>,
490 {
491 if x.len() != self.ncols {
492 return Err(SparseError::DimensionMismatch {
493 expected: self.ncols,
494 found: x.len(),
495 });
496 }
497 let mut y = vec![T::sparse_zero(); self.nrows];
498 let bs = self.block_rows * self.block_cols;
499
500 for brow in 0..self.num_block_rows {
501 let row_base = brow * self.block_rows;
502 for kb in self.indptr[brow]..self.indptr[brow + 1] {
503 let bcol = self.block_col_indices[kb];
504 let col_base = bcol * self.block_cols;
505 let block = &self.data[kb * bs..(kb + 1) * bs];
506 for lr in 0..self.block_rows {
507 let row = row_base + lr;
508 if row >= self.nrows {
509 break;
510 }
511 for lc in 0..self.block_cols {
512 let col = col_base + lc;
513 if col >= self.ncols {
514 break;
515 }
516 y[row] = y[row] + block[lr * self.block_cols + lc] * x[col];
517 }
518 }
519 }
520 }
521 Ok(y)
522 }
523
524 pub fn to_csr(&self) -> SparseResult<CsrMatrix<T>> {
526 let bs = self.block_rows * self.block_cols;
527 let mut row_idx: Vec<usize> = Vec::new();
528 let mut col_idx: Vec<usize> = Vec::new();
529 let mut data_out: Vec<T> = Vec::new();
530
531 for brow in 0..self.num_block_rows {
532 let row_base = brow * self.block_rows;
533 for kb in self.indptr[brow]..self.indptr[brow + 1] {
534 let bcol = self.block_col_indices[kb];
535 let col_base = bcol * self.block_cols;
536 let block = &self.data[kb * bs..(kb + 1) * bs];
537 for lr in 0..self.block_rows {
538 let row = row_base + lr;
539 if row >= self.nrows {
540 break;
541 }
542 for lc in 0..self.block_cols {
543 let col = col_base + lc;
544 if col >= self.ncols {
545 break;
546 }
547 let v = block[lr * self.block_cols + lc];
548 if v != T::sparse_zero() {
549 row_idx.push(row);
550 col_idx.push(col);
551 data_out.push(v);
552 }
553 }
554 }
555 }
556 }
557 CsrMatrix::new(data_out, row_idx, col_idx, (self.nrows, self.ncols))
558 }
559
560 pub fn num_blocks(&self) -> usize {
562 self.block_col_indices.len()
563 }
564}
565
/// Hybrid ELL + COO (HYB) storage.
///
/// The first `ell_width` entries of every row live in a regular ELLPACK
/// section; any overflow entries spill into COO triplet lists.
#[derive(Debug, Clone)]
pub struct HybridMatrix<T> {
    /// Number of rows of the logical matrix.
    pub nrows: usize,
    /// Number of columns of the logical matrix.
    pub ncols: usize,
    /// Slots per row in the ELL section.
    pub ell_width: usize,
    /// ELL column indices (`nrows * ell_width` entries, row-major); unused
    /// (padding) slots hold [`ELLPACK_PADDING_COL`].
    pub ell_col: Vec<usize>,
    /// ELL values aligned with `ell_col`.
    pub ell_val: Vec<T>,
    /// Row indices of the COO overflow entries.
    pub coo_row: Vec<usize>,
    /// Column indices of the COO overflow entries.
    pub coo_col: Vec<usize>,
    /// Values of the COO overflow entries.
    pub coo_val: Vec<T>,
}
599
600impl<T> HybridMatrix<T>
601where
602 T: Clone + Copy + Zero + SparseElement + Debug,
603{
604 pub fn from_csr(csr: &CsrMatrix<T>, ell_width: usize) -> SparseResult<Self> {
609 let (nrows, ncols) = csr.shape();
610 let total_ell = nrows * ell_width;
611
612 let mut ell_col = vec![ELLPACK_PADDING_COL; total_ell];
613 let mut ell_val = vec![T::sparse_zero(); total_ell];
614 let mut coo_row: Vec<usize> = Vec::new();
615 let mut coo_col: Vec<usize> = Vec::new();
616 let mut coo_val: Vec<T> = Vec::new();
617
618 for row in 0..nrows {
619 let row_start = csr.indptr[row];
620 let row_end = csr.indptr[row + 1];
621 let base = row * ell_width;
622
623 for (k, j) in (row_start..row_end).enumerate() {
624 let col = csr.indices[j];
625 let val = csr.data[j];
626 if k < ell_width {
627 ell_col[base + k] = col;
628 ell_val[base + k] = val;
629 } else {
630 coo_row.push(row);
631 coo_col.push(col);
632 coo_val.push(val);
633 }
634 }
635 }
636
637 Ok(Self {
638 nrows,
639 ncols,
640 ell_width,
641 ell_col,
642 ell_val,
643 coo_row,
644 coo_col,
645 coo_val,
646 })
647 }
648
649 pub fn from_csr_auto(csr: &CsrMatrix<T>) -> SparseResult<Self> {
651 let (nrows, _) = csr.shape();
652 let mut row_nnz: Vec<usize> = (0..nrows)
653 .map(|r| csr.indptr[r + 1] - csr.indptr[r])
654 .collect();
655 row_nnz.sort_unstable();
656 let ell_width = row_nnz.get(nrows / 2).copied().unwrap_or(0);
657 Self::from_csr(csr, ell_width)
658 }
659
660 pub fn spmv(&self, x: &[T]) -> SparseResult<Vec<T>>
662 where
663 T: std::ops::Add<Output = T> + std::ops::Mul<Output = T>,
664 {
665 if x.len() != self.ncols {
666 return Err(SparseError::DimensionMismatch {
667 expected: self.ncols,
668 found: x.len(),
669 });
670 }
671 let mut y = vec![T::sparse_zero(); self.nrows];
672
673 for row in 0..self.nrows {
675 let base = row * self.ell_width;
676 for k in 0..self.ell_width {
677 let col = self.ell_col[base + k];
678 if col == ELLPACK_PADDING_COL {
679 break;
680 }
681 y[row] = y[row] + self.ell_val[base + k] * x[col];
682 }
683 }
684
685 for ((&row, &col), &val) in self
687 .coo_row
688 .iter()
689 .zip(self.coo_col.iter())
690 .zip(self.coo_val.iter())
691 {
692 y[row] = y[row] + val * x[col];
693 }
694
695 Ok(y)
696 }
697
698 pub fn to_csr(&self) -> SparseResult<CsrMatrix<T>>
700 where
701 T: std::cmp::PartialEq,
702 {
703 let mut row_idx: Vec<usize> = Vec::new();
704 let mut col_idx: Vec<usize> = Vec::new();
705 let mut data: Vec<T> = Vec::new();
706
707 for row in 0..self.nrows {
709 let base = row * self.ell_width;
710 for k in 0..self.ell_width {
711 let col = self.ell_col[base + k];
712 if col == ELLPACK_PADDING_COL {
713 break;
714 }
715 let val = self.ell_val[base + k];
716 row_idx.push(row);
717 col_idx.push(col);
718 data.push(val);
719 }
720 }
721
722 for ((&row, &col), &val) in self
724 .coo_row
725 .iter()
726 .zip(self.coo_col.iter())
727 .zip(self.coo_val.iter())
728 {
729 row_idx.push(row);
730 col_idx.push(col);
731 data.push(val);
732 }
733
734 CsrMatrix::new(data, row_idx, col_idx, (self.nrows, self.ncols))
735 }
736
737 pub fn nnz(&self) -> usize {
739 let ell_nnz = self
740 .ell_col
741 .iter()
742 .filter(|&&c| c != ELLPACK_PADDING_COL)
743 .count();
744 ell_nnz + self.coo_val.len()
745 }
746}
747
748pub mod format_convert {
754 use super::*;
755
756 pub fn csr_to_ellpack<T>(csr: &CsrMatrix<T>) -> SparseResult<EllpackMatrix<T>>
758 where
759 T: Clone + Copy + Zero + SparseElement + Debug,
760 {
761 EllpackMatrix::from_csr(csr)
762 }
763
764 pub fn csr_to_dia<T>(csr: &CsrMatrix<T>) -> SparseResult<DiaMatrix<T>>
766 where
767 T: Clone + Copy + Zero + SparseElement + Debug + std::cmp::PartialEq,
768 {
769 DiaMatrix::from_csr(csr)
770 }
771
772 pub fn csr_to_bcsr<T>(
774 csr: &CsrMatrix<T>,
775 block_rows: usize,
776 block_cols: usize,
777 ) -> SparseResult<BlockCSR<T>>
778 where
779 T: Clone + Copy + Zero + SparseElement + Debug + std::cmp::PartialEq,
780 {
781 BlockCSR::from_csr(csr, block_rows, block_cols)
782 }
783
784 pub fn csr_to_hybrid<T>(csr: &CsrMatrix<T>) -> SparseResult<HybridMatrix<T>>
786 where
787 T: Clone + Copy + Zero + SparseElement + Debug,
788 {
789 HybridMatrix::from_csr_auto(csr)
790 }
791
792 pub fn ellpack_to_csr<T>(ell: &EllpackMatrix<T>) -> SparseResult<CsrMatrix<T>>
794 where
795 T: Clone + Copy + Zero + SparseElement + Debug + std::cmp::PartialEq,
796 {
797 ell.to_csr()
798 }
799
800 pub fn dia_to_csr<T>(dia: &DiaMatrix<T>) -> SparseResult<CsrMatrix<T>>
802 where
803 T: Clone + Copy + Zero + SparseElement + Debug + std::cmp::PartialEq,
804 {
805 dia.to_csr()
806 }
807
808 pub fn bcsr_to_csr<T>(bcsr: &BlockCSR<T>) -> SparseResult<CsrMatrix<T>>
810 where
811 T: Clone + Copy + Zero + SparseElement + Debug + std::cmp::PartialEq,
812 {
813 bcsr.to_csr()
814 }
815
816 pub fn hybrid_to_csr<T>(hyb: &HybridMatrix<T>) -> SparseResult<CsrMatrix<T>>
818 where
819 T: Clone + Copy + Zero + SparseElement + Debug + std::cmp::PartialEq,
820 {
821 hyb.to_csr()
822 }
823}
824
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;

    /// 1-D Laplacian: tridiagonal with 2 on the diagonal, -1 beside it.
    fn laplacian_1d(n: usize) -> CsrMatrix<f64> {
        let mut rows = Vec::new();
        let mut cols = Vec::new();
        let mut vals = Vec::new();
        for i in 0..n {
            let mut push_entry = |c: usize, v: f64| {
                rows.push(i);
                cols.push(c);
                vals.push(v);
            };
            push_entry(i, 2.0);
            if i > 0 {
                push_entry(i - 1, -1.0);
            }
            if i + 1 < n {
                push_entry(i + 1, -1.0);
            }
        }
        CsrMatrix::new(vals, rows, cols, (n, n)).expect("laplacian_1d")
    }

    /// Compares `y_ref` against a straightforward CSR reference SpMV with
    /// x = [1, 2, ..., n].
    fn check_spmv(csr: &CsrMatrix<f64>, y_ref: &[f64], label: &str) {
        let x: Vec<f64> = (1..=csr.cols()).map(|i| i as f64).collect();
        let expected: Vec<f64> = (0..csr.rows())
            .map(|row| {
                (csr.indptr[row]..csr.indptr[row + 1])
                    .map(|j| csr.data[j] * x[csr.indices[j]])
                    .sum()
            })
            .collect();
        for (i, (&got, &exp)) in y_ref.iter().zip(expected.iter()).enumerate() {
            assert!(
                (got - exp).abs() < 1e-12,
                "{}[{}] mismatch: got={} exp={}",
                label,
                i,
                got,
                exp
            );
        }
    }

    #[test]
    fn test_ellpack_spmv_roundtrip() {
        let csr = laplacian_1d(6);
        let x: Vec<f64> = (1..=csr.cols()).map(|i| i as f64).collect();

        let ell = EllpackMatrix::from_csr(&csr).expect("ellpack");
        check_spmv(&csr, &ell.spmv(&x).expect("spmv"), "ELLPACK");
        assert_eq!(ell.to_csr().expect("ell->csr").nnz(), csr.nnz());
    }

    #[test]
    fn test_dia_spmv_roundtrip() {
        let csr = laplacian_1d(6);
        let x: Vec<f64> = (1..=csr.cols()).map(|i| i as f64).collect();

        let dia = DiaMatrix::from_csr(&csr).expect("dia");
        check_spmv(&csr, &dia.spmv(&x).expect("spmv"), "DIA");
        assert_eq!(dia.to_csr().expect("dia->csr").nnz(), csr.nnz());
    }

    #[test]
    fn test_bcsr_spmv_roundtrip() {
        let csr = laplacian_1d(6);
        let x: Vec<f64> = (1..=csr.cols()).map(|i| i as f64).collect();

        let bcsr = BlockCSR::from_csr(&csr, 2, 2).expect("bcsr");
        check_spmv(&csr, &bcsr.spmv(&x).expect("spmv"), "BCSR");
        assert_eq!(bcsr.to_csr().expect("bcsr->csr").nnz(), csr.nnz());
    }

    #[test]
    fn test_hybrid_spmv_roundtrip() {
        let csr = laplacian_1d(6);
        let x: Vec<f64> = (1..=csr.cols()).map(|i| i as f64).collect();

        let hyb = HybridMatrix::from_csr_auto(&csr).expect("hybrid");
        check_spmv(&csr, &hyb.spmv(&x).expect("spmv"), "Hybrid");
        assert_eq!(hyb.to_csr().expect("hyb->csr").nnz(), csr.nnz());
    }

    #[test]
    fn test_format_convert_roundtrip() {
        let csr = laplacian_1d(8);
        let x: Vec<f64> = (1..=csr.cols()).map(|i| i as f64).collect();

        let y_ell = format_convert::csr_to_ellpack(&csr)
            .expect("csr_to_ellpack")
            .spmv(&x)
            .expect("ell spmv");
        let y_hyb = format_convert::csr_to_hybrid(&csr)
            .expect("csr_to_hybrid")
            .spmv(&x)
            .expect("hyb spmv");

        for (&a, &b) in y_ell.iter().zip(y_hyb.iter()) {
            assert_relative_eq!(a, b, epsilon = 1e-12);
        }
    }
}