torsh-sparse 0.1.2

Sparse tensor operations for ToRSh with SciRS2 integration
//! Hybrid sparse formats that combine multiple sparse representations
//!
//! This module provides intelligent sparse format selection and hybrid formats
//! that can optimize storage and computation based on matrix characteristics.
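//!
//! # Example (sketch)
//!
//! A minimal usage sketch. The import paths assume these items are re-exported
//! from the crate root; adjust them to the actual module paths if they are not.
//!
//! ```ignore
//! use torsh_core::Shape;
//! use torsh_sparse::{CooTensor, HybridTensor, PartitionStrategy, SparseTensor};
//!
//! // A 64x64 matrix with three non-zeros, repartitioned into 32x32 blocks;
//! // each non-empty block chooses its own storage format.
//! let coo = CooTensor::new(
//!     vec![0, 1, 2],
//!     vec![0, 1, 2],
//!     vec![1.0, 2.0, 3.0],
//!     Shape::new(vec![64, 64]),
//! )
//! .unwrap();
//! let hybrid = HybridTensor::from_sparse(
//!     coo,
//!     PartitionStrategy::BlockBased { block_size: (32, 32) },
//! )
//! .unwrap();
//! assert_eq!(hybrid.nnz(), 3);
//! ```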

use crate::{
    CooTensor, CscTensor, CsrTensor, DiaTensor, EllTensor, SparseFormat, SparseTensor, TorshResult,
};
use std::collections::HashMap;
use torsh_core::{Shape, TorshError};
use torsh_tensor::Tensor;

/// Type alias for block-based sparse triplets
type BlockTriplets = HashMap<(usize, usize), Vec<(usize, usize, f32)>>;

/// Hybrid sparse tensor that can store different regions in different formats
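///
/// # Example (sketch)
///
/// Manual construction from explicit regions, mirroring the unit tests in this
/// module (marked `ignore`; import paths are illustrative):
///
/// ```ignore
/// use std::collections::HashMap;
///
/// let mut regions = HashMap::new();
/// let coo = CooTensor::new(
///     vec![0, 1],
///     vec![0, 1],
///     vec![1.0, 2.0],
///     Shape::new(vec![2, 2]),
/// )
/// .unwrap();
/// regions.insert(
///     RegionId::new(0, 0, 2, 2),
///     Box::new(coo) as Box<dyn SparseTensor + Send + Sync>,
/// );
/// let hybrid = HybridTensor::new(regions, Shape::new(vec![2, 2])).unwrap();
/// assert_eq!(hybrid.nnz(), 2);
/// ```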
pub struct HybridTensor {
    /// Map of regions to their sparse representations
    regions: HashMap<RegionId, Box<dyn SparseTensor + Send + Sync>>,
    /// Overall shape of the tensor
    shape: Shape,
    /// Number of non-zero elements across all regions
    nnz: usize,
}

/// Identifier for different regions within a hybrid tensor
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct RegionId {
    /// Starting row
    row_start: usize,
    /// Starting column
    col_start: usize,
    /// Number of rows in region
    rows: usize,
    /// Number of columns in region
    cols: usize,
}

impl RegionId {
    /// Create a new region identifier
    pub fn new(row_start: usize, col_start: usize, rows: usize, cols: usize) -> Self {
        Self {
            row_start,
            col_start,
            rows,
            cols,
        }
    }
}

impl HybridTensor {
    /// Create a new hybrid tensor from regions
    pub fn new(
        regions: HashMap<RegionId, Box<dyn SparseTensor + Send + Sync>>,
        shape: Shape,
    ) -> TorshResult<Self> {
        let nnz = regions.values().map(|region| region.nnz()).sum();

        // Validate that regions don't overlap and fit within shape
        Self::validate_regions(&regions, &shape)?;

        Ok(Self {
            regions,
            shape,
            nnz,
        })
    }

    /// Create a hybrid tensor by automatically partitioning a sparse tensor
    pub fn from_sparse<T: SparseTensor + Send + Sync + 'static>(
        sparse: T,
        partition_strategy: PartitionStrategy,
    ) -> TorshResult<Self> {
        let shape = sparse.shape().clone();
        let regions = Self::partition_tensor(Box::new(sparse), partition_strategy)?;
        Self::new(regions, shape)
    }

    /// Partition a sparse tensor into regions using the given strategy
    fn partition_tensor(
        sparse: Box<dyn SparseTensor + Send + Sync>,
        strategy: PartitionStrategy,
    ) -> TorshResult<HashMap<RegionId, Box<dyn SparseTensor + Send + Sync>>> {
        match strategy {
            PartitionStrategy::BlockBased { block_size } => {
                Self::partition_block_based(sparse, block_size)
            }
            PartitionStrategy::DensityBased { threshold } => {
                Self::partition_density_based(sparse, threshold)
            }
            PartitionStrategy::PatternBased => Self::partition_pattern_based(sparse),
        }
    }

    /// Partition tensor into fixed-size blocks
    fn partition_block_based(
        sparse: Box<dyn SparseTensor + Send + Sync>,
        block_size: (usize, usize),
    ) -> TorshResult<HashMap<RegionId, Box<dyn SparseTensor + Send + Sync>>> {
        let shape = sparse.shape();
        let (rows, cols) = (shape.dims()[0], shape.dims()[1]);
        let (block_rows, block_cols) = block_size;

        let mut regions = HashMap::new();
        let coo = sparse.to_coo()?;
        let triplets = coo.triplets();

        // Group triplets by blocks
        let mut block_triplets: BlockTriplets = HashMap::new();

        for (row, col, val) in triplets {
            let block_row = row / block_rows;
            let block_col = col / block_cols;
            block_triplets
                .entry((block_row, block_col))
                .or_default()
                .push((row % block_rows, col % block_cols, val));
        }

        // Create sparse tensors for each non-empty block
        for ((block_row, block_col), triplets) in block_triplets {
            let row_start = block_row * block_rows;
            let col_start = block_col * block_cols;
            let actual_rows = std::cmp::min(block_rows, rows - row_start);
            let actual_cols = std::cmp::min(block_cols, cols - col_start);

            let region_id = RegionId::new(row_start, col_start, actual_rows, actual_cols);

            // Create COO tensor for this block
            let (block_rows_vec, block_cols_vec, block_vals): (Vec<_>, Vec<_>, Vec<_>) =
                triplets.into_iter().fold(
                    (Vec::new(), Vec::new(), Vec::new()),
                    |(mut rows, mut cols, mut vals), (r, c, v)| {
                        rows.push(r);
                        cols.push(c);
                        vals.push(v);
                        (rows, cols, vals)
                    },
                );

            let block_shape = Shape::new(vec![actual_rows, actual_cols]);
            let block_coo =
                CooTensor::new(block_rows_vec, block_cols_vec, block_vals, block_shape)?;

            // Choose best format for this block
            let block_tensor = Self::select_optimal_format_for_block(&block_coo)?;
            regions.insert(region_id, block_tensor);
        }

        Ok(regions)
    }

    /// Partition tensor based on density patterns
    fn partition_density_based(
        sparse: Box<dyn SparseTensor + Send + Sync>,
        density_threshold: f32,
    ) -> TorshResult<HashMap<RegionId, Box<dyn SparseTensor + Send + Sync>>> {
        // For simplicity, we'll use a grid-based approach and check density
        let block_size = (64, 64); // Default block size for density analysis
        let mut regions = Self::partition_block_based(sparse, block_size)?;

        // Re-format regions based on their density
        let mut optimized_regions = HashMap::new();
        for (region_id, tensor) in regions.drain() {
            let density = 1.0 - tensor.sparsity();
            let optimized_tensor = if density > density_threshold {
                // High density: use format optimized for dense operations
                let coo = tensor.to_coo()?;
                Box::new(CsrTensor::from_coo(&coo)?) as Box<dyn SparseTensor + Send + Sync>
            } else {
                // Low density: keep in COO for flexibility
                Box::new(tensor.to_coo()?) as Box<dyn SparseTensor + Send + Sync>
            };
            optimized_regions.insert(region_id, optimized_tensor);
        }

        Ok(optimized_regions)
    }

    /// Partition tensor based on structural patterns
    fn partition_pattern_based(
        sparse: Box<dyn SparseTensor + Send + Sync>,
    ) -> TorshResult<HashMap<RegionId, Box<dyn SparseTensor + Send + Sync>>> {
        let coo = sparse.to_coo()?;
        let triplets = coo.triplets();
        let shape = sparse.shape();

        // Analyze patterns: diagonal, block diagonal, banded, etc.
        let pattern = Self::analyze_sparsity_pattern(&triplets, shape)?;

        match pattern {
            SparsityPattern::Diagonal => {
                // Use DIA format for the entire matrix
                let mut regions = HashMap::new();
                let region_id = RegionId::new(0, 0, shape.dims()[0], shape.dims()[1]);
                let dia_tensor = DiaTensor::from_coo(&coo)?;
                regions.insert(
                    region_id,
                    Box::new(dia_tensor) as Box<dyn SparseTensor + Send + Sync>,
                );
                Ok(regions)
            }
            SparsityPattern::BlockDiagonal { block_size } => {
                // Partition into diagonal blocks
                Self::partition_block_based(sparse, block_size)
            }
            SparsityPattern::Banded { bandwidth: _ } => {
                // Use specialized banded format or ELL
                let mut regions = HashMap::new();
                let region_id = RegionId::new(0, 0, shape.dims()[0], shape.dims()[1]);
                let ell_tensor = EllTensor::from_coo(&coo)?;
                regions.insert(
                    region_id,
                    Box::new(ell_tensor) as Box<dyn SparseTensor + Send + Sync>,
                );
                Ok(regions)
            }
            SparsityPattern::Random => {
                // Use block-based partitioning with optimal format selection
                Self::partition_block_based(sparse, (32, 32))
            }
        }
    }

    /// Analyze the sparsity pattern of triplets
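    ///
    /// Doc sketch of the classification (marked `ignore`; mirrors the unit
    /// test in this module):
    ///
    /// ```ignore
    /// // All non-zeros sit on the main diagonal, so the diagonal ratio is
    /// // 1.0 > 0.8 and the pattern is classified as Diagonal.
    /// let triplets = vec![(0, 0, 1.0), (1, 1, 1.0), (2, 2, 1.0)];
    /// let shape = Shape::new(vec![3, 3]);
    /// let pattern = HybridTensor::analyze_sparsity_pattern(&triplets, &shape).unwrap();
    /// assert!(matches!(pattern, SparsityPattern::Diagonal));
    /// ```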
    pub fn analyze_sparsity_pattern(
        triplets: &[(usize, usize, f32)],
        shape: &Shape,
    ) -> TorshResult<SparsityPattern> {
        let (rows, cols) = (shape.dims()[0], shape.dims()[1]);

        // Guard against an empty triplet list so the ratio computations below
        // do not divide by zero.
        if triplets.is_empty() {
            return Ok(SparsityPattern::Random);
        }

        // Check for diagonal pattern
        let diagonal_count = triplets.iter().filter(|(r, c, _)| r == c).count();
        let diagonal_ratio = diagonal_count as f32 / triplets.len() as f32;

        if diagonal_ratio > 0.8 {
            return Ok(SparsityPattern::Diagonal);
        }

        // Check for banded pattern
        let max_bandwidth = triplets
            .iter()
            .map(|(r, c, _)| (*r as i32 - *c as i32).unsigned_abs() as usize)
            .max()
            .unwrap_or(0);

        let effective_bandwidth = std::cmp::min(max_bandwidth, std::cmp::min(rows, cols) / 4);

        if effective_bandwidth < std::cmp::min(rows, cols) / 8 {
            return Ok(SparsityPattern::Banded {
                bandwidth: effective_bandwidth,
            });
        }

        // Check for block diagonal pattern
        // Simple heuristic: most non-zeros fall within small blocks along the main diagonal
        let block_size = 16;
        let mut block_diagonal_count = 0;

        for (r, c, _) in triplets {
            let block_r = r / block_size;
            let block_c = c / block_size;
            if block_r == block_c {
                block_diagonal_count += 1;
            }
        }

        let block_diagonal_ratio = block_diagonal_count as f32 / triplets.len() as f32;

        if block_diagonal_ratio > 0.6 {
            return Ok(SparsityPattern::BlockDiagonal {
                block_size: (block_size, block_size),
            });
        }

        Ok(SparsityPattern::Random)
    }

    /// Select optimal sparse format for a block based on its characteristics
    fn select_optimal_format_for_block(
        coo: &CooTensor,
    ) -> TorshResult<Box<dyn SparseTensor + Send + Sync>> {
        let shape = coo.shape();
        let nnz = coo.nnz();
        let total_elements = shape.numel();
        let density = nnz as f32 / total_elements as f32;

        // Decision tree for format selection
        if density > 0.1 {
            // High density: use CSR for efficient matrix operations
            Ok(Box::new(CsrTensor::from_coo(coo)?))
        } else if nnz < 100 {
            // Very sparse: stay with COO for simplicity
            Ok(Box::new(coo.clone()))
        } else {
            // Medium sparsity: use CSR for general operations
            Ok(Box::new(CsrTensor::from_coo(coo)?))
        }
    }

    /// Validate that regions don't overlap and fit within the tensor shape
    fn validate_regions(
        regions: &HashMap<RegionId, Box<dyn SparseTensor + Send + Sync>>,
        shape: &Shape,
    ) -> TorshResult<()> {
        let (total_rows, total_cols) = (shape.dims()[0], shape.dims()[1]);

        for (region_id, tensor) in regions {
            // Check bounds
            if region_id.row_start + region_id.rows > total_rows
                || region_id.col_start + region_id.cols > total_cols
            {
                return Err(TorshError::InvalidArgument(
                    "Region extends beyond tensor bounds".to_string(),
                ));
            }

            // Check tensor shape matches region
            let tensor_shape = tensor.shape();
            if tensor_shape.dims() != [region_id.rows, region_id.cols] {
                return Err(TorshError::InvalidArgument(
                    "Region tensor shape doesn't match region dimensions".to_string(),
                ));
            }
        }

        // Check for overlapping regions
        let region_ids: Vec<&RegionId> = regions.keys().collect();
        for i in 0..region_ids.len() {
            for j in (i + 1)..region_ids.len() {
                let region1 = region_ids[i];
                let region2 = region_ids[j];

                // Check if regions overlap
                let r1_end_row = region1.row_start + region1.rows;
                let r1_end_col = region1.col_start + region1.cols;
                let r2_end_row = region2.row_start + region2.rows;
                let r2_end_col = region2.col_start + region2.cols;

                // Regions overlap if they intersect in both row and column dimensions
                let rows_overlap =
                    !(r1_end_row <= region2.row_start || r2_end_row <= region1.row_start);
                let cols_overlap =
                    !(r1_end_col <= region2.col_start || r2_end_col <= region1.col_start);

                if rows_overlap && cols_overlap {
                    return Err(TorshError::InvalidArgument(format!(
                        "Regions overlap: [{}, {}, {}, {}] and [{}, {}, {}, {}]",
                        region1.row_start,
                        region1.col_start,
                        region1.rows,
                        region1.cols,
                        region2.row_start,
                        region2.col_start,
                        region2.rows,
                        region2.cols
                    )));
                }
            }
        }

        Ok(())
    }
}

impl SparseTensor for HybridTensor {
    fn format(&self) -> SparseFormat {
        SparseFormat::Coo // Hybrid doesn't have a single format
    }

    fn shape(&self) -> &Shape {
        &self.shape
    }

    fn dtype(&self) -> torsh_core::DType {
        torsh_core::DType::F32 // Assume f32 for now
    }

    fn device(&self) -> torsh_core::device::DeviceType {
        torsh_core::device::DeviceType::Cpu // Assume CPU for now
    }

    fn nnz(&self) -> usize {
        self.nnz
    }

    fn to_dense(&self) -> TorshResult<Tensor> {
        use torsh_tensor::creation::zeros;

        let dense = zeros::<f32>(&[self.shape.dims()[0], self.shape.dims()[1]])?;

        for (region_id, tensor) in &self.regions {
            let region_dense = tensor.to_dense()?;

            // Copy region data to the appropriate location in the dense tensor
            for i in 0..region_id.rows {
                for j in 0..region_id.cols {
                    let global_row = region_id.row_start + i;
                    let global_col = region_id.col_start + j;
                    let value = region_dense.get(&[i, j])?;
                    dense.set(&[global_row, global_col], value)?;
                }
            }
        }

        Ok(dense)
    }

    fn to_coo(&self) -> TorshResult<CooTensor> {
        let mut all_row_indices = Vec::new();
        let mut all_col_indices = Vec::new();
        let mut all_values = Vec::new();

        for (region_id, tensor) in &self.regions {
            let region_coo = tensor.to_coo()?;
            let triplets = region_coo.triplets();

            for (row, col, val) in triplets {
                all_row_indices.push(region_id.row_start + row);
                all_col_indices.push(region_id.col_start + col);
                all_values.push(val);
            }
        }

        CooTensor::new(
            all_row_indices,
            all_col_indices,
            all_values,
            self.shape.clone(),
        )
    }

    fn to_csr(&self) -> TorshResult<CsrTensor> {
        let coo = self.to_coo()?;
        CsrTensor::from_coo(&coo)
    }

    fn to_csc(&self) -> TorshResult<CscTensor> {
        let coo = self.to_coo()?;
        CscTensor::from_coo(&coo)
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}

/// Strategy for partitioning a sparse tensor
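///
/// Illustrative construction of each variant (the numeric values are arbitrary
/// examples, not recommended defaults):
///
/// ```ignore
/// // Fixed 32x32 tiles; every non-empty tile becomes its own region.
/// let blocks = PartitionStrategy::BlockBased { block_size: (32, 32) };
/// // Blocks denser than 5% are re-stored as CSR; the rest stay in COO.
/// let by_density = PartitionStrategy::DensityBased { threshold: 0.05 };
/// // Let the pattern analyzer choose DIA, ELL, or block layouts.
/// let by_pattern = PartitionStrategy::PatternBased;
/// ```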
#[derive(Debug, Clone)]
pub enum PartitionStrategy {
    /// Partition into fixed-size blocks
    BlockBased { block_size: (usize, usize) },
    /// Partition based on density thresholds
    DensityBased { threshold: f32 },
    /// Partition based on detected sparsity patterns
    PatternBased,
}

/// Detected sparsity patterns
#[derive(Debug, Clone)]
pub enum SparsityPattern {
    /// Diagonal matrix
    Diagonal,
    /// Block diagonal matrix
    BlockDiagonal { block_size: (usize, usize) },
    /// Banded matrix
    Banded { bandwidth: usize },
    /// Random sparsity pattern
    Random,
}

/// Automatically select the best sparse format for a given tensor
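///
/// A doc sketch mirroring the unit test below (marked `ignore`; import paths
/// depend on how the crate re-exports these items):
///
/// ```ignore
/// use torsh_tensor::creation::eye;
///
/// // An identity matrix is almost purely diagonal, so DIA is selected.
/// let identity = eye::<f32>(10).unwrap();
/// let format = auto_select_format(&identity, 0.0).unwrap();
/// assert_eq!(format, SparseFormat::Dia);
/// ```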
pub fn auto_select_format(dense: &Tensor, threshold: f32) -> TorshResult<SparseFormat> {
    let shape = dense.shape();
    if shape.ndim() != 2 {
        return Err(TorshError::InvalidArgument(
            "Can only select format for 2D tensors".to_string(),
        ));
    }

    let (rows, cols) = (shape.dims()[0], shape.dims()[1]);
    let total_elements = rows * cols;

    // Count non-zero elements
    let mut nnz = 0;
    let mut diagonal_nnz = 0;
    let mut max_bandwidth = 0;

    for i in 0..rows {
        for j in 0..cols {
            let val = dense.get(&[i, j])?;
            if val.abs() > threshold {
                nnz += 1;
                if i == j {
                    diagonal_nnz += 1;
                }
                max_bandwidth =
                    std::cmp::max(max_bandwidth, (i as i32 - j as i32).unsigned_abs() as usize);
            }
        }
    }

    // An all-zero tensor has no structure to exploit; return COO and avoid
    // dividing by zero in the ratios below.
    if nnz == 0 {
        return Ok(SparseFormat::Coo);
    }

    let density = nnz as f32 / total_elements as f32;
    let diagonal_ratio = diagonal_nnz as f32 / nnz as f32;

    // Decision tree for format selection
    if diagonal_ratio > 0.8 {
        Ok(SparseFormat::Dia)
    } else if density > 0.1 {
        Ok(SparseFormat::Csr) // High density, good for matrix ops
    } else if max_bandwidth < std::cmp::min(rows, cols) / 8 {
        Ok(SparseFormat::Ell) // Banded structure
    } else if nnz < 1000 {
        Ok(SparseFormat::Coo) // Small, use flexible format
    } else if rows > cols * 2 {
        Ok(SparseFormat::Csc) // Tall and narrow: few columns, so CSC's column pointers stay small
    } else {
        Ok(SparseFormat::Csr) // Default to row-major
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use torsh_tensor::creation::{eye, zeros};

    #[test]
    fn test_auto_format_selection() {
        // Test diagonal matrix
        let diagonal = eye::<f32>(10).unwrap();
        let format = auto_select_format(&diagonal, 0.0).unwrap();
        assert_eq!(format, SparseFormat::Dia);

        // Test dense-ish matrix
        let dense_ish = zeros::<f32>(&[5, 5]).unwrap();
        for i in 0..5 {
            for j in 0..5 {
                if (i + j) % 2 == 0 {
                    dense_ish.set(&[i, j], 1.0).unwrap();
                }
            }
        }
        let format = auto_select_format(&dense_ish, 0.0).unwrap();
        // Should be CSR due to high density
        assert_eq!(format, SparseFormat::Csr);
    }

    #[test]
    fn test_sparsity_pattern_analysis() {
        // Test diagonal pattern
        let triplets = vec![(0, 0, 1.0), (1, 1, 1.0), (2, 2, 1.0)];
        let shape = Shape::new(vec![3, 3]);
        let pattern = HybridTensor::analyze_sparsity_pattern(&triplets, &shape).unwrap();
        assert!(matches!(pattern, SparsityPattern::Diagonal));
    }

    #[test]
    fn test_hybrid_tensor_creation() {
        // Create a simple hybrid tensor with one region
        let mut regions = HashMap::new();
        let coo = CooTensor::new(
            vec![0, 1],
            vec![0, 1],
            vec![1.0, 2.0],
            Shape::new(vec![2, 2]),
        )
        .unwrap();

        let region_id = RegionId::new(0, 0, 2, 2);
        regions.insert(
            region_id,
            Box::new(coo) as Box<dyn SparseTensor + Send + Sync>,
        );

        let hybrid = HybridTensor::new(regions, Shape::new(vec![2, 2])).unwrap();
        assert_eq!(hybrid.nnz(), 2);
        assert_eq!(hybrid.shape().dims(), &[2, 2]);
    }

    #[test]
    fn test_hybrid_tensor_to_dense() {
        // Create hybrid tensor and convert to dense
        let mut regions = HashMap::new();
        let coo = CooTensor::new(
            vec![0, 1],
            vec![0, 1],
            vec![3.0, 4.0],
            Shape::new(vec![2, 2]),
        )
        .unwrap();

        let region_id = RegionId::new(0, 0, 2, 2);
        regions.insert(
            region_id,
            Box::new(coo) as Box<dyn SparseTensor + Send + Sync>,
        );

        let hybrid = HybridTensor::new(regions, Shape::new(vec![2, 2])).unwrap();
        let dense = hybrid.to_dense().unwrap();

        assert_relative_eq!(dense.get(&[0, 0]).unwrap(), 3.0);
        assert_relative_eq!(dense.get(&[1, 1]).unwrap(), 4.0);
        assert_relative_eq!(dense.get(&[0, 1]).unwrap(), 0.0);
        assert_relative_eq!(dense.get(&[1, 0]).unwrap(), 0.0);
    }
}