// ringkernel_graph/algorithms/spmv.rs

1//! Sparse Matrix-Vector Multiplication (SpMV).
2//!
3//! Computes y = A * x where A is a sparse matrix in CSR format.
4//! This is a fundamental operation for iterative graph algorithms like PageRank.
5
6use crate::models::CsrMatrix;
7use crate::{GraphError, Result};
8
/// SpMV configuration.
///
/// Both fields are plain `Copy` data, so the config itself is `Copy` and
/// cheap to pass by value; `PartialEq` allows configs to be compared in tests.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct SpmvConfig {
    /// Use parallel implementation.
    pub parallel: bool,
    /// Alpha scaling factor (y = alpha * A * x).
    pub alpha: f64,
}
17
18impl Default for SpmvConfig {
19    fn default() -> Self {
20        Self {
21            parallel: false,
22            alpha: 1.0,
23        }
24    }
25}
26
27impl SpmvConfig {
28    /// Create new SpMV configuration.
29    pub fn new() -> Self {
30        Self::default()
31    }
32
33    /// Enable parallel execution.
34    pub fn parallel(mut self) -> Self {
35        self.parallel = true;
36        self
37    }
38
39    /// Set scaling factor.
40    pub fn with_alpha(mut self, alpha: f64) -> Self {
41        self.alpha = alpha;
42        self
43    }
44}
45
46/// Sequential SpMV: y = A * x.
47///
48/// # Arguments
49///
50/// * `matrix` - Sparse matrix in CSR format
51/// * `x` - Input vector
52///
53/// # Returns
54///
55/// Output vector y = A * x
56pub fn spmv(matrix: &CsrMatrix, x: &[f64]) -> Result<Vec<f64>> {
57    spmv_with_config(matrix, x, &SpmvConfig::default())
58}
59
60/// SpMV with configuration.
61pub fn spmv_with_config(matrix: &CsrMatrix, x: &[f64], config: &SpmvConfig) -> Result<Vec<f64>> {
62    if x.len() != matrix.num_cols {
63        return Err(GraphError::DimensionMismatch {
64            expected: matrix.num_cols,
65            actual: x.len(),
66        });
67    }
68
69    if config.parallel {
70        spmv_parallel_impl(matrix, x, config.alpha)
71    } else {
72        spmv_sequential_impl(matrix, x, config.alpha)
73    }
74}
75
76/// Sequential implementation.
77fn spmv_sequential_impl(matrix: &CsrMatrix, x: &[f64], alpha: f64) -> Result<Vec<f64>> {
78    let mut y = vec![0.0; matrix.num_rows];
79
80    for (row, y_row) in y.iter_mut().enumerate() {
81        let start = matrix.row_ptr[row] as usize;
82        let end = matrix.row_ptr[row + 1] as usize;
83
84        let mut sum = 0.0;
85        for i in start..end {
86            let col = matrix.col_idx[i] as usize;
87            let val = matrix.values.as_ref().map(|v| v[i]).unwrap_or(1.0);
88            sum += val * x[col];
89        }
90
91        *y_row = alpha * sum;
92    }
93
94    Ok(y)
95}
96
97/// Parallel SpMV using row-based parallelism.
98///
99/// Each row is processed independently, making this suitable for GPU execution.
100pub fn spmv_parallel(matrix: &CsrMatrix, x: &[f64]) -> Result<Vec<f64>> {
101    spmv_with_config(matrix, x, &SpmvConfig::default().parallel())
102}
103
104/// Parallel implementation (currently sequential, ready for GPU/rayon).
105fn spmv_parallel_impl(matrix: &CsrMatrix, x: &[f64], alpha: f64) -> Result<Vec<f64>> {
106    // Each row can be computed independently (parallelizable)
107    let y: Vec<f64> = (0..matrix.num_rows)
108        .map(|row| {
109            let start = matrix.row_ptr[row] as usize;
110            let end = matrix.row_ptr[row + 1] as usize;
111
112            let mut sum = 0.0;
113            for i in start..end {
114                let col = matrix.col_idx[i] as usize;
115                let val = matrix.values.as_ref().map(|v| v[i]).unwrap_or(1.0);
116                sum += val * x[col];
117            }
118
119            alpha * sum
120        })
121        .collect();
122
123    Ok(y)
124}
125
126/// SpMV with y = alpha * A * x + beta * y (BLAS-style).
127///
128/// This allows accumulating results without allocating new vectors.
129pub fn spmv_axpby(
130    matrix: &CsrMatrix,
131    x: &[f64],
132    y: &mut [f64],
133    alpha: f64,
134    beta: f64,
135) -> Result<()> {
136    if x.len() != matrix.num_cols {
137        return Err(GraphError::DimensionMismatch {
138            expected: matrix.num_cols,
139            actual: x.len(),
140        });
141    }
142
143    if y.len() != matrix.num_rows {
144        return Err(GraphError::DimensionMismatch {
145            expected: matrix.num_rows,
146            actual: y.len(),
147        });
148    }
149
150    for (row, y_row) in y.iter_mut().enumerate() {
151        let start = matrix.row_ptr[row] as usize;
152        let end = matrix.row_ptr[row + 1] as usize;
153
154        let mut sum = 0.0;
155        for i in start..end {
156            let col = matrix.col_idx[i] as usize;
157            let val = matrix.values.as_ref().map(|v| v[i]).unwrap_or(1.0);
158            sum += val * x[col];
159        }
160
161        *y_row = alpha * sum + beta * *y_row;
162    }
163
164    Ok(())
165}
166
/// Compute dot product of two vectors.
///
/// Pairs elements positionally; if the slices differ in length, the
/// extra tail of the longer one is ignored (zip semantics).
pub fn dot(x: &[f64], y: &[f64]) -> f64 {
    let mut acc = 0.0;
    for (a, b) in x.iter().zip(y.iter()) {
        acc += a * b;
    }
    acc
}
171
/// Compute L2 (Euclidean) norm of a vector.
pub fn norm2(x: &[f64]) -> f64 {
    x.iter().map(|&v| v * v).sum::<f64>().sqrt()
}
176
/// Normalize vector in-place to unit L2 length.
///
/// Vectors with norm below 1e-10 are left untouched to avoid division
/// by (near-)zero.
pub fn normalize(x: &mut [f64]) {
    let n = x.iter().map(|&v| v * v).sum::<f64>().sqrt();
    if n > 1e-10 {
        x.iter_mut().for_each(|v| *v /= n);
    }
}
186
187/// Power iteration for dominant eigenvector.
188///
189/// Computes the eigenvector corresponding to the largest eigenvalue
190/// of the matrix using iterative multiplication.
191pub fn power_iteration(
192    matrix: &CsrMatrix,
193    max_iterations: usize,
194    tolerance: f64,
195) -> Result<(Vec<f64>, f64)> {
196    if matrix.num_rows == 0 {
197        return Err(GraphError::EmptyGraph);
198    }
199
200    // Initialize with uniform vector
201    let n = matrix.num_rows;
202    let mut x = vec![1.0 / (n as f64).sqrt(); n];
203    let mut eigenvalue = 0.0;
204
205    for _ in 0..max_iterations {
206        // y = A * x
207        let y = spmv(matrix, &x)?;
208
209        // Compute Rayleigh quotient (eigenvalue estimate)
210        let new_eigenvalue = dot(&x, &y);
211
212        // Normalize
213        let norm = norm2(&y);
214        if norm < 1e-10 {
215            break;
216        }
217
218        x = y.into_iter().map(|yi| yi / norm).collect();
219
220        // Check convergence
221        if (new_eigenvalue - eigenvalue).abs() < tolerance {
222            return Ok((x, new_eigenvalue));
223        }
224
225        eigenvalue = new_eigenvalue;
226    }
227
228    Ok((x, eigenvalue))
229}
230
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_spmv_identity() {
        // Identity matrix (diagonal with 1s).
        let edges = [(0, 0, 1.0), (1, 1, 1.0), (2, 2, 1.0)];
        let matrix = CsrMatrix::from_weighted_edges(3, &edges);

        let x = vec![1.0, 2.0, 3.0];
        let y = spmv(&matrix, &x).unwrap();

        // Identity matrix: y = I * x = x.
        assert!((y[0] - 1.0).abs() < 1e-10);
        assert!((y[1] - 2.0).abs() < 1e-10);
        assert!((y[2] - 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_spmv_unweighted() {
        // Adjacency matrix: 0 -> 1, 0 -> 2, 1 -> 2.
        let edges = [(0, 1), (0, 2), (1, 2)];
        let matrix = CsrMatrix::from_edges(3, &edges);

        // CSR row i contains the edges FROM i, so with x = [1, 1, 1]:
        // y[0] = x[1] + x[2] = 2
        // y[1] = x[2]        = 1
        // y[2] = 0 (node 2 has no outgoing edges)

        let x = vec![1.0, 1.0, 1.0];
        let y = spmv(&matrix, &x).unwrap();

        assert!((y[0] - 2.0).abs() < 1e-10);
        assert!((y[1] - 1.0).abs() < 1e-10);
        assert!((y[2] - 0.0).abs() < 1e-10);
    }

    #[test]
    fn test_spmv_weighted() {
        let edges = [(0, 1, 2.0), (0, 2, 3.0), (1, 2, 4.0)];
        let matrix = CsrMatrix::from_weighted_edges(3, &edges);

        let x = vec![1.0, 1.0, 1.0];
        let y = spmv(&matrix, &x).unwrap();

        // y[0] = 2.0 * 1 + 3.0 * 1 = 5.0
        // y[1] = 4.0 * 1 = 4.0
        // y[2] = 0

        assert!((y[0] - 5.0).abs() < 1e-10);
        assert!((y[1] - 4.0).abs() < 1e-10);
        assert!((y[2] - 0.0).abs() < 1e-10);
    }

    #[test]
    fn test_spmv_alpha() {
        let edges = [(0, 1, 1.0)];
        let matrix = CsrMatrix::from_weighted_edges(2, &edges);

        let x = vec![1.0, 2.0];
        let config = SpmvConfig::new().with_alpha(0.5);
        let y = spmv_with_config(&matrix, &x, &config).unwrap();

        // y[0] = 0.5 * (1.0 * 2.0) = 1.0
        assert!((y[0] - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_spmv_dimension_mismatch() {
        let matrix = CsrMatrix::from_edges(3, &[(0, 1)]);
        let x = vec![1.0, 2.0]; // Wrong size: matrix expects 3 columns.

        let result = spmv(&matrix, &x);
        assert!(matches!(result, Err(GraphError::DimensionMismatch { .. })));
    }

    #[test]
    fn test_spmv_parallel_same_as_sequential() {
        // Both code paths must produce identical results for the same input.
        let edges = [(0, 1, 1.0), (0, 2, 2.0), (1, 2, 3.0), (2, 0, 4.0)];
        let matrix = CsrMatrix::from_weighted_edges(3, &edges);

        let x = vec![1.0, 2.0, 3.0];

        let seq = spmv(&matrix, &x).unwrap();
        let par = spmv_parallel(&matrix, &x).unwrap();

        for i in 0..3 {
            assert!((seq[i] - par[i]).abs() < 1e-10);
        }
    }

    #[test]
    fn test_spmv_axpby() {
        let edges = [(0, 1, 1.0)];
        let matrix = CsrMatrix::from_weighted_edges(2, &edges);

        let x = vec![1.0, 2.0];
        let mut y = vec![1.0, 1.0];

        // y = 2 * A * x + 3 * y
        spmv_axpby(&matrix, &x, &mut y, 2.0, 3.0).unwrap();

        // y[0] = 2 * (1 * 2) + 3 * 1 = 7
        // y[1] = 2 * 0 + 3 * 1 = 3
        assert!((y[0] - 7.0).abs() < 1e-10);
        assert!((y[1] - 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_dot_product() {
        let x = vec![1.0, 2.0, 3.0];
        let y = vec![4.0, 5.0, 6.0];

        // 1*4 + 2*5 + 3*6 = 32
        assert!((dot(&x, &y) - 32.0).abs() < 1e-10);
    }

    #[test]
    fn test_norm2() {
        // 3-4-5 triangle: ||(3, 4)|| = 5.
        let x = vec![3.0, 4.0];
        assert!((norm2(&x) - 5.0).abs() < 1e-10);
    }

    #[test]
    fn test_normalize() {
        let mut x = vec![3.0, 4.0];
        normalize(&mut x);

        // Result is the unit vector (0.6, 0.8).
        assert!((norm2(&x) - 1.0).abs() < 1e-10);
        assert!((x[0] - 0.6).abs() < 1e-10);
        assert!((x[1] - 0.8).abs() < 1e-10);
    }

    #[test]
    fn test_power_iteration() {
        // Simple 2x2 matrix with known spectrum:
        // A = [[2, 1], [1, 2]]
        // Eigenvalues: 3, 1
        // Eigenvector for 3: [1/sqrt(2), 1/sqrt(2)]

        let mut builder = crate::models::CsrMatrixBuilder::new(2);
        builder.add_weighted_edge(0, 0, 2.0);
        builder.add_weighted_edge(0, 1, 1.0);
        builder.add_weighted_edge(1, 0, 1.0);
        builder.add_weighted_edge(1, 1, 2.0);
        let matrix = builder.build();

        let (eigenvector, eigenvalue) = power_iteration(&matrix, 100, 1e-6).unwrap();

        // Eigenvalue should be close to 3.
        assert!((eigenvalue - 3.0).abs() < 0.01);

        // Eigenvector should be [1/sqrt(2), 1/sqrt(2)] up to sign,
        // so compare absolute values.
        let expected = 1.0 / 2.0_f64.sqrt();
        assert!((eigenvector[0].abs() - expected).abs() < 0.01);
        assert!((eigenvector[1].abs() - expected).abs() < 0.01);
    }
}