Skip to main content

scirs2_graph/
simd_ops.rs

//! SIMD-accelerated operations for graph algorithms
//!
//! This module provides SIMD-optimized implementations of performance-critical
//! graph algorithm kernels, leveraging the `scirs2-core` SIMD infrastructure
//! via the `SimdUnifiedOps` trait.
//!
//! ## Accelerated Operations
//!
//! - **PageRank power iteration**: vector-scalar multiply, L1 norm, convergence check
//! - **Spectral methods**: graph Laplacian construction, eigenvalue iteration helpers
//! - **Adjacency matrix-vector products**: dense and CSR-style sparse matvec
//! - **Betweenness centrality accumulation**: batch dependency accumulation
//! - **BFS/DFS distance vector operations**: distance initialization, level updates
//!
//! ## Usage
//!
//! All functions are feature-gated under `#[cfg(feature = "simd")]` and automatically
//! dispatch to the optimal SIMD implementation available on the current platform
//! (AVX-512, AVX2, SSE2, NEON, or scalar fallback).
//!
//! ```rust,ignore
//! use scirs2_graph::simd_ops::{SimdPageRank, SimdSpectral, SimdAdjacency};
//! ```

use scirs2_core::ndarray::{Array1, Array2, ArrayView1, ArrayView2};
use scirs2_core::simd_ops::SimdUnifiedOps;

use crate::error::{GraphError, Result};

// ---------------------------------------------------------------------------
// PageRank SIMD operations
// ---------------------------------------------------------------------------
34/// SIMD-accelerated operations for PageRank power iteration.
35///
36/// These functions accelerate the inner loops of PageRank computation:
37/// distributing rank contributions, computing convergence (L1 norm of
38/// the difference vector), and normalizing the rank vector.
39pub struct SimdPageRank;
40
41impl SimdPageRank {
42    /// Perform one PageRank power iteration step using SIMD.
43    ///
44    /// Computes `new_rank = (1 - damping) / n + damping * M * old_rank`
45    /// where `M` is the column-stochastic transition matrix.
46    ///
47    /// # Arguments
48    /// * `transition_matrix` - Row-stochastic transition matrix (n x n)
49    /// * `old_rank` - Current rank vector (length n)
50    /// * `damping` - Damping factor (typically 0.85)
51    ///
52    /// # Returns
53    /// The new rank vector after one iteration.
54    pub fn power_iteration_step(
55        transition_matrix: &Array2<f64>,
56        old_rank: &Array1<f64>,
57        damping: f64,
58    ) -> Result<Array1<f64>> {
59        let (rows, cols) = transition_matrix.dim();
60        if cols != old_rank.len() {
61            return Err(GraphError::InvalidGraph(format!(
62                "Transition matrix columns ({}) do not match rank vector length ({})",
63                cols,
64                old_rank.len()
65            )));
66        }
67
68        let n = rows;
69        let base_rank = (1.0 - damping) / n as f64;
70
71        // SIMD matrix-vector product: M * old_rank
72        let matvec_result = SimdAdjacency::dense_matvec(transition_matrix, old_rank)?;
73
74        // new_rank = base_rank + damping * (M * old_rank)
75        let scaled = f64::simd_scalar_mul(&matvec_result.view(), damping);
76        let base_vec = Array1::from_elem(n, base_rank);
77        let new_rank = f64::simd_add(&base_vec.view(), &scaled.view());
78
79        Ok(new_rank)
80    }
81
82    /// Compute L1 norm of the difference between two rank vectors (convergence check).
83    ///
84    /// Returns `sum(|new_rank[i] - old_rank[i]|)` using SIMD acceleration.
85    ///
86    /// # Arguments
87    /// * `new_rank` - New rank vector
88    /// * `old_rank` - Previous rank vector
89    ///
90    /// # Returns
91    /// The L1 norm of the difference.
92    pub fn convergence_l1(new_rank: &ArrayView1<f64>, old_rank: &ArrayView1<f64>) -> f64 {
93        let diff = f64::simd_sub(new_rank, old_rank);
94        let abs_diff = f64::simd_abs(&diff.view());
95        f64::simd_sum(&abs_diff.view())
96    }
97
98    /// Check if PageRank has converged by comparing L1 norm against tolerance.
99    ///
100    /// # Arguments
101    /// * `new_rank` - New rank vector
102    /// * `old_rank` - Previous rank vector
103    /// * `tolerance` - Convergence threshold
104    ///
105    /// # Returns
106    /// `true` if the L1 norm of the difference is below tolerance.
107    pub fn has_converged(
108        new_rank: &ArrayView1<f64>,
109        old_rank: &ArrayView1<f64>,
110        tolerance: f64,
111    ) -> bool {
112        Self::convergence_l1(new_rank, old_rank) < tolerance
113    }
114
115    /// SIMD-accelerated vector-scalar multiply for rank distribution.
116    ///
117    /// Computes `result[i] = rank[i] * scalar` for distributing a node's
118    /// rank equally among its neighbors.
119    ///
120    /// # Arguments
121    /// * `rank` - The rank vector
122    /// * `scalar` - The scalar multiplier (e.g., `damping / degree`)
123    ///
124    /// # Returns
125    /// The scaled vector.
126    pub fn scale_rank(rank: &ArrayView1<f64>, scalar: f64) -> Array1<f64> {
127        f64::simd_scalar_mul(rank, scalar)
128    }
129
130    /// Normalize a rank vector so it sums to 1.0.
131    ///
132    /// # Arguments
133    /// * `rank` - The rank vector to normalize
134    ///
135    /// # Returns
136    /// The normalized rank vector, or an error if the sum is zero.
137    pub fn normalize_rank(rank: &Array1<f64>) -> Result<Array1<f64>> {
138        let total = f64::simd_sum(&rank.view());
139        if total.abs() < 1e-15 {
140            return Err(GraphError::AlgorithmError(
141                "Cannot normalize zero-sum rank vector".to_string(),
142            ));
143        }
144        Ok(f64::simd_scalar_mul(&rank.view(), 1.0 / total))
145    }
146
147    /// Compute damping-adjusted teleportation vector.
148    ///
149    /// For dangling nodes (nodes with no outgoing edges), their rank is
150    /// distributed uniformly. This function computes the dangling node
151    /// contribution: `(damping / n) * sum(rank[i] for dangling node i)`.
152    ///
153    /// # Arguments
154    /// * `rank` - Current rank vector
155    /// * `is_dangling` - Boolean mask: `true` for dangling nodes
156    /// * `damping` - Damping factor
157    ///
158    /// # Returns
159    /// The uniform teleportation contribution per node.
160    pub fn dangling_node_contribution(
161        rank: &ArrayView1<f64>,
162        is_dangling: &[bool],
163        damping: f64,
164    ) -> f64 {
165        let n = rank.len();
166        if n == 0 {
167            return 0.0;
168        }
169
170        // Create a mask vector: 1.0 for dangling, 0.0 otherwise
171        let mask: Array1<f64> =
172            Array1::from_iter(is_dangling.iter().map(|&d| if d { 1.0 } else { 0.0 }));
173
174        // Sum rank values for dangling nodes via SIMD dot product
175        let dangling_sum = f64::simd_dot(rank, &mask.view());
176
177        damping * dangling_sum / n as f64
178    }
179
180    /// Full SIMD-accelerated PageRank computation.
181    ///
182    /// Runs the PageRank power iteration until convergence or max iterations.
183    ///
184    /// # Arguments
185    /// * `transition_matrix` - Row-stochastic transition matrix (n x n)
186    /// * `damping` - Damping factor (typically 0.85)
187    /// * `tolerance` - Convergence threshold
188    /// * `max_iterations` - Maximum number of iterations
189    ///
190    /// # Returns
191    /// A tuple of (rank vector, iterations used).
192    pub fn compute_pagerank(
193        transition_matrix: &Array2<f64>,
194        damping: f64,
195        tolerance: f64,
196        max_iterations: usize,
197    ) -> Result<(Array1<f64>, usize)> {
198        let n = transition_matrix.nrows();
199        if n == 0 {
200            return Err(GraphError::InvalidGraph(
201                "Cannot compute PageRank on empty graph".to_string(),
202            ));
203        }
204        if transition_matrix.ncols() != n {
205            return Err(GraphError::InvalidGraph(format!(
206                "Transition matrix must be square, got {}x{}",
207                n,
208                transition_matrix.ncols()
209            )));
210        }
211
212        let mut rank = Array1::from_elem(n, 1.0 / n as f64);
213
214        for iteration in 0..max_iterations {
215            let new_rank = Self::power_iteration_step(transition_matrix, &rank, damping)?;
216
217            if Self::has_converged(&new_rank.view(), &rank.view(), tolerance) {
218                return Ok((new_rank, iteration + 1));
219            }
220
221            rank = new_rank;
222        }
223
224        // Return final state even if not converged
225        Ok((rank, max_iterations))
226    }
227}

// ---------------------------------------------------------------------------
// Spectral SIMD operations
// ---------------------------------------------------------------------------

233/// SIMD-accelerated operations for spectral graph methods.
234///
235/// Provides optimized kernels for Laplacian construction, eigenvalue
236/// iteration helpers, and spectral embedding computations.
237pub struct SimdSpectral;
238
239impl SimdSpectral {
240    /// Construct the standard graph Laplacian L = D - A using SIMD.
241    ///
242    /// The Laplacian is computed row-by-row: `L[i,j] = degree[i] * delta(i,j) - A[i,j]`.
243    /// SIMD acceleration is applied to the row-level subtraction.
244    ///
245    /// # Arguments
246    /// * `adjacency` - The adjacency matrix (n x n)
247    /// * `degrees` - The degree vector (length n)
248    ///
249    /// # Returns
250    /// The standard Laplacian matrix.
251    pub fn standard_laplacian(
252        adjacency: &Array2<f64>,
253        degrees: &Array1<f64>,
254    ) -> Result<Array2<f64>> {
255        let n = adjacency.nrows();
256        if adjacency.ncols() != n {
257            return Err(GraphError::InvalidGraph(
258                "Adjacency matrix must be square".to_string(),
259            ));
260        }
261        if degrees.len() != n {
262            return Err(GraphError::InvalidGraph(format!(
263                "Degree vector length ({}) does not match adjacency matrix size ({})",
264                degrees.len(),
265                n
266            )));
267        }
268
269        let mut laplacian = Array2::zeros((n, n));
270
271        for i in 0..n {
272            let adj_row = adjacency.row(i);
273
274            // Use SIMD to negate the adjacency row: -A[i, :]
275            if let Some(adj_slice) = adj_row.as_slice() {
276                let adj_view = ArrayView1::from(adj_slice);
277                let neg_row = f64::simd_scalar_mul(&adj_view, -1.0);
278                if let Some(neg_slice) = neg_row.as_slice() {
279                    let mut lap_row = laplacian.row_mut(i);
280                    if let Some(lap_slice) = lap_row.as_slice_mut() {
281                        lap_slice.copy_from_slice(neg_slice);
282                    }
283                }
284            } else {
285                // Fallback for non-contiguous data
286                for j in 0..n {
287                    laplacian[[i, j]] = -adjacency[[i, j]];
288                }
289            }
290
291            // Set diagonal element to degree
292            laplacian[[i, i]] = degrees[i];
293        }
294
295        Ok(laplacian)
296    }
297
298    /// Construct the normalized Laplacian L_norm = I - D^{-1/2} A D^{-1/2} using SIMD.
299    ///
300    /// # Arguments
301    /// * `adjacency` - The adjacency matrix (n x n)
302    /// * `degrees` - The degree vector (length n)
303    ///
304    /// # Returns
305    /// The normalized Laplacian matrix.
306    pub fn normalized_laplacian(
307        adjacency: &Array2<f64>,
308        degrees: &Array1<f64>,
309    ) -> Result<Array2<f64>> {
310        let n = adjacency.nrows();
311        if adjacency.ncols() != n {
312            return Err(GraphError::InvalidGraph(
313                "Adjacency matrix must be square".to_string(),
314            ));
315        }
316        if degrees.len() != n {
317            return Err(GraphError::InvalidGraph(format!(
318                "Degree vector length ({}) does not match adjacency matrix size ({})",
319                degrees.len(),
320                n
321            )));
322        }
323
324        // Compute D^{-1/2} using SIMD sqrt + reciprocal
325        let d_inv_sqrt = Self::compute_degree_inv_sqrt(degrees);
326
327        let mut normalized = Array2::zeros((n, n));
328
329        for i in 0..n {
330            let adj_row = adjacency.row(i);
331
332            if let Some(adj_slice) = adj_row.as_slice() {
333                let adj_view = ArrayView1::from(adj_slice);
334
335                // Scale the adjacency row: D^{-1/2}[i] * A[i,:] * D^{-1/2}[:]
336                let scaled_by_i = f64::simd_scalar_mul(&adj_view, d_inv_sqrt[i]);
337                let d_inv_sqrt_view = d_inv_sqrt.view();
338                let scaled_row = f64::simd_mul(&scaled_by_i.view(), &d_inv_sqrt_view);
339
340                // Negate to get -D^{-1/2} A D^{-1/2}
341                let neg_scaled = f64::simd_scalar_mul(&scaled_row.view(), -1.0);
342
343                if let Some(neg_slice) = neg_scaled.as_slice() {
344                    let mut norm_row = normalized.row_mut(i);
345                    if let Some(norm_slice) = norm_row.as_slice_mut() {
346                        norm_slice.copy_from_slice(neg_slice);
347                    }
348                }
349            } else {
350                for j in 0..n {
351                    normalized[[i, j]] = -d_inv_sqrt[i] * adjacency[[i, j]] * d_inv_sqrt[j];
352                }
353            }
354
355            // Add identity: diagonal becomes 1 - D^{-1/2}[i] * A[i,i] * D^{-1/2}[i]
356            // For simple graphs A[i,i] = 0, so diagonal = 1.0
357            normalized[[i, i]] += 1.0;
358        }
359
360        Ok(normalized)
361    }
362
363    /// Construct the random-walk Laplacian L_rw = I - D^{-1} A using SIMD.
364    ///
365    /// # Arguments
366    /// * `adjacency` - The adjacency matrix (n x n)
367    /// * `degrees` - The degree vector (length n)
368    ///
369    /// # Returns
370    /// The random-walk Laplacian matrix.
371    pub fn random_walk_laplacian(
372        adjacency: &Array2<f64>,
373        degrees: &Array1<f64>,
374    ) -> Result<Array2<f64>> {
375        let n = adjacency.nrows();
376        if adjacency.ncols() != n {
377            return Err(GraphError::InvalidGraph(
378                "Adjacency matrix must be square".to_string(),
379            ));
380        }
381        if degrees.len() != n {
382            return Err(GraphError::InvalidGraph(format!(
383                "Degree vector length ({}) does not match adjacency matrix size ({})",
384                degrees.len(),
385                n
386            )));
387        }
388
389        let mut rw_laplacian = Array2::zeros((n, n));
390
391        for i in 0..n {
392            let degree = degrees[i];
393            if degree > 0.0 {
394                let adj_row = adjacency.row(i);
395
396                if let Some(adj_slice) = adj_row.as_slice() {
397                    let adj_view = ArrayView1::from(adj_slice);
398                    // -(1/degree) * A[i,:]
399                    let scaled = f64::simd_scalar_mul(&adj_view, -1.0 / degree);
400                    if let Some(scaled_slice) = scaled.as_slice() {
401                        let mut rw_row = rw_laplacian.row_mut(i);
402                        if let Some(rw_slice) = rw_row.as_slice_mut() {
403                            rw_slice.copy_from_slice(scaled_slice);
404                        }
405                    }
406                } else {
407                    for j in 0..n {
408                        rw_laplacian[[i, j]] = -adjacency[[i, j]] / degree;
409                    }
410                }
411            }
412
413            // Identity contribution
414            rw_laplacian[[i, i]] += 1.0;
415        }
416
417        Ok(rw_laplacian)
418    }
419
420    /// Compute D^{-1/2} vector using SIMD acceleration.
421    ///
422    /// For each degree d, computes 1/sqrt(d) if d > 0, else 0.
423    ///
424    /// # Arguments
425    /// * `degrees` - The degree vector
426    ///
427    /// # Returns
428    /// The D^{-1/2} diagonal vector.
429    pub fn compute_degree_inv_sqrt(degrees: &Array1<f64>) -> Array1<f64> {
430        let sqrt_degrees = f64::simd_sqrt(&degrees.view());
431        let n = degrees.len();
432        let mut result = Array1::zeros(n);
433
434        for i in 0..n {
435            let s = sqrt_degrees[i];
436            result[i] = if s > 1e-15 { 1.0 / s } else { 0.0 };
437        }
438
439        result
440    }
441
442    /// SIMD-accelerated power iteration for finding the dominant eigenvector.
443    ///
444    /// Used in spectral methods to find eigenvectors of the Laplacian.
445    ///
446    /// # Arguments
447    /// * `matrix` - Symmetric matrix (n x n)
448    /// * `initial` - Initial guess vector (length n)
449    /// * `max_iterations` - Maximum iterations
450    /// * `tolerance` - Convergence tolerance
451    ///
452    /// # Returns
453    /// A tuple of (eigenvalue, eigenvector, iterations used).
454    pub fn power_iteration(
455        matrix: &Array2<f64>,
456        initial: &Array1<f64>,
457        max_iterations: usize,
458        tolerance: f64,
459    ) -> Result<(f64, Array1<f64>, usize)> {
460        let n = matrix.nrows();
461        if matrix.ncols() != n {
462            return Err(GraphError::InvalidGraph(
463                "Matrix must be square for power iteration".to_string(),
464            ));
465        }
466        if initial.len() != n {
467            return Err(GraphError::InvalidGraph(format!(
468                "Initial vector length ({}) does not match matrix size ({})",
469                initial.len(),
470                n
471            )));
472        }
473
474        let mut v = initial.clone();
475        let norm = f64::simd_norm(&v.view());
476        if norm < 1e-15 {
477            return Err(GraphError::AlgorithmError(
478                "Initial vector is (near) zero".to_string(),
479            ));
480        }
481        v = f64::simd_scalar_mul(&v.view(), 1.0 / norm);
482
483        let mut eigenvalue = 0.0;
484
485        for iteration in 0..max_iterations {
486            // w = M * v (SIMD matvec)
487            let w = SimdAdjacency::dense_matvec(matrix, &v)?;
488
489            // New eigenvalue estimate: v^T * w
490            let new_eigenvalue = f64::simd_dot(&v.view(), &w.view());
491
492            // Normalize w
493            let w_norm = f64::simd_norm(&w.view());
494            if w_norm < 1e-15 {
495                return Err(GraphError::AlgorithmError(
496                    "Power iteration produced zero vector".to_string(),
497                ));
498            }
499            let new_v = f64::simd_scalar_mul(&w.view(), 1.0 / w_norm);
500
501            // Check convergence
502            if (new_eigenvalue - eigenvalue).abs() < tolerance {
503                return Ok((new_eigenvalue, new_v, iteration + 1));
504            }
505
506            eigenvalue = new_eigenvalue;
507            v = new_v;
508        }
509
510        Ok((eigenvalue, v, max_iterations))
511    }
512
513    /// SIMD-accelerated Gram-Schmidt orthogonalization.
514    ///
515    /// Orthogonalizes a set of column vectors in-place.
516    ///
517    /// # Arguments
518    /// * `vectors` - Matrix whose columns are vectors to orthogonalize (n x k)
519    pub fn gram_schmidt_orthogonalize(vectors: &mut Array2<f64>) {
520        let (_n, k) = vectors.dim();
521
522        for i in 0..k {
523            // Normalize current vector
524            let norm = {
525                let col = vectors.column(i);
526                f64::simd_norm(&col)
527            };
528            if norm > 1e-12 {
529                let mut col = vectors.column_mut(i);
530                col /= norm;
531            }
532
533            // Orthogonalize subsequent vectors against this one
534            for j in (i + 1)..k {
535                let (dot_val, current_data) = {
536                    let current = vectors.column(i);
537                    let next = vectors.column(j);
538
539                    let dot = if let (Some(c_slice), Some(n_slice)) =
540                        (current.as_slice(), next.as_slice())
541                    {
542                        let c_view = ArrayView1::from(c_slice);
543                        let n_view = ArrayView1::from(n_slice);
544                        f64::simd_dot(&c_view, &n_view)
545                    } else {
546                        current.dot(&next)
547                    };
548
549                    (dot, current.to_owned())
550                };
551
552                // next = next - dot * current
553                let mut next_col = vectors.column_mut(j);
554                let projection = f64::simd_scalar_mul(&current_data.view(), dot_val);
555
556                if let (Some(next_slice), Some(proj_slice)) =
557                    (next_col.as_slice_mut(), projection.as_slice())
558                {
559                    for (n_val, p_val) in next_slice.iter_mut().zip(proj_slice.iter()) {
560                        *n_val -= p_val;
561                    }
562                } else {
563                    for idx in 0..next_col.len() {
564                        next_col[idx] -= projection[idx];
565                    }
566                }
567            }
568        }
569    }
570
571    /// SIMD-accelerated Rayleigh quotient: v^T M v / (v^T v).
572    ///
573    /// # Arguments
574    /// * `matrix` - Symmetric matrix (n x n)
575    /// * `vector` - Input vector (length n)
576    ///
577    /// # Returns
578    /// The Rayleigh quotient value.
579    pub fn rayleigh_quotient(matrix: &Array2<f64>, vector: &ArrayView1<f64>) -> Result<f64> {
580        let mv = SimdAdjacency::dense_matvec(matrix, &vector.to_owned())?;
581        let numerator = f64::simd_dot(vector, &mv.view());
582        let denominator = f64::simd_dot(vector, vector);
583
584        if denominator.abs() < 1e-15 {
585            return Err(GraphError::AlgorithmError(
586                "Rayleigh quotient: zero-norm vector".to_string(),
587            ));
588        }
589
590        Ok(numerator / denominator)
591    }
592}

// ---------------------------------------------------------------------------
// Adjacency matrix-vector product SIMD operations
// ---------------------------------------------------------------------------

598/// SIMD-accelerated adjacency matrix-vector product operations.
599///
600/// Provides optimized dense and sparse matrix-vector products that are
601/// central to many graph algorithms (PageRank, spectral methods, centrality).
602pub struct SimdAdjacency;
603
604impl SimdAdjacency {
605    /// Dense adjacency matrix-vector product using SIMD.
606    ///
607    /// Computes `y = A * x` where A is the adjacency matrix.
608    /// Each row uses SIMD dot product for acceleration.
609    ///
610    /// # Arguments
611    /// * `matrix` - Dense adjacency matrix (rows x cols)
612    /// * `vector` - Input vector (length cols)
613    ///
614    /// # Returns
615    /// Result vector of length rows.
616    pub fn dense_matvec(matrix: &Array2<f64>, vector: &Array1<f64>) -> Result<Array1<f64>> {
617        let (rows, cols) = matrix.dim();
618        if cols != vector.len() {
619            return Err(GraphError::InvalidGraph(format!(
620                "Matrix columns ({}) do not match vector length ({})",
621                cols,
622                vector.len()
623            )));
624        }
625
626        let mut result = Array1::zeros(rows);
627
628        for i in 0..rows {
629            let row = matrix.row(i);
630            if let (Some(row_slice), Some(vec_slice)) = (row.as_slice(), vector.as_slice()) {
631                let row_view = ArrayView1::from(row_slice);
632                let vec_view = ArrayView1::from(vec_slice);
633                result[i] = f64::simd_dot(&row_view, &vec_view);
634            } else {
635                // Fallback for non-contiguous data
636                result[i] = row.dot(&vector.view());
637            }
638        }
639
640        Ok(result)
641    }
642
643    /// Dense matrix-vector product with alpha scaling: y = alpha * A * x.
644    ///
645    /// # Arguments
646    /// * `alpha` - Scaling factor
647    /// * `matrix` - Dense matrix (rows x cols)
648    /// * `vector` - Input vector (length cols)
649    ///
650    /// # Returns
651    /// Result vector of length rows.
652    pub fn dense_matvec_scaled(
653        alpha: f64,
654        matrix: &Array2<f64>,
655        vector: &Array1<f64>,
656    ) -> Result<Array1<f64>> {
657        let raw = Self::dense_matvec(matrix, vector)?;
658        Ok(f64::simd_scalar_mul(&raw.view(), alpha))
659    }
660
661    /// CSR-style sparse matrix-vector product using SIMD.
662    ///
663    /// Computes `y = A * x` where A is stored in compressed sparse row format.
664    /// For each row, gathers x values at column indices and performs SIMD dot product.
665    ///
666    /// # Arguments
667    /// * `row_ptr` - Row pointer array (length n+1)
668    /// * `col_idx` - Column index array
669    /// * `values` - Non-zero values array
670    /// * `x` - Input vector
671    /// * `n_rows` - Number of rows
672    ///
673    /// # Returns
674    /// Result vector of length n_rows.
675    pub fn sparse_csr_matvec(
676        row_ptr: &[usize],
677        col_idx: &[usize],
678        values: &[f64],
679        x: &[f64],
680        n_rows: usize,
681    ) -> Result<Vec<f64>> {
682        if row_ptr.len() != n_rows + 1 {
683            return Err(GraphError::InvalidGraph(format!(
684                "row_ptr length ({}) should be n_rows + 1 ({})",
685                row_ptr.len(),
686                n_rows + 1
687            )));
688        }
689
690        let mut y = vec![0.0; n_rows];
691
692        for i in 0..n_rows {
693            let row_start = row_ptr[i];
694            let row_end = row_ptr[i + 1];
695            let row_nnz = row_end - row_start;
696
697            if row_nnz == 0 {
698                continue;
699            }
700
701            let row_values = &values[row_start..row_end];
702            let row_indices = &col_idx[row_start..row_end];
703
704            // Gather x values at column indices
705            let x_gathered: Vec<f64> = row_indices.iter().map(|&j| x[j]).collect();
706
707            // SIMD dot product for this row
708            let row_view = ArrayView1::from(row_values);
709            let x_view = ArrayView1::from(x_gathered.as_slice());
710            y[i] = f64::simd_dot(&row_view, &x_view);
711        }
712
713        Ok(y)
714    }
715
716    /// Compute weighted adjacency matrix-vector product.
717    ///
718    /// Like `dense_matvec`, but scales each row by a per-node weight
719    /// (e.g., inverse degree for normalization).
720    ///
721    /// # Arguments
722    /// * `matrix` - Adjacency matrix (n x n)
723    /// * `vector` - Input vector (length n)
724    /// * `row_weights` - Per-row weight vector (length n)
725    ///
726    /// # Returns
727    /// Result vector where `y[i] = row_weights[i] * sum_j(A[i,j] * x[j])`.
728    pub fn weighted_matvec(
729        matrix: &Array2<f64>,
730        vector: &Array1<f64>,
731        row_weights: &Array1<f64>,
732    ) -> Result<Array1<f64>> {
733        let raw = Self::dense_matvec(matrix, vector)?;
734        Ok(f64::simd_mul(&raw.view(), &row_weights.view()))
735    }
736
737    /// Compute the transition matrix from an adjacency matrix using SIMD.
738    ///
739    /// Each row of the adjacency matrix is divided by the row sum (out-degree),
740    /// producing a row-stochastic matrix.
741    ///
742    /// # Arguments
743    /// * `adjacency` - Dense adjacency matrix (n x n)
744    ///
745    /// # Returns
746    /// Row-stochastic transition matrix.
747    pub fn adjacency_to_transition(adjacency: &Array2<f64>) -> Result<Array2<f64>> {
748        let n = adjacency.nrows();
749        if adjacency.ncols() != n {
750            return Err(GraphError::InvalidGraph(
751                "Adjacency matrix must be square".to_string(),
752            ));
753        }
754
755        let mut transition = Array2::zeros((n, n));
756
757        for i in 0..n {
758            let row = adjacency.row(i);
759            let row_sum = f64::simd_sum(&row);
760
761            if row_sum > 1e-15 {
762                if let Some(row_slice) = row.as_slice() {
763                    let row_view = ArrayView1::from(row_slice);
764                    let normalized = f64::simd_scalar_mul(&row_view, 1.0 / row_sum);
765                    if let Some(norm_slice) = normalized.as_slice() {
766                        let mut t_row = transition.row_mut(i);
767                        if let Some(t_slice) = t_row.as_slice_mut() {
768                            t_slice.copy_from_slice(norm_slice);
769                        }
770                    }
771                } else {
772                    for j in 0..n {
773                        transition[[i, j]] = adjacency[[i, j]] / row_sum;
774                    }
775                }
776            }
777            // Rows with zero sum (dangling nodes) remain all zeros
778        }
779
780        Ok(transition)
781    }
782}

// ---------------------------------------------------------------------------
// Betweenness centrality SIMD operations
// ---------------------------------------------------------------------------

788/// SIMD-accelerated operations for betweenness centrality computation.
789///
790/// Accelerates the dependency accumulation phase of Brandes' algorithm,
791/// which is the most compute-intensive part of betweenness centrality.
792pub struct SimdBetweenness;
793
794impl SimdBetweenness {
795    /// SIMD-accelerated dependency accumulation for Brandes' algorithm.
796    ///
797    /// Updates the dependency values: `delta[v] += (sigma[v] / sigma[w]) * (1 + delta[w])`
798    /// for all predecessors v of w in the shortest-path DAG.
799    ///
800    /// # Arguments
801    /// * `delta` - Dependency accumulator vector (modified in place)
802    /// * `sigma` - Number of shortest paths through each node
803    /// * `predecessors` - For each node w, list of predecessor node indices
804    /// * `order` - Nodes in reverse BFS order (for back-propagation)
805    pub fn accumulate_dependencies(
806        delta: &mut [f64],
807        sigma: &[f64],
808        predecessors: &[Vec<usize>],
809        order: &[usize],
810    ) {
811        // Process nodes in reverse BFS order
812        for &w in order.iter().rev() {
813            let sigma_w = sigma[w];
814            if sigma_w <= 0.0 {
815                continue;
816            }
817
818            let coeff = (1.0 + delta[w]) / sigma_w;
819
820            for &v in &predecessors[w] {
821                delta[v] += sigma[v] * coeff;
822            }
823        }
824    }
825
826    /// Batch update betweenness scores from a single-source dependency vector.
827    ///
828    /// Adds the dependency values to the centrality scores using SIMD.
829    ///
830    /// # Arguments
831    /// * `centrality` - Running betweenness centrality scores (modified in place)
832    /// * `delta` - Dependency values from a single-source computation
833    /// * `source` - Index of the source node (excluded from update)
834    pub fn batch_update_centrality(centrality: &mut [f64], delta: &[f64], source: usize) {
835        let n = centrality.len().min(delta.len());
836
837        if n == 0 {
838            return;
839        }
840
841        // Use SIMD addition for bulk update
842        let cent_view = ArrayView1::from(&centrality[..n]);
843        let delta_view = ArrayView1::from(&delta[..n]);
844        let updated = f64::simd_add(&cent_view, &delta_view);
845
846        if let Some(updated_slice) = updated.as_slice() {
847            centrality[..n].copy_from_slice(updated_slice);
848        }
849
850        // Exclude source node contribution
851        if source < n {
852            centrality[source] -= delta[source];
853        }
854    }
855
856    /// Normalize betweenness centrality scores.
857    ///
858    /// For undirected graphs, divide by (n-1)(n-2).
859    /// For directed graphs, divide by (n-1)(n-2) as well (Brandes' convention).
860    ///
861    /// # Arguments
862    /// * `centrality` - Betweenness scores to normalize (modified in place)
863    /// * `n` - Number of nodes in the graph
864    /// * `directed` - Whether the graph is directed
865    pub fn normalize(centrality: &mut [f64], n: usize, directed: bool) {
866        if n <= 2 {
867            return;
868        }
869
870        let mut scale = 1.0 / ((n - 1) * (n - 2)) as f64;
871        if !directed {
872            // For undirected graphs, each pair is counted twice
873            scale *= 2.0;
874        }
875
876        let cent_view = ArrayView1::from(&*centrality);
877        let scaled = f64::simd_scalar_mul(&cent_view, scale);
878
879        if let Some(scaled_slice) = scaled.as_slice() {
880            centrality.copy_from_slice(scaled_slice);
881        }
882    }
883}
884
885// ---------------------------------------------------------------------------
886// BFS/DFS distance vector operations
887// ---------------------------------------------------------------------------
888
/// SIMD-accelerated operations for BFS/DFS distance and level vectors.
///
/// Provides optimized initialization, update, and reduction operations
/// on the distance/level arrays used in graph traversals. Unreachable
/// nodes are represented by `f64::INFINITY` throughout.
pub struct SimdTraversal;
894
895impl SimdTraversal {
896    /// Initialize a distance vector with infinity (or a sentinel value).
897    ///
898    /// Sets all entries to `f64::INFINITY` except the source node.
899    ///
900    /// # Arguments
901    /// * `n` - Number of nodes
902    /// * `source` - Source node index
903    ///
904    /// # Returns
905    /// Distance vector with `distances[source] = 0.0` and all others infinity.
906    pub fn init_distances(n: usize, source: usize) -> Array1<f64> {
907        let mut distances = Array1::from_elem(n, f64::INFINITY);
908        if source < n {
909            distances[source] = 0.0;
910        }
911        distances
912    }
913
914    /// Initialize a distance vector for multi-source BFS.
915    ///
916    /// # Arguments
917    /// * `n` - Number of nodes
918    /// * `sources` - Slice of source node indices
919    ///
920    /// # Returns
921    /// Distance vector with `distances[s] = 0.0` for each source s.
922    pub fn init_distances_multi_source(n: usize, sources: &[usize]) -> Array1<f64> {
923        let mut distances = Array1::from_elem(n, f64::INFINITY);
924        for &s in sources {
925            if s < n {
926                distances[s] = 0.0;
927            }
928        }
929        distances
930    }
931
932    /// SIMD-accelerated BFS frontier relaxation.
933    ///
934    /// For each node in the current frontier, attempts to relax the distance
935    /// of its neighbors. This is the inner loop of BFS.
936    ///
937    /// # Arguments
938    /// * `distances` - Current distance vector (modified in place)
939    /// * `adjacency_row` - Adjacency row for the current node
940    /// * `current_distance` - Distance of the current node
941    ///
942    /// # Returns
943    /// Vector of neighbor indices that were relaxed (for the next frontier).
944    pub fn relax_neighbors(
945        distances: &mut Array1<f64>,
946        adjacency_row: &ArrayView1<f64>,
947        current_distance: f64,
948    ) -> Vec<usize> {
949        let n = distances.len().min(adjacency_row.len());
950        let new_dist = current_distance + 1.0;
951        let mut relaxed = Vec::new();
952
953        for j in 0..n {
954            if adjacency_row[j] > 0.0 && distances[j] > new_dist {
955                distances[j] = new_dist;
956                relaxed.push(j);
957            }
958        }
959
960        relaxed
961    }
962
963    /// Compute the eccentricity of a node from its distance vector.
964    ///
965    /// The eccentricity is the maximum finite distance to any reachable node.
966    ///
967    /// # Arguments
968    /// * `distances` - Distance vector from a single source
969    ///
970    /// # Returns
971    /// The eccentricity value. Returns 0.0 if no nodes are reachable.
972    pub fn eccentricity(distances: &ArrayView1<f64>) -> f64 {
973        let mut max_dist = 0.0_f64;
974        for &d in distances.iter() {
975            if d.is_finite() && d > max_dist {
976                max_dist = d;
977            }
978        }
979        max_dist
980    }
981
982    /// Count the number of reachable nodes from a distance vector using SIMD.
983    ///
984    /// A node is reachable if its distance is finite.
985    ///
986    /// # Arguments
987    /// * `distances` - Distance vector from a single source
988    ///
989    /// # Returns
990    /// Number of reachable nodes (including the source itself).
991    pub fn count_reachable(distances: &ArrayView1<f64>) -> usize {
992        distances.iter().filter(|&&d| d.is_finite()).count()
993    }
994
995    /// Compute the sum of finite distances using SIMD.
996    ///
997    /// Used for computing closeness centrality: C(v) = (n-1) / sum(d(v, w)).
998    ///
999    /// # Arguments
1000    /// * `distances` - Distance vector from a single source
1001    ///
1002    /// # Returns
1003    /// Sum of all finite distances.
1004    pub fn sum_finite_distances(distances: &ArrayView1<f64>) -> f64 {
1005        // Replace infinities with 0.0, then sum
1006        let finite_dists: Array1<f64> =
1007            Array1::from_iter(
1008                distances
1009                    .iter()
1010                    .map(|&d| if d.is_finite() { d } else { 0.0 }),
1011            );
1012        f64::simd_sum(&finite_dists.view())
1013    }
1014
1015    /// SIMD-accelerated element-wise minimum of two distance vectors.
1016    ///
1017    /// Useful for merging BFS trees or computing all-pairs shortest paths.
1018    ///
1019    /// # Arguments
1020    /// * `a` - First distance vector
1021    /// * `b` - Second distance vector
1022    ///
1023    /// # Returns
1024    /// Element-wise minimum vector.
1025    pub fn element_wise_min(a: &ArrayView1<f64>, b: &ArrayView1<f64>) -> Array1<f64> {
1026        f64::simd_min(a, b)
1027    }
1028
1029    /// Compute the diameter of a graph from an all-pairs distance matrix.
1030    ///
1031    /// The diameter is the maximum eccentricity over all nodes.
1032    ///
1033    /// # Arguments
1034    /// * `distance_matrix` - All-pairs distance matrix (n x n)
1035    ///
1036    /// # Returns
1037    /// The graph diameter. Returns 0.0 for empty or disconnected graphs.
1038    pub fn diameter_from_distance_matrix(distance_matrix: &ArrayView2<f64>) -> f64 {
1039        let n = distance_matrix.nrows();
1040        let mut max_dist = 0.0_f64;
1041
1042        for i in 0..n {
1043            let row = distance_matrix.row(i);
1044            let ecc = Self::eccentricity(&row);
1045            if ecc > max_dist {
1046                max_dist = ecc;
1047            }
1048        }
1049
1050        max_dist
1051    }
1052}
1053
1054// ---------------------------------------------------------------------------
1055// Utility: SIMD-accelerated vector operations common to graph algorithms
1056// ---------------------------------------------------------------------------
1057
/// General SIMD-accelerated vector utilities for graph computations.
///
/// Thin wrappers around the `SimdUnifiedOps` primitives (norms, dot
/// products, AXPY, weighted sums) shared by the algorithm-specific
/// helpers in this module.
pub struct SimdGraphUtils;
1060
1061impl SimdGraphUtils {
1062    /// SIMD-accelerated AXPY: y = alpha * x + y.
1063    ///
1064    /// # Arguments
1065    /// * `alpha` - Scalar multiplier
1066    /// * `x` - Input vector
1067    /// * `y` - Accumulator vector (modified in place)
1068    pub fn axpy(alpha: f64, x: &ArrayView1<f64>, y: &mut [f64]) {
1069        let scaled_x = f64::simd_scalar_mul(x, alpha);
1070        let y_view = ArrayView1::from(&*y);
1071        let result = f64::simd_add(&y_view, &scaled_x.view());
1072
1073        if let Some(result_slice) = result.as_slice() {
1074            y.copy_from_slice(result_slice);
1075        }
1076    }
1077
1078    /// L2 norm of a vector using SIMD.
1079    pub fn norm_l2(v: &ArrayView1<f64>) -> f64 {
1080        f64::simd_norm(v)
1081    }
1082
1083    /// L1 norm of a vector using SIMD.
1084    pub fn norm_l1(v: &ArrayView1<f64>) -> f64 {
1085        let abs_v = f64::simd_abs(v);
1086        f64::simd_sum(&abs_v.view())
1087    }
1088
1089    /// Linf norm (max absolute value) of a vector using SIMD.
1090    pub fn norm_linf(v: &ArrayView1<f64>) -> f64 {
1091        let abs_v = f64::simd_abs(v);
1092        f64::simd_max_element(&abs_v.view())
1093    }
1094
1095    /// SIMD dot product.
1096    pub fn dot(a: &ArrayView1<f64>, b: &ArrayView1<f64>) -> f64 {
1097        f64::simd_dot(a, b)
1098    }
1099
1100    /// Normalize a vector to unit L2 norm using SIMD.
1101    ///
1102    /// Returns None if the vector is zero.
1103    pub fn normalize_l2(v: &ArrayView1<f64>) -> Option<Array1<f64>> {
1104        let norm = f64::simd_norm(v);
1105        if norm < 1e-15 {
1106            None
1107        } else {
1108            Some(f64::simd_scalar_mul(v, 1.0 / norm))
1109        }
1110    }
1111
1112    /// Compute cosine similarity between two vectors using SIMD.
1113    pub fn cosine_similarity(a: &ArrayView1<f64>, b: &ArrayView1<f64>) -> f64 {
1114        let dot = f64::simd_dot(a, b);
1115        let norm_a = f64::simd_norm(a);
1116        let norm_b = f64::simd_norm(b);
1117        let denom = norm_a * norm_b;
1118        if denom < 1e-15 {
1119            0.0
1120        } else {
1121            dot / denom
1122        }
1123    }
1124
1125    /// Element-wise vector difference using SIMD.
1126    pub fn difference(a: &ArrayView1<f64>, b: &ArrayView1<f64>) -> Array1<f64> {
1127        f64::simd_sub(a, b)
1128    }
1129
1130    /// Element-wise weighted sum: result = a * weight_a + b * weight_b.
1131    pub fn weighted_sum(
1132        a: &ArrayView1<f64>,
1133        weight_a: f64,
1134        b: &ArrayView1<f64>,
1135        weight_b: f64,
1136    ) -> Array1<f64> {
1137        let scaled_a = f64::simd_scalar_mul(a, weight_a);
1138        let scaled_b = f64::simd_scalar_mul(b, weight_b);
1139        f64::simd_add(&scaled_a.view(), &scaled_b.view())
1140    }
1141}
1142
1143// ---------------------------------------------------------------------------
1144// Tests
1145// ---------------------------------------------------------------------------
1146
1147#[cfg(test)]
1148mod tests {
1149    use super::*;
1150    use scirs2_core::ndarray::{Array1, Array2};
1151
    // ---- PageRank tests ----

    #[test]
    fn test_pagerank_convergence_check() {
        let a = Array1::from_vec(vec![0.25, 0.25, 0.25, 0.25]);
        let b = Array1::from_vec(vec![0.24, 0.26, 0.25, 0.25]);

        // |0.25-0.24| + |0.25-0.26| = 0.02
        let l1 = SimdPageRank::convergence_l1(&a.view(), &b.view());
        assert!(
            (l1 - 0.02).abs() < 1e-10,
            "L1 norm should be 0.02, got {l1}"
        );

        // Converged only when the L1 difference is below the tolerance.
        assert!(SimdPageRank::has_converged(&a.view(), &b.view(), 0.05));
        assert!(!SimdPageRank::has_converged(&a.view(), &b.view(), 0.01));
    }

    #[test]
    fn test_pagerank_scale_rank() {
        // Scaling by 0.5 halves every entry.
        let rank = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);
        let scaled = SimdPageRank::scale_rank(&rank.view(), 0.5);
        let expected = [0.5, 1.0, 1.5, 2.0];
        for (got, exp) in scaled.iter().zip(expected.iter()) {
            assert!((got - exp).abs() < 1e-10, "Expected {exp}, got {got}");
        }
    }

    #[test]
    fn test_pagerank_normalize() {
        // After normalization the rank vector is a probability distribution.
        let rank = Array1::from_vec(vec![2.0, 3.0, 5.0]);
        let normalized = SimdPageRank::normalize_rank(&rank).expect("Normalization should succeed");
        let sum: f64 = normalized.iter().sum();
        assert!(
            (sum - 1.0).abs() < 1e-10,
            "Normalized rank should sum to 1.0, got {sum}"
        );
    }

    #[test]
    fn test_pagerank_normalize_zero() {
        // The all-zero vector has no valid normalization.
        let rank = Array1::from_vec(vec![0.0, 0.0, 0.0]);
        let result = SimdPageRank::normalize_rank(&rank);
        assert!(result.is_err(), "Normalizing zero vector should fail");
    }

    #[test]
    fn test_pagerank_dangling_contribution() {
        let rank = Array1::from_vec(vec![0.2, 0.3, 0.5]);
        let is_dangling = vec![true, false, true];
        let contribution =
            SimdPageRank::dangling_node_contribution(&rank.view(), &is_dangling, 0.85);
        // dangling sum = 0.2 + 0.5 = 0.7
        // contribution = damping * dangling_sum / n = 0.85 * 0.7 / 3 = 0.1983...
        let expected = 0.85 * 0.7 / 3.0;
        assert!(
            (contribution - expected).abs() < 1e-10,
            "Expected {expected}, got {contribution}"
        );
    }

    #[test]
    fn test_pagerank_power_iteration() {
        // Simple 3-node cycle: 0 -> 1 -> 2 -> 0
        // Transition matrix (row-stochastic):
        // [0, 1, 0]
        // [0, 0, 1]
        // [1, 0, 0]
        let transition =
            Array2::from_shape_vec((3, 3), vec![0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0])
                .expect("Test: failed to create transition matrix");

        let (rank, _iters) = SimdPageRank::compute_pagerank(&transition, 0.85, 1e-8, 200)
            .expect("PageRank should converge");

        // Symmetric cycle => all ranks equal
        let expected = 1.0 / 3.0;
        for (i, &r) in rank.iter().enumerate() {
            assert!(
                (r - expected).abs() < 0.01,
                "Node {i} rank {r} should be near {expected}"
            );
        }
    }

    #[test]
    fn test_pagerank_empty_graph() {
        // A 0x0 transition matrix has no nodes to rank.
        let transition = Array2::zeros((0, 0));
        let result = SimdPageRank::compute_pagerank(&transition, 0.85, 1e-6, 100);
        assert!(result.is_err(), "Empty graph should return error");
    }
1242
    // ---- Spectral tests ----

    #[test]
    fn test_standard_laplacian() {
        // Simple path graph: 0 -- 1 -- 2
        // Adjacency:
        // [0, 1, 0]
        // [1, 0, 1]
        // [0, 1, 0]
        let adj = Array2::from_shape_vec((3, 3), vec![0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0])
            .expect("Test: failed to create adjacency matrix");
        let degrees = Array1::from_vec(vec![1.0, 2.0, 1.0]);

        let lap = SimdSpectral::standard_laplacian(&adj, &degrees)
            .expect("Laplacian construction should succeed");

        // Expected Laplacian L = D - A:
        // [ 1, -1,  0]
        // [-1,  2, -1]
        // [ 0, -1,  1]
        assert!((lap[[0, 0]] - 1.0).abs() < 1e-10);
        assert!((lap[[0, 1]] - (-1.0)).abs() < 1e-10);
        assert!((lap[[0, 2]] - 0.0).abs() < 1e-10);
        assert!((lap[[1, 0]] - (-1.0)).abs() < 1e-10);
        assert!((lap[[1, 1]] - 2.0).abs() < 1e-10);
        assert!((lap[[1, 2]] - (-1.0)).abs() < 1e-10);
        assert!((lap[[2, 0]] - 0.0).abs() < 1e-10);
        assert!((lap[[2, 1]] - (-1.0)).abs() < 1e-10);
        assert!((lap[[2, 2]] - 1.0).abs() < 1e-10);

        // Laplacian property: row sums = 0
        for i in 0..3 {
            let row_sum: f64 = lap.row(i).sum();
            assert!(
                row_sum.abs() < 1e-10,
                "Laplacian row {i} sum should be 0, got {row_sum}"
            );
        }
    }

    #[test]
    fn test_normalized_laplacian() {
        // Complete graph K3
        let adj = Array2::from_shape_vec((3, 3), vec![0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0])
            .expect("Test: failed to create adjacency matrix");
        let degrees = Array1::from_vec(vec![2.0, 2.0, 2.0]);

        let lap = SimdSpectral::normalized_laplacian(&adj, &degrees)
            .expect("Normalized Laplacian construction should succeed");

        // Diagonal should be 1.0 (for simple graphs with no self-loops)
        for i in 0..3 {
            assert!(
                (lap[[i, i]] - 1.0).abs() < 1e-10,
                "Diagonal element [{i},{i}] should be 1.0, got {}",
                lap[[i, i]]
            );
        }

        // Off-diagonal: -adj / sqrt(d_i * d_j) = -1/sqrt(2*2) = -0.5
        assert!(
            (lap[[0, 1]] - (-0.5)).abs() < 1e-10,
            "Off-diagonal [0,1] should be -0.5, got {}",
            lap[[0, 1]]
        );
    }

    #[test]
    fn test_random_walk_laplacian() {
        // Path graph 0 -- 1 -- 2 again.
        let adj = Array2::from_shape_vec((3, 3), vec![0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0])
            .expect("Test: failed to create adjacency matrix");
        let degrees = Array1::from_vec(vec![1.0, 2.0, 1.0]);

        let lap = SimdSpectral::random_walk_laplacian(&adj, &degrees)
            .expect("Random-walk Laplacian construction should succeed");

        // Diagonal: all 1.0
        for i in 0..3 {
            assert!(
                (lap[[i, i]] - 1.0).abs() < 1e-10,
                "Diagonal [{i},{i}] should be 1.0"
            );
        }

        // Row 0: degree=1, so off-diagonal = -1/1 * adj
        assert!((lap[[0, 1]] - (-1.0)).abs() < 1e-10);
        assert!((lap[[0, 2]] - 0.0).abs() < 1e-10);

        // Row 1: degree=2, so off-diagonal = -1/2 * adj
        assert!((lap[[1, 0]] - (-0.5)).abs() < 1e-10);
        assert!((lap[[1, 2]] - (-0.5)).abs() < 1e-10);
    }

    #[test]
    fn test_degree_inv_sqrt() {
        // Zero degrees (isolated nodes) must map to 0.0, not infinity.
        let degrees = Array1::from_vec(vec![4.0, 9.0, 0.0, 1.0]);
        let result = SimdSpectral::compute_degree_inv_sqrt(&degrees);

        assert!((result[0] - 0.5).abs() < 1e-10, "1/sqrt(4) = 0.5");
        assert!((result[1] - 1.0 / 3.0).abs() < 1e-10, "1/sqrt(9) = 1/3");
        assert!((result[2] - 0.0).abs() < 1e-10, "1/sqrt(0) => 0.0");
        assert!((result[3] - 1.0).abs() < 1e-10, "1/sqrt(1) = 1.0");
    }

    #[test]
    fn test_power_iteration() {
        // Symmetric 2x2 matrix with known eigenvalues
        // M = [[2, 1], [1, 2]], eigenvalues: 3, 1
        let matrix = Array2::from_shape_vec((2, 2), vec![2.0, 1.0, 1.0, 2.0])
            .expect("Test: failed to create matrix");
        let initial = Array1::from_vec(vec![1.0, 0.5]);

        let (eigenvalue, eigenvector, _iters) =
            SimdSpectral::power_iteration(&matrix, &initial, 100, 1e-10)
                .expect("Power iteration should converge");

        // Dominant eigenvalue should be 3.0
        assert!(
            (eigenvalue - 3.0).abs() < 0.01,
            "Eigenvalue should be near 3.0, got {eigenvalue}"
        );

        // Corresponding eigenvector should be proportional to [1, 1]
        let ratio = eigenvector[0] / eigenvector[1];
        assert!(
            (ratio - 1.0).abs() < 0.01,
            "Eigenvector ratio should be near 1.0, got {ratio}"
        );
    }

    #[test]
    fn test_rayleigh_quotient() {
        // Identity matrix: v^T I v / v^T v = 1.0 for any nonzero v.
        let identity = Array2::eye(3);
        let v = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let rq = SimdSpectral::rayleigh_quotient(&identity, &v.view())
            .expect("Rayleigh quotient should succeed");
        assert!(
            (rq - 1.0).abs() < 1e-10,
            "Rayleigh quotient of identity should be 1.0, got {rq}"
        );
    }

    #[test]
    fn test_gram_schmidt() {
        // Two linearly independent columns in R^3.
        let mut vectors = Array2::from_shape_vec((3, 2), vec![1.0, 1.0, 0.0, 1.0, 0.0, 0.0])
            .expect("Test: failed to create matrix");

        SimdSpectral::gram_schmidt_orthogonalize(&mut vectors);

        // Check orthogonality
        let col0 = vectors.column(0);
        let col1 = vectors.column(1);
        let dot = f64::simd_dot(&col0, &col1);
        assert!(
            dot.abs() < 1e-10,
            "Columns should be orthogonal, dot product = {dot}"
        );

        // Check unit norms
        let norm0 = f64::simd_norm(&col0);
        let norm1 = f64::simd_norm(&col1);
        assert!(
            (norm0 - 1.0).abs() < 1e-10,
            "Column 0 norm should be 1.0, got {norm0}"
        );
        assert!(
            (norm1 - 1.0).abs() < 1e-10,
            "Column 1 norm should be 1.0, got {norm1}"
        );
    }
1414
    // ---- Adjacency tests ----

    #[test]
    fn test_dense_matvec() {
        // A 2x3 matrix times the all-ones vector yields the row sums.
        let matrix = Array2::from_shape_vec((2, 3), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
            .expect("Test: failed to create matrix");
        let vector = Array1::from_vec(vec![1.0, 1.0, 1.0]);

        let result = SimdAdjacency::dense_matvec(&matrix, &vector).expect("Matvec should succeed");

        assert!((result[0] - 6.0).abs() < 1e-10, "Row 0 dot = 6.0");
        assert!((result[1] - 15.0).abs() < 1e-10, "Row 1 dot = 15.0");
    }

    #[test]
    fn test_dense_matvec_dimension_mismatch() {
        // A 2x3 matrix requires a length-3 vector; length 2 must be rejected.
        let matrix =
            Array2::from_shape_vec((2, 3), vec![1.0; 6]).expect("Test: failed to create matrix");
        let vector = Array1::from_vec(vec![1.0, 1.0]);

        let result = SimdAdjacency::dense_matvec(&matrix, &vector);
        assert!(result.is_err(), "Dimension mismatch should return error");
    }

    #[test]
    fn test_dense_matvec_scaled() {
        let matrix = Array2::from_shape_vec((2, 2), vec![1.0, 2.0, 3.0, 4.0])
            .expect("Test: failed to create matrix");
        let vector = Array1::from_vec(vec![1.0, 1.0]);

        let result = SimdAdjacency::dense_matvec_scaled(2.0, &matrix, &vector)
            .expect("Scaled matvec should succeed");

        assert!((result[0] - 6.0).abs() < 1e-10, "2*(1+2) = 6.0");
        assert!((result[1] - 14.0).abs() < 1e-10, "2*(3+4) = 14.0");
    }

    #[test]
    fn test_sparse_csr_matvec() {
        // CSR encoding of the matrix:
        // [1 0 2]
        // [0 3 0]
        // [4 0 5]
        let row_ptr = vec![0, 2, 3, 5];
        let col_idx = vec![0, 2, 1, 0, 2];
        let values = vec![1.0, 2.0, 3.0, 4.0, 5.0];
        let x = vec![1.0, 2.0, 3.0];

        let y = SimdAdjacency::sparse_csr_matvec(&row_ptr, &col_idx, &values, &x, 3)
            .expect("Sparse matvec should succeed");

        assert!((y[0] - 7.0).abs() < 1e-10, "1*1 + 2*3 = 7.0, got {}", y[0]);
        assert!((y[1] - 6.0).abs() < 1e-10, "3*2 = 6.0, got {}", y[1]);
        assert!(
            (y[2] - 19.0).abs() < 1e-10,
            "4*1 + 5*3 = 19.0, got {}",
            y[2]
        );
    }

    #[test]
    fn test_adjacency_to_transition() {
        // Adjacency: [[0,1,1],[1,0,0],[1,0,0]]
        let adj = Array2::from_shape_vec((3, 3), vec![0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0])
            .expect("Test: failed to create adjacency matrix");

        let transition = SimdAdjacency::adjacency_to_transition(&adj)
            .expect("Transition matrix construction should succeed");

        // Row 0: sum=2, so [0, 0.5, 0.5]
        assert!((transition[[0, 0]] - 0.0).abs() < 1e-10);
        assert!((transition[[0, 1]] - 0.5).abs() < 1e-10);
        assert!((transition[[0, 2]] - 0.5).abs() < 1e-10);

        // Row 1: sum=1, so [1, 0, 0]
        assert!((transition[[1, 0]] - 1.0).abs() < 1e-10);
        assert!((transition[[1, 1]] - 0.0).abs() < 1e-10);

        // Row 2: sum=1, so [1, 0, 0]
        assert!((transition[[2, 0]] - 1.0).abs() < 1e-10);
        assert!((transition[[2, 2]] - 0.0).abs() < 1e-10);

        // All row sums should be 1.0 (row-stochastic)
        for i in 0..3 {
            let row_sum: f64 = transition.row(i).sum();
            assert!(
                (row_sum - 1.0).abs() < 1e-10,
                "Transition row {i} sum should be 1.0, got {row_sum}"
            );
        }
    }

    #[test]
    fn test_weighted_matvec() {
        let matrix = Array2::from_shape_vec((2, 2), vec![1.0, 2.0, 3.0, 4.0])
            .expect("Test: failed to create matrix");
        let vector = Array1::from_vec(vec![1.0, 1.0]);
        let weights = Array1::from_vec(vec![0.5, 2.0]);

        let result = SimdAdjacency::weighted_matvec(&matrix, &vector, &weights)
            .expect("Weighted matvec should succeed");

        // Row 0: 0.5 * (1+2) = 1.5
        // Row 1: 2.0 * (3+4) = 14.0
        assert!((result[0] - 1.5).abs() < 1e-10);
        assert!((result[1] - 14.0).abs() < 1e-10);
    }
1522
    // ---- Betweenness tests ----

    #[test]
    fn test_dependency_accumulation() {
        // Simple path: 0 -- 1 -- 2
        // BFS from 0: order = [0, 1, 2]
        // sigma = [1, 1, 1]
        // predecessors: [[], [0], [1]]
        let mut delta = vec![0.0, 0.0, 0.0];
        let sigma = vec![1.0, 1.0, 1.0];
        let predecessors = vec![vec![], vec![0], vec![1]];
        let order = vec![0, 1, 2];

        SimdBetweenness::accumulate_dependencies(&mut delta, &sigma, &predecessors, &order);

        // Back-propagation (nodes processed in reverse order):
        // Node 2 has no successors in order, delta[2] = 0
        // Node 1: delta[1] += sigma[1]/sigma[2] * (1 + delta[2]) = 1
        // Node 0: delta[0] += sigma[0]/sigma[1] * (1 + delta[1]) = 2
        assert!(
            (delta[2] - 0.0).abs() < 1e-10,
            "delta[2] should be 0.0, got {}",
            delta[2]
        );
        assert!(
            (delta[1] - 1.0).abs() < 1e-10,
            "delta[1] should be 1.0, got {}",
            delta[1]
        );
        assert!(
            (delta[0] - 2.0).abs() < 1e-10,
            "delta[0] should be 2.0, got {}",
            delta[0]
        );
    }

    #[test]
    fn test_batch_update_centrality() {
        let mut centrality = vec![1.0, 2.0, 3.0];
        let delta = vec![0.5, 1.0, 1.5];

        SimdBetweenness::batch_update_centrality(&mut centrality, &delta, 1);

        // centrality += delta, then subtract delta[source]
        // [1.5, 3.0, 4.5] then centrality[1] -= 1.0 => [1.5, 2.0, 4.5]
        assert!((centrality[0] - 1.5).abs() < 1e-10);
        assert!((centrality[1] - 2.0).abs() < 1e-10);
        assert!((centrality[2] - 4.5).abs() < 1e-10);
    }

    #[test]
    fn test_betweenness_normalize() {
        let mut centrality = vec![6.0, 12.0, 18.0];
        SimdBetweenness::normalize(&mut centrality, 4, false);

        // Undirected, n=4: scale = 2.0 / ((4-1) * (4-2)) = 2/6 = 1/3
        let scale = 2.0 / 6.0;
        assert!((centrality[0] - 6.0 * scale).abs() < 1e-10);
        assert!((centrality[1] - 12.0 * scale).abs() < 1e-10);
        assert!((centrality[2] - 18.0 * scale).abs() < 1e-10);
    }
1583
    // ---- Traversal tests ----

    #[test]
    fn test_init_distances() {
        // Only the source (node 2) starts at distance 0.
        let dist = SimdTraversal::init_distances(5, 2);
        assert_eq!(dist.len(), 5);
        assert!(dist[0].is_infinite());
        assert!(dist[1].is_infinite());
        assert!((dist[2] - 0.0).abs() < 1e-10);
        assert!(dist[3].is_infinite());
        assert!(dist[4].is_infinite());
    }

    #[test]
    fn test_init_distances_multi_source() {
        // Both sources (0 and 3) start at distance 0.
        let dist = SimdTraversal::init_distances_multi_source(5, &[0, 3]);
        assert!((dist[0] - 0.0).abs() < 1e-10);
        assert!(dist[1].is_infinite());
        assert!(dist[2].is_infinite());
        assert!((dist[3] - 0.0).abs() < 1e-10);
        assert!(dist[4].is_infinite());
    }

    #[test]
    fn test_eccentricity() {
        // The infinite entry (unreachable node) must be ignored.
        let dist = Array1::from_vec(vec![0.0, 1.0, 2.0, f64::INFINITY, 3.0]);
        let ecc = SimdTraversal::eccentricity(&dist.view());
        assert!(
            (ecc - 3.0).abs() < 1e-10,
            "Eccentricity should be 3.0, got {ecc}"
        );
    }

    #[test]
    fn test_count_reachable() {
        let dist = Array1::from_vec(vec![0.0, 1.0, f64::INFINITY, 2.0, f64::INFINITY]);
        let count = SimdTraversal::count_reachable(&dist.view());
        assert_eq!(count, 3, "Should have 3 reachable nodes");
    }

    #[test]
    fn test_sum_finite_distances() {
        // 0 + 1 + 3 = 4; infinities contribute nothing.
        let dist = Array1::from_vec(vec![0.0, 1.0, f64::INFINITY, 3.0, f64::INFINITY]);
        let sum = SimdTraversal::sum_finite_distances(&dist.view());
        assert!(
            (sum - 4.0).abs() < 1e-10,
            "Sum of finite distances should be 4.0, got {sum}"
        );
    }

    #[test]
    fn test_element_wise_min() {
        let a = Array1::from_vec(vec![1.0, 5.0, 3.0]);
        let b = Array1::from_vec(vec![4.0, 2.0, 3.0]);
        let result = SimdTraversal::element_wise_min(&a.view(), &b.view());
        assert!((result[0] - 1.0).abs() < 1e-10);
        assert!((result[1] - 2.0).abs() < 1e-10);
        assert!((result[2] - 3.0).abs() < 1e-10);
    }

    #[test]
    fn test_relax_neighbors() {
        let mut distances = Array1::from_vec(vec![0.0, f64::INFINITY, f64::INFINITY, 1.0]);
        let adj_row = Array1::from_vec(vec![0.0, 1.0, 1.0, 0.0]);

        let relaxed = SimdTraversal::relax_neighbors(&mut distances, &adj_row.view(), 0.0);

        assert_eq!(relaxed, vec![1, 2], "Nodes 1 and 2 should be relaxed");
        assert!((distances[1] - 1.0).abs() < 1e-10);
        assert!((distances[2] - 1.0).abs() < 1e-10);
        // Node 3 was already at distance 1.0, which is <= 1.0, so not relaxed
        assert!((distances[3] - 1.0).abs() < 1e-10);
    }
1657
    // ---- Utility tests ----

    #[test]
    fn test_axpy() {
        let x = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let mut y = vec![4.0, 5.0, 6.0];

        // y = 2*x + y
        SimdGraphUtils::axpy(2.0, &x.view(), &mut y);

        assert!((y[0] - 6.0).abs() < 1e-10, "4 + 2*1 = 6");
        assert!((y[1] - 9.0).abs() < 1e-10, "5 + 2*2 = 9");
        assert!((y[2] - 12.0).abs() < 1e-10, "6 + 2*3 = 12");
    }

    #[test]
    fn test_norms() {
        // Classic 3-4-5 right-triangle values.
        let v = Array1::from_vec(vec![3.0, -4.0]);

        let l2 = SimdGraphUtils::norm_l2(&v.view());
        assert!((l2 - 5.0).abs() < 1e-10, "L2 norm of [3,-4] = 5.0");

        let l1 = SimdGraphUtils::norm_l1(&v.view());
        assert!((l1 - 7.0).abs() < 1e-10, "L1 norm of [3,-4] = 7.0");

        let linf = SimdGraphUtils::norm_linf(&v.view());
        assert!((linf - 4.0).abs() < 1e-10, "Linf norm of [3,-4] = 4.0");
    }

    #[test]
    fn test_normalize_l2() {
        let v = Array1::from_vec(vec![3.0, 4.0]);
        let normalized =
            SimdGraphUtils::normalize_l2(&v.view()).expect("Normalization should succeed");

        let norm = f64::simd_norm(&normalized.view());
        assert!(
            (norm - 1.0).abs() < 1e-10,
            "Normalized vector should have unit norm"
        );

        // The zero vector has no direction to normalize.
        let zero = Array1::from_vec(vec![0.0, 0.0]);
        assert!(
            SimdGraphUtils::normalize_l2(&zero.view()).is_none(),
            "Zero vector should return None"
        );
    }

    #[test]
    fn test_cosine_similarity() {
        let a = Array1::from_vec(vec![1.0, 0.0]);
        let b = Array1::from_vec(vec![0.0, 1.0]);
        let sim = SimdGraphUtils::cosine_similarity(&a.view(), &b.view());
        assert!(sim.abs() < 1e-10, "Orthogonal vectors: cosine = 0");

        let c = Array1::from_vec(vec![1.0, 1.0]);
        let d = Array1::from_vec(vec![2.0, 2.0]);
        let sim2 = SimdGraphUtils::cosine_similarity(&c.view(), &d.view());
        assert!((sim2 - 1.0).abs() < 1e-10, "Parallel vectors: cosine = 1.0");
    }

    #[test]
    fn test_weighted_sum() {
        let a = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let b = Array1::from_vec(vec![4.0, 5.0, 6.0]);

        let result = SimdGraphUtils::weighted_sum(&a.view(), 2.0, &b.view(), 0.5);

        assert!((result[0] - 4.0).abs() < 1e-10, "2*1 + 0.5*4 = 4.0");
        assert!((result[1] - 6.5).abs() < 1e-10, "2*2 + 0.5*5 = 6.5");
        assert!((result[2] - 9.0).abs() < 1e-10, "2*3 + 0.5*6 = 9.0");
    }
1729
1730    #[test]
1731    fn test_diameter_from_distance_matrix() {
1732        let dist_matrix =
1733            Array2::from_shape_vec((3, 3), vec![0.0, 1.0, 2.0, 1.0, 0.0, 1.0, 2.0, 1.0, 0.0])
1734                .expect("Test: failed to create distance matrix");
1735
1736        let diameter = SimdTraversal::diameter_from_distance_matrix(&dist_matrix.view());
1737        assert!(
1738            (diameter - 2.0).abs() < 1e-10,
1739            "Diameter should be 2.0, got {diameter}"
1740        );
1741    }
1742}