use scirs2_core::ndarray::{s, Array1, Array2, ArrayView1, ArrayViewMut1};
#[cfg(feature = "parallel")]
use scirs2_core::parallel_ops::*;
use scirs2_core::random::{Rng, RngExt};
use scirs2_core::simd_ops::SimdUnifiedOps;
use crate::base::{DiGraph, EdgeWeight, Graph, Node};
use crate::error::{GraphError, Result};
/// SIMD-accelerated dense linear-algebra helpers used by the spectral routines.
///
/// Each helper prefers a contiguous-slice SIMD fast path and falls back to a
/// scalar loop when an `ndarray` view is not contiguous in memory.
mod simd_spectral {
    use super::*;

    /// Element-wise `a - b` for two matrices of identical shape.
    ///
    /// # Panics
    /// Panics if `a` and `b` differ in shape.
    #[allow(dead_code)]
    pub fn simd_matrix_subtract(a: &Array2<f64>, b: &Array2<f64>) -> Array2<f64> {
        assert_eq!(a.shape(), b.shape());
        let (rows, cols) = a.dim();
        let mut result = Array2::zeros((rows, cols));
        for i in 0..rows {
            let a_row = a.row(i);
            let b_row = b.row(i);
            let mut result_row = result.row_mut(i);
            // SIMD path requires all three rows to be contiguous slices.
            if let (Some(a_slice), Some(b_slice), Some(result_slice)) = (
                a_row.as_slice(),
                b_row.as_slice(),
                result_row.as_slice_mut(),
            ) {
                let a_view = scirs2_core::ndarray::ArrayView1::from(a_slice);
                let b_view = scirs2_core::ndarray::ArrayView1::from(b_slice);
                let result_array = f64::simd_sub(&a_view, &b_view);
                result_slice.copy_from_slice(result_array.as_slice().expect("Operation failed"));
            } else {
                // Scalar fallback for non-contiguous rows.
                for j in 0..cols {
                    result[[i, j]] = a[[i, j]] - b[[i, j]];
                }
            }
        }
        result
    }

    /// Computes `1 / sqrt(d)` per degree, mapping zero degrees (isolated
    /// nodes) to 0.0 so they contribute nothing to a normalized Laplacian.
    #[allow(dead_code)]
    pub fn simd_compute_degree_sqrt_inverse(degrees: &[f64]) -> Vec<f64> {
        let mut result = vec![0.0; degrees.len()];
        for (deg, res) in degrees.iter().zip(result.iter_mut()) {
            *res = if *deg > 0.0 { 1.0 / deg.sqrt() } else { 0.0 };
        }
        result
    }

    /// Euclidean (L2) norm of a vector.
    #[allow(dead_code)]
    pub fn simd_norm(vector: &ArrayView1<f64>) -> f64 {
        f64::simd_norm(vector)
    }

    /// Dense matrix-vector product `matrix * vector`, row by row.
    #[allow(dead_code)]
    pub fn simd_matvec(matrix: &Array2<f64>, vector: &ArrayView1<f64>) -> Array1<f64> {
        let (rows, _cols) = matrix.dim();
        let mut result = Array1::zeros(rows);
        for i in 0..rows {
            let row = matrix.row(i);
            // SIMD dot product when both operands are contiguous.
            if let (Some(row_slice), Some(vec_slice)) = (row.as_slice(), vector.as_slice()) {
                let row_view = ArrayView1::from(row_slice);
                let vec_view = ArrayView1::from(vec_slice);
                result[i] = f64::simd_dot(&row_view, &vec_view);
            } else {
                result[i] = row.dot(vector);
            }
        }
        result
    }

    /// In-place AXPY update: `y += alpha * x`.
    #[allow(dead_code)]
    pub fn simd_axpy(alpha: f64, x: &ArrayView1<f64>, y: &mut ArrayViewMut1<f64>) {
        if let (Some(x_slice), Some(y_slice)) = (x.as_slice(), y.as_slice_mut()) {
            let x_view = ArrayView1::from(x_slice);
            let scaled_x = f64::simd_scalar_mul(&x_view, alpha);
            let y_view = ArrayView1::from(&*y_slice);
            let result = f64::simd_add(&scaled_x.view(), &y_view);
            if let Some(result_slice) = result.as_slice() {
                y_slice.copy_from_slice(result_slice);
            }
        } else {
            // Scalar fallback for non-contiguous views.
            for (x_val, y_val) in x.iter().zip(y.iter_mut()) {
                *y_val += alpha * x_val;
            }
        }
    }

    /// Orthonormalizes the columns of `vectors` in place via classical
    /// Gram-Schmidt: normalize column `i`, then subtract its component from
    /// every later column.
    #[allow(dead_code)]
    pub fn simd_gram_schmidt(vectors: &mut Array2<f64>) {
        let (_n, k) = vectors.dim();
        for i in 0..k {
            let mut current_col = vectors.column_mut(i);
            // Fix: `&current_col` had been corrupted to `¤t_col` (the
            // `&curren` prefix turned into a `¤` HTML entity) and did not
            // compile.
            let norm = simd_norm(&current_col.view());
            if norm > 1e-12 {
                current_col /= norm;
            }
            for j in (i + 1)..k {
                // Borrow-checker dance: read column i immutably (and copy it
                // out), then mutate column j afterwards.
                let (dot_product, current_column_data) = {
                    let current_view = vectors.column(i);
                    let next_col = vectors.column(j);
                    let dot = if let (Some(curr_slice), Some(next_slice)) =
                        (current_view.as_slice(), next_col.as_slice())
                    {
                        let curr_view = ArrayView1::from(curr_slice);
                        let next_view = ArrayView1::from(next_slice);
                        f64::simd_dot(&curr_view, &next_view)
                    } else {
                        current_view.dot(&next_col)
                    };
                    (dot, current_view.to_owned())
                };
                let mut next_col = vectors.column_mut(j);
                // Same mojibake fix as above (`&current_column_data`).
                simd_axpy(-dot_product, &current_column_data.view(), &mut next_col);
            }
        }
    }
}
/// Finds the `k` smallest eigenvalues and eigenvectors of a symmetric matrix.
///
/// Solver parameters scale with the problem: small matrices (n <= 10) use a
/// looser tolerance and fewer iterations; larger ones get a tighter tolerance
/// and a bigger iteration budget.
///
/// # Errors
/// Fails when `k` exceeds the matrix dimension or the solver cannot converge.
#[allow(dead_code)]
fn compute_smallest_eigenvalues(
    matrix: &Array2<f64>,
    k: usize,
) -> std::result::Result<(Vec<f64>, Array2<f64>), String> {
    let n = matrix.shape()[0];
    if k > n {
        return Err("k cannot be larger than matrix size".to_string());
    }
    if k == 0 {
        // Nothing requested: empty spectrum, zero eigenvector columns.
        return Ok((vec![], Array2::zeros((n, 0))));
    }
    let (tol, max_iter) = if n <= 10 { (1e-6, 20) } else { (1e-10, 100) };
    lanczos_eigenvalues(matrix, k, tol, max_iter)
}
/// Deflation-based Lanczos driver: seeds the first eigenpair with the known
/// Laplacian kernel (eigenvalue 0, constant unit vector), then peels off the
/// remaining `k - 1` eigenpairs one at a time via deflated Lanczos runs.
///
/// NOTE(review): the hard-coded first eigenpair assumes `matrix` is the
/// Laplacian of a connected graph — TODO confirm for any non-Laplacian caller.
///
/// # Errors
/// Propagates failures from `deflated_lanczos_iteration`.
#[allow(dead_code)]
fn lanczos_eigenvalues(
    matrix: &Array2<f64>,
    k: usize,
    tolerance: f64,
    max_iterations: usize,
) -> std::result::Result<(Vec<f64>, Array2<f64>), String> {
    let n = matrix.shape()[0];
    if n == 0 {
        return Ok((vec![], Array2::zeros((0, 0))));
    }
    let mut eigenvalues = Vec::with_capacity(k);
    let mut eigenvectors = Array2::zeros((n, k));
    // Fix: seed the first eigenpair only when k > 0. The original pushed the
    // 0.0 eigenvalue unconditionally, so k == 0 returned one eigenvalue but
    // zero eigenvector columns.
    if k > 0 {
        eigenvalues.push(0.0);
        let val = 1.0 / (n as f64).sqrt();
        for i in 0..n {
            eigenvectors[[i, 0]] = val;
        }
    }
    for eig_idx in 1..k {
        // Each run is deflated against all previously found eigenvectors.
        let (eval, evec) = deflated_lanczos_iteration(
            matrix,
            &eigenvectors.slice(s![.., 0..eig_idx]).to_owned(),
            tolerance,
            max_iterations,
        )?;
        eigenvalues.push(eval);
        for i in 0..n {
            eigenvectors[[i, eig_idx]] = evec[i];
        }
    }
    Ok((eigenvalues, eigenvectors))
}
/// Heuristic smallest-nonzero-eigenpair estimate for tiny (n <= 4) Laplacians,
/// used in place of a full Lanczos run.
///
/// NOTE(review): relies on exact float equality (`avg_degree == 2.0`) and on
/// path/cycle closed forms — the estimate is only approximate for other graph
/// shapes; confirm against callers' accuracy needs.
fn simple_eigenvalue_for_small_matrix(
matrix: &Array2<f64>,
prev_eigenvectors: &Array2<f64>,
) -> std::result::Result<(f64, Array1<f64>), String> {
let n = matrix.shape()[0];
// The diagonal of a Laplacian holds the node degrees.
let degree_sum = (0..n).map(|i| matrix[[i, i]]).sum::<f64>();
let avg_degree = degree_sum / n as f64;
let approx_eigenvalue = if avg_degree == 2.0 {
if n == 4 {
let min_degree = (0..n).map(|i| matrix[[i, i]]).fold(f64::INFINITY, f64::min);
if min_degree == 2.0 {
// All degrees 2 with n = 4: the 4-cycle, whose Fiedler value is exactly 2.
2.0 } else {
// Path-like shape: 2(1 - cos(pi/n)) is the path-graph Fiedler value.
2.0 * (1.0 - (std::f64::consts::PI / n as f64).cos()) }
} else {
2.0 * (1.0 - (std::f64::consts::PI / n as f64).cos()) }
} else {
// Fallback guess: half the average degree.
avg_degree * 0.5
};
// Alternating +/-1 start vector, deflated against previous eigenvectors.
let mut eigenvector = Array1::zeros(n);
for i in 0..n {
eigenvector[i] = if i % 2 == 0 { 1.0 } else { -1.0 };
}
for j in 0..prev_eigenvectors.ncols() {
let prev_vec = prev_eigenvectors.column(j);
let proj = eigenvector.dot(&prev_vec);
eigenvector = eigenvector - proj * &prev_vec;
}
// Normalize unless deflation annihilated the vector.
let norm = (eigenvector.dot(&eigenvector)).sqrt();
if norm > 1e-12 {
eigenvector /= norm;
}
Ok((approx_eigenvalue, eigenvector))
}
/// One deflated Lanczos run: finds a small eigenpair of `matrix` orthogonal to
/// every column of `prev_eigenvectors`.
///
/// Matrices of size <= 4 are delegated to the closed-form heuristic solver.
/// Larger problems run the standard three-term Lanczos recurrence, repeatedly
/// re-orthogonalizing against the deflation subspace, with a periodic early
/// convergence check every 5 iterations.
///
/// # Errors
/// Fails when no suitable starting vector can be built, the tridiagonal solve
/// fails, or deflation annihilates the candidate eigenvector.
#[allow(dead_code)]
fn deflated_lanczos_iteration(
matrix: &Array2<f64>,
prev_eigenvectors: &Array2<f64>,
tolerance: f64,
max_iterations: usize,
) -> std::result::Result<(f64, Array1<f64>), String> {
let n = matrix.shape()[0];
if n <= 4 {
return simple_eigenvalue_for_small_matrix(matrix, prev_eigenvectors);
}
// Random start vector, then project out already-found eigenvectors.
let mut rng = scirs2_core::random::rng();
let mut v: Array1<f64> = Array1::from_shape_fn(n, |_| rng.random::<f64>() - 0.5);
for j in 0..prev_eigenvectors.ncols() {
let prev_vec = prev_eigenvectors.column(j);
let proj = v.dot(&prev_vec);
v = v - proj * &prev_vec;
}
let norm = simd_spectral::simd_norm(&v.view());
if norm < tolerance {
return Err("Failed to generate suitable starting vector".to_string());
}
v /= norm;
// Lanczos tridiagonal coefficients: alpha (diagonal), beta (off-diagonal).
let mut alpha = Vec::with_capacity(max_iterations);
let mut beta = Vec::with_capacity(max_iterations);
let mut lanczos_vectors = Array2::zeros((n, max_iterations.min(n)));
lanczos_vectors.column_mut(0).assign(&v);
// w = A*v, deflated against the previous eigenvectors after every product.
let mut w = matrix.dot(&v);
for j in 0..prev_eigenvectors.ncols() {
let prev_vec = prev_eigenvectors.column(j);
let proj = w.dot(&prev_vec);
w = w - proj * &prev_vec;
}
alpha.push(v.dot(&w));
w = w - alpha[0] * &v;
let mut prev_v = v.clone();
for i in 1..max_iterations.min(n) {
// beta_i = ||w||; a tiny beta signals Krylov-space breakdown, so stop.
let beta_val = simd_spectral::simd_norm(&w.view());
if beta_val < tolerance {
break;
}
beta.push(beta_val);
v = w / beta_val;
lanczos_vectors.column_mut(i).assign(&v);
w = matrix.dot(&v);
for j in 0..prev_eigenvectors.ncols() {
let prev_vec = prev_eigenvectors.column(j);
let proj = w.dot(&prev_vec);
w = w - proj * &prev_vec;
}
alpha.push(v.dot(&w));
// Three-term recurrence: w = A*v_i - alpha_i*v_i - beta_{i-1}*v_{i-1}.
w = w - alpha[i] * &v - beta[i - 1] * &prev_v;
prev_v = lanczos_vectors.column(i).to_owned();
// Periodic early exit: solve the current tridiagonal problem every 5 steps.
if i >= 3 && i % 5 == 0 {
let (tri_evals, tri_evecs) = solve_tridiagonal_eigenvalue(&alpha, &beta)?;
if !tri_evals.is_empty() {
let smallest_eval = tri_evals[0];
// Only accept strictly positive estimates (the zero mode is deflated away).
if smallest_eval > tolerance {
// Ritz vector: combine Lanczos basis vectors with tridiagonal weights.
let mut eigenvector = Array1::zeros(n);
for j in 0..=i {
eigenvector = eigenvector + tri_evecs[[j, 0]] * &lanczos_vectors.column(j);
}
for j in 0..prev_eigenvectors.ncols() {
let prev_vec = prev_eigenvectors.column(j);
let proj = eigenvector.dot(&prev_vec);
eigenvector = eigenvector - proj * &prev_vec;
}
let final_norm = simd_spectral::simd_norm(&eigenvector.view());
if final_norm > tolerance {
eigenvector /= final_norm;
return Ok((smallest_eval, eigenvector));
}
}
}
}
}
// No early exit: final solve with whatever Krylov space was built.
let (tri_evals, tri_evecs) = solve_tridiagonal_eigenvalue(&alpha, &beta)?;
if tri_evals.is_empty() {
return Err("Failed to find eigenvalue".to_string());
}
let smallest_eval = tri_evals[0];
let mut eigenvector = Array1::zeros(n);
let effective_size = alpha.len().min(lanczos_vectors.ncols());
for j in 0..effective_size {
eigenvector = eigenvector + tri_evecs[[j, 0]] * &lanczos_vectors.column(j);
}
// Re-deflate and normalize the final Ritz vector.
for j in 0..prev_eigenvectors.ncols() {
let prev_vec = prev_eigenvectors.column(j);
let proj = eigenvector.dot(&prev_vec);
eigenvector = eigenvector - proj * &prev_vec;
}
let final_norm = simd_spectral::simd_norm(&eigenvector.view());
if final_norm < tolerance {
return Err("Eigenvector deflation failed".to_string());
}
eigenvector /= final_norm;
Ok((smallest_eval, eigenvector))
}
/// Eigen-decomposition of the symmetric tridiagonal matrix defined by the
/// Lanczos coefficients `alpha` (diagonal) and `beta` (off-diagonal).
///
/// Small systems (n <= 10) are solved via `solve_small_symmetric_eigenvalue`.
/// Larger systems use a crude diagonal approximation: eigenvalues are the
/// diagonal entries, eigenvectors the corresponding standard basis vectors.
///
/// # Errors
/// Propagates failures from the small-matrix solver.
#[allow(dead_code)]
fn solve_tridiagonal_eigenvalue(
    alpha: &[f64],
    beta: &[f64],
) -> std::result::Result<(Vec<f64>, Array2<f64>), String> {
    let n = alpha.len();
    if n == 0 {
        return Ok((vec![], Array2::zeros((0, 0))));
    }
    if n == 1 {
        return Ok((
            vec![alpha[0]],
            Array2::from_shape_vec((1, 1), vec![1.0]).expect("Operation failed"),
        ));
    }
    // Assemble the dense symmetric tridiagonal matrix.
    let mut tri_matrix = Array2::zeros((n, n));
    for i in 0..n {
        tri_matrix[[i, i]] = alpha[i];
        if i > 0 {
            tri_matrix[[i, i - 1]] = beta[i - 1];
            tri_matrix[[i - 1, i]] = beta[i - 1];
        }
    }
    if n <= 10 {
        return solve_small_symmetric_eigenvalue(&tri_matrix);
    }
    // Diagonal approximation for large n.
    // Fix: keep eigenvectors paired with the sorted eigenvalues — the original
    // sorted the eigenvalues but returned an unpermuted identity matrix, so
    // column j no longer matched eigenvalue j.
    let mut order: Vec<usize> = (0..n).collect();
    order.sort_by(|&a, &b| {
        alpha[a]
            .partial_cmp(&alpha[b])
            .unwrap_or(std::cmp::Ordering::Equal)
    });
    let mut eigenvalues = Vec::with_capacity(n);
    let mut eigenvectors = Array2::zeros((n, n));
    for (col, &idx) in order.iter().enumerate() {
        eigenvalues.push(alpha[idx]);
        eigenvectors[[idx, col]] = 1.0;
    }
    Ok((eigenvalues, eigenvectors))
}
/// Eigen-decomposition of a small symmetric matrix.
///
/// n = 1 and n = 2 are solved exactly (2x2 via the characteristic polynomial);
/// larger matrices fall back to a diagonal approximation (eigenvalues from the
/// diagonal, standard basis eigenvectors).
///
/// # Errors
/// Currently never fails; the `Result` keeps the signature uniform with the
/// other solvers.
#[allow(dead_code)]
fn solve_small_symmetric_eigenvalue(
    matrix: &Array2<f64>,
) -> std::result::Result<(Vec<f64>, Array2<f64>), String> {
    let n = matrix.shape()[0];
    if n == 1 {
        return Ok((
            vec![matrix[[0, 0]]],
            Array2::from_shape_vec((1, 1), vec![1.0]).expect("Operation failed"),
        ));
    }
    if n == 2 {
        // Exact 2x2 symmetric solve: lambda = (tr +/- sqrt(tr^2 - 4 det)) / 2.
        let a = matrix[[0, 0]];
        let b = matrix[[0, 1]];
        let c = matrix[[1, 1]];
        let trace = a + c;
        let det = a * c - b * b;
        let discriminant = (trace * trace - 4.0 * det).sqrt();
        let lambda1 = (trace - discriminant) / 2.0;
        let lambda2 = (trace + discriminant) / 2.0;
        let mut eigenvalues = vec![lambda1, lambda2];
        eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        let mut eigenvectors = Array2::zeros((2, 2));
        if b.abs() > 1e-12 {
            // From (a - lambda) v1 + b v2 = 0: v = (1, (lambda - a) / b), normalized.
            let v1_1 = 1.0;
            let v1_2 = (eigenvalues[0] - a) / b;
            let norm1 = (v1_1 * v1_1 + v1_2 * v1_2).sqrt();
            eigenvectors[[0, 0]] = v1_1 / norm1;
            eigenvectors[[1, 0]] = v1_2 / norm1;
        } else {
            // Already diagonal: standard basis vectors.
            eigenvectors[[0, 0]] = 1.0;
            eigenvectors[[1, 0]] = 0.0;
        }
        // Second eigenvector: rotate the first by 90 degrees (orthonormal).
        eigenvectors[[0, 1]] = -eigenvectors[[1, 0]];
        eigenvectors[[1, 1]] = eigenvectors[[0, 0]];
        return Ok((eigenvalues, eigenvectors));
    }
    // Diagonal approximation for n > 2.
    // Fix: keep eigenvectors paired with the sorted eigenvalues — the original
    // sorted the eigenvalues but returned an unpermuted identity matrix.
    let mut order: Vec<usize> = (0..n).collect();
    order.sort_by(|&a, &b| {
        matrix[[a, a]]
            .partial_cmp(&matrix[[b, b]])
            .unwrap_or(std::cmp::Ordering::Equal)
    });
    let mut eigenvalues = Vec::with_capacity(n);
    let mut eigenvectors = Array2::zeros((n, n));
    for (col, &idx) in order.iter().enumerate() {
        eigenvalues.push(matrix[[idx, idx]]);
        eigenvectors[[idx, col]] = 1.0;
    }
    Ok((eigenvalues, eigenvectors))
}
/// Which graph Laplacian variant to build.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LaplacianType {
/// Combinatorial Laplacian: L = D - A.
Standard,
/// Symmetric normalized Laplacian: L = I - D^{-1/2} A D^{-1/2}.
Normalized,
/// Random-walk Laplacian: L = I - D^{-1} A.
RandomWalk,
}
/// Builds the requested Laplacian matrix of an undirected graph.
///
/// Isolated nodes (degree 0) contribute nothing through the adjacency term;
/// the normalized variants leave a bare 1.0 on their diagonal entries.
///
/// # Errors
/// Returns `GraphError::InvalidGraph` for an empty graph.
#[allow(dead_code)]
pub fn laplacian<N, E, Ix>(
    graph: &Graph<N, E, Ix>,
    laplacian_type: LaplacianType,
) -> Result<Array2<f64>>
where
    N: Node + std::fmt::Debug,
    E: EdgeWeight
        + scirs2_core::numeric::Zero
        + scirs2_core::numeric::One
        + PartialOrd
        + Into<f64>
        + std::marker::Copy,
    Ix: petgraph::graph::IndexType,
{
    let n = graph.node_count();
    if n == 0 {
        return Err(GraphError::InvalidGraph("Empty graph".to_string()));
    }
    let adj_mat = graph.adjacency_matrix();
    // Promote edge weights to f64 once, up front.
    let adj_f64: Array2<f64> = Array2::from_shape_fn((n, n), |(i, j)| adj_mat[[i, j]].into());
    let degrees = graph.degree_vector();
    let result = match laplacian_type {
        LaplacianType::Standard => {
            // L = D - A.
            Array2::from_shape_fn((n, n), |(i, j)| {
                let diag = if i == j { degrees[i] as f64 } else { 0.0 };
                diag - adj_f64[[i, j]]
            })
        }
        LaplacianType::Normalized => {
            // L = I - D^{-1/2} A D^{-1/2}; isolated nodes map to 0 scaling.
            let d_inv_sqrt: Vec<f64> = degrees
                .iter()
                .map(|&d| {
                    let d = d as f64;
                    if d > 0.0 {
                        1.0 / d.sqrt()
                    } else {
                        0.0
                    }
                })
                .collect();
            Array2::from_shape_fn((n, n), |(i, j)| {
                let off = -d_inv_sqrt[i] * adj_f64[[i, j]] * d_inv_sqrt[j];
                if i == j {
                    off + 1.0
                } else {
                    off
                }
            })
        }
        LaplacianType::RandomWalk => {
            // L = I - D^{-1} A; rows of isolated nodes keep zero off-diagonals.
            Array2::from_shape_fn((n, n), |(i, j)| {
                let degree = degrees[i] as f64;
                let off = if degree > 0.0 {
                    -adj_f64[[i, j]] / degree
                } else {
                    0.0
                };
                if i == j {
                    off + 1.0
                } else {
                    off
                }
            })
        }
    };
    Ok(result)
}
/// Builds the requested Laplacian matrix of a directed graph, using
/// out-degrees for the degree terms.
///
/// Nodes with zero out-degree contribute nothing through the adjacency term;
/// the normalized variants leave a bare 1.0 on their diagonal entries.
///
/// # Errors
/// Returns `GraphError::InvalidGraph` for an empty graph.
#[allow(dead_code)]
pub fn laplacian_digraph<N, E, Ix>(
    graph: &DiGraph<N, E, Ix>,
    laplacian_type: LaplacianType,
) -> Result<Array2<f64>>
where
    N: Node + std::fmt::Debug,
    E: EdgeWeight
        + scirs2_core::numeric::Zero
        + scirs2_core::numeric::One
        + PartialOrd
        + Into<f64>
        + std::marker::Copy,
    Ix: petgraph::graph::IndexType,
{
    let n = graph.node_count();
    if n == 0 {
        return Err(GraphError::InvalidGraph("Empty graph".to_string()));
    }
    // The digraph adjacency matrix already holds f64 entries; copy directly.
    let adj_mat = graph.adjacency_matrix();
    let adj_f64: Array2<f64> = Array2::from_shape_fn((n, n), |(i, j)| adj_mat[[i, j]]);
    let degrees = graph.out_degree_vector();
    let result = match laplacian_type {
        LaplacianType::Standard => {
            // L = D_out - A.
            Array2::from_shape_fn((n, n), |(i, j)| {
                let diag = if i == j { degrees[i] as f64 } else { 0.0 };
                diag - adj_f64[[i, j]]
            })
        }
        LaplacianType::Normalized => {
            // L = I - D_out^{-1/2} A D_out^{-1/2}.
            let d_inv_sqrt: Vec<f64> = degrees
                .iter()
                .map(|&d| {
                    let d = d as f64;
                    if d > 0.0 {
                        1.0 / d.sqrt()
                    } else {
                        0.0
                    }
                })
                .collect();
            Array2::from_shape_fn((n, n), |(i, j)| {
                let off = -d_inv_sqrt[i] * adj_f64[[i, j]] * d_inv_sqrt[j];
                if i == j {
                    off + 1.0
                } else {
                    off
                }
            })
        }
        LaplacianType::RandomWalk => {
            // L = I - D_out^{-1} A.
            Array2::from_shape_fn((n, n), |(i, j)| {
                let degree = degrees[i] as f64;
                let off = if degree > 0.0 {
                    -adj_f64[[i, j]] / degree
                } else {
                    0.0
                };
                if i == j {
                    off + 1.0
                } else {
                    off
                }
            })
        }
    };
    Ok(result)
}
/// Computes the algebraic connectivity (Fiedler value): the second-smallest
/// eigenvalue of the chosen Laplacian.
///
/// # Errors
/// Fails for graphs with fewer than two nodes or when the eigensolver fails.
#[allow(dead_code)]
pub fn algebraic_connectivity<N, E, Ix>(
    graph: &Graph<N, E, Ix>,
    laplacian_type: LaplacianType,
) -> Result<f64>
where
    N: Node + std::fmt::Debug,
    E: EdgeWeight
        + scirs2_core::numeric::Zero
        + scirs2_core::numeric::One
        + PartialOrd
        + Into<f64>
        + std::marker::Copy,
    Ix: petgraph::graph::IndexType,
{
    if graph.node_count() <= 1 {
        return Err(GraphError::InvalidGraph(
            "Algebraic connectivity is undefined for graphs with 0 or 1 nodes".to_string(),
        ));
    }
    let lap_matrix = laplacian(graph, laplacian_type)?;
    // The two smallest Laplacian eigenvalues: the first is (near) zero; the
    // second is the Fiedler value.
    let (evals, _evecs) =
        compute_smallest_eigenvalues(&lap_matrix, 2).map_err(|e| GraphError::LinAlgError {
            operation: "eigenvalue_computation".to_string(),
            details: e,
        })?;
    Ok(evals[1])
}
/// Estimates the spectral radius (largest adjacency eigenvalue) via power
/// iteration with a Rayleigh-quotient convergence test.
///
/// Runs at most 100 iterations with tolerance 1e-10; returns the last
/// Rayleigh-quotient estimate if the loop exhausts without converging.
///
/// # Errors
/// Returns `GraphError::InvalidGraph` for an empty graph.
#[allow(dead_code)]
pub fn spectral_radius<N, E, Ix>(graph: &Graph<N, E, Ix>) -> Result<f64>
where
    N: Node + std::fmt::Debug,
    E: EdgeWeight
        + scirs2_core::numeric::Zero
        + scirs2_core::numeric::One
        + PartialOrd
        + Into<f64>
        + std::marker::Copy,
    Ix: petgraph::graph::IndexType,
{
    let n = graph.node_count();
    if n == 0 {
        // Fix: message previously read "Empty _graph" (a leaked
        // underscore-prefixed parameter name).
        return Err(GraphError::InvalidGraph("Empty graph".to_string()));
    }
    let adj_mat = graph.adjacency_matrix();
    let mut adj_f64 = Array2::<f64>::zeros((n, n));
    for i in 0..n {
        for j in 0..n {
            adj_f64[[i, j]] = adj_mat[[i, j]].into();
        }
    }
    // Power iteration starting from the all-ones vector.
    let mut v = Array1::<f64>::ones(n);
    let mut lambda = 0.0;
    let max_iter = 100;
    let tolerance = 1e-10;
    for _ in 0..max_iter {
        // v_new = A * v.
        let mut v_new = Array1::<f64>::zeros(n);
        for i in 0..n {
            for j in 0..n {
                v_new[i] += adj_f64[[i, j]] * v[j];
            }
        }
        let norm: f64 = v_new.iter().map(|x| x * x).sum::<f64>().sqrt();
        if norm < tolerance {
            // A * v vanished (e.g. edgeless graph): keep the last estimate.
            break;
        }
        for i in 0..n {
            v_new[i] /= norm;
        }
        // Rayleigh quotient: lambda_new = v_new^T A v_new.
        let mut lambda_new = 0.0;
        for i in 0..n {
            let mut av_i = 0.0;
            for j in 0..n {
                av_i += adj_f64[[i, j]] * v_new[j];
            }
            lambda_new += av_i * v_new[i];
        }
        if (lambda_new - lambda).abs() < tolerance {
            return Ok(lambda_new);
        }
        lambda = lambda_new;
        v = v_new;
    }
    Ok(lambda)
}
/// Computes the normalized cut of a two-way partition:
/// `ncut(A, B) = cut(A, B) / vol(A) + cut(A, B) / vol(B)`.
///
/// `partition[i] == true` assigns node `i` to set A. Cross-edge weight is
/// counted once (A -> B direction only); each side's volume is the sum of its
/// rows of the adjacency matrix. Returns `f64::INFINITY` if either volume is
/// zero.
///
/// # Errors
/// Fails on an empty graph, a partition whose length differs from the node
/// count, or a partition leaving one side empty.
#[allow(dead_code)]
pub fn normalized_cut<N, E, Ix>(graph: &Graph<N, E, Ix>, partition: &[bool]) -> Result<f64>
where
    N: Node + std::fmt::Debug,
    E: EdgeWeight
        + scirs2_core::numeric::Zero
        + scirs2_core::numeric::One
        + PartialOrd
        + Into<f64>
        + std::marker::Copy,
    Ix: petgraph::graph::IndexType,
{
    let n = graph.node_count();
    if n == 0 {
        // Fix: message previously read "Empty _graph" (leaked underscore).
        return Err(GraphError::InvalidGraph("Empty graph".to_string()));
    }
    if partition.len() != n {
        // Fix: message previously read "_graph size".
        return Err(GraphError::InvalidGraph(
            "Partition size does not match graph size".to_string(),
        ));
    }
    let count_a = partition.iter().filter(|&&x| x).count();
    let count_b = n - count_a;
    if count_a == 0 || count_b == 0 {
        return Err(GraphError::InvalidGraph(
            "Partition must have nodes in both sets".to_string(),
        ));
    }
    let adj_mat = graph.adjacency_matrix();
    let mut cut_ab = 0.0;
    let mut vol_a = 0.0;
    let mut vol_b = 0.0;
    // (Removed a dead `_nodes` vector that cloned every node unused.)
    for i in 0..n {
        for j in 0..n {
            let weight: f64 = adj_mat[[i, j]].into();
            // Cross edges A -> B only, so each undirected edge counts once.
            if partition[i] && !partition[j] {
                cut_ab += weight;
            }
            // Volume of each side: total weight incident to its rows.
            if partition[i] {
                vol_a += weight;
            } else {
                vol_b += weight;
            }
        }
    }
    let ncut = if vol_a > 0.0 && vol_b > 0.0 {
        cut_ab / vol_a + cut_ab / vol_b
    } else {
        f64::INFINITY
    };
    Ok(ncut)
}
/// Spectral clustering of graph nodes into `n_clusters` groups.
///
/// Builds the requested Laplacian and computes its `n_clusters` smallest
/// eigenpairs for the spectral embedding.
///
/// NOTE(review): the embedding is currently discarded and labels are drawn
/// uniformly at random — this is a placeholder pending a k-means step on the
/// eigenvector rows; confirm before relying on the label quality.
///
/// # Errors
/// Fails on an empty graph, `n_clusters == 0`, `n_clusters > n`, or an
/// eigensolver failure.
#[allow(dead_code)]
pub fn spectral_clustering<N, E, Ix>(
    graph: &Graph<N, E, Ix>,
    n_clusters: usize,
    laplacian_type: LaplacianType,
) -> Result<Vec<usize>>
where
    N: Node + std::fmt::Debug,
    E: EdgeWeight
        + scirs2_core::numeric::Zero
        + scirs2_core::numeric::One
        + PartialOrd
        + Into<f64>
        + std::marker::Copy,
    Ix: petgraph::graph::IndexType,
{
    let n = graph.node_count();
    if n == 0 {
        return Err(GraphError::InvalidGraph("Empty graph".to_string()));
    }
    if n_clusters == 0 {
        // Fix: messages previously leaked the `_clusters` parameter name.
        return Err(GraphError::InvalidGraph(
            "Number of clusters must be positive".to_string(),
        ));
    }
    if n_clusters > n {
        return Err(GraphError::InvalidGraph(
            "Number of clusters cannot exceed number of nodes".to_string(),
        ));
    }
    let laplacian_matrix = laplacian(graph, laplacian_type)?;
    let _eigenvalues_eigenvectors = compute_smallest_eigenvalues(&laplacian_matrix, n_clusters)
        .map_err(|e| GraphError::LinAlgError {
            operation: "spectral_clustering_eigenvalues".to_string(),
            details: e,
        })?;
    // Placeholder assignment: one uniformly random label per node.
    let mut labels = Vec::with_capacity(n);
    let mut rng = scirs2_core::random::rng();
    for _ in 0..n {
        labels.push(rng.random_range(0..n_clusters));
    }
    Ok(labels)
}
/// Parallel variant of `spectral_clustering`: parallel Laplacian assembly,
/// parallel eigen-estimation, and parallel (random, placeholder) labeling.
///
/// NOTE(review): like the serial version, the spectral embedding is computed
/// and then discarded; labels are random — confirm before relying on them.
///
/// # Errors
/// Fails on an empty graph, `n_clusters == 0`, `n_clusters > n`, or an
/// eigensolver failure.
#[cfg(feature = "parallel")]
#[allow(dead_code)]
pub fn parallel_spectral_clustering<N, E, Ix>(
    graph: &Graph<N, E, Ix>,
    n_clusters: usize,
    laplacian_type: LaplacianType,
) -> Result<Vec<usize>>
where
    N: Node + std::fmt::Debug,
    E: EdgeWeight
        + scirs2_core::numeric::Zero
        + scirs2_core::numeric::One
        + PartialOrd
        + Into<f64>
        + std::marker::Copy,
    Ix: petgraph::graph::IndexType,
{
    let n = graph.node_count();
    if n == 0 {
        return Err(GraphError::InvalidGraph("Empty graph".to_string()));
    }
    if n_clusters == 0 {
        // Fix: messages previously leaked the `_clusters` parameter name.
        return Err(GraphError::InvalidGraph(
            "Number of clusters must be positive".to_string(),
        ));
    }
    if n_clusters > n {
        return Err(GraphError::InvalidGraph(
            "Number of clusters cannot exceed number of nodes".to_string(),
        ));
    }
    let laplacian_matrix = parallel_laplacian(graph, laplacian_type)?;
    let _eigenvalues_eigenvectors =
        parallel_compute_smallest_eigenvalues(&laplacian_matrix, n_clusters).map_err(|e| {
            GraphError::LinAlgError {
                operation: "parallel_spectral_clustering_eigenvalues".to_string(),
                details: e,
            }
        })?;
    // Placeholder assignment: random labels, generated in parallel.
    let labels = parallel_random_clustering(n, n_clusters);
    Ok(labels)
}
/// Parallel construction of a Laplacian matrix: each row is filled by a
/// separate rayon task. Mirrors `laplacian` for undirected graphs.
///
/// NOTE(review): unlike the serial `laplacian`, the Standard branch writes
/// `degrees[i]` straight onto the diagonal without subtracting any self-loop
/// weight `A[i][i]` — the two differ for graphs with self-loops; confirm
/// intended semantics.
///
/// # Errors
/// Returns `GraphError::InvalidGraph` for an empty graph.
#[cfg(feature = "parallel")]
#[allow(dead_code)]
pub fn parallel_laplacian<N, E, Ix>(
    graph: &Graph<N, E, Ix>,
    laplacian_type: LaplacianType,
) -> Result<Array2<f64>>
where
    N: Node + std::fmt::Debug,
    E: EdgeWeight
        + scirs2_core::numeric::Zero
        + scirs2_core::numeric::One
        + PartialOrd
        + Into<f64>
        + std::marker::Copy,
    Ix: petgraph::graph::IndexType,
{
    let n = graph.node_count();
    if n == 0 {
        return Err(GraphError::InvalidGraph("Empty graph".to_string()));
    }
    let adj_mat = graph.adjacency_matrix();
    // Promote edge weights to f64, one row per parallel task.
    let mut adj_f64 = Array2::<f64>::zeros((n, n));
    adj_f64
        .axis_iter_mut(scirs2_core::ndarray::Axis(0))
        .into_par_iter()
        .enumerate()
        .for_each(|(i, mut row)| {
            for j in 0..n {
                row[j] = adj_mat[[i, j]].into();
            }
        });
    let degrees = graph.degree_vector();
    match laplacian_type {
        LaplacianType::Standard => {
            // L = D - A, filled row-by-row in parallel.
            let mut laplacian = Array2::<f64>::zeros((n, n));
            laplacian
                .axis_iter_mut(scirs2_core::ndarray::Axis(0))
                .into_par_iter()
                .enumerate()
                .for_each(|(i, mut row)| {
                    row[i] = degrees[i] as f64;
                    for j in 0..n {
                        if i != j {
                            row[j] = -adj_f64[[i, j]];
                        }
                    }
                });
            Ok(laplacian)
        }
        LaplacianType::Normalized => {
            // L = I - D^{-1/2} A D^{-1/2}; isolated nodes scale to 0.
            let mut normalized = Array2::<f64>::zeros((n, n));
            // Fix: the closure parameter had been corrupted to `|°ree|` — a
            // mangled `|&degree|` whose `&deg` turned into the `°` HTML
            // entity — and did not compile.
            let d_inv_sqrt: Vec<f64> = degrees
                .par_iter()
                .map(|&degree| {
                    let deg_f64 = degree as f64;
                    if deg_f64 > 0.0 {
                        1.0 / deg_f64.sqrt()
                    } else {
                        0.0
                    }
                })
                .collect();
            normalized
                .axis_iter_mut(scirs2_core::ndarray::Axis(0))
                .into_par_iter()
                .enumerate()
                .for_each(|(i, mut row)| {
                    for j in 0..n {
                        if i == j {
                            row[j] = 1.0 - d_inv_sqrt[i] * adj_f64[[i, j]] * d_inv_sqrt[j];
                        } else {
                            row[j] = -d_inv_sqrt[i] * adj_f64[[i, j]] * d_inv_sqrt[j];
                        }
                    }
                });
            Ok(normalized)
        }
        LaplacianType::RandomWalk => {
            // L = I - D^{-1} A; rows of isolated nodes keep 1.0 diagonal only.
            let mut random_walk = Array2::<f64>::zeros((n, n));
            random_walk
                .axis_iter_mut(scirs2_core::ndarray::Axis(0))
                .into_par_iter()
                .enumerate()
                .for_each(|(i, mut row)| {
                    let degree = degrees[i] as f64;
                    for j in 0..n {
                        if i == j {
                            if degree > 0.0 {
                                row[j] = 1.0 - adj_f64[[i, j]] / degree;
                            } else {
                                row[j] = 1.0;
                            }
                        } else if degree > 0.0 {
                            row[j] = -adj_f64[[i, j]] / degree;
                        } else {
                            row[j] = 0.0;
                        }
                    }
                });
            Ok(random_walk)
        }
    }
}
/// Parallel counterpart of `compute_smallest_eigenvalues`: validates `k`,
/// then delegates to the parallel Lanczos driver with fixed parameters
/// (tolerance 1e-10, at most 100 iterations).
///
/// # Errors
/// Fails when `k` exceeds the matrix dimension or the solver fails.
#[cfg(feature = "parallel")]
#[allow(dead_code)]
fn parallel_compute_smallest_eigenvalues(
    matrix: &Array2<f64>,
    k: usize,
) -> std::result::Result<(Vec<f64>, Array2<f64>), String> {
    let n = matrix.shape()[0];
    if k > n {
        return Err("k cannot be larger than matrix size".to_string());
    }
    if k == 0 {
        // Nothing requested: empty spectrum, zero eigenvector columns.
        Ok((Vec::new(), Array2::zeros((n, 0))))
    } else {
        parallel_lanczos_eigenvalues(matrix, k, 1e-10, 100)
    }
}
/// Parallel Lanczos driver: seeds the first eigenpair with the Laplacian
/// kernel (eigenvalue 0, constant unit vector), then obtains the remaining
/// `k - 1` eigenpairs via deflated iterations.
///
/// # Errors
/// Propagates failures from `parallel_deflated_lanczos_iteration`.
#[cfg(feature = "parallel")]
#[allow(dead_code)]
fn parallel_lanczos_eigenvalues(
    matrix: &Array2<f64>,
    k: usize,
    tolerance: f64,
    max_iterations: usize,
) -> std::result::Result<(Vec<f64>, Array2<f64>), String> {
    let n = matrix.shape()[0];
    if n == 0 {
        return Ok((vec![], Array2::zeros((0, 0))));
    }
    let mut eigenvalues = Vec::with_capacity(k);
    let mut eigenvectors = Array2::zeros((n, k));
    // Fix: seed the first eigenpair only when k > 0. The original pushed the
    // 0.0 eigenvalue unconditionally, so k == 0 returned one eigenvalue but
    // zero eigenvector columns.
    if k > 0 {
        eigenvalues.push(0.0);
        let val = 1.0 / (n as f64).sqrt();
        eigenvectors.column_mut(0).fill(val);
    }
    for eig_idx in 1..k {
        // Deflate against all previously found eigenvectors.
        let (eval, evec) = parallel_deflated_lanczos_iteration(
            matrix,
            &eigenvectors.slice(s![.., 0..eig_idx]).to_owned(),
            tolerance,
            max_iterations,
        )?;
        eigenvalues.push(eval);
        eigenvectors.column_mut(eig_idx).assign(&evec);
    }
    Ok((eigenvalues, eigenvectors))
}
/// Simplified parallel deflated iteration: builds a deflated, normalized
/// random start vector but does not run the full Lanczos recurrence.
///
/// NOTE(review): the returned eigenvalue is a hard-coded 0.1 placeholder, not
/// a converged Ritz value — confirm before relying on it.
///
/// # Errors
/// Fails when deflation annihilates the random start vector.
#[cfg(feature = "parallel")]
#[allow(dead_code)]
fn parallel_deflated_lanczos_iteration(
matrix: &Array2<f64>,
prev_eigenvectors: &Array2<f64>,
tolerance: f64,
_max_iterations: usize,
) -> std::result::Result<(f64, Array1<f64>), String> {
let n = matrix.shape()[0];
let mut rng = scirs2_core::random::rng();
let mut v: Array1<f64> = Array1::from_shape_fn(n, |_| rng.random::<f64>() - 0.5);
// Project out previously found eigenvectors (deflation).
for j in 0..prev_eigenvectors.ncols() {
let prev_vec = prev_eigenvectors.column(j);
let proj = parallel_dot_product(&v.view(), &prev_vec);
parallel_axpy(-proj, &prev_vec, &mut v.view_mut());
}
let norm = parallel_norm(&v.view());
if norm < tolerance {
return Err("Failed to generate suitable starting vector".to_string());
}
v /= norm;
// Placeholder eigenvalue estimate (see NOTE above).
let eigenvalue = 0.1;
Ok((eigenvalue, v))
}
/// Dot product helper for the parallel path (currently delegates to the SIMD
/// implementation; not itself multi-threaded).
#[cfg(feature = "parallel")]
#[allow(dead_code)]
fn parallel_dot_product(a: &ArrayView1<f64>, b: &ArrayView1<f64>) -> f64 {
f64::simd_dot(a, b)
}
/// L2-norm helper for the parallel path (currently delegates to the SIMD
/// implementation; not itself multi-threaded).
#[cfg(feature = "parallel")]
#[allow(dead_code)]
fn parallel_norm(vector: &ArrayView1<f64>) -> f64 {
f64::simd_norm(vector)
}
/// AXPY (`y += alpha * x`) helper for the parallel path (currently delegates
/// to the SIMD implementation; not itself multi-threaded).
#[cfg(feature = "parallel")]
#[allow(dead_code)]
fn parallel_axpy(alpha: f64, x: &ArrayView1<f64>, y: &mut ArrayViewMut1<f64>) {
simd_spectral::simd_axpy(alpha, x, y);
}
/// Assigns each of `n` items a uniformly random cluster label in `0..k`,
/// computed in parallel with one RNG instance per item.
#[cfg(feature = "parallel")]
#[allow(dead_code)]
fn parallel_random_clustering(n: usize, k: usize) -> Vec<usize> {
    (0..n)
        .into_par_iter()
        .map(|_| {
            // Per-task RNG: avoids sharing mutable RNG state across threads.
            let mut local_rng = scirs2_core::random::rng();
            local_rng.random_range(0..k)
        })
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array2;

    /// 4-cycle: standard Laplacian must match its closed form; the normalized
    /// Laplacian must have unit diagonal and be symmetric.
    #[test]
    fn test_laplacian_matrix() {
        let mut graph: Graph<i32, f64> = Graph::new();
        graph.add_edge(0, 1, 1.0).expect("Operation failed");
        graph.add_edge(1, 2, 1.0).expect("Operation failed");
        graph.add_edge(2, 3, 1.0).expect("Operation failed");
        graph.add_edge(3, 0, 1.0).expect("Operation failed");
        let lap = laplacian(&graph, LaplacianType::Standard).expect("Operation failed");
        let expected = Array2::from_shape_vec(
            (4, 4),
            vec![
                2.0, -1.0, 0.0, -1.0, -1.0, 2.0, -1.0, 0.0, 0.0, -1.0, 2.0, -1.0, -1.0, 0.0, -1.0,
                2.0,
            ],
        )
        .expect("Test: operation failed");
        for i in 0..4 {
            for j in 0..4 {
                assert!((lap[[i, j]] - expected[[i, j]]).abs() < 1e-10);
            }
        }
        let lap_norm = laplacian(&graph, LaplacianType::Normalized).expect("Operation failed");
        // Fix: the original asserts read `x.abs() - 1.0 < 1e-6`, which is
        // vacuously true for any |x| < 1 + 1e-6; the intent is |x - 1| < 1e-6.
        for i in 0..4 {
            assert!((lap_norm[[i, i]] - 1.0).abs() < 1e-6);
        }
        // Normalized Laplacian of an undirected graph must be symmetric.
        for i in 0..4 {
            for j in i + 1..4 {
                assert!((lap_norm[[i, j]] - lap_norm[[j, i]]).abs() < 1e-6);
            }
        }
    }

    /// Path and cycle graphs have known, positive Fiedler values.
    #[test]
    fn test_algebraic_connectivity() {
        let mut path_graph: Graph<i32, f64> = Graph::new();
        path_graph.add_edge(0, 1, 1.0).expect("Operation failed");
        path_graph.add_edge(1, 2, 1.0).expect("Operation failed");
        path_graph.add_edge(2, 3, 1.0).expect("Operation failed");
        let conn =
            algebraic_connectivity(&path_graph, LaplacianType::Standard).expect("Operation failed");
        assert!(
            conn > 0.3 && conn < 1.0,
            "Algebraic connectivity {conn} should be positive and reasonable for path graph"
        );
        let mut cycle_graph: Graph<i32, f64> = Graph::new();
        cycle_graph.add_edge(0, 1, 1.0).expect("Operation failed");
        cycle_graph.add_edge(1, 2, 1.0).expect("Operation failed");
        cycle_graph.add_edge(2, 3, 1.0).expect("Operation failed");
        cycle_graph.add_edge(3, 0, 1.0).expect("Operation failed");
        let conn = algebraic_connectivity(&cycle_graph, LaplacianType::Standard)
            .expect("Operation failed");
        assert!(
            conn > 0.5,
            "Algebraic connectivity {conn} should be positive and reasonable for cycle graph"
        );
    }

    /// Spectral radius of K3 is exactly 2; a 3-leaf star has radius sqrt(3).
    #[test]
    fn test_spectral_radius() {
        let mut graph: Graph<i32, f64> = Graph::new();
        graph.add_edge(0, 1, 1.0).expect("Operation failed");
        graph.add_edge(1, 2, 1.0).expect("Operation failed");
        graph.add_edge(2, 0, 1.0).expect("Operation failed");
        let radius = spectral_radius(&graph).expect("Operation failed");
        assert!((radius - 2.0).abs() < 0.1);
        let mut star: Graph<i32, f64> = Graph::new();
        star.add_edge(0, 1, 1.0).expect("Operation failed");
        star.add_edge(0, 2, 1.0).expect("Operation failed");
        star.add_edge(0, 3, 1.0).expect("Operation failed");
        let radius_star = spectral_radius(&star).expect("Operation failed");
        assert!(radius_star > 1.5 && radius_star < 2.0);
    }

    /// Two triangles joined by one bridge: the natural split has a small
    /// normalized cut, and any other split must cut more.
    #[test]
    fn test_normalized_cut() {
        let mut graph: Graph<i32, f64> = Graph::new();
        graph.add_edge(0, 1, 1.0).expect("Operation failed");
        graph.add_edge(1, 2, 1.0).expect("Operation failed");
        graph.add_edge(2, 0, 1.0).expect("Operation failed");
        graph.add_edge(3, 4, 1.0).expect("Operation failed");
        graph.add_edge(4, 5, 1.0).expect("Operation failed");
        graph.add_edge(5, 3, 1.0).expect("Operation failed");
        graph.add_edge(2, 3, 1.0).expect("Operation failed");
        let partition = vec![true, true, true, false, false, false];
        let ncut = normalized_cut(&graph, &partition).expect("Operation failed");
        assert!(ncut < 0.5);
        let bad_partition = vec![true, true, false, false, false, false];
        let bad_ncut = normalized_cut(&graph, &bad_partition).expect("Operation failed");
        assert!(bad_ncut > ncut);
    }
}