use ndarray::{Array1, Array2, Axis};
use thiserror::Error;
#[cfg(feature = "sparse")]
pub mod sparse;
#[cfg(feature = "faer")]
use faer::{
dyn_stack::{MemBuffer, MemStack},
matrix_free::eigen::{partial_eigen_scratch, partial_self_adjoint_eigen, PartialEigenParams},
Col, Mat, Par, Side,
};
/// Errors produced while building Laplacians or computing spectral embeddings.
#[derive(Debug, Error)]
pub enum Error {
    /// The input adjacency matrix is `rows x cols` with `rows != cols`.
    #[error("matrix is not square: {0} x {1}")]
    NotSquare(usize, usize),
    /// Edge weights must be non-negative.
    /// NOTE(review): no function in this file currently returns this
    /// variant — confirm it is produced elsewhere in the crate.
    #[error("matrix has negative entries")]
    NegativeEntries,
    /// Node `{0}` has zero degree, making degree-normalized operators undefined.
    #[error("zero degree node at index {0}")]
    ZeroDegree(usize),
    /// The graph has more than one connected component.
    /// NOTE(review): not returned by any function visible in this file.
    #[error("graph is disconnected (multiple zero eigenvalues)")]
    Disconnected,
    /// Requested embedding dimension `k` is too large for an `n`-node graph
    /// (the check also accounts for a skipped trivial eigenvector).
    #[error("invalid embedding dimension k={k} for n={n}")]
    InvalidEmbeddingDim { k: usize, n: usize },
    /// An error reported by an external linear-algebra backend (e.g. faer).
    #[error("backend error: {0}")]
    Backend(String),
    /// Directed-graph operation was handed symmetric input.
    /// NOTE(review): not returned by any function visible in this file.
    #[error("asymmetric laplacian requires directed graph")]
    AsymmetricRequired,
}
/// Crate-local result alias used by all fallible APIs in this module.
pub type Result<T> = std::result::Result<T, Error>;
/// Laplacian for a directed graph, built by symmetrizing the random-walk
/// transition matrix: `I - (P + Pᵀ) / 2`.
///
/// # Errors
/// Returns [`Error::NotSquare`] if `adj` is not square.
pub fn directed_laplacian(adj: &Array2<f64>) -> Result<Array2<f64>> {
    let n = ensure_square(adj)?;
    let p = transition_matrix(adj);
    let symmetrized = (&p + &p.t()).mapv(|v| v * 0.5);
    Ok(Array2::eye(n) - &symmetrized)
}
fn ensure_square(a: &Array2<f64>) -> Result<usize> {
let (n, m) = a.dim();
if n != m {
return Err(Error::NotSquare(n, m));
}
Ok(n)
}
pub fn degree_matrix(adj: &Array2<f64>) -> Array2<f64> {
let n = adj.nrows();
let mut d = Array2::zeros((n, n));
for i in 0..n {
let degree: f64 = adj.row(i).sum();
d[[i, i]] = degree;
}
d
}
pub fn degree_vector(adj: &Array2<f64>) -> Array1<f64> {
adj.sum_axis(Axis(1))
}
/// Combinatorial (unnormalized) Laplacian `L = D - A`.
pub fn adjacency_to_laplacian(adj: &Array2<f64>) -> Array2<f64> {
    let mut lap = degree_matrix(adj);
    lap -= adj;
    lap
}
/// Symmetric normalized Laplacian `L_sym = I - D^{-1/2} A D^{-1/2}`.
///
/// Zero-degree nodes get a scaling factor of 0 (their rows/columns stay at
/// the identity); use [`normalized_laplacian_checked`] to reject them.
/// Only strictly positive weights contribute off-diagonal mass.
pub fn normalized_laplacian(adj: &Array2<f64>) -> Array2<f64> {
    let n = adj.nrows();
    let scale: Array1<f64> =
        degree_vector(adj).mapv(|deg| if deg > 0.0 { deg.sqrt().recip() } else { 0.0 });
    let mut l_sym = Array2::eye(n);
    for ((i, j), &w) in adj.indexed_iter() {
        if w > 0.0 {
            l_sym[[i, j]] -= scale[i] * w * scale[j];
        }
    }
    l_sym
}
pub fn normalized_laplacian_checked(adj: &Array2<f64>) -> Result<Array2<f64>> {
let degrees = degree_vector(adj);
if let Some((idx, _)) = degrees.iter().enumerate().find(|(_, &d)| d <= 0.0) {
return Err(Error::ZeroDegree(idx));
}
Ok(normalized_laplacian(adj))
}
/// Random-walk Laplacian `L_rw = I - D^{-1} A`.
///
/// Rows whose degree is not strictly positive are left as identity rows;
/// only strictly positive weights are subtracted.
pub fn random_walk_laplacian(adj: &Array2<f64>) -> Array2<f64> {
    let n = adj.nrows();
    let degrees = degree_vector(adj);
    let mut l_rw = Array2::eye(n);
    for (i, deg) in degrees.iter().enumerate() {
        if *deg <= 0.0 {
            continue; // keep the identity row for isolated nodes
        }
        for j in 0..n {
            let w = adj[[i, j]];
            if w > 0.0 {
                l_rw[[i, j]] -= w / deg;
            }
        }
    }
    l_rw
}
/// Row-stochastic random-walk transition matrix `P = D^{-1} A`.
///
/// Rows with non-positive degree become all zeros (the walk has nowhere
/// to go from an isolated node).
pub fn transition_matrix(adj: &Array2<f64>) -> Array2<f64> {
    let degrees = degree_vector(adj);
    let mut p = adj.clone();
    for (i, mut row) in p.rows_mut().into_iter().enumerate() {
        if degrees[i] > 0.0 {
            row.mapv_inplace(|w| w / degrees[i]);
        } else {
            row.fill(0.0);
        }
    }
    p
}
/// Dense Gaussian (RBF) similarity matrix over the rows of `points`:
/// `sim(i, j) = exp(-||x_i - x_j||² / (2σ²))`.
///
/// The result is symmetric with a unit diagonal. Pairs at exactly zero
/// distance are assigned similarity `1.0` directly; since
/// `exp(-0.0 / c) == 1.0` for any `c > 0`, this is identical to the plain
/// formula for `sigma > 0`, and it avoids the `NaN` that `-0.0 / 0.0`
/// would otherwise put on the diagonal when `sigma == 0`.
pub fn gaussian_similarity(points: &Array2<f64>, sigma: f64) -> Array2<f64> {
    let n = points.nrows();
    let sigma_sq_2 = 2.0 * sigma * sigma;
    let mut adj = Array2::zeros((n, n));
    for i in 0..n {
        // Upper triangle only; mirror into the lower triangle.
        for j in i..n {
            // Squared Euclidean distance between rows i and j.
            let mut dist_sq = 0.0;
            for k in 0..points.ncols() {
                let diff = points[[i, k]] - points[[j, k]];
                dist_sq += diff * diff;
            }
            let sim = if dist_sq == 0.0 {
                1.0 // exact; also guards the degenerate sigma == 0 case
            } else {
                (-dist_sq / sigma_sq_2).exp()
            };
            adj[[i, j]] = sim;
            adj[[j, i]] = sim;
        }
    }
    adj
}
/// Symmetric k-nearest-neighbor graph from a pairwise distance matrix.
///
/// Each node first selects its `k` closest other nodes (ties broken by
/// index order via stable sort; NaN distances ordered by `total_cmp`);
/// the result is then symmetrized with an OR rule: an edge exists if
/// either endpoint selected the other. Edge weights are 1.0.
pub fn knn_graph(distances: &Array2<f64>, k: usize) -> Array2<f64> {
    let n = distances.nrows();
    let mut adj = Array2::zeros((n, n));
    // Pass 1: directed k-NN selection per node.
    for i in 0..n {
        let mut neighbors: Vec<usize> = (0..n).filter(|&j| j != i).collect();
        neighbors.sort_by(|&a, &b| distances[[i, a]].total_cmp(&distances[[i, b]]));
        neighbors.truncate(k);
        for j in neighbors {
            adj[[i, j]] = 1.0;
        }
    }
    // Pass 2: symmetrize (union of the directed edge sets).
    for i in 0..n {
        for j in (i + 1)..n {
            if adj[[i, j]] > 0.0 || adj[[j, i]] > 0.0 {
                adj[[i, j]] = 1.0;
                adj[[j, i]] = 1.0;
            }
        }
    }
    adj
}
/// Unweighted epsilon-neighborhood graph: edge weight 1.0 wherever the
/// distance is strictly between 0 and `epsilon` (self-distances of 0 and
/// NaN entries produce no edge), 0.0 elsewhere.
pub fn epsilon_graph(distances: &Array2<f64>, epsilon: f64) -> Array2<f64> {
    distances.mapv(|d| {
        let within = d > 0.0 && d < epsilon;
        if within { 1.0 } else { 0.0 }
    })
}
/// Tuning parameters for [`spectral_embedding`].
#[derive(Debug, Clone)]
pub struct SpectralEmbeddingConfig {
    /// Number of subspace-iteration steps for the ndarray fallback path.
    pub iters: usize,
    /// Problems with `n <= jacobi_max_n` use the dense Jacobi eigensolver.
    pub jacobi_max_n: usize,
    /// Off-diagonal convergence threshold for the Jacobi solver (also
    /// passed as the tolerance to the faer Krylov-Schur path).
    pub jacobi_tol: f64,
    /// Hard cap on Jacobi rotations before the solver gives up.
    pub jacobi_max_sweeps: usize,
    /// Skip the first (smallest-eigenvalue) eigenvector of the Laplacian.
    pub skip_first: bool,
    /// L2-normalize each row of the returned embedding.
    pub row_normalize: bool,
}
/// Defaults: Jacobi for graphs up to 64 nodes, 50 fallback iterations,
/// skip the trivial eigenvector, and row-normalize the embedding.
impl Default for SpectralEmbeddingConfig {
    fn default() -> Self {
        Self {
            iters: 50,
            jacobi_max_n: 64,
            jacobi_tol: 1e-10,
            jacobi_max_sweeps: 50_000,
            skip_first: true,
            row_normalize: true,
        }
    }
}
/// Full eigendecomposition of a symmetric matrix by the classical Jacobi
/// rotation method.
///
/// Returns `(eigenvalues, eigenvectors)` where eigenvalue `i` corresponds
/// to column `i` of the eigenvector matrix; eigenvalues are NOT sorted.
/// Each iteration zeroes the single largest off-diagonal element and stops
/// once that element is `<= tol` or after `max_sweeps` rotations.
///
/// NOTE(review): assumes `a` is symmetric — pivot search only scans the
/// upper triangle; confirm callers always pass symmetric input.
fn jacobi_eigh(a: &Array2<f64>, tol: f64, max_sweeps: usize) -> (Vec<f64>, Array2<f64>) {
    let n = a.nrows();
    // Working copy driven toward diagonal form; v accumulates the rotations
    // and converges to the eigenvector matrix.
    let mut d = a.to_owned();
    let mut v = Array2::<f64>::eye(n);
    for _ in 0..max_sweeps {
        // Classical Jacobi pivot: the largest off-diagonal magnitude.
        let mut p = 0usize;
        let mut q = 1usize;
        let mut max = 0.0f64;
        for i in 0..n {
            for j in (i + 1)..n {
                let val = d[[i, j]].abs();
                if val > max {
                    max = val;
                    p = i;
                    q = j;
                }
            }
        }
        if max <= tol {
            break; // converged: off-diagonal mass below tolerance
        }
        let app = d[[p, p]];
        let aqq = d[[q, q]];
        let apq = d[[p, q]];
        // Numerically stable tan(theta) for the rotation annihilating apq;
        // apq is nonzero here whenever tol >= 0, since max > tol.
        let tau = (aqq - app) / (2.0 * apq);
        let t = if tau >= 0.0 {
            1.0 / (tau + (1.0 + tau * tau).sqrt())
        } else {
            -1.0 / (-tau + (1.0 + tau * tau).sqrt())
        };
        let c = 1.0 / (1.0 + t * t).sqrt();
        let s = t * c;
        // Rotate rows/columns p and q, maintaining symmetry of d explicitly.
        for i in 0..n {
            if i != p && i != q {
                let dip = d[[i, p]];
                let diq = d[[i, q]];
                d[[i, p]] = c * dip - s * diq;
                d[[p, i]] = d[[i, p]];
                d[[i, q]] = s * dip + c * diq;
                d[[q, i]] = d[[i, q]];
            }
        }
        // Update the two pivot diagonal entries and zero the pivot pair.
        let dpp = c * c * app - 2.0 * s * c * apq + s * s * aqq;
        let dqq = s * s * app + 2.0 * s * c * apq + c * c * aqq;
        d[[p, p]] = dpp;
        d[[q, q]] = dqq;
        d[[p, q]] = 0.0;
        d[[q, p]] = 0.0;
        // Accumulate the rotation into the eigenvector matrix.
        for i in 0..n {
            let vip = v[[i, p]];
            let viq = v[[i, q]];
            v[[i, p]] = c * vip - s * viq;
            v[[i, q]] = s * vip + c * viq;
        }
    }
    let eigvals: Vec<f64> = (0..n).map(|i| d[[i, i]]).collect();
    (eigvals, v)
}
/// Spectral embedding of a graph into `k` dimensions using eigenvectors of
/// the symmetric normalized Laplacian.
///
/// Solver selection:
/// * `n <= cfg.jacobi_max_n` — dense classical Jacobi ([`jacobi_eigh`]);
/// * with the `faer` feature — dense faer EVD for `n <= 512`, otherwise a
///   matrix-free Krylov-Schur partial EVD;
/// * otherwise — deterministic subspace iteration on the shifted operator
///   `2I - L`.
///
/// # Errors
/// * [`Error::NotSquare`] if `adj` is not square.
/// * [`Error::InvalidEmbeddingDim`] if `n <= k + start`, where `start` is 1
///   when `cfg.skip_first` is set.
pub fn spectral_embedding(
    adj: &Array2<f64>,
    k: usize,
    cfg: &SpectralEmbeddingConfig,
) -> Result<Array2<f64>> {
    // Scale every row of the embedding to unit L2 norm (zero rows are kept).
    fn row_normalize_in_place(u: &mut Array2<f64>) {
        for mut row in u.rows_mut() {
            let norm = row.iter().map(|x| x * x).sum::<f64>().sqrt();
            if norm > 0.0 {
                row.mapv_inplace(|x| x / norm);
            }
        }
    }
    let n = ensure_square(adj)?;
    if k == 0 {
        return Ok(Array2::zeros((n, 0)));
    }
    let start = if cfg.skip_first { 1 } else { 0 };
    if n <= k + start {
        return Err(Error::InvalidEmbeddingDim { k, n });
    }
    let lap = normalized_laplacian(adj);
    if n <= cfg.jacobi_max_n {
        let (eigvals, eigvecs) = jacobi_eigh(&lap, cfg.jacobi_tol, cfg.jacobi_max_sweeps);
        // Sort eigenvalues ascending and take columns [start, start + k).
        let mut order: Vec<usize> = (0..n).collect();
        order.sort_by(|&i, &j| eigvals[i].total_cmp(&eigvals[j]));
        let mut u = Array2::<f64>::zeros((n, k));
        for (out_col, &eig_idx) in order.iter().skip(start).take(k).enumerate() {
            u.column_mut(out_col).assign(&eigvecs.column(eig_idx));
        }
        if cfg.row_normalize {
            row_normalize_in_place(&mut u);
        }
        return Ok(u);
    }
    #[cfg(feature = "faer")]
    {
        if n <= 512 {
            return spectral_embedding_faer_dense_from_laplacian(&lap, k, cfg);
        }
        spectral_embedding_faer_krylov_schur_from_laplacian(&lap, k, cfg)
    }
    #[cfg(not(feature = "faer"))]
    {
        // Subspace iteration on A = 2I - L rather than I - L. L_sym has its
        // spectrum in [0, 2], so A's spectrum is in [0, 2] and the dominant
        // eigenvectors of A are exactly those of the *smallest* eigenvalues
        // of L. Iterating on I - L would be ambiguous: eigenvalues of L near
        // 2 map to -1, tying in magnitude with the wanted ones near 0
        // mapping to +1. This matches the operator built by the faer
        // Krylov-Schur path.
        let a = Array2::eye(n) * 2.0 - &lap;
        let r = k + start;
        // Deterministic hash-mixed starting block: reproducible results
        // without pulling in an RNG dependency.
        let mut q = Array2::<f64>::zeros((n, r));
        for i in 0..n {
            for j in 0..r {
                q[[i, j]] = ((((i + 1) * 1315423911usize) ^ ((j + 1) * 2654435761usize)) % 10_000)
                    as f64
                    / 10_000.0
                    - 0.5;
            }
        }
        // Classical Gram-Schmidt orthonormalization of the columns of x.
        fn orthonormalize(mut x: Array2<f64>) -> Array2<f64> {
            let r = x.ncols();
            for j in 0..r {
                for i in 0..j {
                    let dot = x.column(i).dot(&x.column(j));
                    let col_i = x.column(i).to_owned();
                    let mut col_j = x.column_mut(j);
                    col_j.scaled_add(-dot, &col_i);
                }
                let norm = x.column(j).dot(&x.column(j)).sqrt();
                if norm > 0.0 {
                    x.column_mut(j).mapv_inplace(|v| v / norm);
                }
            }
            x
        }
        q = orthonormalize(q);
        for _ in 0..cfg.iters {
            let z = a.dot(&q);
            q = orthonormalize(z);
        }
        // Column 0 converges toward the most dominant eigenvector of A,
        // i.e. the trivial (eigenvalue ~0) eigenvector of L; slicing from
        // `start` drops it when skip_first is set.
        let mut u = q.slice(ndarray::s![.., start..(start + k)]).to_owned();
        if cfg.row_normalize {
            row_normalize_in_place(&mut u);
        }
        Ok(u)
    }
}
/// Dense spectral-embedding path backed by faer's self-adjoint EVD.
///
/// `lap` must already be the symmetric normalized Laplacian. Copies out
/// eigenvector columns `[start, start + k)`, where `start` skips the
/// trivial eigenvector when `cfg.skip_first` is set.
///
/// NOTE(review): assumes faer's `U()` returns eigenvectors ordered by
/// ascending eigenvalue — confirm against the faer version in use.
#[cfg(feature = "faer")]
fn spectral_embedding_faer_dense_from_laplacian(
    lap: &Array2<f64>,
    k: usize,
    cfg: &SpectralEmbeddingConfig,
) -> Result<Array2<f64>> {
    let n = ensure_square(lap)?;
    let start = if cfg.skip_first { 1 } else { 0 };
    if n <= k + start {
        return Err(Error::InvalidEmbeddingDim { k, n });
    }
    // Copy the ndarray matrix into a faer Mat element by element.
    let mut m = Mat::<f64>::zeros(n, n);
    for i in 0..n {
        for j in 0..n {
            m[(i, j)] = lap[[i, j]];
        }
    }
    let evd = m
        .self_adjoint_eigen(Side::Lower)
        .map_err(|e| Error::Backend(format!("faer self_adjoint_eigen: {e:?}")))?;
    let eigvecs = evd.U();
    // Extract the k embedding columns following the skipped ones.
    let mut u = Array2::<f64>::zeros((n, k));
    for out_col in 0..k {
        let col = start + out_col;
        for i in 0..n {
            u[[i, out_col]] = eigvecs[(i, col)];
        }
    }
    if cfg.row_normalize {
        // Scale each embedding row to unit L2 norm (zero rows untouched).
        for mut row in u.rows_mut() {
            let norm_sq: f64 = row.iter().map(|x| x * x).sum();
            let norm = norm_sq.sqrt();
            if norm > 0.0 {
                for x in row.iter_mut() {
                    *x /= norm;
                }
            }
        }
    }
    Ok(u)
}
/// Large-`n` spectral-embedding path using faer's matrix-free partial
/// self-adjoint eigensolver (Krylov-Schur style) on the shifted operator
/// `A = 2I - L`, whose dominant eigenpairs correspond to the smallest
/// eigenvalues of `L`.
///
/// NOTE(review): the solver's return value is bound to `_info` and
/// discarded, so a non-converged solve is not surfaced as an error —
/// confirm whether the info struct carries a convergence flag worth
/// checking. Also `cfg.jacobi_tol` is reused here as the Krylov tolerance.
#[cfg(feature = "faer")]
fn spectral_embedding_faer_krylov_schur_from_laplacian(
    lap: &Array2<f64>,
    k: usize,
    cfg: &SpectralEmbeddingConfig,
) -> Result<Array2<f64>> {
    let n = ensure_square(lap)?;
    let start = if cfg.skip_first { 1 } else { 0 };
    if k == 0 {
        return Ok(Array2::zeros((n, 0)));
    }
    if n <= k + start {
        return Err(Error::InvalidEmbeddingDim { k, n });
    }
    // Build A = 2I - L in a faer Mat: negate L, then add 2 on the diagonal.
    let mut a = Mat::<f64>::zeros(n, n);
    for i in 0..n {
        for j in 0..n {
            a[(i, j)] = -lap[[i, j]];
        }
        a[(i, i)] += 2.0;
    }
    // Ask for k + start eigenpairs so the trivial one can be skipped.
    let r = k + start;
    let mut eigvecs = Mat::<f64>::zeros(n, r);
    let mut eigvals = vec![0.0_f64; r];
    // Deterministic hash-mixed starting vector (no RNG dependency).
    let mut v0 = Col::<f64>::zeros(n);
    for i in 0..n {
        v0[i] = (((i + 1) * 2654435761usize % 10_000) as f64) / 10_000.0 - 0.5;
    }
    let par = Par::Seq;
    let params = PartialEigenParams {
        max_restarts: 8,
        ..Default::default()
    };
    // Allocate the scratch workspace the solver requires.
    let req = partial_eigen_scratch(&a, r, par, params);
    let mut mem = MemBuffer::new(req);
    let stack = MemStack::new(&mut mem);
    let _info = partial_self_adjoint_eigen(
        eigvecs.as_mut(),
        &mut eigvals,
        &a,
        v0.as_ref(),
        cfg.jacobi_tol,
        par,
        stack,
        params,
    );
    // Copy out columns [start, start + k) into the ndarray result.
    let mut u = Array2::<f64>::zeros((n, k));
    for out_col in 0..k {
        let col = start + out_col;
        for i in 0..n {
            u[[i, out_col]] = eigvecs[(i, col)];
        }
    }
    if cfg.row_normalize {
        // Scale each embedding row to unit L2 norm (zero rows untouched).
        for mut row in u.rows_mut() {
            let norm_sq: f64 = row.iter().map(|x| x * x).sum();
            let norm = norm_sq.sqrt();
            if norm > 0.0 {
                for x in row.iter_mut() {
                    *x /= norm;
                }
            }
        }
    }
    Ok(u)
}
/// Laplacian quadratic form `xᵀ L x` — a smoothness measure of `x` over
/// the graph (zero for constant vectors on a connected graph).
pub fn laplacian_quadratic_form(lap: &Array2<f64>, x: &Array1<f64>) -> f64 {
    x.dot(&lap.dot(x))
}
/// Whether the graph is connected, via depth-first search from node 0
/// following strictly positive edge weights. The empty graph counts as
/// connected.
pub fn is_connected(adj: &Array2<f64>) -> bool {
    let n = adj.nrows();
    if n == 0 {
        return true;
    }
    let mut seen = vec![false; n];
    let mut pending = vec![0usize];
    seen[0] = true;
    while let Some(node) = pending.pop() {
        // Mark neighbors when discovered so each node is pushed at most once.
        for j in 0..n {
            if adj[[node, j]] > 0.0 && !seen[j] {
                seen[j] = true;
                pending.push(j);
            }
        }
    }
    seen.into_iter().all(|reached| reached)
}
#[cfg(test)]
mod tests {
    use super::*;
    use ndarray::array;
    use proptest::prelude::*;
    // Path graph 0-1-2: degrees 1, 2, 1 on the diagonal, -1 per edge.
    #[test]
    fn test_laplacian_basic() {
        let adj = array![[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [0.0, 1.0, 0.0]];
        let lap = adjacency_to_laplacian(&adj);
        assert!((lap[[0, 0]] - 1.0).abs() < 1e-10);
        assert!((lap[[1, 1]] - 2.0).abs() < 1e-10);
        assert!((lap[[2, 2]] - 1.0).abs() < 1e-10);
        assert!((lap[[0, 1]] + 1.0).abs() < 1e-10);
    }
    // Rows of L = D - A must sum to zero (the constant vector is in the kernel).
    #[test]
    fn test_laplacian_row_sum_zero() {
        let adj = array![[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]];
        let lap = adjacency_to_laplacian(&adj);
        for i in 0..3 {
            let row_sum: f64 = lap.row(i).sum();
            assert!(row_sum.abs() < 1e-10, "Row {} sum: {}", i, row_sum);
        }
    }
    // L_sym has unit diagonal for a graph with no self-loops.
    #[test]
    fn test_normalized_laplacian_diagonal() {
        let adj = array![[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]];
        let lap_norm = normalized_laplacian(&adj);
        for i in 0..3 {
            assert!(
                (lap_norm[[i, i]] - 1.0).abs() < 1e-10,
                "Diagonal {} = {}",
                i,
                lap_norm[[i, i]]
            );
        }
    }
    // P = D^{-1} A is row-stochastic when every node has positive degree.
    #[test]
    fn test_transition_matrix_row_sum() {
        let adj = array![[0.0, 1.0, 2.0], [1.0, 0.0, 1.0], [2.0, 1.0, 0.0]];
        let p = transition_matrix(&adj);
        for i in 0..3 {
            let row_sum: f64 = p.row(i).sum();
            assert!((row_sum - 1.0).abs() < 1e-10, "Row {} sum: {}", i, row_sum);
        }
    }
    // Output shape is (n, k) regardless of solver path.
    #[test]
    fn test_spectral_embedding_shape() {
        let adj = array![[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [0.0, 1.0, 0.0]];
        let u = spectral_embedding(&adj, 1, &SpectralEmbeddingConfig::default()).unwrap();
        assert_eq!(u.nrows(), 3);
        assert_eq!(u.ncols(), 1);
    }
    // Jacobi and faer paths should agree up to sign, so compare |dot|.
    #[cfg(feature = "faer")]
    #[test]
    fn spectral_embedding_faer_dense_agrees_with_jacobi_on_small_graph() {
        let adj = array![
            [0.0, 1.0, 0.0, 0.0],
            [1.0, 0.0, 1.0, 0.0],
            [0.0, 1.0, 0.0, 1.0],
            [0.0, 0.0, 1.0, 0.0],
        ];
        let cfg = SpectralEmbeddingConfig {
            jacobi_max_n: 16,
            skip_first: true,
            row_normalize: true,
            ..Default::default()
        };
        let u_jacobi = spectral_embedding(&adj, 1, &cfg).unwrap();
        let lap = normalized_laplacian(&adj);
        let u_faer = spectral_embedding_faer_dense_from_laplacian(&lap, 1, &cfg).unwrap();
        let dot: f64 = (0..4).map(|i| u_jacobi[[i, 0]] * u_faer[[i, 0]]).sum();
        assert!(dot.abs() > 0.99, "abs(dot)={} too small", dot.abs());
    }
    // Both nodes are isolated; either index is acceptable in the error.
    #[test]
    fn test_normalized_laplacian_checked_rejects_isolated() {
        let adj = array![[0.0, 0.0], [0.0, 0.0]];
        let err = normalized_laplacian_checked(&adj).unwrap_err();
        match err {
            Error::ZeroDegree(0) | Error::ZeroDegree(1) => {}
            other => panic!("unexpected error: {other}"),
        }
    }
    // RBF kernel: unit self-similarity and full symmetry.
    #[test]
    fn test_gaussian_similarity_self() {
        let points = array![[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]];
        let sim = gaussian_similarity(&points, 1.0);
        for i in 0..3 {
            assert!((sim[[i, i]] - 1.0).abs() < 1e-10);
        }
        for i in 0..3 {
            for j in 0..3 {
                assert!((sim[[i, j]] - sim[[j, i]]).abs() < 1e-10);
            }
        }
    }
    // k-NN graph is non-empty and symmetric after the OR-symmetrization pass.
    #[test]
    fn test_knn_graph() {
        let distances = array![
            [0.0, 1.0, 2.0, 10.0],
            [1.0, 0.0, 1.5, 10.0],
            [2.0, 1.5, 0.0, 10.0],
            [10.0, 10.0, 10.0, 0.0]
        ];
        let adj = knn_graph(&distances, 2);
        assert!(adj.sum() > 0.0);
        for i in 0..4 {
            for j in 0..4 {
                assert!((adj[[i, j]] - adj[[j, i]]).abs() < 1e-10);
            }
        }
    }
    // Path graph is connected; removing the 1-2 edge isolates node 2.
    #[test]
    fn test_is_connected() {
        let adj_connected = array![[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [0.0, 1.0, 0.0]];
        assert!(is_connected(&adj_connected));
        let adj_disconnected = array![[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]];
        assert!(!is_connected(&adj_disconnected));
    }
    // x^T L x vanishes on constant vectors and is positive otherwise.
    #[test]
    fn test_quadratic_form() {
        let adj = array![[0.0, 1.0], [1.0, 0.0]];
        let lap = adjacency_to_laplacian(&adj);
        let ones = array![1.0, 1.0];
        let qf = laplacian_quadratic_form(&lap, &ones);
        assert!(qf.abs() < 1e-10);
        let non_const = array![1.0, -1.0];
        let qf = laplacian_quadratic_form(&lap, &non_const);
        assert!(qf > 0.0);
    }
    proptest! {
        // For any symmetric adjacency with all-positive degrees, L_sym is
        // symmetric with unit diagonal.
        #[test]
        fn prop_normalized_laplacian_is_symmetric_for_symmetric_adj(
            n in 2usize..20,
            weights in prop::collection::vec(0.0f64..1.0, 1..500),
        ) {
            // Build a random symmetric adjacency from the weight pool.
            let mut adj = Array2::<f64>::zeros((n, n));
            let mut it = weights.into_iter();
            for i in 0..n {
                for j in (i+1)..n {
                    let w = it.next().unwrap_or(0.0);
                    adj[[i, j]] = w;
                    adj[[j, i]] = w;
                }
            }
            for i in 0..n { adj[[i,i]] = 0.0; }
            // Patch isolated nodes with a tiny edge so every degree > 0.
            for i in 0..n {
                if adj.row(i).sum() == 0.0 {
                    let j = (i + 1) % n;
                    adj[[i, j]] = 1e-3;
                    adj[[j, i]] = 1e-3;
                }
            }
            let l = normalized_laplacian(&adj);
            let eps = 1e-10;
            for i in 0..n {
                for j in 0..n {
                    prop_assert!((l[[i,j]] - l[[j,i]]).abs() <= eps);
                }
            }
            for i in 0..n {
                prop_assert!((l[[i,i]] - 1.0).abs() <= 1e-10);
            }
        }
        // Rayleigh quotients of L_sym lie in [0, 2].
        #[test]
        fn prop_rayleigh_quotient_bounds_for_normalized_laplacian(
            n in 2usize..25,
            weights in prop::collection::vec(0.0f64..1.0, 1..2000),
            x in prop::collection::vec(-1.0f64..1.0, 2..25),
        ) {
            // Build a random symmetric adjacency from the weight pool.
            let mut adj = Array2::<f64>::zeros((n, n));
            let mut it = weights.into_iter();
            for i in 0..n {
                for j in (i+1)..n {
                    let w = it.next().unwrap_or(0.0);
                    adj[[i, j]] = w;
                    adj[[j, i]] = w;
                }
            }
            for i in 0..n { adj[[i,i]] = 0.0; }
            // Patch isolated nodes with a tiny edge so every degree > 0.
            for i in 0..n {
                if adj.row(i).sum() == 0.0 {
                    let j = (i + 1) % n;
                    adj[[i, j]] = 1e-3;
                    adj[[j, i]] = 1e-3;
                }
            }
            let l = normalized_laplacian_checked(&adj).unwrap();
            // Pad/truncate the sampled test vector to length n.
            let mut xv = Array1::<f64>::zeros(n);
            for i in 0..n {
                xv[i] = x.get(i).copied().unwrap_or(0.0);
            }
            let denom = xv.dot(&xv);
            prop_assume!(denom > 1e-12);
            let rq = laplacian_quadratic_form(&l, &xv) / denom;
            prop_assert!(rq >= -1e-9, "rq={} < 0", rq);
            prop_assert!(rq <= 2.0 + 1e-6, "rq={} > 2", rq);
        }
    }
}