scirs2_graph/hypergraph/neural.rs

//! Hypergraph Neural Network (HGNN) convolution layers.
//!
//! Implements HGNN (Feng et al. 2019) and its attention-based variant HGAT.
//!
//! ## HGNN Convolution
//!
//! ```text
//! X' = D_v^{-1/2} H W_e D_e^{-1} H^T D_v^{-1/2} X Θ
//! ```
//!
//! where:
//! - `H` is the incidence matrix [n_nodes × n_hyperedges]
//! - `W_e` is the diagonal hyperedge weight matrix
//! - `D_v[i] = Σ_e H[i,e] * w[e]` (weighted node degree)
//! - `D_e[e] = Σ_i H[i,e]` (hyperedge degree)
//! - `Θ` is the learnable weight matrix [in_dim × out_dim]
//!
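//! For example, with 3 nodes, hyperedges `e0 = {0, 1}` and `e1 = {1, 2}`, and unit
//! edge weights, these quantities are:
//!
//! ```text
//! H   = [[1, 0],
//!        [1, 1],
//!        [0, 1]]
//! D_v = [1, 2, 1]      (weighted node degrees)
//! D_e = [2, 2]         (hyperedge degrees)
//! ```
//!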
//! ## HGAT Attention Variant
//!
//! When `use_attention = true`, attention coefficients are computed per node-edge
//! pair before aggregation, allowing the model to focus on the most relevant
//! hyperedge memberships.
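//!
//! ## Example
//!
//! A minimal usage sketch (marked `ignore` rather than run as a doctest; it mirrors
//! the small fixtures used in this module's tests):
//!
//! ```ignore
//! use scirs2_core::ndarray::Array2;
//!
//! // 4 nodes, 3 hyperedges: e0 = {0,1}, e1 = {1,2}, e2 = {2,3}
//! let mut h = Array2::<f64>::zeros((4, 3));
//! for &(i, e) in &[(0, 0), (1, 0), (1, 1), (2, 1), (2, 2), (3, 2)] {
//!     h[[i, e]] = 1.0;
//! }
//! let x = Array2::<f64>::ones((4, 16));
//!
//! // Two-layer HGNN (16 -> 32 -> 8); ReLU is applied between layers.
//! let net = HgnnNetwork::new(&[16, 32, 8], false, 42);
//! let out = net.forward(&h, &x, None).expect("forward");
//! assert_eq!(out.dim(), (4, 8));
//! ```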

use crate::error::{GraphError, Result};
use scirs2_core::ndarray::{Array1, Array2};
use scirs2_core::random::{Rng, RngExt, SeedableRng};

// ============================================================================
// Configuration
// ============================================================================

/// Configuration for a single HGNN or HGAT convolution layer.
#[derive(Debug, Clone)]
pub struct HgnnLayerConfig {
    /// Input feature dimension.
    pub in_dim: usize,
    /// Output feature dimension.
    pub out_dim: usize,
    /// Whether to use attention-weighted aggregation (HGAT variant).
    pub use_attention: bool,
    /// Number of attention heads (only used when `use_attention = true`).
    pub n_heads: usize,
    /// Dropout rate applied to output features during training (0.0 = no dropout).
    pub dropout: f64,
}

impl Default for HgnnLayerConfig {
    fn default() -> Self {
        Self {
            in_dim: 64,
            out_dim: 64,
            use_attention: false,
            n_heads: 1,
            dropout: 0.0,
        }
    }
}

// ============================================================================
// HgnnLayer
// ============================================================================

/// A single HGNN convolution layer.
///
/// Stores the learnable weight matrix `Θ` of shape `[in_dim × out_dim]`.
/// Also stores an attention weight vector, which is only used when
/// `use_attention = true`.
pub struct HgnnLayer {
    /// Learnable weight matrix: [in_dim × out_dim]
    theta: Array2<f64>,
    /// Attention weight vector: [in_dim] (used only when use_attention = true)
    attn_vec: Array1<f64>,
    config: HgnnLayerConfig,
}

impl HgnnLayer {
    /// Create a new HGNN layer with Xavier-uniform initialised weights.
    ///
    /// # Arguments
    /// * `config` – layer configuration
    /// * `seed`   – RNG seed for reproducible weight initialisation
    pub fn new(config: HgnnLayerConfig, seed: u64) -> Self {
        let mut rng = scirs2_core::random::ChaCha20Rng::seed_from_u64(seed);

        // Xavier uniform scale: sqrt(6 / (fan_in + fan_out))
        let scale = (6.0 / (config.in_dim + config.out_dim) as f64).sqrt();
        let theta = Array2::from_shape_fn((config.in_dim, config.out_dim), |_| {
            rng.random::<f64>() * 2.0 * scale - scale
        });

        let attn_scale = (6.0 / (config.in_dim + 1) as f64).sqrt();
        let attn_vec = Array1::from_shape_fn(config.in_dim, |_| {
            rng.random::<f64>() * 2.0 * attn_scale - attn_scale
        });

        HgnnLayer {
            theta,
            attn_vec,
            config,
        }
    }

    /// Forward pass.
    ///
    /// When `use_attention = false`, this computes `X' = Ã X Θ` with
    ///
    /// `Ã = D_v^{-1/2} H diag(w / D_e) H^T D_v^{-1/2}`
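    ///
    /// Written element-wise (this is exactly what the three steps in
    /// `forward_standard` build):
    ///
    /// ```text
    /// Ã[i,j] = Σ_e H[i,e] · H[j,e] · w[e] / (D_e[e] · √(D_v[i] · D_v[j]))
    /// ```
    ///
    /// When `use_attention = true`, the attention-weighted HGAT aggregation is
    /// used instead of `Ã`.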
    ///
    /// # Arguments
    /// * `incidence`   – incidence matrix H of shape `[n_nodes × n_edges]`
    /// * `node_feats`  – node feature matrix X of shape `[n_nodes × in_dim]`
    /// * `edge_weights` – per-hyperedge weight vector (length = n_edges); if
    ///   `None`, all weights default to 1.0
    ///
    /// # Returns
    /// Node feature matrix of shape `[n_nodes × out_dim]`
    pub fn forward(
        &self,
        incidence: &Array2<f64>,
        node_feats: &Array2<f64>,
        edge_weights: Option<&Array1<f64>>,
    ) -> Result<Array2<f64>> {
        let (n_nodes, n_edges) = incidence.dim();
        let (feat_n, in_dim) = node_feats.dim();

        if feat_n != n_nodes {
            return Err(GraphError::InvalidParameter {
                param: "node_feats".to_string(),
                value: format!("rows={feat_n}"),
                expected: format!("rows={n_nodes} (matching incidence rows)"),
                context: "HgnnLayer::forward".to_string(),
            });
        }
        if in_dim != self.config.in_dim {
            return Err(GraphError::InvalidParameter {
                param: "node_feats".to_string(),
                value: format!("cols={in_dim}"),
                expected: format!("cols={}", self.config.in_dim),
                context: "HgnnLayer::forward".to_string(),
            });
        }

        // Default uniform edge weights
        let default_w = Array1::ones(n_edges);
        let w: &Array1<f64> = edge_weights.unwrap_or(&default_w);

        if self.config.use_attention {
            self.forward_attention(incidence, node_feats, w)
        } else {
            self.forward_standard(incidence, node_feats, w)
        }
    }

    /// Standard HGNN forward (no attention).
    fn forward_standard(
        &self,
        incidence: &Array2<f64>,
        node_feats: &Array2<f64>,
        w: &Array1<f64>,
    ) -> Result<Array2<f64>> {
        let (n_nodes, n_edges) = incidence.dim();

        // D_v[i] = Σ_e H[i,e] * w[e]  (node degree)
        let mut dv: Array1<f64> = Array1::zeros(n_nodes);
        for i in 0..n_nodes {
            for e in 0..n_edges {
                dv[i] += incidence[[i, e]] * w[e];
            }
        }

        // D_e[e] = Σ_i H[i,e]  (hyperedge degree)
        let mut de: Array1<f64> = Array1::zeros(n_edges);
        for e in 0..n_edges {
            for i in 0..n_nodes {
                de[e] += incidence[[i, e]];
            }
        }

        // Dv^{-1/2}: treat zero degrees as 0 (isolated nodes)
        let dv_inv_sqrt: Array1<f64> =
            dv.mapv(|d: f64| if d > 1e-12 { 1.0 / d.sqrt() } else { 0.0 });

        // De^{-1}: treat zero as 0
        let de_inv: Array1<f64> = de.mapv(|d: f64| if d > 1e-12 { 1.0 / d } else { 0.0 });

        // Ã = Dv^{-1/2} * H * diag(w * de_inv) * H^T * Dv^{-1/2}
        //
        // Step 1: T1 = H * diag(w * de_inv)   [n_nodes × n_edges]
        let mut t1: Array2<f64> = Array2::zeros((n_nodes, n_edges));
        for i in 0..n_nodes {
            for e in 0..n_edges {
                t1[[i, e]] = incidence[[i, e]] * w[e] * de_inv[e];
            }
        }

        // Step 2: T2 = T1 * H^T              [n_nodes × n_nodes]
        let mut t2: Array2<f64> = Array2::zeros((n_nodes, n_nodes));
        for i in 0..n_nodes {
            for j in 0..n_nodes {
                let mut val = 0.0_f64;
                for e in 0..n_edges {
                    val += t1[[i, e]] * incidence[[j, e]];
                }
                t2[[i, j]] = val;
            }
        }

        // Step 3: Ã = diag(Dv^{-1/2}) * T2 * diag(Dv^{-1/2})
        let mut a_tilde: Array2<f64> = Array2::zeros((n_nodes, n_nodes));
        for i in 0..n_nodes {
            for j in 0..n_nodes {
                a_tilde[[i, j]] = dv_inv_sqrt[i] * t2[[i, j]] * dv_inv_sqrt[j];
            }
        }

        // Step 4: Output = Ã * X * Θ
        //   First: Z = Ã * X   [n_nodes × in_dim]
        let in_dim = node_feats.dim().1;
        let out_dim = self.config.out_dim;
        let mut z: Array2<f64> = Array2::zeros((n_nodes, in_dim));
        for i in 0..n_nodes {
            for k in 0..in_dim {
                let mut val = 0.0_f64;
                for j in 0..n_nodes {
                    val += a_tilde[[i, j]] * node_feats[[j, k]];
                }
                z[[i, k]] = val;
            }
        }

        //   Then: Output = Z * Θ   [n_nodes × out_dim]
        let mut output: Array2<f64> = Array2::zeros((n_nodes, out_dim));
        for i in 0..n_nodes {
            for k in 0..out_dim {
                let mut val = 0.0;
                for d in 0..in_dim {
                    val += z[[i, d]] * self.theta[[d, k]];
                }
                output[[i, k]] = val;
            }
        }

        Ok(output)
    }

    /// HGAT forward: attention-weighted aggregation over hyperedge memberships.
    ///
    /// For each node v and each hyperedge e containing v (see the formulas below):
    ///   - Compute the hyperedge mean feature m_e = (1/D_e[e]) Σ_{u∈e} h_u
    ///   - Compute a raw score s_{ve} from the query node, the hyperedge mean,
    ///     and the edge weight w[e]
    ///   - Normalise over the hyperedges containing v: α_{ve} = softmax_e(s_{ve})
    ///   - Aggregate: h'_v = Σ_e α_{ve} · m_e
    ///   - Apply Θ: output_v = h'_v Θ
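    ///
    /// Concretely, writing `a` for `attn_vec` and `m_e` for the mean feature of
    /// hyperedge `e`, the scores computed below are
    ///
    /// ```text
    /// s_{ve} = LeakyReLU(a · m_e + LeakyReLU(a · h_v)) · w[e]
    /// α_{ve} = softmax over {e : v ∈ e} of s_{ve}
    /// h'_v   = Σ_{e : v ∈ e} α_{ve} · m_e
    /// ```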
    fn forward_attention(
        &self,
        incidence: &Array2<f64>,
        node_feats: &Array2<f64>,
        w: &Array1<f64>,
    ) -> Result<Array2<f64>> {
        let (n_nodes, n_edges) = incidence.dim();
        let in_dim = node_feats.dim().1;
        let out_dim = self.config.out_dim;

        // D_e[e] = Σ_i H[i,e]
        let mut de: Array1<f64> = Array1::zeros(n_edges);
        for e in 0..n_edges {
            for i in 0..n_nodes {
                de[e] += incidence[[i, e]];
            }
        }
        let de_inv: Array1<f64> = de.mapv(|d: f64| if d > 1e-12 { 1.0 / d } else { 0.0 });

        // Compute mean feature per hyperedge: m[e] = (1/D_e[e]) Σ_{u∈e} h_u
        let mut m_edge: Array2<f64> = Array2::zeros((n_edges, in_dim));
        for e in 0..n_edges {
            let mut sum: Array1<f64> = Array1::zeros(in_dim);
            for i in 0..n_nodes {
                if incidence[[i, e]] > 0.0 {
                    for d in 0..in_dim {
                        sum[d] += node_feats[[i, d]];
                    }
                }
            }
            for d in 0..in_dim {
                m_edge[[e, d]] = sum[d] * de_inv[e];
            }
        }

        // Compute raw attention scores using LeakyReLU(attn_vec · h_v) per node
        let leaky_alpha = 0.2_f64;
        let mut output: Array2<f64> = Array2::zeros((n_nodes, out_dim));

        for v in 0..n_nodes {
            // Collect hyperedges containing v
            let edges_of_v: Vec<usize> =
                (0..n_edges).filter(|&e| incidence[[v, e]] > 0.0).collect();

            if edges_of_v.is_empty() {
                // Isolated node: no message, output remains zero
                continue;
            }

            // Query-node part of the score: LeakyReLU(attn_vec · h_v)
            // (identical for every hyperedge of v, since it depends only on h_v)
            let score_v: f64 = {
                let raw: f64 = (0..in_dim)
                    .map(|d| self.attn_vec[d] * node_feats[[v, d]])
                    .sum();
                if raw >= 0.0 {
                    raw
                } else {
                    leaky_alpha * raw
                }
            };

            // A softmax over `score_v` alone would be uniform across the edges
            // of v, so build per-edge scores that also use the hyperedge mean
            // and the edge weight.
            let edge_scores: Vec<f64> = edges_of_v
                .iter()
                .map(|&e| {
                    let raw: f64 = (0..in_dim).map(|d| self.attn_vec[d] * m_edge[[e, d]]).sum();
                    let s = raw + score_v;
                    let leaky = if s >= 0.0 { s } else { leaky_alpha * s };
                    leaky * w[e]
                })
                .collect();

            // Numerically stable softmax
            let max_s = edge_scores
                .iter()
                .cloned()
                .fold(f64::NEG_INFINITY, f64::max);
            let exps: Vec<f64> = edge_scores.iter().map(|&s| (s - max_s).exp()).collect();
            let sum_exp: f64 = exps.iter().sum();
            let alphas: Vec<f64> = exps.iter().map(|&e| e / sum_exp).collect();

            // Aggregate: agg_v = Σ_e alpha_{ve} * m[e]
            let mut agg: Array1<f64> = Array1::zeros(in_dim);
            for (k, &e) in edges_of_v.iter().enumerate() {
                for d in 0..in_dim {
                    agg[d] += alphas[k] * m_edge[[e, d]];
                }
            }

            // Apply Θ: output_v = agg_v * Θ
            for k in 0..out_dim {
                let mut val = 0.0_f64;
                for d in 0..in_dim {
                    val += agg[d] * self.theta[[d, k]];
                }
                output[[v, k]] = val;
            }
        }

        Ok(output)
    }

    /// Element-wise ReLU activation.
    pub fn relu(x: Array2<f64>) -> Array2<f64> {
        x.mapv(|v| if v > 0.0 { v } else { 0.0 })
    }

    /// Number of learnable parameters in this layer (θ plus the attention
    /// vector, which is counted even when `use_attention = false`).
    pub fn n_params(&self) -> usize {
        self.theta.len() + self.attn_vec.len()
    }
}

// ============================================================================
// HgnnNetwork
// ============================================================================

/// Multi-layer HGNN network.
///
/// Layers alternate convolution and ReLU activation; the final layer has no
/// activation applied (the caller is free to apply softmax / sigmoid as
/// appropriate for the downstream task).
pub struct HgnnNetwork {
    layers: Vec<HgnnLayer>,
}

impl HgnnNetwork {
    /// Build a multi-layer HGNN.
    ///
    /// # Arguments
    /// * `dims` – sequence of dimensions `[in_dim, h1, h2, ..., out_dim]`.
    ///   Must have at least two elements.
    /// * `use_attention` – whether every layer uses the HGAT attention variant.
    /// * `seed` – base RNG seed; each layer receives `seed + layer_index`.
    ///
    /// # Panics
    /// Panics if `dims.len() < 2`.
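    ///
    /// # Example
    ///
    /// A small construction sketch (`ignore`d, shapes only):
    ///
    /// ```ignore
    /// // Three layers: 16 -> 64 -> 32 -> 8, plain HGNN convolutions.
    /// let net = HgnnNetwork::new(&[16, 64, 32, 8], false, 0);
    /// assert_eq!(net.depth(), 3);
    /// ```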
    pub fn new(dims: &[usize], use_attention: bool, seed: u64) -> Self {
        assert!(
            dims.len() >= 2,
            "dims must have at least 2 elements (in_dim, out_dim)"
        );
        let layers = dims
            .windows(2)
            .enumerate()
            .map(|(i, w)| {
                let cfg = HgnnLayerConfig {
                    in_dim: w[0],
                    out_dim: w[1],
                    use_attention,
                    n_heads: 1,
                    dropout: 0.0,
                };
                HgnnLayer::new(cfg, seed + i as u64)
            })
            .collect();
        HgnnNetwork { layers }
    }

    /// Forward pass through all layers.
    ///
    /// ReLU is applied after every hidden layer; the output of the last layer
    /// is returned without activation.
    ///
    /// # Arguments
    /// * `incidence`    – incidence matrix H: `[n_nodes × n_edges]`
    /// * `node_feats`   – node feature matrix X: `[n_nodes × in_dim]`
    /// * `edge_weights` – optional per-hyperedge weight vector
    ///
    /// # Returns
    /// Output feature matrix: `[n_nodes × out_dim]`
    pub fn forward(
        &self,
        incidence: &Array2<f64>,
        node_feats: &Array2<f64>,
        edge_weights: Option<&Array1<f64>>,
    ) -> Result<Array2<f64>> {
        let n_layers = self.layers.len();
        let mut x = node_feats.clone();

        for (i, layer) in self.layers.iter().enumerate() {
            x = layer.forward(incidence, &x, edge_weights)?;
            // Apply ReLU after all but the last layer
            if i + 1 < n_layers {
                x = HgnnLayer::relu(x);
            }
        }

        Ok(x)
    }

    /// Total number of learnable parameters across all layers.
    pub fn n_params(&self) -> usize {
        self.layers.iter().map(|l| l.n_params()).sum()
    }

    /// Number of layers.
    pub fn depth(&self) -> usize {
        self.layers.len()
    }
}

// ============================================================================
// Tests
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;
    use scirs2_core::ndarray::Array2;

    /// Build a small incidence matrix: 4 nodes, 3 hyperedges
    ///   e0 = {0,1}, e1 = {1,2}, e2 = {2,3}
    fn small_incidence() -> Array2<f64> {
        let mut h = Array2::zeros((4, 3));
        h[[0, 0]] = 1.0;
        h[[1, 0]] = 1.0;
        h[[1, 1]] = 1.0;
        h[[2, 1]] = 1.0;
        h[[2, 2]] = 1.0;
        h[[3, 2]] = 1.0;
        h
    }

    /// Build an identity incidence: each node belongs to exactly one hyperedge.
    fn identity_incidence(n: usize) -> Array2<f64> {
        let mut h = Array2::zeros((n, n));
        for i in 0..n {
            h[[i, i]] = 1.0;
        }
        h
    }

    #[test]
    fn test_output_shape() {
        let h = small_incidence();
        let x = Array2::ones((4, 8));
        let cfg = HgnnLayerConfig {
            in_dim: 8,
            out_dim: 16,
            ..Default::default()
        };
        let layer = HgnnLayer::new(cfg, 42);
        let out = layer.forward(&h, &x, None).expect("forward ok");
        assert_eq!(out.dim(), (4, 16));
    }

    #[test]
    fn test_identity_incidence_is_diagonal() {
        // With identity incidence H=I and unit edge weights:
        // D_v[i]=1, D_e[e]=1, Ã=I, so output = X * Θ
        let n = 4;
        let h = identity_incidence(n);
        // identity feature matrix: X = I (so in_dim = n)
        let in_dim = 4;
        let out_dim = 4;
        let x = Array2::eye(n);
        let cfg = HgnnLayerConfig {
            in_dim,
            out_dim,
            ..Default::default()
        };
        let layer = HgnnLayer::new(cfg, 7);
        let out = layer.forward(&h, &x, None).expect("forward ok");
        // With Ã = I, output = X @ theta = I @ theta = theta
        // Compare: out[i, :] ≈ theta[i, :]
        for i in 0..n {
            for k in 0..out_dim {
                let diff = (out[[i, k]] - layer.theta[[i, k]]).abs();
                assert!(
                    diff < 1e-10,
                    "out[{i},{k}]={} != theta[{i},{k}]={}",
                    out[[i, k]],
                    layer.theta[[i, k]]
                );
            }
        }
    }

    #[test]
    fn test_output_bounded_with_unit_features() {
        // With unit features and reasonable weights the output should be finite
        let h = small_incidence();
        let x = Array2::ones((4, 4));
        let cfg = HgnnLayerConfig {
            in_dim: 4,
            out_dim: 4,
            ..Default::default()
        };
        let layer = HgnnLayer::new(cfg, 99);
        let out = layer.forward(&h, &x, None).expect("forward ok");
        for v in out.iter() {
            assert!(v.is_finite(), "output contains non-finite value: {v}");
        }
    }

    #[test]
    fn test_zero_dropout_is_identity_of_forward() {
        // dropout=0.0 means no masking; two calls with same input return same output
        let h = small_incidence();
        let x = Array2::from_shape_fn((4, 8), |(i, j)| (i + j) as f64 * 0.1);
        let cfg = HgnnLayerConfig {
            in_dim: 8,
            out_dim: 4,
            dropout: 0.0,
            ..Default::default()
        };
        let layer = HgnnLayer::new(cfg, 1);
        let out1 = layer.forward(&h, &x, None).expect("ok");
        let out2 = layer.forward(&h, &x, None).expect("ok");
        for (a, b) in out1.iter().zip(out2.iter()) {
            assert!((a - b).abs() < 1e-12);
        }
    }

    #[test]
    fn test_n_params_counts_correctly() {
        let cfg = HgnnLayerConfig {
            in_dim: 8,
            out_dim: 16,
            use_attention: false,
            ..Default::default()
        };
        let layer = HgnnLayer::new(cfg, 0);
        // theta: 8*16=128  attn_vec: 8  total: 136
        assert_eq!(layer.n_params(), 8 * 16 + 8);
    }

    #[test]
    fn test_multi_layer_output_shape() {
        let h = small_incidence(); // 4 nodes
        let x = Array2::ones((4, 16));
        let net = HgnnNetwork::new(&[16, 32, 8], false, 42);
        let out = net.forward(&h, &x, None).expect("ok");
        assert_eq!(out.dim(), (4, 8));
    }

    #[test]
    fn test_network_n_params() {
        let net = HgnnNetwork::new(&[8, 16, 4], false, 0);
        // Layer 0: theta 8*16=128, attn 8  → 136
        // Layer 1: theta 16*4=64, attn 16 → 80
        // Total: 216
        assert_eq!(net.n_params(), 8 * 16 + 8 + 16 * 4 + 16);
    }

    #[test]
    fn test_theta_small_init() {
        let cfg = HgnnLayerConfig {
            in_dim: 64,
            out_dim: 64,
            ..Default::default()
        };
        let layer = HgnnLayer::new(cfg, 1234);
        let scale = (6.0_f64 / (64.0 + 64.0)).sqrt();
        for v in layer.theta.iter() {
            assert!(
                v.abs() <= scale + 1e-9,
                "theta value {v} exceeds Xavier bound {scale}"
            );
        }
    }

    #[test]
    fn test_attention_output_shape() {
        let h = small_incidence();
        let x = Array2::ones((4, 8));
        let cfg = HgnnLayerConfig {
            in_dim: 8,
            out_dim: 4,
            use_attention: true,
            n_heads: 1,
            dropout: 0.0,
        };
        let layer = HgnnLayer::new(cfg, 5);
        let out = layer.forward(&h, &x, None).expect("ok");
        assert_eq!(out.dim(), (4, 4));
    }

    #[test]
    fn test_relu_zeros_negatives() {
        let x = Array2::from_shape_vec((2, 3), vec![-1.0, 0.0, 1.0, -0.5, 2.0, -3.0]).expect("ok");
        let r = HgnnLayer::relu(x);
        assert_eq!(r[[0, 0]], 0.0);
        assert_eq!(r[[0, 2]], 1.0);
        assert_eq!(r[[1, 1]], 2.0);
        assert_eq!(r[[1, 2]], 0.0);
    }

    #[test]
    fn test_edge_weights_change_output() {
        let h = small_incidence();
        let x = Array2::ones((4, 4));
        let cfg = HgnnLayerConfig {
            in_dim: 4,
            out_dim: 4,
            ..Default::default()
        };
        let layer = HgnnLayer::new(cfg, 42);
        let w1 = Array1::ones(3);
        let w2 = Array1::from_vec(vec![2.0, 1.0, 0.5]);
        let out1 = layer.forward(&h, &x, Some(&w1)).expect("ok");
        let out2 = layer.forward(&h, &x, Some(&w2)).expect("ok");
        // Different weights → different output
        let diff: f64 = out1
            .iter()
            .zip(out2.iter())
            .map(|(a, b)| (a - b).abs())
            .sum();
        assert!(
            diff > 1e-10,
            "different edge weights should produce different output"
        );
    }
}