// quantrs2_anneal/partitioning.rs

//! Graph partitioning algorithms for quantum annealing
//!
//! This module implements spectral and other graph partitioning methods
//! to decompose large QUBO problems into smaller subproblems that can
//! fit on quantum annealing hardware.
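//!
//! # Example (illustrative)
//!
//! A minimal usage sketch, assuming this module is exposed as
//! `quantrs2_anneal::partitioning` (the exact path may differ in the crate layout):
//!
//! ```ignore
//! use quantrs2_anneal::partitioning::SpectralPartitioner;
//!
//! // Weighted edge list (u, v, weight) describing the problem graph.
//! let edges = vec![(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0)];
//!
//! // Split the four variables into two groups, minimising the edge cut.
//! let partitioner = SpectralPartitioner::default();
//! let partition = partitioner
//!     .partition_graph(4, &edges, 2)
//!     .expect("partitioning failed");
//! println!("edge cut: {}", partition.quality);
//! ```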

use crate::ising::{IsingError, IsingResult};
use std::collections::{HashMap, HashSet};

/// Represents a partition of variables into groups
#[derive(Debug, Clone)]
pub struct Partition {
    /// Maps variable index to partition group
    pub assignment: HashMap<usize, usize>,
    /// Number of partitions
    pub num_partitions: usize,
    /// Quality metric (e.g., edge cut)
    pub quality: f64,
}

impl Partition {
    /// Create a new partition
    #[must_use]
    pub fn new(num_partitions: usize) -> Self {
        Self {
            assignment: HashMap::new(),
            num_partitions,
            quality: 0.0,
        }
    }

    /// Get variables in a specific partition
    #[must_use]
    pub fn get_partition(&self, partition_id: usize) -> Vec<usize> {
        self.assignment
            .iter()
            .filter_map(|(&var, &part)| (part == partition_id).then_some(var))
            .collect()
    }

    /// Calculate edge cut for given edges
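    ///
    /// # Example (illustrative)
    ///
    /// A small sketch of the expected behaviour:
    ///
    /// ```ignore
    /// let mut partition = Partition::new(2);
    /// partition.assignment.insert(0, 0);
    /// partition.assignment.insert(1, 1);
    ///
    /// // The single edge crosses the cut, so its |weight| is counted.
    /// let cut = partition.calculate_edge_cut(&[(0, 1, -2.0)]);
    /// assert_eq!(cut, 2.0);
    /// ```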
    pub fn calculate_edge_cut(&mut self, edges: &[(usize, usize, f64)]) -> f64 {
        let mut cut_weight = 0.0;

        for &(u, v, weight) in edges {
            if let (Some(&p1), Some(&p2)) = (self.assignment.get(&u), self.assignment.get(&v)) {
                if p1 != p2 {
                    cut_weight += weight.abs();
                }
            }
        }

        self.quality = cut_weight;
        cut_weight
    }
}

/// Spectral partitioning using eigendecomposition
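///
/// The partitioner builds the graph Laplacian from absolute edge weights, computes
/// its lowest eigenvectors, and clusters variables by their eigenvector coordinates.
/// For a bipartition, variables are ordered by the Fiedler vector (the eigenvector of
/// the second-smallest eigenvalue) and split at the median to keep the halves balanced.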
#[derive(Clone, Debug)]
pub struct SpectralPartitioner {
    /// Number of eigenvectors to use
    pub num_eigenvectors: usize,
    /// Maximum iterations for eigensolvers
    pub max_iterations: usize,
    /// Convergence tolerance
    pub tolerance: f64,
}

impl Default for SpectralPartitioner {
    fn default() -> Self {
        Self {
            num_eigenvectors: 2,
            max_iterations: 1000,
            tolerance: 1e-6,
        }
    }
}

impl SpectralPartitioner {
    /// Create a new spectral partitioner with default settings
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Partition a graph using spectral methods
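    ///
    /// # Example (illustrative)
    ///
    /// A minimal sketch, assuming the module path `quantrs2_anneal::partitioning`:
    ///
    /// ```ignore
    /// use quantrs2_anneal::partitioning::SpectralPartitioner;
    ///
    /// let edges = vec![(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0)];
    /// let partition = SpectralPartitioner::default()
    ///     .partition_graph(4, &edges, 2)
    ///     .expect("partitioning failed");
    /// assert_eq!(partition.num_partitions, 2);
    /// ```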
    pub fn partition_graph(
        &self,
        num_vars: usize,
        edges: &[(usize, usize, f64)],
        num_partitions: usize,
    ) -> IsingResult<Partition> {
        if num_partitions < 2 {
            return Err(IsingError::InvalidValue(
                "Number of partitions must be at least 2".to_string(),
            ));
        }

        // Build graph Laplacian
        let laplacian = build_laplacian(num_vars, edges);

        // Compute eigenvectors using power iteration (simplified approach)
        let eigenvectors = compute_eigenvectors_power_iteration(
            &laplacian,
            self.num_eigenvectors.min(num_partitions),
            self.max_iterations,
            self.tolerance,
        )?;

        // Use k-means clustering on eigenvectors to assign partitions
        let mut partition = Partition::new(num_partitions);

        if num_partitions == 2 {
            // For bipartition, use median split on Fiedler vector for balanced partitions
            if eigenvectors.len() > 1 {
                let fiedler = &eigenvectors[1]; // Second smallest eigenvalue's eigenvector

                // Create indices sorted by Fiedler vector values
                let mut sorted_indices: Vec<(usize, f64)> =
                    fiedler.iter().enumerate().map(|(i, &v)| (i, v)).collect();
                sorted_indices
                    .sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal));

                // Assign first half to partition 0, second half to partition 1
                for i in 0..sorted_indices.len() / 2 {
                    partition.assignment.insert(sorted_indices[i].0, 0);
                }
                for i in sorted_indices.len() / 2..sorted_indices.len() {
                    partition.assignment.insert(sorted_indices[i].0, 1);
                }
            } else {
                // Fallback: simple split
                for var in 0..num_vars {
                    partition
                        .assignment
                        .insert(var, usize::from(var >= num_vars / 2));
                }
            }
        } else {
            // For k-way partition, use k-means on eigenvector coordinates
            let assignments = kmeans_clustering(&eigenvectors, num_vars, num_partitions);

            for (var, &cluster) in assignments.iter().enumerate() {
                partition.assignment.insert(var, cluster);
            }
        }

        // Calculate partition quality
        partition.calculate_edge_cut(edges);

        Ok(partition)
    }
}

/// Build graph Laplacian matrix
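///
/// Uses absolute edge weights: for an edge `(u, v, w)` the entries are
/// `L[u][v] = L[v][u] = -|w|`, and each endpoint's diagonal accumulates `+|w|`,
/// so `L = D - W` with `D` the weighted degree matrix and `W` the adjacency matrix.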
fn build_laplacian(num_vars: usize, edges: &[(usize, usize, f64)]) -> Vec<Vec<f64>> {
    let mut laplacian = vec![vec![0.0; num_vars]; num_vars];

    // Build adjacency and degree
    for &(u, v, weight) in edges {
        if u != v && u < num_vars && v < num_vars {
            let w = weight.abs();
            laplacian[u][v] -= w;
            laplacian[v][u] -= w;
            laplacian[u][u] += w;
            laplacian[v][v] += w;
        }
    }

    laplacian
}

/// Compute approximations to the Laplacian eigenvectors with the smallest
/// eigenvalues, using shifted power iteration with deflation (simplified)
fn compute_eigenvectors_power_iteration(
    laplacian: &[Vec<f64>],
    num_eigenvectors: usize,
    max_iterations: usize,
    tolerance: f64,
) -> IsingResult<Vec<Vec<f64>>> {
    let n = laplacian.len();
    let mut eigenvectors = Vec::new();

    // First eigenvector is constant (for connected graphs)
    let first = vec![1.0 / (n as f64).sqrt(); n];
    eigenvectors.push(first);

    // Gershgorin upper bound on the Laplacian spectrum. Iterating on the shifted
    // matrix (shift * I - L) makes the smallest eigenvalues of L the dominant ones,
    // so power iteration converges to the eigenvectors spectral partitioning needs.
    let shift = laplacian
        .iter()
        .map(|row| row.iter().map(|x| x.abs()).sum::<f64>())
        .fold(0.0, f64::max);

    // Compute remaining eigenvectors using deflation
    for k in 1..num_eigenvectors.min(n) {
        let mut v = vec![0.0; n];

        // Initialize with deterministic pseudo-random values
        for i in 0..n {
            v[i] = ((i + k) as f64).sin();
        }

        // Orthogonalize against previous eigenvectors
        for prev in &eigenvectors {
            let dot = dot_product(&v, prev);
            for i in 0..n {
                v[i] -= dot * prev[i];
            }
        }

        // Normalize
        normalize_vector(&mut v);

        // Power iteration on the shifted matrix, with deflation
        for _ in 0..max_iterations {
            let lv = matrix_vector_multiply(laplacian, &v);
            let mut v_new: Vec<f64> = (0..n).map(|i| shift * v[i] - lv[i]).collect();

            // Orthogonalize
            for prev in &eigenvectors {
                let dot = dot_product(&v_new, prev);
                for i in 0..n {
                    v_new[i] -= dot * prev[i];
                }
            }

            // Normalize
            normalize_vector(&mut v_new);

            // Check convergence
            let mut diff = 0.0;
            for i in 0..n {
                diff += (v_new[i] - v[i]).abs();
            }

            v = v_new;

            if diff < tolerance {
                break;
            }
        }

        eigenvectors.push(v);
    }

    Ok(eigenvectors)
}

/// Simple k-means clustering
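///
/// Points are embedded by their coordinates across the eigenvectors: feature `f`
/// of point `p` is `eigenvectors[f][p]`, so each variable becomes a point in
/// `eigenvectors.len()`-dimensional space and is assigned to the nearest centroid.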
fn kmeans_clustering(eigenvectors: &[Vec<f64>], num_points: usize, k: usize) -> Vec<usize> {
    let num_features = eigenvectors.len();
    let mut assignments = vec![0; num_points];
    let mut centroids = vec![vec![0.0; num_features]; k];

    // Initialize centroids
    for i in 0..k {
        let point_idx = (i * num_points) / k;
        for j in 0..num_features {
            centroids[i][j] = eigenvectors[j][point_idx];
        }
    }

    // K-means iterations
    for _ in 0..100 {
        let old_assignments = assignments.clone();

        // Assign points to nearest centroid
        for point in 0..num_points {
            let mut min_dist = f64::INFINITY;
            let mut best_cluster = 0;

            for cluster in 0..k {
                let mut dist = 0.0;
                for feature in 0..num_features {
                    let diff = eigenvectors[feature][point] - centroids[cluster][feature];
                    dist += diff * diff;
                }

                if dist < min_dist {
                    min_dist = dist;
                    best_cluster = cluster;
                }
            }

            assignments[point] = best_cluster;
        }

        // Update centroids
        for cluster in 0..k {
            for feature in 0..num_features {
                centroids[cluster][feature] = 0.0;
            }

            let mut count = 0;
            for (point, &assigned) in assignments.iter().enumerate() {
                if assigned == cluster {
                    for feature in 0..num_features {
                        centroids[cluster][feature] += eigenvectors[feature][point];
                    }
                    count += 1;
                }
            }

            if count > 0 {
                for feature in 0..num_features {
                    centroids[cluster][feature] /= f64::from(count);
                }
            }
        }

        // Check convergence
        if assignments == old_assignments {
            break;
        }
    }

    assignments
}

// Helper functions
fn dot_product(a: &[f64], b: &[f64]) -> f64 {
    a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
}

fn normalize_vector(v: &mut [f64]) {
    let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
    if norm > 0.0 {
        for x in v.iter_mut() {
            *x /= norm;
        }
    }
}

fn matrix_vector_multiply(matrix: &[Vec<f64>], vector: &[f64]) -> Vec<f64> {
    matrix.iter().map(|row| dot_product(row, vector)).collect()
}

/// Kernighan-Lin graph partitioning algorithm
#[derive(Clone, Debug)]
pub struct KernighanLinPartitioner {
    /// Maximum number of improvement iterations
    pub max_iterations: usize,
    /// Random seed for initialization (currently unused)
    pub seed: Option<u64>,
}

impl Default for KernighanLinPartitioner {
    fn default() -> Self {
        Self {
            max_iterations: 100,
            seed: None,
        }
    }
}

impl KernighanLinPartitioner {
    /// Partition graph into two parts using Kernighan-Lin algorithm
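    ///
    /// # Example (illustrative)
    ///
    /// A minimal sketch, assuming the module path `quantrs2_anneal::partitioning`:
    ///
    /// ```ignore
    /// use quantrs2_anneal::partitioning::KernighanLinPartitioner;
    ///
    /// // Two triangles joined by a single weak edge.
    /// let edges = vec![
    ///     (0, 1, 2.0), (0, 2, 2.0), (1, 2, 2.0),
    ///     (3, 4, 2.0), (3, 5, 2.0), (4, 5, 2.0),
    ///     (2, 3, 0.5),
    /// ];
    /// let partition = KernighanLinPartitioner::default()
    ///     .bipartition(6, &edges)
    ///     .expect("partitioning failed");
    /// assert!(partition.quality <= 0.5); // only the weak edge is cut
    /// ```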
    pub fn bipartition(
        &self,
        num_vars: usize,
        edges: &[(usize, usize, f64)],
    ) -> IsingResult<Partition> {
        // Build adjacency lists (absolute weights, to match the edge-cut metric)
        let mut adj: HashMap<usize, Vec<(usize, f64)>> = HashMap::new();
        for i in 0..num_vars {
            adj.insert(i, Vec::new());
        }

        for &(u, v, weight) in edges {
            if u != v {
                let w = weight.abs();
                if let Some(u_adj) = adj.get_mut(&u) {
                    u_adj.push((v, w));
                }
                if let Some(v_adj) = adj.get_mut(&v) {
                    v_adj.push((u, w));
                }
            }
        }

        // Initialize partition (balanced)
        let mut partition = Partition::new(2);
        for i in 0..num_vars {
            partition
                .assignment
                .insert(i, usize::from(i >= num_vars / 2));
        }

        // Kernighan-Lin iterations
        let mut improved = true;
        let mut iteration = 0;

        while improved && iteration < self.max_iterations {
            improved = false;
            let mut gains = Vec::new();
            let mut swapped = HashSet::new();

            // Calculate gains for all possible swaps
            for u in 0..num_vars {
                if swapped.contains(&u) {
                    continue;
                }

                let u_part = partition.assignment[&u];

                for v in (u + 1)..num_vars {
                    if swapped.contains(&v) {
                        continue;
                    }

                    let v_part = partition.assignment[&v];

                    if u_part != v_part {
                        // Calculate gain from swapping u and v
                        let gain = calculate_swap_gain(&adj, &partition.assignment, u, v);
                        gains.push((gain, u, v));
                    }
                }
            }

            // Sort by gain (descending)
            gains.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal));

            // Apply best swaps
            let mut cumulative_gain = 0.0;
            let mut best_gain = 0.0;
            let mut best_swaps = Vec::new();
            let mut temp_swaps = Vec::new();

            for (gain, u, v) in gains {
                if swapped.contains(&u) || swapped.contains(&v) {
                    continue;
                }

                cumulative_gain += gain;
                temp_swaps.push((u, v));
                swapped.insert(u);
                swapped.insert(v);

                if cumulative_gain > best_gain {
                    best_gain = cumulative_gain;
                    best_swaps = temp_swaps.clone();
                }

                // Apply swap temporarily
                let u_part = partition.assignment[&u];
                let v_part = partition.assignment[&v];
                partition.assignment.insert(u, v_part);
                partition.assignment.insert(v, u_part);
            }

            // Revert to best configuration
            for (u, v) in &temp_swaps {
                if !best_swaps.contains(&(*u, *v)) {
                    let u_part = partition.assignment[u];
                    let v_part = partition.assignment[v];
                    partition.assignment.insert(*u, v_part);
                    partition.assignment.insert(*v, u_part);
                }
            }

            if best_gain > 1e-6 {
                improved = true;
            }

            iteration += 1;
        }

        // Calculate final quality
        partition.calculate_edge_cut(edges);

        Ok(partition)
    }
}

/// Calculate gain from swapping two nodes
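///
/// Uses the standard Kernighan-Lin gain: `gain(u, v) = D_u + D_v - 2 * w(u, v)`,
/// where `D_x` is the external minus internal incident weight of node `x` and
/// `w(u, v)` is the weight of the direct edge between them (0 if absent).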
fn calculate_swap_gain(
    adj: &HashMap<usize, Vec<(usize, f64)>>,
    assignment: &HashMap<usize, usize>,
    u: usize,
    v: usize,
) -> f64 {
    let u_part = assignment[&u];
    let v_part = assignment[&v];

    let mut gain = 0.0;

    // Calculate current cut contribution
    for &(neighbor, weight) in &adj[&u] {
        let n_part = assignment[&neighbor];
        if n_part == u_part {
            gain -= weight; // Internal edge becomes external
        } else {
            gain += weight; // External edge becomes internal
        }
    }

    for &(neighbor, weight) in &adj[&v] {
        let n_part = assignment[&neighbor];
        if n_part == v_part {
            gain -= weight; // Internal edge becomes external
        } else {
            gain += weight; // External edge becomes internal
        }
    }

    // Adjust for the direct edge between u and v: it stays cut after the swap,
    // but both loops above counted it as an external edge becoming internal.
    if let Some(&(_, weight)) = adj[&u].iter().find(|(n, _)| *n == v) {
        gain -= 2.0 * weight;
    }

    gain
}

/// Recursive bisection partitioner
pub struct RecursiveBisectionPartitioner {
    /// Base partitioner for bisection
    pub bisection_method: BipartitionMethod,
    /// Balance constraint (ratio of partition sizes; not currently enforced)
    pub balance_ratio: f64,
}

/// Method used for each bisection step of the recursive partitioner
#[derive(Clone)]
pub enum BipartitionMethod {
    Spectral(SpectralPartitioner),
    KernighanLin(KernighanLinPartitioner),
}

impl Default for RecursiveBisectionPartitioner {
    fn default() -> Self {
        Self {
            bisection_method: BipartitionMethod::Spectral(SpectralPartitioner::default()),
            balance_ratio: 1.1, // Allow 10% imbalance
        }
    }
}

impl RecursiveBisectionPartitioner {
    /// Partition graph into k parts using recursive bisection
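    ///
    /// # Example (illustrative)
    ///
    /// A minimal sketch, assuming the module path `quantrs2_anneal::partitioning`:
    ///
    /// ```ignore
    /// use quantrs2_anneal::partitioning::RecursiveBisectionPartitioner;
    ///
    /// // A 6-node path graph split into 3 parts.
    /// let edges: Vec<(usize, usize, f64)> =
    ///     (0..5).map(|i| (i, i + 1, 1.0)).collect();
    /// let partition = RecursiveBisectionPartitioner::default()
    ///     .partition(6, &edges, 3)
    ///     .expect("partitioning failed");
    /// assert_eq!(partition.num_partitions, 3);
    /// ```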
    pub fn partition(
        &self,
        num_vars: usize,
        edges: &[(usize, usize, f64)],
        num_partitions: usize,
    ) -> IsingResult<Partition> {
        if num_partitions == 1 {
            let mut partition = Partition::new(1);
            for i in 0..num_vars {
                partition.assignment.insert(i, 0);
            }
            return Ok(partition);
        }

        // Start with all variables in one partition
        let mut current_partitions = vec![HashSet::new()];
        for i in 0..num_vars {
            current_partitions[0].insert(i);
        }

        // Recursively bisect until we have enough partitions
        while current_partitions.len() < num_partitions {
            // Find largest partition to split
            let (largest_idx, _) = current_partitions
                .iter()
                .enumerate()
                .max_by_key(|(_, p)| p.len())
                .ok_or_else(|| {
                    IsingError::InvalidValue("No partitions available to split".to_string())
                })?;

            let to_split = current_partitions.remove(largest_idx);

            // Extract subgraph
            let subgraph_vars: Vec<usize> = to_split.iter().copied().collect();
            let var_map: HashMap<usize, usize> = subgraph_vars
                .iter()
                .enumerate()
                .map(|(new, &old)| (old, new))
                .collect();

            let subgraph_edges: Vec<(usize, usize, f64)> = edges
                .iter()
                .filter_map(|&(u, v, w)| {
                    if let (Some(&new_u), Some(&new_v)) = (var_map.get(&u), var_map.get(&v)) {
                        Some((new_u, new_v, w))
                    } else {
                        None
                    }
                })
                .collect();

            // Bisect the subgraph
            let bipartition = match &self.bisection_method {
                BipartitionMethod::Spectral(sp) => {
                    sp.partition_graph(subgraph_vars.len(), &subgraph_edges, 2)?
                }
                BipartitionMethod::KernighanLin(kl) => {
                    kl.bipartition(subgraph_vars.len(), &subgraph_edges)?
                }
            };

            // Create two new partitions
            let mut part0 = HashSet::new();
            let mut part1 = HashSet::new();

            for (i, &original_var) in subgraph_vars.iter().enumerate() {
                if bipartition.assignment[&i] == 0 {
                    part0.insert(original_var);
                } else {
                    part1.insert(original_var);
                }
            }

            current_partitions.push(part0);
            current_partitions.push(part1);
        }

        // Build final partition assignment
        let mut partition = Partition::new(num_partitions);
        for (part_id, part_vars) in current_partitions.iter().enumerate() {
            for &var in part_vars {
                partition.assignment.insert(var, part_id);
            }
        }

        // Calculate quality
        partition.calculate_edge_cut(edges);

        Ok(partition)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_spectral_bipartition() {
        // Create a simple graph: 0-1-2-3 (path)
        let edges = vec![(0, 1, 1.0), (1, 2, 1.0), (2, 3, 1.0)];

        let partitioner = SpectralPartitioner::default();
        let partition = partitioner
            .partition_graph(4, &edges, 2)
            .expect("partition_graph should succeed for valid input");

        // Check that we have a valid bipartition
        assert_eq!(partition.num_partitions, 2);
        assert_eq!(partition.assignment.len(), 4);

        // The optimal cut should separate the path in the middle
        let cut = partition.quality;
        println!("Spectral partition cut: {}", cut);
        println!("Partition assignment: {:?}", partition.assignment);
        // For a path graph, any bipartition will cut at least one edge
        assert!(cut >= 1.0);
    }

    #[test]
    fn test_kernighan_lin() {
        // Create a graph with two clear clusters
        let edges = vec![
            // Cluster 1
            (0, 1, 2.0),
            (0, 2, 2.0),
            (1, 2, 2.0),
            // Cluster 2
            (3, 4, 2.0),
            (3, 5, 2.0),
            (4, 5, 2.0),
            // Weak connection between clusters
            (2, 3, 0.5),
        ];

        let partitioner = KernighanLinPartitioner::default();
        let partition = partitioner
            .bipartition(6, &edges)
            .expect("bipartition should succeed for valid input");

        // Should find the natural clustering
        assert_eq!(partition.num_partitions, 2);
        assert_eq!(partition.assignment.len(), 6);

        // The cut should be small (ideally just the weak edge)
        assert!(partition.quality < 1.0);
    }

    #[test]
    fn test_recursive_bisection() {
        // Create a grid graph
        let mut edges = Vec::new();
        let n = 4; // 4x4 grid

        for i in 0..n {
            for j in 0..n {
                let node = i * n + j;
                // Right neighbor
                if j < n - 1 {
                    edges.push((node, node + 1, 1.0));
                }
                // Bottom neighbor
                if i < n - 1 {
                    edges.push((node, node + n, 1.0));
                }
            }
        }

        let partitioner = RecursiveBisectionPartitioner::default();
        let partition = partitioner
            .partition(n * n, &edges, 4)
            .expect("partition should succeed for valid grid graph");

        // Should create 4 partitions
        assert_eq!(partition.num_partitions, 4);
        assert_eq!(partition.assignment.len(), n * n);

        // Each partition should have roughly equal size
        let mut sizes = vec![0; 4];
        for &part in partition.assignment.values() {
            sizes[part] += 1;
        }

        println!("Partition sizes: {:?}", sizes);
        for size in sizes {
            assert!(size >= 2 && size <= 6); // Allow more imbalance for small graphs
        }
    }
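
    // Added example (not part of the original suite): exercises the `Partition`
    // helpers directly on a tiny triangle graph to illustrate how edge cuts and
    // group lookups behave.
    #[test]
    fn test_partition_edge_cut_helpers() {
        let edges = vec![(0, 1, 1.0), (1, 2, 1.0), (0, 2, 1.0)];

        // Put variable 2 alone in partition 1; the other two stay in partition 0.
        let mut partition = Partition::new(2);
        partition.assignment.insert(0, 0);
        partition.assignment.insert(1, 0);
        partition.assignment.insert(2, 1);

        // Two of the three triangle edges cross the cut.
        let cut = partition.calculate_edge_cut(&edges);
        assert!((cut - 2.0).abs() < 1e-12);
        assert!((partition.quality - 2.0).abs() < 1e-12);

        // get_partition returns the variables in each group.
        let mut group0 = partition.get_partition(0);
        group0.sort_unstable();
        assert_eq!(group0, vec![0, 1]);
        assert_eq!(partition.get_partition(1), vec![2]);
    }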
}