ruvector_mincut/parallel/mod.rs

#![allow(missing_docs)]
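//! Parallel min-cut coordination across up to 256 logical cores: geometric
//! per-core lambda ranges, a cache-line-aligned shared coordinator, vertex
//! distribution strategies, per-core executors, and result aggregation.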
use crate::compact::{
    BitSet256, CompactCoreState, CompactEdge, CompactVertexId, CompactWitness, CoreResult,
    MAX_EDGES_PER_CORE,
};
use core::sync::atomic::{AtomicU16, AtomicU8, Ordering};

#[cfg(feature = "wasm")]
use crate::wasm::simd::{simd_boundary_size, simd_popcount};

/// Scalar fallback for the wasm SIMD popcount: counts set bits across the
/// four 64-bit words of a `BitSet256`.
#[cfg(not(feature = "wasm"))]
#[inline]
fn simd_popcount(bits: &[u64; 4]) -> u32 {
    bits.iter().map(|b| b.count_ones()).sum()
}

/// Scalar fallback for the wasm SIMD boundary count: an edge crosses the
/// boundary when exactly one endpoint lies inside `set_a`.
#[cfg(not(feature = "wasm"))]
#[inline]
fn simd_boundary_size(set_a: &BitSet256, edges: &[(CompactVertexId, CompactVertexId)]) -> u16 {
    let mut count = 0u16;
    for &(src, tgt) in edges {
        let src_in = set_a.contains(src);
        let tgt_in = set_a.contains(tgt);
        if src_in != tgt_in {
            count += 1;
        }
    }
    count
}

/// Number of logical cores the search is distributed across.
pub const NUM_CORES: usize = 256;

/// Number of lambda ranges handled by each core.
pub const RANGES_PER_CORE: usize = 1;

/// Total number of lambda ranges across all cores.
pub const TOTAL_RANGES: usize = NUM_CORES * RANGES_PER_CORE;

/// Geometric growth factor between consecutive per-core lambda ranges.
pub const RANGE_FACTOR: f32 = 1.2;

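/// Strategy used to divide min-cut work among the cores.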
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum CoreStrategy {
    /// Every core sees the whole graph but owns one geometric lambda range.
    GeometricRanges = 0,
    /// Vertices are split into contiguous blocks, one block per core.
    GraphPartition = 1,
    /// Cores pull work items from a shared queue and steal from idle peers.
    WorkStealing = 2,
}

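/// Fixed-layout inter-core message: a type tag, the sending core, and a
/// 16-bit payload.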
#[derive(Clone, Copy)]
#[repr(C)]
pub struct CoreMessage {
    pub msg_type: u8,
    pub src_core: u8,
    pub payload: u16,
}

impl CoreMessage {
    pub const TYPE_IDLE: u8 = 0;
    pub const TYPE_WORK_REQUEST: u8 = 1;
    pub const TYPE_WORK_AVAILABLE: u8 = 2;
    pub const TYPE_RESULT: u8 = 3;
    pub const TYPE_SYNC: u8 = 4;
    pub const TYPE_STEAL_REQUEST: u8 = 5;
    pub const TYPE_STEAL_RESPONSE: u8 = 6;
}

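/// A schedulable unit of work: one lambda range with its priority and status.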
#[derive(Clone, Copy, Default)]
#[repr(C)]
pub struct WorkItem {
    pub range_idx: u16,
    pub priority: u8,
    pub status: u8,
}

impl WorkItem {
    pub const STATUS_PENDING: u8 = 0;
    pub const STATUS_IN_PROGRESS: u8 = 1;
    pub const STATUS_COMPLETE: u8 = 2;
}

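/// Shared coordination state, padded and aligned to occupy its own 64-byte
/// cache line.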
#[repr(C, align(64))]
pub struct SharedCoordinator {
    pub global_min_cut: AtomicU16,
    pub completed_cores: AtomicU16,
    pub phase: AtomicU8,
    pub queue_head: AtomicU16,
    pub queue_tail: AtomicU16,
    pub best_core: AtomicU8,
    _pad: [u8; 52],
}

impl SharedCoordinator {
    pub const PHASE_INIT: u8 = 0;
    pub const PHASE_DISTRIBUTE: u8 = 1;
    pub const PHASE_COMPUTE: u8 = 2;
    pub const PHASE_COLLECT: u8 = 3;
    pub const PHASE_DONE: u8 = 4;

    pub fn new() -> Self {
        Self {
            global_min_cut: AtomicU16::new(u16::MAX),
            completed_cores: AtomicU16::new(0),
            phase: AtomicU8::new(Self::PHASE_INIT),
            queue_head: AtomicU16::new(0),
            queue_tail: AtomicU16::new(0),
            best_core: AtomicU8::new(0),
            _pad: [0; 52],
        }
    }

    /// Attempt to lower the global minimum cut. Returns `true` if `new_min`
    /// was strictly smaller and this core was recorded as the best core.
    pub fn try_update_min(&self, new_min: u16, core_id: u8) -> bool {
        // CAS loop: only a strictly smaller value may be published.
        let mut current = self.global_min_cut.load(Ordering::Acquire);
        loop {
            if new_min >= current {
                return false;
            }
            match self.global_min_cut.compare_exchange_weak(
                current,
                new_min,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    self.best_core.store(core_id, Ordering::Release);
                    return true;
                }
                Err(c) => current = c,
            }
        }
    }

    /// Record that one more core has finished; returns the new completion count.
    pub fn mark_completed(&self) -> u16 {
        self.completed_cores.fetch_add(1, Ordering::AcqRel) + 1
    }

    /// `true` once every core has marked itself completed.
    pub fn all_completed(&self) -> bool {
        self.completed_cores.load(Ordering::Acquire) >= NUM_CORES as u16
    }
}

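/// Inclusive lambda range for a core: (floor(1.2^i), floor(1.2^(i+1))), each
/// clamped to at least 1. Core 0 covers (1, 1) and core 10 covers (6, 7).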
#[inline]
pub fn compute_core_range(core_id: u8) -> (u16, u16) {
    let i = core_id as u32;
    let lambda_min = (RANGE_FACTOR.powi(i as i32)).floor() as u16;
    let lambda_max = (RANGE_FACTOR.powi((i + 1) as i32)).floor() as u16;
    (lambda_min.max(1), lambda_max.max(1))
}

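/// Maps vertices (and vertex ranges) to cores according to a [`CoreStrategy`].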
pub struct CoreDistributor {
    pub strategy: CoreStrategy,
    pub num_vertices: u16,
    pub num_edges: u16,
}

impl CoreDistributor {
    pub fn new(strategy: CoreStrategy, num_vertices: u16, num_edges: u16) -> Self {
        Self {
            strategy,
            num_vertices,
            num_edges,
        }
    }

    #[inline]
    pub fn vertex_to_core(&self, v: CompactVertexId) -> u8 {
        match self.strategy {
            CoreStrategy::GeometricRanges => {
                // Every core works on the full graph; ownership is by lambda range.
                0
            }
            CoreStrategy::GraphPartition => {
                // Contiguous block partition; max(1) guards against an empty graph.
                ((v as u32 * NUM_CORES as u32) / (self.num_vertices as u32).max(1)) as u8
            }
            CoreStrategy::WorkStealing => {
                // Assignment is decided dynamically through the work queue.
                0
            }
        }
    }

    pub fn core_vertex_range(&self, core_id: u8) -> (CompactVertexId, CompactVertexId) {
        match self.strategy {
            CoreStrategy::GeometricRanges => (0, self.num_vertices),
            CoreStrategy::GraphPartition => {
                let n = self.num_vertices as u32;
                let start = (core_id as u32 * n) / NUM_CORES as u32;
                let end = ((core_id as u32 + 1) * n) / NUM_CORES as u32;
                (start as u16, end as u16)
            }
            CoreStrategy::WorkStealing => (0, self.num_vertices),
        }
    }
}

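/// Per-core execution context: a compact local graph, the core's lambda
/// range, and an optional handle to the shared coordinator.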
pub struct CoreExecutor<'a> {
    pub core_id: u8,
    pub state: CompactCoreState,
    pub coordinator: Option<&'a SharedCoordinator>,
}

impl<'a> CoreExecutor<'a> {
    pub fn init(core_id: u8, coordinator: Option<&'a SharedCoordinator>) -> Self {
        let (lambda_min, lambda_max) = compute_core_range(core_id);

        let state = CompactCoreState {
            adjacency: Default::default(),
            edges: [CompactEdge::default(); MAX_EDGES_PER_CORE],
            num_vertices: 0,
            num_edges: 0,
            min_cut: u16::MAX,
            best_witness: CompactWitness::default(),
            lambda_min,
            lambda_max,
            core_id,
            status: CompactCoreState::STATUS_IDLE,
        };

        Self {
            core_id,
            state,
            coordinator,
        }
    }

    /// Append an edge; silently ignored once the per-core edge buffer is full.
    pub fn add_edge(&mut self, src: CompactVertexId, tgt: CompactVertexId, weight: u16) {
        if self.state.num_edges as usize >= MAX_EDGES_PER_CORE {
            return;
        }

        let idx = self.state.num_edges as usize;
        self.state.edges[idx] = CompactEdge {
            source: src,
            target: tgt,
            weight,
            flags: CompactEdge::FLAG_ACTIVE,
        };
        self.state.num_edges += 1;

        self.state.num_vertices = self.state.num_vertices.max(src + 1).max(tgt + 1);
    }

    /// Run this core's share of the search and report its result.
    pub fn process(&mut self) -> CoreResult {
        self.state.status = CompactCoreState::STATUS_PROCESSING;

        // The minimum weighted degree is an upper bound on the minimum cut.
        let mut min_degree = u16::MAX;
        let mut min_vertex = 0u16;

        for v in 0..self.state.num_vertices {
            let degree = self.compute_degree(v);
            if degree > 0 && degree < min_degree {
                min_degree = degree;
                min_vertex = v;
            }
        }

        // Only record the candidate if it falls inside this core's lambda range.
        if min_degree >= self.state.lambda_min && min_degree <= self.state.lambda_max {
            self.state.min_cut = min_degree;

            let mut membership = BitSet256::new();
            membership.insert(min_vertex);
            self.state.best_witness = CompactWitness::new(min_vertex, membership, min_degree);

            if let Some(coord) = self.coordinator {
                coord.try_update_min(min_degree, self.core_id);
            }
        }

        self.state.status = CompactCoreState::STATUS_DONE;

        if let Some(coord) = self.coordinator {
            coord.mark_completed();
        }

        CoreResult {
            core_id: self.core_id,
            status: self.state.status,
            min_cut: self.state.min_cut,
            witness_hash: self.state.best_witness.hash,
            witness_seed: self.state.best_witness.seed,
            witness_cardinality: self.state.best_witness.cardinality,
            witness_boundary: self.state.best_witness.boundary_size,
            padding: [0; 4],
        }
    }

    /// Weighted degree of `v` over the active edges.
    fn compute_degree(&self, v: CompactVertexId) -> u16 {
        let mut degree = 0u16;
        for i in 0..self.state.num_edges as usize {
            let edge = &self.state.edges[i];
            if edge.is_active() && (edge.source == v || edge.target == v) {
                degree = degree.saturating_add(edge.weight);
            }
        }
        degree
    }

    /// Boundary size of `set` over the active edges, using SIMD when available.
    #[inline]
    pub fn compute_boundary_simd(&self, set: &BitSet256) -> u16 {
        let edges: Vec<(CompactVertexId, CompactVertexId)> = self.state.edges
            [..self.state.num_edges as usize]
            .iter()
            .filter(|e| e.is_active())
            .map(|e| (e.source, e.target))
            .collect();

        simd_boundary_size(set, &edges)
    }

    /// Number of vertices in `set`, using SIMD popcount when available.
    #[inline]
    pub fn membership_count_simd(&self, set: &BitSet256) -> u32 {
        simd_popcount(&set.bits)
    }
}

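/// Collects per-core results and tracks the smallest cut seen so far.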
pub struct ResultAggregator {
    pub results: [CoreResult; NUM_CORES],
    pub best_idx: usize,
    pub global_min: u16,
}

impl ResultAggregator {
    pub fn new() -> Self {
        Self {
            results: [CoreResult::default(); NUM_CORES],
            best_idx: 0,
            global_min: u16::MAX,
        }
    }

    pub fn add_result(&mut self, result: CoreResult) {
        let idx = result.core_id as usize;
        self.results[idx] = result;

        if result.min_cut < self.global_min {
            self.global_min = result.min_cut;
            self.best_idx = idx;
        }
    }

    pub fn best_result(&self) -> &CoreResult {
        &self.results[self.best_idx]
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_compute_core_range() {
        let (min0, max0) = compute_core_range(0);
        assert_eq!(min0, 1);
        assert_eq!(max0, 1);

        let (min10, max10) = compute_core_range(10);
        assert_eq!(min10, 6);
        assert_eq!(max10, 7);
    }

    #[test]
    fn test_shared_coordinator() {
        let coord = SharedCoordinator::new();

        assert!(coord.try_update_min(100, 0));
        assert_eq!(coord.global_min_cut.load(Ordering::Acquire), 100);

        assert!(coord.try_update_min(50, 1));
        assert_eq!(coord.global_min_cut.load(Ordering::Acquire), 50);

        // A larger value must be rejected and leave the minimum untouched.
        assert!(!coord.try_update_min(60, 2));
        assert_eq!(coord.global_min_cut.load(Ordering::Acquire), 50);
    }

    #[test]
    fn test_core_executor() {
        let coord = SharedCoordinator::new();
        let mut exec = CoreExecutor::init(0, Some(&coord));

        exec.add_edge(0, 1, 1);
        exec.add_edge(1, 2, 1);

        let result = exec.process();
        assert_eq!(result.core_id, 0);
    }

    #[test]
    fn test_result_aggregator() {
        let mut agg = ResultAggregator::new();

        agg.add_result(CoreResult {
            core_id: 0,
            min_cut: 100,
            ..Default::default()
        });

        agg.add_result(CoreResult {
            core_id: 1,
            min_cut: 50,
            ..Default::default()
        });

        assert_eq!(agg.global_min, 50);
        assert_eq!(agg.best_idx, 1);
    }
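
    // Sanity check of CoreDistributor's GraphPartition mapping, assuming a
    // 256-vertex graph so that each of the 256 cores owns exactly one vertex.
    #[test]
    fn test_core_distributor_graph_partition() {
        let dist = CoreDistributor::new(CoreStrategy::GraphPartition, 256, 0);

        // Vertex v maps to core v when the vertex and core counts are both 256.
        assert_eq!(dist.vertex_to_core(0), 0);
        assert_eq!(dist.vertex_to_core(255), 255);

        // Each core's contiguous block contains exactly one vertex.
        assert_eq!(dist.core_vertex_range(0), (0, 1));
        assert_eq!(dist.core_vertex_range(255), (255, 256));
    }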
}