//! Lazy level materialization: a bounded pool of per-level data with LRU
//! eviction, compact CSR-style adjacency storage, and vertex id compaction.

use crate::graph::VertexId;
use std::collections::{HashMap, VecDeque};
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::RwLock;

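/// Configuration for [`LevelPool`].
///
/// `max_materialized_levels` caps how many levels stay materialized before the
/// least recently used one is evicted; `lazy_dealloc` recycles evicted level
/// data through an internal free list instead of dropping it; `memory_budget`
/// (in bytes, `0` meaning unbounded) triggers additional evictions whenever
/// total pool memory exceeds it.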
#[derive(Debug, Clone)]
pub struct PoolConfig {
    pub max_materialized_levels: usize,
    pub eviction_threshold: u64,
    pub prealloc_size: usize,
    pub lazy_dealloc: bool,
    pub memory_budget: usize,
}

impl Default for PoolConfig {
    fn default() -> Self {
        Self {
            max_materialized_levels: 16,
            eviction_threshold: 100,
            prealloc_size: 1024,
            lazy_dealloc: true,
            memory_budget: 0,
        }
    }
}

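/// Snapshot of pool activity counters, as returned by [`LevelPool::stats`].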
#[derive(Debug, Clone, Default)]
pub struct PoolStats {
    pub allocations: u64,
    pub deallocations: u64,
    pub pool_size_bytes: usize,
    pub materialized_levels: usize,
    pub evictions: u64,
    pub peak_memory: usize,
}

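/// Lifecycle state of a single level in the pool: never built
/// (`Unmaterialized`), built and clean (`Materialized`), built with pending
/// changes (`Dirty`), or dropped from memory while remembering how many
/// vertices it previously held (`Evicted`).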
#[derive(Debug, Clone)]
pub enum LazyLevel {
    Unmaterialized,
    Materialized(LevelData),
    Dirty(LevelData),
    Evicted {
        last_vertex_count: usize,
    },
}

impl LazyLevel {
    pub fn is_materialized(&self) -> bool {
        matches!(self, LazyLevel::Materialized(_) | LazyLevel::Dirty(_))
    }

    pub fn is_dirty(&self) -> bool {
        matches!(self, LazyLevel::Dirty(_))
    }

    pub fn data(&self) -> Option<&LevelData> {
        match self {
            LazyLevel::Materialized(data) | LazyLevel::Dirty(data) => Some(data),
            _ => None,
        }
    }

    pub fn data_mut(&mut self) -> Option<&mut LevelData> {
        match self {
            LazyLevel::Materialized(data) | LazyLevel::Dirty(data) => Some(data),
            _ => None,
        }
    }
}

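/// Materialized contents of one level: its compact vertex list, adjacency in
/// CSR form, the cut value for the level, and bookkeeping for LRU ordering and
/// memory accounting.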
#[derive(Debug, Clone)]
pub struct LevelData {
    pub level: usize,
    pub vertices: Vec<u16>,
    pub adjacency: CompactAdjacency,
    pub cut_value: f64,
    last_access: u64,
    memory_size: usize,
}

impl LevelData {
    pub fn new(level: usize, capacity: usize) -> Self {
        Self {
            level,
            vertices: Vec::with_capacity(capacity),
            adjacency: CompactAdjacency::new(capacity),
            cut_value: f64::INFINITY,
            last_access: 0,
            memory_size: 0,
        }
    }

    pub fn update_memory_size(&mut self) {
        self.memory_size =
            self.vertices.len() * std::mem::size_of::<u16>() + self.adjacency.memory_size();
    }

    pub fn memory_size(&self) -> usize {
        self.memory_size
    }
}

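/// Undirected adjacency in compressed sparse row (CSR) form:
/// `offsets[v]..offsets[v + 1]` indexes into `neighbors`, where each entry is
/// a `(neighbor, weight)` pair stored as `u16`s.
///
/// A minimal usage sketch (marked `ignore` because the import path depends on
/// where this module lives in the crate):
///
/// ```ignore
/// // Three weighted edges over vertices {0, 1, 2}; each edge is inserted in
/// // both directions, so every vertex ends up with degree 2.
/// let edges = [(0u16, 1u16, 5u16), (1, 2, 7), (2, 0, 9)];
/// let adj = CompactAdjacency::from_edges(&edges, 3);
/// assert_eq!(adj.num_vertices(), 3);
/// assert_eq!(adj.degree(1), 2);
/// assert_eq!(adj.neighbors(0), &[(1, 5), (2, 9)]);
/// ```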
#[derive(Debug, Clone)]
pub struct CompactAdjacency {
    offsets: Vec<u32>,
    neighbors: Vec<(u16, u16)>,
}

impl CompactAdjacency {
    pub fn new(capacity: usize) -> Self {
        Self {
            offsets: Vec::with_capacity(capacity + 1),
            neighbors: Vec::new(),
        }
    }

    pub fn from_edges(edges: &[(u16, u16, u16)], num_vertices: usize) -> Self {
        let mut adj: Vec<Vec<(u16, u16)>> = vec![Vec::new(); num_vertices];

        for &(u, v, w) in edges {
            adj[u as usize].push((v, w));
            adj[v as usize].push((u, w));
        }

        let mut offsets = Vec::with_capacity(num_vertices + 1);
        let mut neighbors = Vec::new();

        offsets.push(0);
        for vertex_neighbors in &adj {
            neighbors.extend_from_slice(vertex_neighbors);
            offsets.push(neighbors.len() as u32);
        }

        Self { offsets, neighbors }
    }

    pub fn neighbors(&self, v: u16) -> &[(u16, u16)] {
        let idx = v as usize;
        if idx + 1 >= self.offsets.len() {
            return &[];
        }
        let start = self.offsets[idx] as usize;
        let end = self.offsets[idx + 1] as usize;
        &self.neighbors[start..end]
    }

    pub fn degree(&self, v: u16) -> usize {
        let idx = v as usize;
        if idx + 1 >= self.offsets.len() {
            return 0;
        }
        (self.offsets[idx + 1] - self.offsets[idx]) as usize
    }

    pub fn memory_size(&self) -> usize {
        self.offsets.len() * std::mem::size_of::<u32>()
            + self.neighbors.len() * std::mem::size_of::<(u16, u16)>()
    }

    pub fn num_vertices(&self) -> usize {
        if self.offsets.is_empty() {
            0
        } else {
            self.offsets.len() - 1
        }
    }
}

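/// Thread-safe pool of lazily materialized levels.
///
/// Level state lives behind `RwLock`s, access order is tracked in an LRU
/// queue, and atomic counters keep allocation/eviction/memory statistics.
/// Materializing more than `max_materialized_levels` (or exceeding the
/// configured `memory_budget`) evicts the least recently used level first.
///
/// A minimal usage sketch (marked `ignore` because the import path depends on
/// where this module lives in the crate):
///
/// ```ignore
/// let pool = LevelPool::with_config(PoolConfig {
///     max_materialized_levels: 4,
///     ..Default::default()
/// });
///
/// // Build (or reuse) a level buffer, fill it, then hand it to the pool.
/// let mut data = pool.allocate_level(0, 16);
/// data.vertices.extend([0u16, 1, 2]);
/// data.update_memory_size();
/// pool.materialize(0, data);
///
/// assert!(pool.is_materialized(0));
/// assert_eq!(pool.stats().materialized_levels, 1);
/// ```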
pub struct LevelPool {
    config: PoolConfig,
    levels: RwLock<HashMap<usize, LazyLevel>>,
    lru_order: RwLock<VecDeque<usize>>,
    operation_counter: AtomicU64,
    memory_usage: AtomicUsize,
    allocations: AtomicU64,
    deallocations: AtomicU64,
    evictions: AtomicU64,
    peak_memory: AtomicUsize,
    free_list: RwLock<Vec<LevelData>>,
}

impl LevelPool {
    pub fn new() -> Self {
        Self::with_config(PoolConfig::default())
    }

    pub fn with_config(config: PoolConfig) -> Self {
        Self {
            config,
            levels: RwLock::new(HashMap::new()),
            lru_order: RwLock::new(VecDeque::new()),
            operation_counter: AtomicU64::new(0),
            memory_usage: AtomicUsize::new(0),
            allocations: AtomicU64::new(0),
            deallocations: AtomicU64::new(0),
            evictions: AtomicU64::new(0),
            peak_memory: AtomicUsize::new(0),
            free_list: RwLock::new(Vec::new()),
        }
    }

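    /// Returns a clone of the level's current state, refreshing its position
    /// in the LRU order. Returns `None` if the pool has no entry for
    /// `level_idx`.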
    pub fn get_level(&self, level_idx: usize) -> Option<LazyLevel> {
        self.touch(level_idx);

        let levels = self.levels.read().unwrap();
        levels.get(&level_idx).cloned()
    }

    pub fn is_materialized(&self, level_idx: usize) -> bool {
        let levels = self.levels.read().unwrap();
        levels
            .get(&level_idx)
            .map(|l| l.is_materialized())
            .unwrap_or(false)
    }

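    /// Installs `data` as the materialized state of `level_idx`, evicting the
    /// least recently used level first if the pool is at capacity, and updates
    /// memory and peak-memory accounting.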
    pub fn materialize(&self, level_idx: usize, data: LevelData) {
        self.ensure_capacity();

        let memory_size = data.memory_size();
        self.memory_usage.fetch_add(memory_size, Ordering::Relaxed);

        let current = self.memory_usage.load(Ordering::Relaxed);
        let peak = self.peak_memory.load(Ordering::Relaxed);
        if current > peak {
            self.peak_memory.store(current, Ordering::Relaxed);
        }

        let mut levels = self.levels.write().unwrap();
        levels.insert(level_idx, LazyLevel::Materialized(data));

        let mut lru = self.lru_order.write().unwrap();
        lru.retain(|&l| l != level_idx);
        lru.push_back(level_idx);

        self.allocations.fetch_add(1, Ordering::Relaxed);
    }

    pub fn mark_dirty(&self, level_idx: usize) {
        let mut levels = self.levels.write().unwrap();
        if let Some(level) = levels.get_mut(&level_idx) {
            if let LazyLevel::Materialized(data) = level.clone() {
                *level = LazyLevel::Dirty(data);
            }
        }
    }

    pub fn mark_clean(&self, level_idx: usize) {
        let mut levels = self.levels.write().unwrap();
        if let Some(level) = levels.get_mut(&level_idx) {
            if let LazyLevel::Dirty(data) = level.clone() {
                *level = LazyLevel::Materialized(data);
            }
        }
    }

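    /// Drops a level's materialized data, replacing it with
    /// `LazyLevel::Evicted`. When `lazy_dealloc` is enabled, the data is kept
    /// in a small free list so `allocate_level` can reuse it later.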
    pub fn evict(&self, level_idx: usize) {
        let mut levels = self.levels.write().unwrap();

        if let Some(level) = levels.get(&level_idx) {
            let last_vertex_count = level.data().map(|d| d.vertices.len()).unwrap_or(0);

            let memory_freed = level.data().map(|d| d.memory_size()).unwrap_or(0);

            if self.config.lazy_dealloc {
                if let Some(data) = level.data().cloned() {
                    let mut free_list = self.free_list.write().unwrap();
                    if free_list.len() < 10 {
                        free_list.push(data);
                    }
                }
            }

            levels.insert(level_idx, LazyLevel::Evicted { last_vertex_count });

            self.memory_usage.fetch_sub(memory_freed, Ordering::Relaxed);
            self.evictions.fetch_add(1, Ordering::Relaxed);
            self.deallocations.fetch_add(1, Ordering::Relaxed);
        }

        let mut lru = self.lru_order.write().unwrap();
        lru.retain(|&l| l != level_idx);
    }

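    /// Evicts the least recently used level when the materialized-level cap
    /// has been reached, then keeps evicting while the memory budget (if any)
    /// is exceeded. Called before every `materialize`.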
    fn ensure_capacity(&self) {
        let levels = self.levels.read().unwrap();
        let materialized_count = levels.values().filter(|l| l.is_materialized()).count();
        drop(levels);

        if materialized_count >= self.config.max_materialized_levels {
            let lru = self.lru_order.read().unwrap();
            if let Some(&evict_idx) = lru.front() {
                drop(lru);
                self.evict(evict_idx);
            }
        }

        if self.config.memory_budget > 0 {
            while self.memory_usage.load(Ordering::Relaxed) > self.config.memory_budget {
                let lru = self.lru_order.read().unwrap();
                if let Some(&evict_idx) = lru.front() {
                    drop(lru);
                    self.evict(evict_idx);
                } else {
                    break;
                }
            }
        }
    }

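    /// Records an access to `level_idx`: stamps its data with the current
    /// operation count and moves it to the most recently used end of the LRU
    /// queue.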
    fn touch(&self, level_idx: usize) {
        let timestamp = self.operation_counter.fetch_add(1, Ordering::Relaxed);

        let mut levels = self.levels.write().unwrap();
        if let Some(level) = levels.get_mut(&level_idx) {
            if let Some(data) = level.data_mut() {
                data.last_access = timestamp;
            }
        } else {
            // Unknown level: don't record it in the LRU order, otherwise later
            // capacity checks could waste an eviction on a non-existent entry.
            return;
        }
        drop(levels);

        let mut lru = self.lru_order.write().unwrap();
        lru.retain(|&l| l != level_idx);
        lru.push_back(level_idx);
    }

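    /// Returns a `LevelData` for `level_idx`, reusing an entry from the free
    /// list when one is available and allocating a fresh one otherwise.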
    pub fn allocate_level(&self, level_idx: usize, capacity: usize) -> LevelData {
        let mut free_list = self.free_list.write().unwrap();
        if let Some(mut data) = free_list.pop() {
            // Reset the recycled entry completely so no stale adjacency or
            // bookkeeping from its previous level leaks into the new one.
            data.level = level_idx;
            data.vertices.clear();
            data.adjacency = CompactAdjacency::new(capacity);
            data.cut_value = f64::INFINITY;
            data.last_access = 0;
            data.memory_size = 0;
            return data;
        }
        drop(free_list);

        LevelData::new(level_idx, capacity)
    }

    pub fn stats(&self) -> PoolStats {
        let levels = self.levels.read().unwrap();
        let materialized_count = levels.values().filter(|l| l.is_materialized()).count();

        PoolStats {
            allocations: self.allocations.load(Ordering::Relaxed),
            deallocations: self.deallocations.load(Ordering::Relaxed),
            pool_size_bytes: self.memory_usage.load(Ordering::Relaxed),
            materialized_levels: materialized_count,
            evictions: self.evictions.load(Ordering::Relaxed),
            peak_memory: self.peak_memory.load(Ordering::Relaxed),
        }
    }

    pub fn memory_usage(&self) -> usize {
        self.memory_usage.load(Ordering::Relaxed)
    }

    pub fn clear(&self) {
        let mut levels = self.levels.write().unwrap();
        levels.clear();

        let mut lru = self.lru_order.write().unwrap();
        lru.clear();

        self.memory_usage.store(0, Ordering::Relaxed);
    }
}

impl Default for LevelPool {
    fn default() -> Self {
        Self::new()
    }
}

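/// Bidirectional mapping between original `VertexId`s and dense `u16` ids used
/// inside [`LevelData`]. Ids are handed out in insertion order, so at most
/// `u16::MAX + 1` distinct vertices can be mapped per mapper.
///
/// A minimal usage sketch (marked `ignore` because the import path and the
/// concrete `VertexId` values live elsewhere in the crate; `vertex_a` and
/// `vertex_b` stand in for two distinct ids):
///
/// ```ignore
/// let mut mapper = CompactVertexMapper::new();
/// let a = mapper.get_or_insert(vertex_a); // first vertex -> compact id 0
/// let b = mapper.get_or_insert(vertex_b); // second vertex -> compact id 1
/// assert_eq!(mapper.get_or_insert(vertex_a), a); // re-inserting is idempotent
/// assert_eq!(mapper.to_original(b), Some(vertex_b));
/// ```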
pub struct CompactVertexMapper {
    to_compact: HashMap<VertexId, u16>,
    to_original: Vec<VertexId>,
    next_id: u16,
}

impl CompactVertexMapper {
    pub fn new() -> Self {
        Self {
            to_compact: HashMap::new(),
            to_original: Vec::new(),
            next_id: 0,
        }
    }

    pub fn from_vertices(vertices: &[VertexId]) -> Self {
        let mut mapper = Self::new();
        for &v in vertices {
            mapper.get_or_insert(v);
        }
        mapper
    }

    pub fn get_or_insert(&mut self, original: VertexId) -> u16 {
        if let Some(&compact) = self.to_compact.get(&original) {
            return compact;
        }

        let compact = self.next_id;
        self.next_id += 1;
        self.to_compact.insert(original, compact);
        self.to_original.push(original);
        compact
    }

    pub fn get(&self, original: VertexId) -> Option<u16> {
        self.to_compact.get(&original).copied()
    }

    pub fn to_original(&self, compact: u16) -> Option<VertexId> {
        self.to_original.get(compact as usize).copied()
    }

    pub fn len(&self) -> usize {
        self.to_original.len()
    }

    pub fn is_empty(&self) -> bool {
        self.to_original.is_empty()
    }
}

impl Default for CompactVertexMapper {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_lazy_level_states() {
        let level = LazyLevel::Unmaterialized;
        assert!(!level.is_materialized());

        let data = LevelData::new(0, 100);
        let level = LazyLevel::Materialized(data.clone());
        assert!(level.is_materialized());
        assert!(!level.is_dirty());

        let level = LazyLevel::Dirty(data);
        assert!(level.is_materialized());
        assert!(level.is_dirty());
    }

    #[test]
    fn test_compact_adjacency() {
        let edges = vec![(0u16, 1u16, 10u16), (1, 2, 20), (2, 0, 30)];

        let adj = CompactAdjacency::from_edges(&edges, 3);

        assert_eq!(adj.num_vertices(), 3);
        assert_eq!(adj.degree(0), 2);
        assert_eq!(adj.degree(1), 2);
        assert_eq!(adj.degree(2), 2);
    }

    #[test]
    fn test_level_pool_materialize() {
        let pool = LevelPool::new();

        let data = LevelData::new(0, 100);
        pool.materialize(0, data);

        assert!(pool.is_materialized(0));
        assert!(!pool.is_materialized(1));
    }

    #[test]
    fn test_level_pool_eviction() {
        let pool = LevelPool::with_config(PoolConfig {
            max_materialized_levels: 2,
            ..Default::default()
        });

        pool.materialize(0, LevelData::new(0, 100));
        pool.materialize(1, LevelData::new(1, 100));

        assert!(pool.is_materialized(0));
        assert!(pool.is_materialized(1));

        pool.materialize(2, LevelData::new(2, 100));

        assert!(!pool.is_materialized(0));
        assert!(pool.is_materialized(1));
        assert!(pool.is_materialized(2));
    }

    #[test]
    fn test_level_pool_dirty() {
        let pool = LevelPool::new();

        let data = LevelData::new(0, 100);
        pool.materialize(0, data);

        pool.mark_dirty(0);

        if let Some(LazyLevel::Dirty(_)) = pool.get_level(0) {
        } else {
            panic!("Level should be dirty");
        }

        pool.mark_clean(0);

        if let Some(LazyLevel::Materialized(_)) = pool.get_level(0) {
        } else {
            panic!("Level should be clean");
        }
    }

    #[test]
    fn test_compact_vertex_mapper() {
        let mut mapper = CompactVertexMapper::new();

        let c1 = mapper.get_or_insert(100);
        let c2 = mapper.get_or_insert(200);
        let c3 = mapper.get_or_insert(100);

        assert_eq!(c1, 0);
        assert_eq!(c2, 1);
        assert_eq!(c3, 0);

        assert_eq!(mapper.to_original(c1), Some(100));
        assert_eq!(mapper.to_original(c2), Some(200));
    }

    #[test]
    fn test_pool_stats() {
        let pool = LevelPool::new();

        let data = LevelData::new(0, 100);
        pool.materialize(0, data);

        let stats = pool.stats();
        assert_eq!(stats.allocations, 1);
        assert_eq!(stats.materialized_levels, 1);
    }

    #[test]
    fn test_level_data_memory_size() {
        let mut data = LevelData::new(0, 100);
        data.vertices = vec![0, 1, 2, 3, 4];
        data.update_memory_size();

        assert!(data.memory_size() > 0);
    }
}