1use crate::error::Result;
7use parking_lot::RwLock;
8use std::collections::BTreeMap;
9use std::sync::Arc;
10use std::time::{Duration, Instant};
11use wgpu::{Device, Queue};
12
/// Tracks GPU buffer sub-allocations and runs compaction passes when
/// fragmentation crosses the configured thresholds.
///
/// All shared state is behind `Arc<RwLock<…>>`, so the compactor itself
/// can be cloned-by-handle and used from multiple owners.
pub struct MemoryCompactor {
    // NOTE(review): device/queue are currently unused (hence the
    // dead_code allows) — presumably reserved for issuing real GPU copy
    // passes later; confirm before removing.
    #[allow(dead_code)]
    device: Arc<Device>,
    #[allow(dead_code)]
    queue: Arc<Queue>,
    /// Registry of tracked allocations, keyed by caller-supplied id.
    allocations: Arc<RwLock<AllocationMap>>,
    /// Thresholds, strategy, and rate limit controlling compaction.
    config: CompactionConfig,
    /// Cumulative counters across all compaction passes.
    stats: Arc<RwLock<CompactionStats>>,
}
25
26impl MemoryCompactor {
27 pub fn new(device: Arc<Device>, queue: Arc<Queue>, config: CompactionConfig) -> Self {
29 Self {
30 device,
31 queue,
32 allocations: Arc::new(RwLock::new(AllocationMap::new())),
33 config,
34 stats: Arc::new(RwLock::new(CompactionStats::default())),
35 }
36 }
37
38 pub fn register_allocation(&self, id: u64, offset: u64, size: u64, active: bool) {
40 let mut allocs = self.allocations.write();
41 allocs.insert(
42 id,
43 AllocationInfo {
44 offset,
45 size,
46 active,
47 last_access: Instant::now(),
48 },
49 );
50 }
51
52 pub fn unregister_allocation(&self, id: u64) {
54 let mut allocs = self.allocations.write();
55 allocs.remove(id);
56 }
57
58 pub fn detect_fragmentation(&self) -> FragmentationInfo {
60 let allocs = self.allocations.read();
61 let sorted = allocs.sorted_allocations();
62
63 if sorted.is_empty() {
64 return FragmentationInfo {
65 total_size: 0,
66 used_size: 0,
67 wasted_size: 0,
68 fragment_count: 0,
69 largest_fragment: 0,
70 fragmentation_ratio: 0.0,
71 };
72 }
73
74 let mut used_size = 0u64;
75 let mut wasted_size = 0u64;
76 let mut fragment_count = 0usize;
77 let mut largest_fragment = 0u64;
78 let mut last_end = 0u64;
79
80 for info in sorted.iter() {
81 if info.active {
82 let gap = info.offset.saturating_sub(last_end);
83
84 if gap > 0 {
85 wasted_size += gap;
86 fragment_count += 1;
87 largest_fragment = largest_fragment.max(gap);
88 }
89
90 used_size += info.size;
91 last_end = info.offset + info.size;
92 }
93 }
94
95 let total_size = last_end;
96
97 let fragmentation_ratio = if total_size > 0 {
98 wasted_size as f64 / total_size as f64
99 } else {
100 0.0
101 };
102
103 FragmentationInfo {
104 total_size,
105 used_size,
106 wasted_size,
107 fragment_count,
108 largest_fragment,
109 fragmentation_ratio,
110 }
111 }
112
113 pub fn needs_compaction(&self) -> bool {
115 let frag = self.detect_fragmentation();
116
117 frag.fragmentation_ratio > self.config.fragmentation_threshold
118 || frag.fragment_count > self.config.max_fragments
119 }
120
121 pub async fn compact(&self) -> Result<CompactionResult> {
123 let start = Instant::now();
124
125 let before = self.detect_fragmentation();
127
128 if !self.should_compact(&before) {
129 return Ok(CompactionResult {
130 success: false,
131 duration: start.elapsed(),
132 before: before.clone(),
133 after: before,
134 bytes_moved: 0,
135 allocations_moved: 0,
136 });
137 }
138
139 let result = match self.config.strategy {
141 CompactionStrategy::Copy => self.compact_by_copy().await?,
142 CompactionStrategy::InPlace => self.compact_in_place().await?,
143 CompactionStrategy::Hybrid => self.compact_hybrid().await?,
144 };
145
146 let mut stats = self.stats.write();
148 stats.total_compactions += 1;
149 stats.total_duration += result.duration;
150 stats.total_bytes_moved += result.bytes_moved;
151 stats.last_compaction = Some(Instant::now());
152
153 Ok(result)
154 }
155
156 fn should_compact(&self, frag: &FragmentationInfo) -> bool {
158 if frag.fragmentation_ratio < self.config.fragmentation_threshold {
159 return false;
160 }
161
162 let stats = self.stats.read();
164 if let Some(last) = stats.last_compaction {
165 if last.elapsed() < self.config.min_compact_interval {
166 return false;
167 }
168 }
169
170 true
171 }
172
173 async fn compact_by_copy(&self) -> Result<CompactionResult> {
175 let start = Instant::now();
176 let before = self.detect_fragmentation();
177
178 let allocs = self.allocations.read();
179 let sorted = allocs.sorted_allocations();
180
181 let mut bytes_moved = 0u64;
182 let mut allocations_moved = 0usize;
183
184 for info in sorted.iter() {
191 if info.active {
192 bytes_moved += info.size;
193 allocations_moved += 1;
194 }
195 }
196
197 let after = FragmentationInfo {
198 total_size: before.used_size,
199 used_size: before.used_size,
200 wasted_size: 0,
201 fragment_count: 0,
202 largest_fragment: 0,
203 fragmentation_ratio: 0.0,
204 };
205
206 Ok(CompactionResult {
207 success: true,
208 duration: start.elapsed(),
209 before,
210 after,
211 bytes_moved,
212 allocations_moved,
213 })
214 }
215
216 async fn compact_in_place(&self) -> Result<CompactionResult> {
218 let start = Instant::now();
219 let before = self.detect_fragmentation();
220
221 let bytes_moved = before.wasted_size;
225 let allocations_moved = before.fragment_count;
226
227 let after = FragmentationInfo {
228 total_size: before.used_size,
229 used_size: before.used_size,
230 wasted_size: 0,
231 fragment_count: 0,
232 largest_fragment: 0,
233 fragmentation_ratio: 0.0,
234 };
235
236 Ok(CompactionResult {
237 success: true,
238 duration: start.elapsed(),
239 before,
240 after,
241 bytes_moved,
242 allocations_moved,
243 })
244 }
245
246 async fn compact_hybrid(&self) -> Result<CompactionResult> {
248 let before = self.detect_fragmentation();
249
250 if before.fragmentation_ratio > 0.5 {
252 self.compact_by_copy().await
253 } else {
254 self.compact_in_place().await
255 }
256 }
257
258 pub fn get_stats(&self) -> CompactionStats {
260 self.stats.read().clone()
261 }
262
263 pub fn reset_stats(&self) {
265 let mut stats = self.stats.write();
266 *stats = CompactionStats::default();
267 }
268}
269
/// Metadata for a single tracked allocation.
#[derive(Debug, Clone)]
struct AllocationInfo {
    /// Byte offset of the allocation within the tracked arena.
    offset: u64,
    /// Size of the allocation in bytes.
    size: u64,
    /// Whether the allocation is live; inactive entries are treated as
    /// reclaimable by fragmentation analysis.
    active: bool,
    /// Set at registration time; not yet consulted by any policy in
    /// this file (hence the dead_code allow).
    #[allow(dead_code)]
    last_access: Instant,
}
280
/// Id-keyed registry of allocations backing `MemoryCompactor`.
struct AllocationMap {
    // Keyed by the caller-supplied allocation id, NOT by offset —
    // offset ordering is produced on demand by `sorted_allocations`.
    allocations: BTreeMap<u64, AllocationInfo>,
}
285
286impl AllocationMap {
287 fn new() -> Self {
288 Self {
289 allocations: BTreeMap::new(),
290 }
291 }
292
293 fn insert(&mut self, id: u64, info: AllocationInfo) {
294 self.allocations.insert(id, info);
295 }
296
297 fn remove(&mut self, id: u64) {
298 self.allocations.remove(&id);
299 }
300
301 fn sorted_allocations(&self) -> Vec<AllocationInfo> {
302 let mut allocs: Vec<_> = self.allocations.values().cloned().collect();
303 allocs.sort_by_key(|a| a.offset);
304 allocs
305 }
306}
307
/// Snapshot of arena fragmentation produced by
/// `MemoryCompactor::detect_fragmentation`.
#[derive(Debug, Clone)]
pub struct FragmentationInfo {
    /// High-water mark: end offset of the last active allocation.
    pub total_size: u64,
    /// Sum of the sizes of all active allocations.
    pub used_size: u64,
    /// Bytes in gaps between active allocations (includes space held
    /// only by inactive entries).
    pub wasted_size: u64,
    /// Number of distinct gaps found.
    pub fragment_count: usize,
    /// Size in bytes of the largest single gap.
    pub largest_fragment: u64,
    /// `wasted_size / total_size`, or 0.0 when the arena is empty.
    pub fragmentation_ratio: f64,
}
324
/// Outcome of a single `MemoryCompactor::compact` call.
#[derive(Debug, Clone)]
pub struct CompactionResult {
    /// False when the pass was skipped (below thresholds or rate-limited).
    pub success: bool,
    /// Wall-clock time of the pass.
    pub duration: Duration,
    /// Fragmentation measured before the pass.
    pub before: FragmentationInfo,
    /// Fragmentation after the pass (equals `before` when skipped).
    pub after: FragmentationInfo,
    /// Bytes the pass moved, as estimated by the chosen strategy.
    pub bytes_moved: u64,
    /// Allocations the pass moved, as estimated by the chosen strategy.
    pub allocations_moved: usize,
}
341
/// Tuning knobs for `MemoryCompactor`.
#[derive(Debug, Clone)]
pub struct CompactionConfig {
    /// Which compaction pass to run.
    pub strategy: CompactionStrategy,
    /// Wasted/total ratio at which compaction is triggered.
    pub fragmentation_threshold: f64,
    /// Fragment count above which compaction is triggered.
    pub max_fragments: usize,
    /// Minimum wall-clock time between executed compaction passes.
    pub min_compact_interval: Duration,
    /// NOTE(review): not read anywhere in this file — presumably
    /// consumed by an external driver loop; verify before relying on it.
    pub auto_compact: bool,
}
356
357impl Default for CompactionConfig {
358 fn default() -> Self {
359 Self {
360 strategy: CompactionStrategy::Hybrid,
361 fragmentation_threshold: 0.3,
362 max_fragments: 100,
363 min_compact_interval: Duration::from_secs(60),
364 auto_compact: false,
365 }
366 }
367}
368
/// How a compaction pass relocates live allocations.
#[derive(Debug, Clone, Copy)]
pub enum CompactionStrategy {
    /// Copy every active allocation into a packed layout.
    Copy,
    /// Close gaps by sliding allocations without a staging copy.
    InPlace,
    /// Pick `Copy` above 50% fragmentation, otherwise `InPlace`.
    Hybrid,
}
379
/// Cumulative counters across all compaction passes.
#[derive(Debug, Clone, Default)]
pub struct CompactionStats {
    /// Number of executed (non-skipped) passes.
    pub total_compactions: u64,
    /// Total wall-clock time spent in passes.
    pub total_duration: Duration,
    /// Total bytes moved across all passes.
    pub total_bytes_moved: u64,
    /// When the most recent pass finished; used to rate-limit `compact`.
    pub last_compaction: Option<Instant>,
}
392
393impl CompactionStats {
394 pub fn average_duration(&self) -> Option<Duration> {
396 if self.total_compactions > 0 {
397 Some(self.total_duration / self.total_compactions as u32)
398 } else {
399 None
400 }
401 }
402}
403
#[cfg(test)]
mod tests {
    use super::*;

    /// Three active 100-byte allocations registered with 100-byte gaps
    /// must come back from `sorted_allocations` ordered by offset.
    #[test]
    fn test_fragmentation_detection() {
        let mut map = AllocationMap::new();

        for (id, offset) in [(1u64, 0u64), (2, 200), (3, 400)] {
            map.insert(
                id,
                AllocationInfo {
                    offset,
                    size: 100,
                    active: true,
                    last_access: Instant::now(),
                },
            );
        }

        let sorted = map.sorted_allocations();
        assert_eq!(sorted.len(), 3);
        for (entry, expected) in sorted.iter().zip([0u64, 200, 400]) {
            assert_eq!(entry.offset, expected);
        }
    }

    /// Default config must match the documented values.
    #[test]
    fn test_compaction_config_default() {
        let config = CompactionConfig::default();
        assert!(!config.auto_compact);
        assert_eq!(config.max_fragments, 100);
        assert_eq!(config.fragmentation_threshold, 0.3);
    }
}