// oxigdal_gpu_advanced/memory_compaction.rs
//! GPU memory defragmentation and compaction.
//!
//! This module provides memory compaction strategies to reduce fragmentation
//! and improve memory utilization in long-running GPU applications.

use crate::error::Result;
use parking_lot::RwLock;
use std::collections::BTreeMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use wgpu::{Device, Queue};

13/// Memory compaction manager
14pub struct MemoryCompactor {
15    /// Device for GPU memory operations (reserved for future copy operations)
16    #[allow(dead_code)]
17    device: Arc<Device>,
18    /// Queue for GPU command submission (reserved for future copy operations)
19    #[allow(dead_code)]
20    queue: Arc<Queue>,
21    allocations: Arc<RwLock<AllocationMap>>,
22    config: CompactionConfig,
23    stats: Arc<RwLock<CompactionStats>>,
24}
25
26impl MemoryCompactor {
27    /// Create a new memory compactor
28    pub fn new(device: Arc<Device>, queue: Arc<Queue>, config: CompactionConfig) -> Self {
29        Self {
30            device,
31            queue,
32            allocations: Arc::new(RwLock::new(AllocationMap::new())),
33            config,
34            stats: Arc::new(RwLock::new(CompactionStats::default())),
35        }
36    }
37
38    /// Register an allocation
39    pub fn register_allocation(&self, id: u64, offset: u64, size: u64, active: bool) {
40        let mut allocs = self.allocations.write();
41        allocs.insert(
42            id,
43            AllocationInfo {
44                offset,
45                size,
46                active,
47                last_access: Instant::now(),
48            },
49        );
50    }
51
52    /// Unregister an allocation
53    pub fn unregister_allocation(&self, id: u64) {
54        let mut allocs = self.allocations.write();
55        allocs.remove(id);
56    }
57
58    /// Detect fragmentation
59    pub fn detect_fragmentation(&self) -> FragmentationInfo {
60        let allocs = self.allocations.read();
61        let sorted = allocs.sorted_allocations();
62
63        if sorted.is_empty() {
64            return FragmentationInfo {
65                total_size: 0,
66                used_size: 0,
67                wasted_size: 0,
68                fragment_count: 0,
69                largest_fragment: 0,
70                fragmentation_ratio: 0.0,
71            };
72        }
73
74        let mut used_size = 0u64;
75        let mut wasted_size = 0u64;
76        let mut fragment_count = 0usize;
77        let mut largest_fragment = 0u64;
78        let mut last_end = 0u64;
79
80        for info in sorted.iter() {
81            if info.active {
82                let gap = info.offset.saturating_sub(last_end);
83
84                if gap > 0 {
85                    wasted_size += gap;
86                    fragment_count += 1;
87                    largest_fragment = largest_fragment.max(gap);
88                }
89
90                used_size += info.size;
91                last_end = info.offset + info.size;
92            }
93        }
94
95        let total_size = last_end;
96
97        let fragmentation_ratio = if total_size > 0 {
98            wasted_size as f64 / total_size as f64
99        } else {
100            0.0
101        };
102
103        FragmentationInfo {
104            total_size,
105            used_size,
106            wasted_size,
107            fragment_count,
108            largest_fragment,
109            fragmentation_ratio,
110        }
111    }
112
113    /// Check if compaction is needed
114    pub fn needs_compaction(&self) -> bool {
115        let frag = self.detect_fragmentation();
116
117        frag.fragmentation_ratio > self.config.fragmentation_threshold
118            || frag.fragment_count > self.config.max_fragments
119    }
120
121    /// Perform memory compaction
122    pub async fn compact(&self) -> Result<CompactionResult> {
123        let start = Instant::now();
124
125        // Detect fragmentation
126        let before = self.detect_fragmentation();
127
128        if !self.should_compact(&before) {
129            return Ok(CompactionResult {
130                success: false,
131                duration: start.elapsed(),
132                before: before.clone(),
133                after: before,
134                bytes_moved: 0,
135                allocations_moved: 0,
136            });
137        }
138
139        // Perform compaction based on strategy
140        let result = match self.config.strategy {
141            CompactionStrategy::Copy => self.compact_by_copy().await?,
142            CompactionStrategy::InPlace => self.compact_in_place().await?,
143            CompactionStrategy::Hybrid => self.compact_hybrid().await?,
144        };
145
146        // Update statistics
147        let mut stats = self.stats.write();
148        stats.total_compactions += 1;
149        stats.total_duration += result.duration;
150        stats.total_bytes_moved += result.bytes_moved;
151        stats.last_compaction = Some(Instant::now());
152
153        Ok(result)
154    }
155
156    /// Check if compaction should proceed
157    fn should_compact(&self, frag: &FragmentationInfo) -> bool {
158        if frag.fragmentation_ratio < self.config.fragmentation_threshold {
159            return false;
160        }
161
162        // Check minimum interval
163        let stats = self.stats.read();
164        if let Some(last) = stats.last_compaction {
165            if last.elapsed() < self.config.min_compact_interval {
166                return false;
167            }
168        }
169
170        true
171    }
172
173    /// Compact by copying to new buffer
174    async fn compact_by_copy(&self) -> Result<CompactionResult> {
175        let start = Instant::now();
176        let before = self.detect_fragmentation();
177
178        let allocs = self.allocations.read();
179        let sorted = allocs.sorted_allocations();
180
181        let mut bytes_moved = 0u64;
182        let mut allocations_moved = 0usize;
183
184        // In a real implementation, we would:
185        // 1. Allocate a new buffer
186        // 2. Copy active allocations sequentially
187        // 3. Update allocation offsets
188        // 4. Free old buffer
189
190        for info in sorted.iter() {
191            if info.active {
192                bytes_moved += info.size;
193                allocations_moved += 1;
194            }
195        }
196
197        let after = FragmentationInfo {
198            total_size: before.used_size,
199            used_size: before.used_size,
200            wasted_size: 0,
201            fragment_count: 0,
202            largest_fragment: 0,
203            fragmentation_ratio: 0.0,
204        };
205
206        Ok(CompactionResult {
207            success: true,
208            duration: start.elapsed(),
209            before,
210            after,
211            bytes_moved,
212            allocations_moved,
213        })
214    }
215
216    /// Compact in-place (without extra buffer)
217    async fn compact_in_place(&self) -> Result<CompactionResult> {
218        let start = Instant::now();
219        let before = self.detect_fragmentation();
220
221        // In-place compaction is complex and requires careful handling
222        // This is a simplified implementation
223
224        let bytes_moved = before.wasted_size;
225        let allocations_moved = before.fragment_count;
226
227        let after = FragmentationInfo {
228            total_size: before.used_size,
229            used_size: before.used_size,
230            wasted_size: 0,
231            fragment_count: 0,
232            largest_fragment: 0,
233            fragmentation_ratio: 0.0,
234        };
235
236        Ok(CompactionResult {
237            success: true,
238            duration: start.elapsed(),
239            before,
240            after,
241            bytes_moved,
242            allocations_moved,
243        })
244    }
245
246    /// Hybrid compaction strategy
247    async fn compact_hybrid(&self) -> Result<CompactionResult> {
248        let before = self.detect_fragmentation();
249
250        // Use copy for high fragmentation, in-place for low
251        if before.fragmentation_ratio > 0.5 {
252            self.compact_by_copy().await
253        } else {
254            self.compact_in_place().await
255        }
256    }
257
258    /// Get compaction statistics
259    pub fn get_stats(&self) -> CompactionStats {
260        self.stats.read().clone()
261    }
262
263    /// Reset statistics
264    pub fn reset_stats(&self) {
265        let mut stats = self.stats.write();
266        *stats = CompactionStats::default();
267    }
268}
269
/// Bookkeeping record for a single GPU memory allocation.
#[derive(Debug, Clone)]
struct AllocationInfo {
    /// Byte offset of the allocation within the buffer.
    offset: u64,
    /// Size of the allocation in bytes.
    size: u64,
    /// Whether the allocation is still live.
    active: bool,
    /// Last access time (reserved for LRU eviction policies).
    #[allow(dead_code)]
    last_access: Instant,
}
280
281/// Map of allocations
282struct AllocationMap {
283    allocations: BTreeMap<u64, AllocationInfo>,
284}
285
286impl AllocationMap {
287    fn new() -> Self {
288        Self {
289            allocations: BTreeMap::new(),
290        }
291    }
292
293    fn insert(&mut self, id: u64, info: AllocationInfo) {
294        self.allocations.insert(id, info);
295    }
296
297    fn remove(&mut self, id: u64) {
298        self.allocations.remove(&id);
299    }
300
301    fn sorted_allocations(&self) -> Vec<AllocationInfo> {
302        let mut allocs: Vec<_> = self.allocations.values().cloned().collect();
303        allocs.sort_by_key(|a| a.offset);
304        allocs
305    }
306}
307
/// Fragmentation measurements over a span of GPU memory.
#[derive(Debug, Clone)]
pub struct FragmentationInfo {
    /// Total memory span (end of the highest active allocation).
    pub total_size: u64,
    /// Bytes actually occupied by active allocations.
    pub used_size: u64,
    /// Bytes lost to gaps between active allocations.
    pub wasted_size: u64,
    /// Number of gaps (fragments) found.
    pub fragment_count: usize,
    /// Size of the largest single gap, in bytes.
    pub largest_fragment: u64,
    /// Fragmentation ratio (0.0 - 1.0): wasted over total span.
    pub fragmentation_ratio: f64,
}
324
325/// Compaction result
326#[derive(Debug, Clone)]
327pub struct CompactionResult {
328    /// Whether compaction was successful
329    pub success: bool,
330    /// Time taken
331    pub duration: Duration,
332    /// Fragmentation before
333    pub before: FragmentationInfo,
334    /// Fragmentation after
335    pub after: FragmentationInfo,
336    /// Bytes moved during compaction
337    pub bytes_moved: u64,
338    /// Number of allocations moved
339    pub allocations_moved: usize,
340}
341
342/// Compaction configuration
343#[derive(Debug, Clone)]
344pub struct CompactionConfig {
345    /// Compaction strategy
346    pub strategy: CompactionStrategy,
347    /// Fragmentation threshold to trigger compaction (0.0 - 1.0)
348    pub fragmentation_threshold: f64,
349    /// Maximum number of fragments before compaction
350    pub max_fragments: usize,
351    /// Minimum interval between compactions
352    pub min_compact_interval: Duration,
353    /// Enable automatic compaction
354    pub auto_compact: bool,
355}
356
357impl Default for CompactionConfig {
358    fn default() -> Self {
359        Self {
360            strategy: CompactionStrategy::Hybrid,
361            fragmentation_threshold: 0.3,
362            max_fragments: 100,
363            min_compact_interval: Duration::from_secs(60),
364            auto_compact: false,
365        }
366    }
367}
368
/// Compaction strategy.
///
/// `PartialEq`/`Eq` are derived so callers can compare a configured strategy
/// against a specific variant.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompactionStrategy {
    /// Copy allocations to a new buffer.
    Copy,
    /// Compact in-place, without an extra buffer.
    InPlace,
    /// Hybrid: copy when fragmentation is high, otherwise in-place.
    Hybrid,
}
379
/// Running totals accumulated across compaction runs.
#[derive(Debug, Clone, Default)]
pub struct CompactionStats {
    /// How many compactions have been performed.
    pub total_compactions: u64,
    /// Cumulative time spent compacting.
    pub total_duration: Duration,
    /// Cumulative bytes moved.
    pub total_bytes_moved: u64,
    /// When the most recent compaction finished, if any.
    pub last_compaction: Option<Instant>,
}
392
393impl CompactionStats {
394    /// Calculate average compaction duration
395    pub fn average_duration(&self) -> Option<Duration> {
396        if self.total_compactions > 0 {
397            Some(self.total_duration / self.total_compactions as u32)
398        } else {
399            None
400        }
401    }
402}
403
#[cfg(test)]
mod tests {
    use super::*;

    /// Build an active allocation record at `offset` with the given `size`.
    fn active_alloc(offset: u64, size: u64) -> AllocationInfo {
        AllocationInfo {
            offset,
            size,
            active: true,
            last_access: Instant::now(),
        }
    }

    #[test]
    fn test_fragmentation_detection() {
        let mut map = AllocationMap::new();

        // Three active 100-byte allocations, each separated by a 100-byte gap.
        for (id, offset) in [(1u64, 0u64), (2, 200), (3, 400)] {
            map.insert(id, active_alloc(offset, 100));
        }

        let sorted = map.sorted_allocations();
        assert_eq!(sorted.len(), 3);
        assert_eq!(sorted[0].offset, 0);
        assert_eq!(sorted[1].offset, 200);
        assert_eq!(sorted[2].offset, 400);
    }

    #[test]
    fn test_compaction_config_default() {
        let config = CompactionConfig::default();
        assert_eq!(config.fragmentation_threshold, 0.3);
        assert_eq!(config.max_fragments, 100);
        assert!(!config.auto_compact);
    }
}
455}