1use crate::error::{GpuAdvancedError, Result};
28use parking_lot::{Mutex, RwLock};
29use std::collections::{BTreeMap, HashMap};
30use std::ops::Range;
31use std::sync::Arc;
32use std::sync::atomic::{AtomicU64, Ordering};
33use std::time::{Duration, Instant};
34use wgpu::{Buffer, BufferDescriptor, BufferUsages, CommandEncoderDescriptor, Device, Queue};
35
/// A contiguous region of the pool buffer, either free or owned by exactly
/// one allocation. The pool's block map covers the whole buffer with these.
#[derive(Debug, Clone)]
struct MemoryBlock {
    /// Byte offset of this block within the pool buffer.
    offset: u64,
    /// Size of the block in bytes.
    size: u64,
    /// Whether the block is currently unallocated.
    is_free: bool,
    /// Id of the owning allocation; `None` for free blocks.
    allocation_id: Option<u64>,
    /// Whether defragmentation may relocate this block
    /// (cleared by `pin_allocation`, restored by `unpin_allocation`).
    movable: bool,
    /// Reference count; set to 1 on allocation, 0 when freed.
    ref_count: u32,
}
52
/// One planned relocation of a single allocation during defragmentation.
#[derive(Debug, Clone)]
pub struct DefragMove {
    /// Id of the allocation to relocate.
    pub allocation_id: u64,
    /// Current byte offset of the allocation in the pool.
    pub src_offset: u64,
    /// Destination byte offset after the move.
    pub dst_offset: u64,
    /// Number of bytes to move.
    pub size: u64,
}

/// Result of analysing the pool for compaction: the moves to perform and the
/// fragmentation level before and (estimated) after executing them.
#[derive(Debug, Clone, Default)]
pub struct DefragmentationPlan {
    pub moves: Vec<DefragMove>,
    pub total_bytes: u64,
    pub expected_fragmentation: f64,
    pub current_fragmentation: f64,
}

impl DefragmentationPlan {
    /// `true` when executing the plan would cut fragmentation by at least
    /// `min_improvement`. An empty plan is never worthwhile.
    pub fn is_worthwhile(&self, min_improvement: f64) -> bool {
        !self.moves.is_empty()
            && self.current_fragmentation - self.expected_fragmentation >= min_improvement
    }

    /// Number of individual block moves in the plan.
    pub fn move_count(&self) -> usize {
        self.moves.len()
    }
}
94
/// Summary of a completed (or skipped) defragmentation pass.
///
/// `Default` yields the "nothing happened" result: `performed == false`,
/// zero counters, zero fragmentation and a zero duration. The previous
/// hand-written `impl Default` produced exactly these values, so it is
/// replaced by the derive.
#[derive(Debug, Clone, Default)]
pub struct DefragmentationResult {
    /// Whether any blocks were actually relocated.
    pub performed: bool,
    /// Number of blocks moved in this pass.
    pub blocks_moved: usize,
    /// Total bytes relocated.
    pub bytes_moved: u64,
    /// Fragmentation ratio before the pass (0.0 – 1.0).
    pub fragmentation_before: f64,
    /// Fragmentation ratio after the pass (0.0 – 1.0).
    pub fragmentation_after: f64,
    /// Wall-clock time spent on the pass.
    pub duration: Duration,
    /// Blocks that could not be moved (pinned, or vanished mid-pass).
    pub unmovable_blocks: usize,
}
127
/// Tuning knobs for defragmentation passes.
#[derive(Debug, Clone)]
pub struct DefragConfig {
    pub min_fragmentation_threshold: f64,
    pub min_improvement: f64,
    pub max_moves_per_pass: usize,
    pub skip_unmovable: bool,
    pub compaction_alignment: u64,
}

impl Default for DefragConfig {
    /// Conservative defaults: act at >= 20% fragmentation, demand a 10%
    /// improvement, cap a pass at 100 moves, skip pinned blocks, and pack
    /// allocations to 256-byte boundaries.
    fn default() -> Self {
        DefragConfig {
            min_fragmentation_threshold: 0.2,
            min_improvement: 0.1,
            max_moves_per_pass: 100,
            skip_unmovable: true,
            compaction_alignment: 256,
        }
    }
}
154
/// A sub-allocating GPU memory pool backed by a single `wgpu::Buffer`,
/// with allocation pinning and (logical or GPU-copying) defragmentation.
pub struct MemoryPool {
    device: Arc<Device>,
    /// Backing buffer; `None` only if it has been taken away.
    buffer: Arc<Mutex<Option<Buffer>>>,
    /// Total capacity of the pool in bytes.
    pool_size: u64,
    /// Block map keyed by block offset; entries cover the whole pool.
    blocks: Arc<RwLock<BTreeMap<u64, MemoryBlock>>>,
    /// Wrapping counter used to mint allocation ids.
    next_alloc_id: Arc<Mutex<u64>>,
    /// Buffer usages; always includes COPY_SRC | COPY_DST for defrag copies.
    usage: BufferUsages,
    /// Bytes currently allocated.
    current_usage: Arc<Mutex<u64>>,
    /// High-water mark of `current_usage`.
    peak_usage: Arc<Mutex<u64>>,
    allocation_count: Arc<Mutex<u64>>,
    deallocation_count: Arc<Mutex<u64>>,
    defrag_count: Arc<Mutex<u64>>,
    /// allocation id -> live offset; rewritten when defrag moves a block.
    allocation_offsets: Arc<RwLock<HashMap<u64, u64>>>,
    defrag_config: RwLock<DefragConfig>,
    last_defrag_time: Arc<Mutex<Option<Instant>>>,
    total_bytes_defragged: Arc<AtomicU64>,
}
189
/// RAII handle for a region allocated from a [`MemoryPool`].
/// Dropping it returns the region to the pool.
pub struct MemoryAllocation {
    /// Pool-unique allocation id.
    id: u64,
    /// Offset at allocation time; the live offset may differ after a
    /// defragmentation pass — see [`MemoryAllocation::offset`].
    original_offset: u64,
    /// Aligned size in bytes.
    size: u64,
    /// Owning pool, kept alive so `Drop` can deallocate.
    pool: Arc<MemoryPool>,
}
205
206impl MemoryPool {
207 pub fn new(device: Arc<Device>, pool_size: u64, usage: BufferUsages) -> Result<Self> {
209 Self::with_config(device, pool_size, usage, DefragConfig::default())
210 }
211
212 pub fn with_config(
214 device: Arc<Device>,
215 pool_size: u64,
216 usage: BufferUsages,
217 defrag_config: DefragConfig,
218 ) -> Result<Self> {
219 let usage_with_copy = usage | BufferUsages::COPY_SRC | BufferUsages::COPY_DST;
221
222 let buffer = device.create_buffer(&BufferDescriptor {
224 label: Some("Memory Pool"),
225 size: pool_size,
226 usage: usage_with_copy,
227 mapped_at_creation: false,
228 });
229
230 let mut blocks = BTreeMap::new();
232 blocks.insert(
233 0,
234 MemoryBlock {
235 offset: 0,
236 size: pool_size,
237 is_free: true,
238 allocation_id: None,
239 movable: true,
240 ref_count: 0,
241 },
242 );
243
244 Ok(Self {
245 device,
246 buffer: Arc::new(Mutex::new(Some(buffer))),
247 pool_size,
248 blocks: Arc::new(RwLock::new(blocks)),
249 next_alloc_id: Arc::new(Mutex::new(0)),
250 usage: usage_with_copy,
251 current_usage: Arc::new(Mutex::new(0)),
252 peak_usage: Arc::new(Mutex::new(0)),
253 allocation_count: Arc::new(Mutex::new(0)),
254 deallocation_count: Arc::new(Mutex::new(0)),
255 defrag_count: Arc::new(Mutex::new(0)),
256 allocation_offsets: Arc::new(RwLock::new(HashMap::new())),
257 defrag_config: RwLock::new(defrag_config),
258 last_defrag_time: Arc::new(Mutex::new(None)),
259 total_bytes_defragged: Arc::new(AtomicU64::new(0)),
260 })
261 }
262
263 pub fn allocate(self: &Arc<Self>, size: u64, alignment: u64) -> Result<MemoryAllocation> {
265 let aligned_size = Self::align_up(size, alignment);
266
267 let alloc_id = {
268 let mut next_id = self.next_alloc_id.lock();
269 let id = *next_id;
270 *next_id = next_id.wrapping_add(1);
271 id
272 };
273
274 let (offset, block_offset) = {
276 let blocks = self.blocks.read();
277
278 let mut found: Option<(u64, u64)> = None;
279
280 for (blk_offset, block) in blocks.iter() {
281 if block.is_free && block.size >= aligned_size {
282 let aligned_offset = Self::align_up(*blk_offset, alignment);
283 let waste = aligned_offset - blk_offset;
284
285 if block.size >= aligned_size + waste {
286 found = Some((aligned_offset, *blk_offset));
287 break;
288 }
289 }
290 }
291
292 found.ok_or_else(|| GpuAdvancedError::AllocationFailed {
293 size: aligned_size,
294 available: self.get_available_memory(),
295 })?
296 };
297
298 {
300 let mut blocks = self.blocks.write();
301
302 let block = blocks
303 .remove(&block_offset)
304 .ok_or_else(|| GpuAdvancedError::memory_pool_error("Block not found"))?;
305
306 let waste = offset - block_offset;
307
308 if waste > 0 {
310 blocks.insert(
311 block_offset,
312 MemoryBlock {
313 offset: block_offset,
314 size: waste,
315 is_free: true,
316 allocation_id: None,
317 movable: true,
318 ref_count: 0,
319 },
320 );
321 }
322
323 blocks.insert(
325 offset,
326 MemoryBlock {
327 offset,
328 size: aligned_size,
329 is_free: false,
330 allocation_id: Some(alloc_id),
331 movable: true,
332 ref_count: 1,
333 },
334 );
335
336 let remainder = block.size - aligned_size - waste;
338 if remainder > 0 {
339 blocks.insert(
340 offset + aligned_size,
341 MemoryBlock {
342 offset: offset + aligned_size,
343 size: remainder,
344 is_free: true,
345 allocation_id: None,
346 movable: true,
347 ref_count: 0,
348 },
349 );
350 }
351 }
352
353 {
355 let mut offsets = self.allocation_offsets.write();
356 offsets.insert(alloc_id, offset);
357 }
358
359 {
361 let mut usage = self.current_usage.lock();
362 *usage = usage.saturating_add(aligned_size);
363
364 let mut peak = self.peak_usage.lock();
365 *peak = (*peak).max(*usage);
366
367 let mut count = self.allocation_count.lock();
368 *count = count.saturating_add(1);
369 }
370
371 Ok(MemoryAllocation {
372 id: alloc_id,
373 original_offset: offset,
374 size: aligned_size,
375 pool: Arc::clone(self),
376 })
377 }
378
379 pub fn get_allocation_offset(&self, alloc_id: u64) -> Option<u64> {
382 self.allocation_offsets.read().get(&alloc_id).copied()
383 }
384
385 pub fn pin_allocation(&self, alloc_id: u64) -> Result<()> {
387 let offsets = self.allocation_offsets.read();
388 let offset = offsets
389 .get(&alloc_id)
390 .copied()
391 .ok_or_else(|| GpuAdvancedError::memory_pool_error("Allocation not found"))?;
392 drop(offsets);
393
394 let mut blocks = self.blocks.write();
395 if let Some(block) = blocks.get_mut(&offset) {
396 block.movable = false;
397 Ok(())
398 } else {
399 Err(GpuAdvancedError::memory_pool_error(
400 "Block not found for allocation",
401 ))
402 }
403 }
404
405 pub fn unpin_allocation(&self, alloc_id: u64) -> Result<()> {
407 let offsets = self.allocation_offsets.read();
408 let offset = offsets
409 .get(&alloc_id)
410 .copied()
411 .ok_or_else(|| GpuAdvancedError::memory_pool_error("Allocation not found"))?;
412 drop(offsets);
413
414 let mut blocks = self.blocks.write();
415 if let Some(block) = blocks.get_mut(&offset) {
416 block.movable = true;
417 Ok(())
418 } else {
419 Err(GpuAdvancedError::memory_pool_error(
420 "Block not found for allocation",
421 ))
422 }
423 }
424
425 pub fn set_defrag_config(&self, config: DefragConfig) {
427 *self.defrag_config.write() = config;
428 }
429
430 pub fn get_defrag_config(&self) -> DefragConfig {
432 self.defrag_config.read().clone()
433 }
434
435 fn deallocate(&self, allocation: &MemoryAllocation) -> Result<()> {
437 let current_offset = self
439 .allocation_offsets
440 .read()
441 .get(&allocation.id)
442 .copied()
443 .unwrap_or(allocation.original_offset);
444
445 let mut blocks = self.blocks.write();
446
447 if let Some(block) = blocks.get_mut(¤t_offset) {
449 if block.allocation_id == Some(allocation.id) {
450 block.is_free = true;
451 block.allocation_id = None;
452 block.ref_count = 0;
453 } else {
454 return Err(GpuAdvancedError::memory_pool_error("Invalid allocation ID"));
455 }
456 } else {
457 return Err(GpuAdvancedError::memory_pool_error("Block not found"));
458 }
459
460 {
462 let mut offsets = self.allocation_offsets.write();
463 offsets.remove(&allocation.id);
464 }
465
466 {
468 let mut usage = self.current_usage.lock();
469 *usage = usage.saturating_sub(allocation.size);
470
471 let mut count = self.deallocation_count.lock();
472 *count = count.saturating_add(1);
473 }
474
475 self.coalesce_free_blocks(&mut blocks);
477
478 Ok(())
479 }
480
    /// Merges runs of adjacent free blocks into single larger blocks.
    ///
    /// Two-phase: phase one scans in ascending offset order and records every
    /// free block whose immediate predecessor is free and exactly adjacent
    /// (`prev.offset + prev.size == offset`); phase two removes each recorded
    /// block and folds its size into the nearest remaining block below it.
    ///
    /// Phase two relies on phase-one ordering: for a free chain A,B,C the
    /// folds run left-to-right, so by the time C is folded A has already
    /// absorbed B and is still the adjacent predecessor.
    /// NOTE(review): the fold does not re-check adjacency before adding
    /// `block.size` — believed unreachable for a non-adjacent predecessor
    /// given phase one's filter, but worth confirming with a unit test.
    fn coalesce_free_blocks(&self, blocks: &mut BTreeMap<u64, MemoryBlock>) {
        let mut to_merge: Vec<u64> = Vec::new();

        // Phase 1: mark free blocks that directly follow another free block.
        let mut prev_offset: Option<u64> = None;
        for (offset, block) in blocks.iter() {
            if block.is_free {
                if let Some(prev_off) = prev_offset {
                    if let Some(prev_block) = blocks.get(&prev_off) {
                        if prev_block.is_free && prev_block.offset + prev_block.size == *offset {
                            to_merge.push(*offset);
                        }
                    }
                }
                prev_offset = Some(*offset);
            } else {
                // An allocated block breaks the run of free blocks.
                prev_offset = None;
            }
        }

        // Phase 2: fold each marked block into its predecessor.
        for offset in to_merge {
            if let Some(block) = blocks.remove(&offset) {
                // Nearest remaining entry below `offset`.
                let prev_offset = blocks.range(..offset).next_back().map(|(k, _)| *k);

                if let Some(prev_off) = prev_offset {
                    if let Some(prev_block) = blocks.get_mut(&prev_off) {
                        if prev_block.is_free {
                            prev_block.size += block.size;
                        }
                    }
                }
            }
        }
    }
517
518 pub fn plan_defragmentation(&self) -> DefragmentationPlan {
523 let config = self.defrag_config.read().clone();
524 let blocks = self.blocks.read();
525
526 let current_fragmentation = self.calculate_fragmentation_internal(&blocks);
527
528 let mut allocated_blocks: Vec<_> = blocks
530 .iter()
531 .filter(|(_, b)| !b.is_free && b.allocation_id.is_some())
532 .map(|(offset, b)| (*offset, b.clone()))
533 .collect();
534
535 allocated_blocks.sort_by_key(|(offset, _)| *offset);
536
537 let mut moves = Vec::new();
539 let mut total_bytes = 0u64;
540 let mut next_offset = 0u64;
541
542 for (current_offset, block) in &allocated_blocks {
543 let aligned_offset = Self::align_up(next_offset, config.compaction_alignment);
544
545 if aligned_offset < *current_offset && block.movable {
547 if let Some(alloc_id) = block.allocation_id {
548 moves.push(DefragMove {
549 allocation_id: alloc_id,
550 src_offset: *current_offset,
551 dst_offset: aligned_offset,
552 size: block.size,
553 });
554 total_bytes += block.size;
555 }
556 next_offset = aligned_offset + block.size;
557 } else {
558 next_offset = current_offset + block.size;
560 }
561 }
562
563 let expected_fragmentation = if moves.is_empty() {
565 current_fragmentation
566 } else {
567 let unmovable_count = allocated_blocks.iter().filter(|(_, b)| !b.movable).count();
570 if unmovable_count == 0 {
571 0.0
572 } else {
573 (unmovable_count as f64 / allocated_blocks.len().max(1) as f64) * 0.5
575 }
576 };
577
578 DefragmentationPlan {
579 moves,
580 total_bytes,
581 expected_fragmentation,
582 current_fragmentation,
583 }
584 }
585
586 pub fn defragment(&self) -> Result<DefragmentationResult> {
594 let start = Instant::now();
595 let plan = self.plan_defragmentation();
596
597 if plan.moves.is_empty() {
598 return Ok(DefragmentationResult {
599 performed: false,
600 fragmentation_before: plan.current_fragmentation,
601 fragmentation_after: plan.current_fragmentation,
602 duration: start.elapsed(),
603 ..Default::default()
604 });
605 }
606
607 let config = self.defrag_config.read().clone();
608
609 if !plan.is_worthwhile(config.min_improvement) {
611 return Ok(DefragmentationResult {
612 performed: false,
613 fragmentation_before: plan.current_fragmentation,
614 fragmentation_after: plan.current_fragmentation,
615 duration: start.elapsed(),
616 ..Default::default()
617 });
618 }
619
620 let result = self.execute_defrag_plan_logical(&plan, &config)?;
622
623 {
625 let mut count = self.defrag_count.lock();
626 *count = count.saturating_add(1);
627 }
628
629 self.total_bytes_defragged
630 .fetch_add(result.bytes_moved, Ordering::Relaxed);
631
632 *self.last_defrag_time.lock() = Some(Instant::now());
633
634 Ok(DefragmentationResult {
635 duration: start.elapsed(),
636 ..result
637 })
638 }
639
640 pub fn defragment_with_queue(&self, queue: &Queue) -> Result<DefragmentationResult> {
651 let start = Instant::now();
652 let plan = self.plan_defragmentation();
653
654 if plan.moves.is_empty() {
655 return Ok(DefragmentationResult {
656 performed: false,
657 fragmentation_before: plan.current_fragmentation,
658 fragmentation_after: plan.current_fragmentation,
659 duration: start.elapsed(),
660 ..Default::default()
661 });
662 }
663
664 let config = self.defrag_config.read().clone();
665
666 if plan.current_fragmentation < config.min_fragmentation_threshold {
668 return Ok(DefragmentationResult {
669 performed: false,
670 fragmentation_before: plan.current_fragmentation,
671 fragmentation_after: plan.current_fragmentation,
672 duration: start.elapsed(),
673 ..Default::default()
674 });
675 }
676
677 if !plan.is_worthwhile(config.min_improvement) {
678 return Ok(DefragmentationResult {
679 performed: false,
680 fragmentation_before: plan.current_fragmentation,
681 fragmentation_after: plan.current_fragmentation,
682 duration: start.elapsed(),
683 ..Default::default()
684 });
685 }
686
687 let result = self.execute_defrag_plan_gpu(&plan, &config, queue)?;
689
690 {
692 let mut count = self.defrag_count.lock();
693 *count = count.saturating_add(1);
694 }
695
696 self.total_bytes_defragged
697 .fetch_add(result.bytes_moved, Ordering::Relaxed);
698
699 *self.last_defrag_time.lock() = Some(Instant::now());
700
701 Ok(DefragmentationResult {
702 duration: start.elapsed(),
703 ..result
704 })
705 }
706
    /// Applies a defragmentation plan to pool metadata only — the block map
    /// and the allocation-offset table are rewritten, but no buffer bytes are
    /// copied. Companion to `execute_defrag_plan_gpu`, which also moves data.
    ///
    /// At most `config.max_moves_per_pass` moves are applied per call.
    /// `duration` in the returned result is `Duration::ZERO`; the public
    /// `defragment` wrapper overwrites it with the measured elapsed time.
    fn execute_defrag_plan_logical(
        &self,
        plan: &DefragmentationPlan,
        config: &DefragConfig,
    ) -> Result<DefragmentationResult> {
        let mut blocks = self.blocks.write();
        let mut allocation_offsets = self.allocation_offsets.write();

        let mut blocks_moved = 0usize;
        let mut bytes_moved = 0u64;
        let mut unmovable_blocks = 0usize;

        let moves_to_execute: Vec<_> = plan
            .moves
            .iter()
            .take(config.max_moves_per_pass)
            .cloned()
            .collect();

        for defrag_move in &moves_to_execute {
            // A block may have been freed/reallocated since the plan was made.
            let block = match blocks.remove(&defrag_move.src_offset) {
                Some(b) => b,
                None => {
                    if config.skip_unmovable {
                        unmovable_blocks += 1;
                        continue;
                    } else {
                        return Err(GpuAdvancedError::memory_pool_error(
                            "Block not found during defragmentation",
                        ));
                    }
                }
            };

            // Pinned after planning: restore it untouched and count it.
            if !block.movable {
                blocks.insert(defrag_move.src_offset, block);
                unmovable_blocks += 1;
                continue;
            }

            let new_block = MemoryBlock {
                offset: defrag_move.dst_offset,
                size: block.size,
                is_free: false,
                allocation_id: block.allocation_id,
                movable: block.movable,
                ref_count: block.ref_count,
            };

            // May overwrite a stale free-block entry at dst_offset; the free
            // list is rebuilt from allocated ranges below, so that is fine.
            blocks.insert(defrag_move.dst_offset, new_block);

            if let Some(alloc_id) = block.allocation_id {
                allocation_offsets.insert(alloc_id, defrag_move.dst_offset);
            }

            blocks_moved += 1;
            bytes_moved += defrag_move.size;
        }

        // rebuild_free_blocks() takes the blocks write lock itself, so both
        // guards must be released first to avoid self-deadlock.
        drop(blocks);
        drop(allocation_offsets);
        self.rebuild_free_blocks()?;

        let blocks = self.blocks.read();
        let fragmentation_after = self.calculate_fragmentation_internal(&blocks);

        Ok(DefragmentationResult {
            performed: blocks_moved > 0,
            blocks_moved,
            bytes_moved,
            fragmentation_before: plan.current_fragmentation,
            fragmentation_after,
            // Placeholder; caller fills in the real elapsed time.
            duration: Duration::ZERO,
            unmovable_blocks,
        })
    }
790
    /// Applies a defragmentation plan with real data movement: block contents
    /// are copied out to a staging buffer and back to their new offsets, then
    /// pool metadata is updated like `execute_defrag_plan_logical`.
    ///
    /// Two separate submissions keep the src reads and dst writes ordered on
    /// the queue; the staging hop also avoids overlapping same-buffer copies.
    ///
    /// NOTE(review): unlike the logical path, the metadata loop below does
    /// NOT re-check `block.movable`, so a block pinned between planning and
    /// execution would still be relocated — confirm whether that is intended.
    ///
    /// `duration` in the returned result is `Duration::ZERO`; the caller
    /// (`defragment_with_queue`) fills in the real elapsed time.
    fn execute_defrag_plan_gpu(
        &self,
        plan: &DefragmentationPlan,
        config: &DefragConfig,
        queue: &Queue,
    ) -> Result<DefragmentationResult> {
        let buffer_guard = self.buffer.lock();
        let buffer = buffer_guard
            .as_ref()
            .ok_or_else(|| GpuAdvancedError::memory_pool_error("Pool buffer not available"))?;

        // Sized for the full plan even if max_moves_per_pass truncates it;
        // the over-allocation is harmless.
        let staging_buffer = self.device.create_buffer(&BufferDescriptor {
            label: Some("Defrag Staging Buffer"),
            size: plan.total_bytes,
            usage: BufferUsages::COPY_SRC | BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let moves_to_execute: Vec<_> = plan
            .moves
            .iter()
            .take(config.max_moves_per_pass)
            .cloned()
            .collect();

        // Pass 1: pool -> staging, packing the moved regions back-to-back.
        let mut encoder = self
            .device
            .create_command_encoder(&CommandEncoderDescriptor {
                label: Some("Defrag Copy to Staging"),
            });

        let mut staging_offset = 0u64;
        let mut staging_map: Vec<(DefragMove, u64)> = Vec::new();

        for defrag_move in &moves_to_execute {
            encoder.copy_buffer_to_buffer(
                buffer,
                defrag_move.src_offset,
                &staging_buffer,
                staging_offset,
                defrag_move.size,
            );
            staging_map.push((defrag_move.clone(), staging_offset));
            staging_offset += defrag_move.size;
        }

        queue.submit(std::iter::once(encoder.finish()));

        // Pass 2: staging -> pool at the planned destination offsets.
        // Submission order on the same queue guarantees pass 1 runs first.
        let mut encoder = self
            .device
            .create_command_encoder(&CommandEncoderDescriptor {
                label: Some("Defrag Copy from Staging"),
            });

        for (defrag_move, staging_off) in &staging_map {
            encoder.copy_buffer_to_buffer(
                &staging_buffer,
                *staging_off,
                buffer,
                defrag_move.dst_offset,
                defrag_move.size,
            );
        }

        queue.submit(std::iter::once(encoder.finish()));

        drop(buffer_guard);

        // Metadata update, mirroring the logical path.
        let mut blocks = self.blocks.write();
        let mut allocation_offsets = self.allocation_offsets.write();

        let mut blocks_moved = 0usize;
        let mut bytes_moved = 0u64;
        let mut unmovable_blocks = 0usize;

        for defrag_move in &moves_to_execute {
            let block = match blocks.remove(&defrag_move.src_offset) {
                Some(b) => b,
                None => {
                    // Block vanished since planning; count it and continue.
                    unmovable_blocks += 1;
                    continue;
                }
            };

            let new_block = MemoryBlock {
                offset: defrag_move.dst_offset,
                size: block.size,
                is_free: false,
                allocation_id: block.allocation_id,
                movable: block.movable,
                ref_count: block.ref_count,
            };

            blocks.insert(defrag_move.dst_offset, new_block);

            if let Some(alloc_id) = block.allocation_id {
                allocation_offsets.insert(alloc_id, defrag_move.dst_offset);
            }

            blocks_moved += 1;
            bytes_moved += defrag_move.size;
        }

        // Release guards before rebuild_free_blocks(), which re-locks blocks.
        drop(blocks);
        drop(allocation_offsets);

        self.rebuild_free_blocks()?;

        let blocks = self.blocks.read();
        let fragmentation_after = self.calculate_fragmentation_internal(&blocks);

        Ok(DefragmentationResult {
            performed: blocks_moved > 0,
            blocks_moved,
            bytes_moved,
            fragmentation_before: plan.current_fragmentation,
            fragmentation_after,
            // Placeholder; caller fills in the real elapsed time.
            duration: Duration::ZERO,
            unmovable_blocks,
        })
    }
926
    /// Rebuilds every free-block entry from scratch: all free entries are
    /// discarded, then the gaps between consecutive allocated ranges (and the
    /// tail gap up to `pool_size`) are re-inserted as free blocks.
    ///
    /// Relies on `BTreeMap` iteration yielding allocated ranges in ascending
    /// offset order. The final coalesce pass merges any adjacent gaps.
    fn rebuild_free_blocks(&self) -> Result<()> {
        let mut blocks = self.blocks.write();

        // Snapshot allocated (offset, size) ranges in ascending order.
        let allocated_ranges: Vec<(u64, u64)> = blocks
            .iter()
            .filter(|(_, b)| !b.is_free)
            .map(|(offset, b)| (*offset, b.size))
            .collect();

        // Discard all existing free entries; they may be stale after moves.
        let offsets_to_remove: Vec<u64> = blocks
            .iter()
            .filter(|(_, b)| b.is_free)
            .map(|(offset, _)| *offset)
            .collect();

        for offset in offsets_to_remove {
            blocks.remove(&offset);
        }

        // Re-insert a free block for every gap between allocations.
        let mut last_end = 0u64;

        for (offset, size) in &allocated_ranges {
            if *offset > last_end {
                blocks.insert(
                    last_end,
                    MemoryBlock {
                        offset: last_end,
                        size: offset - last_end,
                        is_free: true,
                        allocation_id: None,
                        movable: true,
                        ref_count: 0,
                    },
                );
            }
            last_end = offset + size;
        }

        // Tail gap after the last allocation.
        if last_end < self.pool_size {
            blocks.insert(
                last_end,
                MemoryBlock {
                    offset: last_end,
                    size: self.pool_size - last_end,
                    is_free: true,
                    allocation_id: None,
                    movable: true,
                    ref_count: 0,
                },
            );
        }

        self.coalesce_free_blocks(&mut blocks);

        Ok(())
    }
992
993 fn calculate_fragmentation_internal(&self, blocks: &BTreeMap<u64, MemoryBlock>) -> f64 {
995 let free_blocks: Vec<u64> = blocks
996 .values()
997 .filter(|b| b.is_free)
998 .map(|b| b.size)
999 .collect();
1000
1001 self.calculate_fragmentation(&free_blocks)
1002 }
1003
1004 pub fn needs_defragmentation(&self) -> bool {
1006 let config = self.defrag_config.read();
1007 let stats = self.get_stats();
1008
1009 stats.fragmentation >= config.min_fragmentation_threshold
1010 }
1011
1012 pub fn get_fragmentation(&self) -> f64 {
1014 let blocks = self.blocks.read();
1015 self.calculate_fragmentation_internal(&blocks)
1016 }
1017
1018 pub fn get_total_bytes_defragged(&self) -> u64 {
1020 self.total_bytes_defragged.load(Ordering::Relaxed)
1021 }
1022
1023 pub fn time_since_last_defrag(&self) -> Option<Duration> {
1025 self.last_defrag_time
1026 .lock()
1027 .map(|instant| instant.elapsed())
1028 }
1029
    /// NOTE(review): despite the name, this does NOT hand out the pool's
    /// backing buffer — whenever a backing buffer exists, it creates a
    /// brand-new, unrelated buffer of the same size and usage on every call.
    /// Data written through the returned buffer never reaches the pool, and
    /// pool contents are not visible through it. Confirm the intent before
    /// relying on this accessor.
    pub fn buffer(&self) -> Option<Buffer> {
        self.buffer.lock().as_ref().map(|_b| {
            self.device.create_buffer(&BufferDescriptor {
                label: Some("Memory Pool Access"),
                size: self.pool_size,
                usage: self.usage,
                mapped_at_creation: false,
            })
        })
    }
1042
1043 pub fn get_available_memory(&self) -> u64 {
1045 let blocks = self.blocks.read();
1046 blocks
1047 .values()
1048 .filter(|block| block.is_free)
1049 .map(|block| block.size)
1050 .sum()
1051 }
1052
1053 pub fn get_current_usage(&self) -> u64 {
1055 *self.current_usage.lock()
1056 }
1057
1058 pub fn get_peak_usage(&self) -> u64 {
1060 *self.peak_usage.lock()
1061 }
1062
1063 pub fn get_stats(&self) -> MemoryPoolStats {
1065 let blocks = self.blocks.read();
1066 let free_blocks: Vec<_> = blocks
1067 .values()
1068 .filter(|b| b.is_free)
1069 .map(|b| b.size)
1070 .collect();
1071
1072 let allocated_blocks: Vec<_> = blocks
1073 .values()
1074 .filter(|b| !b.is_free)
1075 .map(|b| b.size)
1076 .collect();
1077
1078 MemoryPoolStats {
1079 pool_size: self.pool_size,
1080 current_usage: *self.current_usage.lock(),
1081 peak_usage: *self.peak_usage.lock(),
1082 available: self.get_available_memory(),
1083 allocation_count: *self.allocation_count.lock(),
1084 deallocation_count: *self.deallocation_count.lock(),
1085 defrag_count: *self.defrag_count.lock(),
1086 free_block_count: free_blocks.len(),
1087 allocated_block_count: allocated_blocks.len(),
1088 largest_free_block: free_blocks.iter().max().copied().unwrap_or(0),
1089 fragmentation: self.calculate_fragmentation(&free_blocks),
1090 }
1091 }
1092
1093 fn calculate_fragmentation(&self, free_blocks: &[u64]) -> f64 {
1095 if free_blocks.is_empty() {
1096 return 0.0;
1097 }
1098
1099 let total_free: u64 = free_blocks.iter().sum();
1100 let largest = free_blocks.iter().max().copied().unwrap_or(0);
1101
1102 if total_free == 0 {
1103 return 0.0;
1104 }
1105
1106 1.0 - (largest as f64 / total_free as f64)
1107 }
1108
1109 fn align_up(value: u64, alignment: u64) -> u64 {
1111 if alignment == 0 {
1112 return value;
1113 }
1114 value.div_ceil(alignment) * alignment
1115 }
1116
    /// Prints a human-readable statistics report to stdout.
    ///
    /// NOTE(review): the percentage lines divide by `pool_size`; a
    /// zero-sized pool would print NaN — confirm pools are always created
    /// with a non-zero size.
    pub fn print_stats(&self) {
        let stats = self.get_stats();
        println!("\nMemory Pool Statistics:");
        println!(" Pool size: {} bytes", stats.pool_size);
        println!(
            " Current usage: {} bytes ({:.1}%)",
            stats.current_usage,
            (stats.current_usage as f64 / stats.pool_size as f64) * 100.0
        );
        println!(
            " Peak usage: {} bytes ({:.1}%)",
            stats.peak_usage,
            (stats.peak_usage as f64 / stats.pool_size as f64) * 100.0
        );
        println!(" Available: {} bytes", stats.available);
        println!(" Allocations: {}", stats.allocation_count);
        println!(" Deallocations: {}", stats.deallocation_count);
        println!(" Defragmentations: {}", stats.defrag_count);
        println!(" Free blocks: {}", stats.free_block_count);
        println!(" Allocated blocks: {}", stats.allocated_block_count);
        println!(" Largest free block: {} bytes", stats.largest_free_block);
        println!(" Fragmentation: {:.1}%", stats.fragmentation * 100.0);
    }
1141}
1142
/// Point-in-time snapshot of a [`MemoryPool`]'s state, as returned by
/// `MemoryPool::get_stats`.
#[derive(Debug, Clone)]
pub struct MemoryPoolStats {
    /// Total pool capacity in bytes.
    pub pool_size: u64,
    /// Bytes currently allocated.
    pub current_usage: u64,
    /// High-water mark of allocated bytes.
    pub peak_usage: u64,
    /// Sum of all free block sizes in bytes.
    pub available: u64,
    /// Lifetime number of allocations.
    pub allocation_count: u64,
    /// Lifetime number of deallocations.
    pub deallocation_count: u64,
    /// Lifetime number of defragmentation passes.
    pub defrag_count: u64,
    /// Number of free blocks in the block map.
    pub free_block_count: usize,
    /// Number of allocated blocks in the block map.
    pub allocated_block_count: usize,
    /// Size of the largest single free block, in bytes.
    pub largest_free_block: u64,
    /// Fragmentation ratio, 0.0 (none) to 1.0 (highly fragmented).
    pub fragmentation: f64,
}
1169
1170impl MemoryAllocation {
1171 pub fn offset(&self) -> u64 {
1176 self.pool
1179 .get_allocation_offset(self.id)
1180 .unwrap_or(self.original_offset)
1181 }
1182
1183 pub fn size(&self) -> u64 {
1185 self.size
1186 }
1187
1188 pub fn range(&self) -> Range<u64> {
1190 let offset = self.offset();
1191 offset..(offset + self.size)
1192 }
1193
1194 pub fn id(&self) -> u64 {
1196 self.id
1197 }
1198}
1199
impl Drop for MemoryAllocation {
    /// Returns the region to the pool. Errors from `deallocate` (e.g. the
    /// block already gone) are deliberately ignored — `drop` cannot fail.
    fn drop(&mut self) {
        let _ = self.pool.deallocate(self);
    }
}
1206
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_align_up() {
        assert_eq!(MemoryPool::align_up(0, 256), 0);
        assert_eq!(MemoryPool::align_up(1, 256), 256);
        assert_eq!(MemoryPool::align_up(256, 256), 256);
        assert_eq!(MemoryPool::align_up(257, 256), 512);
        // Alignment of 1 is the identity.
        assert_eq!(MemoryPool::align_up(1023, 1), 1023);
        // Alignment of 0 means "no alignment": the value passes through.
        assert_eq!(MemoryPool::align_up(1023, 0), 1023);
    }

    #[test]
    fn test_memory_block() {
        let block = MemoryBlock {
            offset: 0,
            size: 1024,
            is_free: true,
            allocation_id: None,
            movable: true,
            ref_count: 0,
        };

        assert!(block.is_free);
        assert_eq!(block.size, 1024);
        assert!(block.movable);
        assert_eq!(block.ref_count, 0);
    }
}