use crate::{Buffer, BufferDescriptor, Device};
use torsh_core::error::{Result, TorshError};

#[cfg(not(feature = "std"))]
use alloc::{boxed::Box, format, string::String, vec, vec::Vec};
8
/// Device-level memory manager.
///
/// Owns buffer allocation/deallocation for one [`Device`], optionally backed
/// by a pluggable [`MemoryPool`], and exposes statistics, unified-memory
/// operations, and defragmentation controls.
pub trait MemoryManager: Send + Sync {
    /// Allocate a buffer matching `descriptor`.
    fn allocate(&mut self, descriptor: &BufferDescriptor) -> Result<Buffer>;

    /// Release a buffer previously returned by [`Self::allocate`].
    fn deallocate(&mut self, buffer: &Buffer) -> Result<()>;

    /// Current memory statistics snapshot.
    fn stats(&self) -> MemoryStats;

    /// Reclaim unused memory.
    /// NOTE(review): the meaning of the returned count (bytes vs. blocks
    /// freed) is not visible from this file — confirm against implementors.
    fn garbage_collect(&mut self) -> Result<usize>;

    /// Install a custom allocation pool.
    fn set_pool(&mut self, pool: Box<dyn MemoryPool>) -> Result<()>;

    /// Device this manager allocates on.
    fn device(&self) -> &Device;

    /// Allocate `size` raw bytes with the given `alignment`.
    fn allocate_raw(&mut self, size: usize, alignment: usize) -> Result<*mut u8>;

    /// Free a raw allocation obtained from [`Self::allocate_raw`].
    fn deallocate_raw(&mut self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Whether the backend supports host/device unified memory.
    fn supports_unified_memory(&self) -> bool;

    /// Allocate `size` bytes of unified (host- and device-visible) memory.
    fn allocate_unified(&mut self, size: usize) -> Result<*mut u8>;

    /// Free a unified allocation obtained from [`Self::allocate_unified`].
    fn deallocate_unified(&mut self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Hint that the range `[ptr, ptr + size)` should migrate to the device.
    fn prefetch_to_device(&self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Hint that the range `[ptr, ptr + size)` should migrate to the host.
    fn prefetch_to_host(&self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Apply a usage-pattern hint to a unified-memory range.
    fn set_memory_advice(&self, ptr: *mut u8, size: usize, advice: MemoryAdvice) -> Result<()>;

    /// Bytes currently available for allocation.
    fn available_memory(&self) -> Result<usize>;

    /// Total bytes of memory on the managed device.
    fn total_memory(&self) -> Result<usize>;

    /// Block until outstanding memory operations have completed.
    fn synchronize(&self) -> Result<()>;

    /// Run a defragmentation pass and report what changed.
    fn defragment(&mut self) -> Result<DefragmentationResult>;

    /// Whether fragmentation has reached a level worth defragmenting.
    fn needs_defragmentation(&self) -> bool;

    /// Detailed fragmentation metrics.
    fn fragmentation_info(&self) -> FragmentationInfo;

    /// Compact free memory and report the outcome.
    fn compact_memory(&mut self) -> Result<CompactionResult>;

    /// Set the policy governing automatic defragmentation.
    fn set_defragmentation_policy(&mut self, policy: DefragmentationPolicy);
}
77
/// Low-level allocation pool used by a [`MemoryManager`].
///
/// Allocations are identified by the pointer returned from `allocate`
/// together with the originally requested size.
pub trait MemoryPool: Send + Sync {
    /// Allocate `size` bytes aligned to `alignment`.
    fn allocate(&mut self, size: usize, alignment: usize) -> Result<*mut u8>;

    /// Return a block previously obtained from [`Self::allocate`]; `size`
    /// must match the original request.
    fn deallocate(&mut self, ptr: *mut u8, size: usize) -> Result<()>;

    /// Current pool statistics snapshot.
    fn stats(&self) -> PoolStats;

    /// Discard all allocations and restore the pool to its initial state.
    fn reset(&mut self) -> Result<()>;

    /// Total bytes managed by the pool.
    fn capacity(&self) -> usize;

    /// Bytes currently free.
    fn available(&self) -> usize;

    /// Defragment the pool's free space and report what changed.
    fn defragment(&mut self) -> Result<DefragmentationResult>;

    /// Whether the pool would benefit from defragmentation.
    fn needs_defragmentation(&self) -> bool;

    /// Detailed fragmentation metrics.
    fn fragmentation_info(&self) -> FragmentationInfo;

    /// Consolidate free blocks and report the outcome.
    fn compact(&mut self) -> Result<CompactionResult>;
}
110
/// Aggregate memory-usage statistics for a memory manager or pool.
#[derive(Debug, Clone)]
pub struct MemoryStats {
    /// Total bytes managed.
    pub total_memory: usize,

    /// Bytes currently allocated.
    pub allocated_memory: usize,

    /// Bytes currently free.
    pub available_memory: usize,

    /// Highest observed allocated byte count.
    pub peak_memory: usize,

    /// Number of live allocations.
    pub active_allocations: usize,

    /// Cumulative number of allocations performed.
    pub total_allocations: usize,

    /// Cumulative number of deallocations performed.
    pub total_deallocations: usize,

    /// Fragmentation ratio in [0.0, 1.0]; higher is more fragmented.
    pub fragmentation: f32,

    /// Allocated-to-total ratio in [0.0, 1.0].
    pub efficiency: f32,
}

impl Default for MemoryStats {
    /// All counters start at zero and all ratios at 0.0.
    fn default() -> Self {
        MemoryStats {
            fragmentation: 0.0,
            efficiency: 0.0,
            total_memory: 0,
            allocated_memory: 0,
            available_memory: 0,
            peak_memory: 0,
            active_allocations: 0,
            total_allocations: 0,
            total_deallocations: 0,
        }
    }
}

impl MemoryStats {
    /// Percentage of total memory currently allocated (0.0–100.0).
    ///
    /// Returns 0.0 for an empty (zero-capacity) manager instead of dividing
    /// by zero.
    pub fn utilization(&self) -> f32 {
        match self.total_memory {
            0 => 0.0,
            total => (self.allocated_memory as f32 / total as f32) * 100.0,
        }
    }

    /// True when utilization exceeds 90% or fragmentation exceeds 0.5.
    pub fn is_under_pressure(&self) -> bool {
        self.fragmentation > 0.5 || self.utilization() > 90.0
    }
}
173
/// Point-in-time statistics for a [`MemoryPool`].
#[derive(Debug, Clone, Default)]
pub struct PoolStats {
    /// Total bytes managed by the pool.
    pub capacity: usize,

    /// Bytes currently allocated.
    pub allocated: usize,

    /// Bytes currently free.
    pub available: usize,

    /// Number of free blocks.
    pub free_blocks: usize,

    /// Number of live allocations.
    pub allocated_blocks: usize,

    /// Size of the largest free block in bytes (0 when none).
    pub largest_free_block: usize,

    /// Size of the smallest free block in bytes (0 when none).
    pub smallest_free_block: usize,

    /// Mean free-block size in bytes (0 when none).
    pub average_free_block: usize,
}
201
/// Free-list memory pool over a caller-provided memory region.
///
/// Tracks free and allocated ranges as `(offset, size)` pairs relative to
/// `base_ptr`. The pool only performs offset arithmetic — it never
/// dereferences or frees `base_ptr`; ownership of the backing allocation
/// stays with the caller.
#[derive(Debug)]
pub struct FreeListPool {
    // Start of the backing region (not owned by the pool).
    base_ptr: *mut u8,
    // Size of the backing region in bytes.
    total_size: usize,
    // Free ranges as (offset, size); not kept sorted between coalesce passes.
    free_blocks: Vec<(usize, usize)>,
    // Live allocations as (offset, size).
    allocated_blocks: Vec<(usize, usize)>,
    // Cached statistics, refreshed by `update_stats`.
    stats: MemoryStats,
}
220
221impl FreeListPool {
222 pub fn new(base_ptr: *mut u8, total_size: usize) -> Self {
224 let mut pool = Self {
225 base_ptr,
226 total_size,
227 free_blocks: vec![(0, total_size)],
228 allocated_blocks: Vec::new(),
229 stats: MemoryStats::default(),
230 };
231 pool.update_stats();
232 pool
233 }
234
235 fn find_free_block(&self, size: usize, alignment: usize) -> Option<usize> {
237 self.find_free_block_with_strategy(size, alignment, AllocationStrategy::FirstFit)
238 }
239
240 fn find_free_block_with_strategy(
242 &self,
243 size: usize,
244 alignment: usize,
245 strategy: AllocationStrategy,
246 ) -> Option<usize> {
247 match strategy {
248 AllocationStrategy::FirstFit => self
249 .free_blocks
250 .iter()
251 .enumerate()
252 .find(|(_, &(offset, block_size))| {
253 let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
254 let padding = aligned_offset - offset;
255 padding + size <= block_size
256 })
257 .map(|(idx, _)| idx),
258 AllocationStrategy::BestFit => {
259 let mut best_idx = None;
260 let mut best_size = usize::MAX;
261
262 for (idx, &(offset, block_size)) in self.free_blocks.iter().enumerate() {
263 let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
264 let padding = aligned_offset - offset;
265
266 if padding + size <= block_size && block_size < best_size {
267 best_idx = Some(idx);
268 best_size = block_size;
269 }
270 }
271
272 best_idx
273 }
274 AllocationStrategy::WorstFit => {
275 let mut worst_idx = None;
276 let mut worst_size = 0;
277
278 for (idx, &(offset, block_size)) in self.free_blocks.iter().enumerate() {
279 let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
280 let padding = aligned_offset - offset;
281
282 if padding + size <= block_size && block_size > worst_size {
283 worst_idx = Some(idx);
284 worst_size = block_size;
285 }
286 }
287
288 worst_idx
289 }
290 AllocationStrategy::NextFit => {
291 self.find_free_block_with_strategy(size, alignment, AllocationStrategy::FirstFit)
294 }
295 }
296 }
297
298 fn update_stats(&mut self) {
300 let allocated: usize = self.allocated_blocks.iter().map(|(_, size)| size).sum();
301 let available: usize = self.free_blocks.iter().map(|(_, size)| size).sum();
302
303 self.stats.allocated_memory = allocated;
304 self.stats.available_memory = available;
305 self.stats.active_allocations = self.allocated_blocks.len();
306 self.stats.total_memory = self.total_size;
307 self.stats.efficiency = if self.total_size > 0 {
308 allocated as f32 / self.total_size as f32
309 } else {
310 0.0
311 };
312 self.stats.fragmentation = if available > 0 {
313 1.0 - (self
314 .free_blocks
315 .iter()
316 .map(|(_, size)| *size)
317 .max()
318 .unwrap_or(0) as f32
319 / available as f32)
320 } else {
321 0.0
322 };
323 }
324
325 pub fn capacity(&self) -> usize {
327 self.total_size
328 }
329
330 fn coalesce_free_blocks(&mut self) {
332 if self.free_blocks.len() <= 1 {
333 return;
334 }
335
336 self.free_blocks.sort_by_key(|(offset, _)| *offset);
338
339 let mut i = 0;
341 while i < self.free_blocks.len().saturating_sub(1) {
342 let (offset1, size1) = self.free_blocks[i];
343 let (offset2, size2) = self.free_blocks[i + 1];
344
345 if offset1 + size1 == offset2 {
347 self.free_blocks[i] = (offset1, size1 + size2);
349 self.free_blocks.remove(i + 1);
350 } else {
352 i += 1;
353 }
354 }
355 }
356
357 pub fn detect_leaks(&self) -> Vec<LeakReport> {
359 let mut reports = Vec::new();
362
363 if self.allocated_blocks.len() > 1000 {
364 reports.push(LeakReport {
365 leak_type: LeakType::TooManyAllocations,
366 block_count: self.allocated_blocks.len(),
367 total_size: self.stats.allocated_memory,
368 severity: LeakSeverity::High,
369 description: format!(
370 "Too many active allocations: {}",
371 self.allocated_blocks.len()
372 ),
373 });
374 }
375
376 for &(offset, size) in &self.allocated_blocks {
378 if size > self.total_size / 4 {
379 reports.push(LeakReport {
381 leak_type: LeakType::LargeAllocation,
382 block_count: 1,
383 total_size: size,
384 severity: LeakSeverity::Medium,
385 description: format!("Large allocation at offset {}: {} bytes", offset, size),
386 });
387 }
388 }
389
390 reports
391 }
392
393 pub fn validate_consistency(&self) -> Result<()> {
395 for i in 0..self.allocated_blocks.len() {
397 for j in (i + 1)..self.allocated_blocks.len() {
398 let (offset1, size1) = self.allocated_blocks[i];
399 let (offset2, size2) = self.allocated_blocks[j];
400
401 let end1 = offset1 + size1;
402 let end2 = offset2 + size2;
403
404 if offset1 < end2 && offset2 < end1 {
405 return Err(TorshError::AllocationError(format!(
406 "Overlapping allocations detected: [{}, {}) and [{}, {})",
407 offset1, end1, offset2, end2
408 )));
409 }
410 }
411 }
412
413 for i in 0..self.free_blocks.len() {
415 for j in (i + 1)..self.free_blocks.len() {
416 let (offset1, size1) = self.free_blocks[i];
417 let (offset2, size2) = self.free_blocks[j];
418
419 let end1 = offset1 + size1;
420 let end2 = offset2 + size2;
421
422 if offset1 < end2 && offset2 < end1 {
423 return Err(TorshError::AllocationError(format!(
424 "Overlapping free blocks detected: [{}, {}) and [{}, {})",
425 offset1, end1, offset2, end2
426 )));
427 }
428 }
429 }
430
431 for &(offset, size) in &self.allocated_blocks {
433 if offset + size > self.total_size {
434 return Err(TorshError::AllocationError(format!(
435 "Allocated block extends beyond pool: offset={}, size={}, pool_size={}",
436 offset, size, self.total_size
437 )));
438 }
439 }
440
441 for &(offset, size) in &self.free_blocks {
442 if offset + size > self.total_size {
443 return Err(TorshError::AllocationError(format!(
444 "Free block extends beyond pool: offset={}, size={}, pool_size={}",
445 offset, size, self.total_size
446 )));
447 }
448 }
449
450 Ok(())
451 }
452}
453
454impl MemoryPool for FreeListPool {
455 fn allocate(&mut self, size: usize, alignment: usize) -> Result<*mut u8> {
456 if size == 0 {
458 return Err(TorshError::InvalidArgument(
459 "Allocation size cannot be zero".to_string(),
460 ));
461 }
462
463 if alignment == 0 || !alignment.is_power_of_two() {
464 return Err(TorshError::InvalidArgument(format!(
465 "Alignment must be a power of two and non-zero, got: {}",
466 alignment
467 )));
468 }
469
470 if size > self.total_size || alignment > self.total_size {
472 return Err(TorshError::AllocationError(format!(
473 "Requested size ({}) or alignment ({}) exceeds pool capacity ({})",
474 size, alignment, self.total_size
475 )));
476 }
477
478 if let Some(block_idx) = self.find_free_block(size, alignment) {
479 let (offset, block_size) = self.free_blocks[block_idx];
480 let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
481 let padding = aligned_offset - offset;
482 let required_size = padding + size;
483
484 self.free_blocks.remove(block_idx);
486
487 if padding > 0 {
489 self.free_blocks.push((offset, padding));
490 }
491
492 if required_size < block_size {
494 let remaining_offset = offset + required_size;
495 let remaining_size = block_size - required_size;
496 self.free_blocks.push((remaining_offset, remaining_size));
497 }
498
499 self.allocated_blocks.push((aligned_offset, size));
501
502 self.update_stats();
504
505 Ok(unsafe { self.base_ptr.add(aligned_offset) })
507 } else {
508 Err(TorshError::AllocationError(format!(
509 "Out of memory: requested {} bytes, available memory is {} bytes",
510 size, self.stats.available_memory
511 )))
512 }
513 }
514
515 #[allow(clippy::not_unsafe_ptr_arg_deref)]
516 fn deallocate(&mut self, ptr: *mut u8, size: usize) -> Result<()> {
517 if ptr.is_null() {
519 return Err(TorshError::InvalidArgument(
520 "Cannot deallocate null pointer".to_string(),
521 ));
522 }
523
524 if size == 0 {
525 return Err(TorshError::InvalidArgument(
526 "Cannot deallocate zero-sized block".to_string(),
527 ));
528 }
529
530 if ptr < self.base_ptr || ptr >= unsafe { self.base_ptr.add(self.total_size) } {
532 return Err(TorshError::InvalidArgument(
533 "Pointer outside of memory pool range".to_string(),
534 ));
535 }
536
537 let offset = unsafe { ptr.offset_from(self.base_ptr) } as usize;
540
541 if let Some(pos) = self
543 .allocated_blocks
544 .iter()
545 .position(|&(off, sz)| off == offset && sz == size)
546 {
547 self.allocated_blocks.remove(pos);
548
549 self.free_blocks.push((offset, size));
551
552 self.coalesce_free_blocks();
554
555 self.update_stats();
557
558 Ok(())
559 } else {
560 Err(TorshError::InvalidArgument(
561 "Invalid deallocation: block not found".to_string(),
562 ))
563 }
564 }
565
566 fn stats(&self) -> PoolStats {
567 PoolStats {
568 capacity: self.total_size,
569 allocated: self.stats.allocated_memory,
570 available: self.stats.available_memory,
571 free_blocks: self.free_blocks.len(),
572 allocated_blocks: self.allocated_blocks.len(),
573 largest_free_block: self
574 .free_blocks
575 .iter()
576 .map(|(_, size)| *size)
577 .max()
578 .unwrap_or(0),
579 smallest_free_block: self
580 .free_blocks
581 .iter()
582 .map(|(_, size)| *size)
583 .min()
584 .unwrap_or(0),
585 average_free_block: if self.free_blocks.is_empty() {
586 0
587 } else {
588 self.stats.available_memory / self.free_blocks.len()
589 },
590 }
591 }
592
593 fn reset(&mut self) -> Result<()> {
594 self.free_blocks.clear();
595 self.allocated_blocks.clear();
596 self.free_blocks.push((0, self.total_size));
597 self.update_stats();
598 Ok(())
599 }
600
601 fn capacity(&self) -> usize {
602 self.total_size
603 }
604
605 fn available(&self) -> usize {
606 self.stats.available_memory
607 }
608
609 fn defragment(&mut self) -> Result<DefragmentationResult> {
610 Ok(DefragmentationResult {
612 blocks_moved: 0,
613 memory_compacted: 0,
614 duration_ms: 0.0,
615 fragmentation_before: 0.0,
616 fragmentation_after: 0.0,
617 efficiency_improvement: 0.0,
618 success: true,
619 })
620 }
621
622 fn needs_defragmentation(&self) -> bool {
623 self.free_blocks.len() > 10
625 }
626
627 fn fragmentation_info(&self) -> FragmentationInfo {
628 let free_blocks = self.free_blocks.len();
629 let allocated_blocks = self.allocated_blocks.len();
630 let total_free = self.stats.available_memory;
631 let total_allocated = self.stats.allocated_memory;
632
633 let largest_free = self
634 .free_blocks
635 .iter()
636 .map(|(_, size)| *size)
637 .max()
638 .unwrap_or(0);
639
640 let smallest_free = self
641 .free_blocks
642 .iter()
643 .map(|(_, size)| *size)
644 .min()
645 .unwrap_or(0);
646
647 let average_free = if free_blocks > 0 {
648 total_free / free_blocks
649 } else {
650 0
651 };
652
653 let fragmentation = if self.capacity() > 0 {
654 free_blocks as f32 / (free_blocks + allocated_blocks) as f32
655 } else {
656 0.0
657 };
658
659 FragmentationInfo {
660 overall_fragmentation: fragmentation,
661 external_fragmentation: fragmentation * 0.8,
662 internal_fragmentation: fragmentation * 0.2,
663 free_blocks,
664 allocated_blocks,
665 largest_free_block: largest_free,
666 smallest_free_block: smallest_free,
667 average_free_block: average_free,
668 total_free_memory: total_free,
669 total_allocated_memory: total_allocated,
670 utilization_efficiency: if self.capacity() > 0 {
671 total_allocated as f32 / self.capacity() as f32
672 } else {
673 0.0
674 },
675 allocation_efficiency: if self.capacity() > 0 {
676 total_allocated as f32 / self.capacity() as f32
677 } else {
678 0.0
679 },
680 }
681 }
682
683 fn compact(&mut self) -> Result<CompactionResult> {
684 let free_blocks_before = self.free_blocks.len();
686
687 self.free_blocks.sort_by_key(|(offset, _)| *offset);
689
690 let mut i = 0;
692 while i < self.free_blocks.len().saturating_sub(1) {
693 let (offset1, size1) = self.free_blocks[i];
694 let (offset2, size2) = self.free_blocks[i + 1];
695
696 if offset1 + size1 == offset2 {
697 self.free_blocks[i] = (offset1, size1 + size2);
699 self.free_blocks.remove(i + 1);
700 } else {
701 i += 1;
702 }
703 }
704
705 let free_blocks_after = self.free_blocks.len();
706
707 Ok(CompactionResult {
708 allocations_moved: 0,
709 bytes_moved: 0,
710 duration_ms: 0.0,
711 largest_free_before: self
712 .free_blocks
713 .iter()
714 .map(|(_, size)| *size)
715 .max()
716 .unwrap_or(0),
717 largest_free_after: self
718 .free_blocks
719 .iter()
720 .map(|(_, size)| *size)
721 .max()
722 .unwrap_or(0),
723 free_blocks_before,
724 free_blocks_after,
725 success: true,
726 })
727 }
728}
729
// SAFETY: `FreeListPool` is not auto-`Send`/`Sync` because it stores a raw
// `*mut u8`. The pool itself only performs offset arithmetic on that pointer
// and never dereferences it, so its own operations introduce no data races.
// NOTE(review): callers must still synchronize `&mut` access externally and
// guarantee the backing memory's thread-safety — confirm all users uphold
// this contract.
unsafe impl Send for FreeListPool {}
unsafe impl Sync for FreeListPool {}
732
/// Placement strategy used when searching a pool's free list.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocationStrategy {
    /// Take the first block that is large enough.
    FirstFit,
    /// Take the smallest block that is large enough.
    BestFit,
    /// Take the largest available block.
    WorstFit,
    /// Resume searching from the previous allocation point.
    NextFit,
}

/// Caller-provided hints that guide allocation decisions.
#[derive(Debug, Clone)]
pub struct AllocationHint {
    /// Expected lifetime class of the allocation.
    pub lifetime: AllocationLifetime,
    /// Expected access pattern.
    pub access_pattern: AccessPattern,
    /// Preferred placement strategy.
    pub strategy: AllocationStrategy,
    /// Whether a pool should service the request.
    pub use_pool: bool,
}

/// Expected lifetime class of an allocation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocationLifetime {
    /// Freed almost immediately.
    Temporary,
    /// Short-lived.
    Short,
    /// Medium-lived.
    Medium,
    /// Long-lived.
    Long,
    /// Lives for the remainder of the program.
    Persistent,
}

/// Expected access pattern of an allocation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessPattern {
    /// No predictable pattern.
    Random,
    /// Linear walks through memory.
    Sequential,
    /// Mostly reads.
    ReadMostly,
    /// Mostly writes.
    WriteMostly,
    /// Written once and streamed through.
    Streaming,
}

impl Default for AllocationHint {
    /// Conservative defaults: pooled, first-fit, medium lifetime, random
    /// access.
    fn default() -> Self {
        AllocationHint {
            use_pool: true,
            strategy: AllocationStrategy::FirstFit,
            access_pattern: AccessPattern::Random,
            lifetime: AllocationLifetime::Medium,
        }
    }
}
813
/// Usage-pattern advice for unified-memory ranges.
///
/// NOTE(review): the variants appear to mirror CUDA's `cudaMemAdvise`
/// values — confirm the intended mapping in backend implementations.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryAdvice {
    /// Prefer keeping the range resident in a particular location.
    SetPreferredLocation,
    /// Clear a previously set preferred location.
    UnsetPreferredLocation,
    /// Declare that a device will access the range.
    SetAccessedBy,
    /// Clear a previous accessed-by declaration.
    UnsetAccessedBy,
    /// Mark the range as mostly read.
    SetReadMostly,
    /// Clear the read-mostly marking.
    UnsetReadMostly,
}
830
/// Factory that produces [`MemoryManager`] instances for one backend.
pub trait MemoryManagerFactory: Send + Sync {
    /// Create a memory manager bound to `device`.
    fn create_manager(&self, device: &Device) -> Result<Box<dyn MemoryManager>>;

    /// Backend this factory belongs to.
    fn backend_type(&self) -> crate::BackendType;

    /// Whether this factory can create managers for `device`.
    fn supports_device(&self, device: &Device) -> bool;
}
842
843#[derive(Debug, Clone)]
845pub struct MemoryPoolConfig {
846 pub initial_size: usize,
848
849 pub max_size: Option<usize>,
851
852 pub growth_factor: f32,
854
855 pub strategy: AllocationStrategy,
857
858 pub enable_coalescing: bool,
860
861 pub min_block_size: usize,
863
864 pub alignment: usize,
866
867 pub numa_strategy: Option<crate::cpu::memory::NumaAllocationStrategy>,
869}
870
871impl Default for MemoryPoolConfig {
872 fn default() -> Self {
873 Self {
874 initial_size: 64 * 1024 * 1024, max_size: None,
876 growth_factor: 1.5,
877 strategy: AllocationStrategy::FirstFit,
878 enable_coalescing: true,
879 min_block_size: 256,
880 alignment: 16,
881 numa_strategy: None,
882 }
883 }
884}
885
886impl MemoryPoolConfig {
887 pub fn new(initial_size: usize) -> Self {
889 Self {
890 initial_size,
891 ..Default::default()
892 }
893 }
894
895 pub fn with_max_size(mut self, max_size: usize) -> Self {
897 self.max_size = Some(max_size);
898 self
899 }
900
901 pub fn with_growth_factor(mut self, growth_factor: f32) -> Self {
903 self.growth_factor = growth_factor;
904 self
905 }
906
907 pub fn with_strategy(mut self, strategy: AllocationStrategy) -> Self {
909 self.strategy = strategy;
910 self
911 }
912
913 pub fn with_alignment(mut self, alignment: usize) -> Self {
915 self.alignment = alignment;
916 self
917 }
918}
919
/// Outcome of a defragmentation pass.
#[derive(Debug, Clone)]
pub struct DefragmentationResult {
    /// Number of live blocks relocated.
    pub blocks_moved: usize,
    /// Bytes compacted during the pass.
    pub memory_compacted: usize,
    /// Wall-clock duration of the pass, in milliseconds.
    pub duration_ms: f64,
    /// Fragmentation ratio before the pass.
    pub fragmentation_before: f32,
    /// Fragmentation ratio after the pass.
    pub fragmentation_after: f32,
    /// Efficiency gained by the pass.
    pub efficiency_improvement: f32,
    /// Whether the pass completed successfully.
    pub success: bool,
}

impl DefragmentationResult {
    /// True when the pass succeeded and gained more than 10% efficiency.
    pub fn is_improvement_significant(&self) -> bool {
        if !self.success {
            return false;
        }
        self.efficiency_improvement > 0.1
    }

    /// Fraction of `total_memory` that was compacted (0.0 for an empty
    /// pool, avoiding division by zero).
    pub fn compaction_ratio(&self, total_memory: usize) -> f32 {
        match total_memory {
            0 => 0.0,
            total => self.memory_compacted as f32 / total as f32,
        }
    }
}
960
/// Outcome of a free-space compaction pass.
#[derive(Debug, Clone)]
pub struct CompactionResult {
    /// Number of live allocations relocated.
    pub allocations_moved: usize,
    /// Bytes copied while relocating.
    pub bytes_moved: usize,
    /// Wall-clock duration in milliseconds.
    pub duration_ms: f64,
    /// Largest free block before compaction, in bytes.
    pub largest_free_before: usize,
    /// Largest free block after compaction, in bytes.
    pub largest_free_after: usize,
    /// Free-block count before compaction.
    pub free_blocks_before: usize,
    /// Free-block count after compaction.
    pub free_blocks_after: usize,
    /// Whether the pass completed successfully.
    pub success: bool,
}

impl CompactionResult {
    /// Fractional reduction in free-block count (1.0 means fully merged, or
    /// there was nothing to merge).
    pub fn consolidation_improvement(&self) -> f32 {
        match self.free_blocks_before {
            0 => 1.0,
            before => 1.0 - (self.free_blocks_after as f32 / before as f32),
        }
    }

    /// Growth ratio of the largest free block: infinity when a free block
    /// appeared where none existed before, 0.0 when there is still none.
    pub fn largest_block_improvement(&self) -> f32 {
        match (self.largest_free_before, self.largest_free_after) {
            (0, 0) => 0.0,
            (0, _) => f32::INFINITY,
            (before, after) => after as f32 / before as f32,
        }
    }
}
1012
/// Detailed fragmentation metrics for a pool or manager.
#[derive(Debug, Clone, Default)]
pub struct FragmentationInfo {
    /// Combined fragmentation ratio in [0.0, 1.0].
    pub overall_fragmentation: f32,
    /// Fragmentation attributed to scattered free blocks.
    pub external_fragmentation: f32,
    /// Fragmentation attributed to wasted space inside blocks.
    pub internal_fragmentation: f32,
    /// Number of free blocks.
    pub free_blocks: usize,
    /// Number of live allocations.
    pub allocated_blocks: usize,
    /// Largest free block in bytes.
    pub largest_free_block: usize,
    /// Smallest free block in bytes.
    pub smallest_free_block: usize,
    /// Mean free-block size in bytes.
    pub average_free_block: usize,
    /// Total free bytes.
    pub total_free_memory: usize,
    /// Total allocated bytes.
    pub total_allocated_memory: usize,
    /// Allocated-to-capacity ratio.
    pub utilization_efficiency: f32,
    /// Allocation efficiency ratio.
    pub allocation_efficiency: f32,
}

impl FragmentationInfo {
    /// True when fragmentation is high enough to noticeably hurt allocation.
    pub fn is_severely_fragmented(&self) -> bool {
        self.external_fragmentation > 0.6 || self.overall_fragmentation > 0.7
    }

    /// True when running a defragmentation pass is likely to pay off.
    pub fn would_benefit_from_defragmentation(&self) -> bool {
        if self.is_severely_fragmented() {
            return true;
        }
        self.free_blocks > 10 && self.utilization_efficiency < 0.8
    }

    /// Bucket `overall_fragmentation` into a coarse severity level.
    pub fn severity_level(&self) -> FragmentationSeverity {
        match self.overall_fragmentation {
            f if f < 0.2 => FragmentationSeverity::Low,
            f if f < 0.5 => FragmentationSeverity::Medium,
            f if f < 0.8 => FragmentationSeverity::High,
            // >= 0.8 (and NaN, matching the original comparison chain).
            _ => FragmentationSeverity::Critical,
        }
    }
}

/// Coarse fragmentation severity buckets, ordered from best to worst.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum FragmentationSeverity {
    /// overall < 0.2
    Low,
    /// 0.2 <= overall < 0.5
    Medium,
    /// 0.5 <= overall < 0.8
    High,
    /// overall >= 0.8
    Critical,
}
1087
/// Policy controlling when and how automatic defragmentation runs.
#[derive(Debug, Clone)]
pub struct DefragmentationPolicy {
    /// Fragmentation ratio that triggers an automatic pass.
    pub auto_trigger_threshold: f32,
    /// Minimum time between passes, in milliseconds.
    pub min_interval_ms: u64,
    /// Maximum time a single pass may take, in milliseconds.
    pub max_duration_ms: u64,
    /// How a pass proceeds.
    pub strategy: DefragmentationStrategy,
    /// Whether passes may run in the background.
    pub enable_background: bool,
    /// Scheduling priority of passes.
    pub priority: DefragmentationPriority,
    /// Whether allocations are paused while a pass runs.
    pub pause_allocations: bool,
    /// Fragmentation ratio that forces an immediate pass.
    pub emergency_threshold: f32,
}

impl Default for DefragmentationPolicy {
    /// Trigger at 60% fragmentation, at most one pass per 10 s, each capped
    /// at 5 s, incremental low-priority background work; emergency at 90%.
    fn default() -> Self {
        DefragmentationPolicy {
            auto_trigger_threshold: 0.6,
            min_interval_ms: 10_000, // at most one pass every 10 seconds
            max_duration_ms: 5_000,  // cap a single pass at 5 seconds
            strategy: DefragmentationStrategy::Incremental,
            enable_background: true,
            priority: DefragmentationPriority::Low,
            pause_allocations: false,
            emergency_threshold: 0.9,
        }
    }
}

/// How a defragmentation pass traverses and moves memory.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DefragmentationStrategy {
    /// Compact everything in a single pass.
    FullCompaction,
    /// Work in small bounded steps.
    Incremental,
    /// Only consolidate small blocks.
    SmallBlocksOnly,
    /// Handle the largest blocks first.
    LargeBlocksFirst,
    /// Only merge adjacent free blocks; move nothing.
    CoalesceOnly,
    /// Group work by allocation age generation.
    Generational,
}

/// Scheduling priority for defragmentation work, lowest to highest.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum DefragmentationPriority {
    Low,
    Normal,
    High,
    Critical,
}
1161
/// A single finding from `FreeListPool::detect_leaks`.
#[derive(Debug, Clone)]
pub struct LeakReport {
    /// Category of the suspected leak.
    pub leak_type: LeakType,
    /// Number of blocks involved in the finding.
    pub block_count: usize,
    /// Total bytes involved in the finding.
    pub total_size: usize,
    /// How serious the finding is.
    pub severity: LeakSeverity,
    /// Human-readable summary.
    pub description: String,
}
1176
/// Category of a suspected memory leak.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LeakType {
    /// Unusually high count of live allocations.
    TooManyAllocations,
    /// Single allocation taking a large share of the pool.
    LargeAllocation,
    /// Allocation that has outlived its expected lifetime.
    LongLivedAllocation,
    /// Leak-like behavior caused by fragmentation.
    Fragmentation,
}
1189
/// Severity of a leak finding, ordered from least to most serious.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum LeakSeverity {
    Low,
    Medium,
    High,
    Critical,
}
1198
#[cfg(test)]
mod tests {
    use super::*;
    use crate::device::{Device, DeviceInfo};
    use torsh_core::device::DeviceType;

    /// Minimal CPU device for tests that require one.
    fn create_test_device() -> Device {
        let info = DeviceInfo::default();
        Device::new(0, DeviceType::Cpu, "Test CPU".to_string(), info)
    }

    /// Allocate `capacity` bytes (8-byte aligned) to back a pool under test.
    fn alloc_backing(capacity: usize) -> (*mut u8, std::alloc::Layout) {
        let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
        let base = unsafe { std::alloc::alloc(layout) };
        assert!(!base.is_null());
        (base, layout)
    }

    /// Release memory obtained from `alloc_backing`.
    fn free_backing(base: *mut u8, layout: std::alloc::Layout) {
        unsafe { std::alloc::dealloc(base, layout) };
    }

    #[test]
    fn test_memory_stats_default() {
        let stats = MemoryStats::default();

        assert_eq!(stats.total_memory, 0);
        assert_eq!(stats.allocated_memory, 0);
        assert_eq!(stats.available_memory, 0);
        assert_eq!(stats.peak_memory, 0);
        assert_eq!(stats.active_allocations, 0);
        assert_eq!(stats.total_allocations, 0);
        assert_eq!(stats.total_deallocations, 0);
        assert_eq!(stats.fragmentation, 0.0);
        assert_eq!(stats.efficiency, 0.0);
    }

    #[test]
    fn test_memory_stats_utilization() {
        let mut stats = MemoryStats {
            total_memory: 1000,
            allocated_memory: 300,
            ..Default::default()
        };
        assert!((stats.utilization() - 30.0).abs() < 0.001);

        // Zero total memory must not divide by zero.
        stats.total_memory = 0;
        assert_eq!(stats.utilization(), 0.0);
    }

    #[test]
    fn test_memory_stats_pressure() {
        let mut stats = MemoryStats {
            total_memory: 1000,
            allocated_memory: 850,
            fragmentation: 0.3,
            ..Default::default()
        };
        // 85% utilization with moderate fragmentation: no pressure yet.
        assert!(!stats.is_under_pressure());

        // Utilization above 90% triggers pressure.
        stats.allocated_memory = 950;
        assert!(stats.is_under_pressure());

        // High fragmentation alone also triggers pressure.
        stats.allocated_memory = 500;
        stats.fragmentation = 0.6;
        assert!(stats.is_under_pressure());
    }

    #[test]
    fn test_pool_stats_default() {
        let stats = PoolStats::default();

        assert_eq!(stats.capacity, 0);
        assert_eq!(stats.allocated, 0);
        assert_eq!(stats.available, 0);
        assert_eq!(stats.free_blocks, 0);
        assert_eq!(stats.allocated_blocks, 0);
        assert_eq!(stats.largest_free_block, 0);
        assert_eq!(stats.smallest_free_block, 0);
        assert_eq!(stats.average_free_block, 0);
    }

    #[test]
    fn test_allocation_strategy_variants() {
        let strategies = [
            AllocationStrategy::FirstFit,
            AllocationStrategy::BestFit,
            AllocationStrategy::WorstFit,
            AllocationStrategy::NextFit,
        ];

        // Every pair of distinct variants must compare unequal.
        for (i, a) in strategies.iter().enumerate() {
            for (j, b) in strategies.iter().enumerate() {
                if i != j {
                    assert_ne!(a, b);
                }
            }
        }
    }

    #[test]
    fn test_allocation_lifetime_variants() {
        let lifetimes = [
            AllocationLifetime::Temporary,
            AllocationLifetime::Short,
            AllocationLifetime::Medium,
            AllocationLifetime::Long,
            AllocationLifetime::Persistent,
        ];

        for (i, a) in lifetimes.iter().enumerate() {
            for (j, b) in lifetimes.iter().enumerate() {
                if i != j {
                    assert_ne!(a, b);
                }
            }
        }
    }

    #[test]
    fn test_access_pattern_variants() {
        let patterns = [
            AccessPattern::Random,
            AccessPattern::Sequential,
            AccessPattern::ReadMostly,
            AccessPattern::WriteMostly,
            AccessPattern::Streaming,
        ];

        for (i, a) in patterns.iter().enumerate() {
            for (j, b) in patterns.iter().enumerate() {
                if i != j {
                    assert_ne!(a, b);
                }
            }
        }
    }

    #[test]
    fn test_allocation_hint_default() {
        let hint = AllocationHint::default();

        assert_eq!(hint.lifetime, AllocationLifetime::Medium);
        assert_eq!(hint.access_pattern, AccessPattern::Random);
        assert_eq!(hint.strategy, AllocationStrategy::FirstFit);
        assert!(hint.use_pool);
    }

    #[test]
    fn test_memory_pool_config_default() {
        let config = MemoryPoolConfig::default();

        assert_eq!(config.initial_size, 64 * 1024 * 1024); // 64 MiB
        assert_eq!(config.max_size, None);
        assert_eq!(config.growth_factor, 1.5);
        assert_eq!(config.strategy, AllocationStrategy::FirstFit);
        assert!(config.enable_coalescing);
        assert_eq!(config.min_block_size, 256);
        assert_eq!(config.alignment, 16);
    }

    #[test]
    fn test_memory_pool_config_builder() {
        let config = MemoryPoolConfig::new(128 * 1024 * 1024)
            .with_max_size(1024 * 1024 * 1024)
            .with_growth_factor(2.0)
            .with_strategy(AllocationStrategy::BestFit)
            .with_alignment(64);

        assert_eq!(config.initial_size, 128 * 1024 * 1024);
        assert_eq!(config.max_size, Some(1024 * 1024 * 1024));
        assert_eq!(config.growth_factor, 2.0);
        assert_eq!(config.strategy, AllocationStrategy::BestFit);
        assert_eq!(config.alignment, 64);
    }

    #[test]
    fn test_free_list_pool_creation() {
        let _device = create_test_device();
        let capacity = 1024 * 1024;
        let (base, layout) = alloc_backing(capacity);

        let pool = FreeListPool::new(base, capacity);
        assert_eq!(pool.capacity(), capacity);
        assert_eq!(pool.available(), capacity);

        // A fresh pool is one big free block.
        let stats = pool.stats();
        assert_eq!(stats.capacity, capacity);
        assert_eq!(stats.available, capacity);
        assert_eq!(stats.allocated, 0);
        assert_eq!(stats.free_blocks, 1);
        assert_eq!(stats.allocated_blocks, 0);
        assert_eq!(stats.largest_free_block, capacity);

        free_backing(base, layout);
    }

    #[test]
    fn test_free_list_pool_allocation() {
        let _device = create_test_device();
        let capacity = 1024;
        let (base, layout) = alloc_backing(capacity);
        let mut pool = FreeListPool::new(base, capacity);

        assert!(pool.allocate(256, 16).is_ok());
        let stats = pool.stats();
        assert_eq!(stats.allocated, 256);
        assert!(stats.available < capacity);
        assert_eq!(stats.allocated_blocks, 1);

        assert!(pool.allocate(128, 16).is_ok());
        let stats = pool.stats();
        assert_eq!(stats.allocated, 256 + 128);
        assert_eq!(stats.allocated_blocks, 2);

        // Larger than the remaining space: must fail.
        assert!(pool.allocate(1024, 16).is_err());

        free_backing(base, layout);
    }

    #[test]
    fn test_free_list_pool_deallocation() {
        let _device = create_test_device();
        let capacity = 1024;
        let (base, layout) = alloc_backing(capacity);
        let mut pool = FreeListPool::new(base, capacity);

        let ptr1 = pool.allocate(256, 16).unwrap();
        let ptr2 = pool.allocate(128, 16).unwrap();
        assert_eq!(pool.stats().allocated_blocks, 2);

        assert!(pool.deallocate(ptr1, 256).is_ok());
        let stats = pool.stats();
        assert_eq!(stats.allocated, 128);
        assert_eq!(stats.allocated_blocks, 1);

        assert!(pool.deallocate(ptr2, 128).is_ok());
        let stats = pool.stats();
        assert_eq!(stats.allocated, 0);
        assert_eq!(stats.allocated_blocks, 0);

        free_backing(base, layout);
    }

    #[test]
    fn test_free_list_pool_reset() {
        let _device = create_test_device();
        let capacity = 1024;
        let (base, layout) = alloc_backing(capacity);
        let mut pool = FreeListPool::new(base, capacity);

        let _ptr1 = pool.allocate(256, 16).unwrap();
        let _ptr2 = pool.allocate(128, 16).unwrap();
        assert_eq!(pool.stats().allocated_blocks, 2);

        // Reset must restore the single full-size free block.
        assert!(pool.reset().is_ok());
        let stats = pool.stats();
        assert_eq!(stats.allocated, 0);
        assert_eq!(stats.allocated_blocks, 0);
        assert_eq!(stats.free_blocks, 1);
        assert_eq!(stats.available, capacity);
        assert_eq!(stats.largest_free_block, capacity);

        free_backing(base, layout);
    }

    #[test]
    fn test_free_list_pool_find_free_block() {
        let _device = create_test_device();
        let capacity = 1024;
        let (base, layout) = alloc_backing(capacity);
        let pool = FreeListPool::new(base, capacity);

        // A fresh pool has a single free block at index 0.
        assert_eq!(pool.find_free_block(256, 16), Some(0));

        // Requests larger than the pool cannot be satisfied.
        assert!(pool.find_free_block(2048, 16).is_none());

        free_backing(base, layout);
    }
}