1#[allow(dead_code)]
7use std::collections::{BTreeMap, HashMap, VecDeque};
8use std::ptr::NonNull;
9use std::sync::{Arc, Mutex};
10use std::time::{Duration, Instant};
11
/// Central coordinator for memory defragmentation.
///
/// Owns the tracked memory layout, a set of pluggable compaction
/// strategies, aggregate statistics, and a bounded history of past runs.
pub struct DefragmentationEngine {
    /// Tunable behaviour (thresholds, time budgets, preferred algorithm).
    config: DefragConfig,
    /// Aggregate statistics across all defragmentation cycles.
    stats: DefragStats,
    /// Tasks registered via `create_task`; currently stored, not scheduled.
    active_tasks: Vec<DefragTask>,
    /// Bookkeeping of free regions and allocated blocks.
    memory_layout: MemoryLayoutTracker,
    /// Candidate strategies; the best-scoring one is picked each cycle.
    strategies: Vec<Box<dyn CompactionStrategy>>,
    /// History of cycles, capped at 1000 entries by `defragment`.
    performance_history: VecDeque<DefragPerformance>,
}
27
/// Configuration knobs for [`DefragmentationEngine`].
#[derive(Debug, Clone)]
pub struct DefragConfig {
    /// Run defragmentation automatically when `should_defragment` says so.
    pub auto_defrag: bool,
    /// Fragmentation ratio (0.0..=1.0) above which auto-defrag triggers.
    pub fragmentation_threshold: f64,
    /// Upper bound on time for one defragmentation run.
    /// NOTE(review): not enforced by any visible code path — confirm intent.
    pub max_defrag_time: Duration,
    /// Minimum total free space required before defragmenting.
    pub min_free_space: usize,
    /// Perform work in small increments instead of one big pass.
    pub incremental_defrag: bool,
    /// Chunk size (bytes) per increment when `incremental_defrag` is set.
    pub incremental_chunk_size: usize,
    /// Allow parallel workers. NOTE(review): unused by visible code paths.
    pub parallel_defrag: bool,
    /// Number of worker threads when `parallel_defrag` is enabled.
    pub worker_threads: usize,
    /// Algorithm to prefer when several strategies are viable.
    /// NOTE(review): `select_best_strategy` currently ignores this — confirm.
    pub preferred_algorithm: CompactionAlgorithm,
    /// Collect statistics while defragmenting.
    pub enable_stats: bool,
}
52
53impl Default for DefragConfig {
54 fn default() -> Self {
55 Self {
56 auto_defrag: true,
57 fragmentation_threshold: 0.3,
58 max_defrag_time: Duration::from_millis(100),
59 min_free_space: 1024 * 1024, incremental_defrag: true,
61 incremental_chunk_size: 64 * 1024, parallel_defrag: false,
63 worker_threads: 2,
64 preferred_algorithm: CompactionAlgorithm::SlidingCompaction,
65 enable_stats: true,
66 }
67 }
68}
69
/// Compaction algorithms a strategy may report using.
///
/// Only `SlidingCompaction` and `TwoPointer` have strategy implementations
/// in this file; the remaining variants are declared but not produced by
/// any visible code path.
#[derive(Debug, Clone, PartialEq)]
pub enum CompactionAlgorithm {
    /// Slide movable blocks toward the lowest free address.
    SlidingCompaction,
    /// Pack blocks using a compaction cursor over the sorted block list.
    TwoPointer,
    MarkSweepCompact,
    CopyingGC,
    Generational,
    Adaptive,
}
86
/// Aggregate statistics across all defragmentation cycles.
#[derive(Debug, Clone, Default)]
pub struct DefragStats {
    /// Cycles counted by `defragment` (failures return before counting).
    pub total_cycles: u64,
    /// Total bytes relocated over all cycles.
    pub total_bytes_moved: u64,
    /// Cumulative wall-clock time spent in strategy execution.
    pub total_time_spent: Duration,
    /// Running mean of per-cycle fragmentation reduction.
    pub average_fragmentation_reduction: f64,
    /// Cycles that completed without error.
    pub successful_cycles: u64,
    /// Failed cycles. NOTE(review): never incremented in visible code.
    pub failed_cycles: u64,
    /// Running mean of per-cycle duration.
    pub average_cycle_time: Duration,
    /// Highest fragmentation seen. NOTE(review): never updated here.
    pub peak_fragmentation: f64,
    /// Fragmentation measured by the last `should_defragment` call.
    pub current_fragmentation: f64,
    /// Total number of blocks relocated.
    pub objects_relocated: u64,
    /// NOTE(review): never updated in visible code — confirm whether it
    /// should mirror the strategies' efficiency scores.
    pub compaction_efficiency: f64,
}
113
/// A queued defragmentation work item.
#[derive(Debug, Clone)]
pub struct DefragTask {
    /// Identifier assigned by `DefragmentationEngine::create_task`.
    pub id: u64,
    /// First address of the region this task targets.
    pub start_addr: usize,
    /// Size in bytes of the targeted region.
    pub size: usize,
    /// Algorithm requested for this task.
    pub algorithm: CompactionAlgorithm,
    /// Current lifecycle state.
    pub status: TaskStatus,
    /// When the task was created.
    pub created_at: Instant,
    /// Optional estimate of remaining time; `None` when unknown.
    pub estimated_completion: Option<Duration>,
    /// Scheduling priority.
    pub priority: TaskPriority,
}
134
/// Lifecycle states of a [`DefragTask`].
#[derive(Debug, Clone, PartialEq)]
pub enum TaskStatus {
    /// Created but not yet started.
    Pending,
    /// Currently executing; a `Running` task blocks new `defragment` calls.
    Running,
    /// Temporarily suspended.
    Paused,
    /// Finished successfully.
    Completed,
    /// Finished with an error (message attached).
    Failed(String),
    /// Aborted before completion.
    Cancelled,
}
145
/// Scheduling priority; the derived ordering follows declaration order
/// (`Low < Normal < High < Critical`).
#[derive(Debug, Clone, PartialEq, Ord, PartialOrd, Eq)]
pub enum TaskPriority {
    Low,
    Normal,
    High,
    Critical,
}
154
/// Bookkeeping view of a heap: free regions and allocated blocks, both
/// keyed by start address, plus a short-lived fragmentation memo.
pub struct MemoryLayoutTracker {
    /// Free regions keyed (and therefore iterated) in address order.
    free_regions: BTreeMap<usize, FreeRegion>,
    /// Allocated blocks keyed by start address (unordered iteration).
    allocated_blocks: HashMap<usize, AllocatedBlock>,
    /// Memoized `(fragmentation, computed_at)`; `None` when stale.
    fragmentation_cache: Option<(f64, Instant)>,
    /// How long a cached fragmentation value stays valid.
    cache_validity: Duration,
}
166
/// A contiguous span of unallocated memory.
#[derive(Debug, Clone)]
pub struct FreeRegion {
    /// Start address of the region.
    pub address: usize,
    /// Length in bytes.
    pub size: usize,
    /// How long the region has been free (zero at insertion time).
    pub age: Duration,
    /// NOTE(review): never incremented in visible code.
    pub access_frequency: u32,
    /// True if the region touches an allocated block at either end.
    pub adjacent_to_allocated: bool,
}
176
/// A live allocation tracked by the layout.
#[derive(Debug, Clone)]
pub struct AllocatedBlock {
    /// Start address of the block.
    pub address: usize,
    /// Length in bytes.
    pub size: usize,
    /// When the block was registered with the tracker.
    pub allocation_time: Instant,
    /// Last recorded access. NOTE(review): never set in visible code.
    pub last_access: Option<Instant>,
    /// Access counter. NOTE(review): never incremented in visible code.
    pub access_count: u32,
    /// Whether compaction is allowed to relocate this block.
    pub is_movable: bool,
    /// External reference count (starts at 1 on insertion).
    pub reference_count: u32,
}
188
/// Pluggable compaction algorithm.
///
/// Implementors must be `Send + Sync` so boxed strategies can live inside
/// an engine that is shared across threads (see
/// `ThreadSafeDefragmentationEngine`).
pub trait CompactionStrategy: Send + Sync {
    /// Human-readable strategy name.
    fn name(&self) -> &str;
    /// Whether this strategy can operate on the given layout at all.
    fn can_handle(&self, layout: &MemoryLayoutTracker) -> bool;
    /// Heuristic score (higher is better) used to pick the best strategy.
    fn estimate_benefit(&self, layout: &MemoryLayoutTracker) -> f64;
    /// Runs one compaction pass, mutating `layout` in place.
    fn execute(
        &mut self,
        layout: &mut MemoryLayoutTracker,
    ) -> Result<CompactionResult, DefragError>;
    /// Snapshot of this strategy's cumulative statistics.
    fn get_statistics(&self) -> CompactionStats;
}
200
/// Outcome of one strategy execution.
#[derive(Debug, Clone)]
pub struct CompactionResult {
    /// Bytes relocated during this pass.
    pub bytes_moved: usize,
    /// Number of blocks relocated.
    pub objects_relocated: u32,
    /// Fragmentation before minus fragmentation after (may be negative).
    pub fragmentation_reduction: f64,
    /// Wall-clock duration of the pass.
    pub time_taken: Duration,
    /// Which algorithm produced this result.
    pub algorithm_used: CompactionAlgorithm,
    /// Fragmentation reduction per MiB moved (0.0 when nothing moved).
    pub efficiency_score: f64,
}
211
/// Cumulative per-strategy execution statistics.
#[derive(Debug, Clone, Default)]
pub struct CompactionStats {
    /// Number of completed `execute` calls.
    pub executions: u64,
    /// Total bytes relocated across executions.
    pub total_bytes_moved: u64,
    /// Total blocks relocated across executions.
    pub total_objects_relocated: u64,
    /// Total wall-clock time spent executing.
    pub total_time: Duration,
    /// Running mean of per-execution efficiency scores.
    pub average_efficiency: f64,
    /// Fraction of executions that succeeded.
    pub success_rate: f64,
}
222
/// One entry in the engine's bounded performance history.
#[derive(Debug, Clone)]
pub struct DefragPerformance {
    /// When the cycle started.
    pub timestamp: Instant,
    /// Fragmentation recorded before the cycle.
    pub fragmentation_before: f64,
    /// Fragmentation measured after the cycle.
    pub fragmentation_after: f64,
    /// Duration of the cycle.
    pub time_taken: Duration,
    /// Bytes relocated during the cycle.
    pub bytes_moved: usize,
    /// Algorithm the winning strategy reported.
    pub algorithm_used: CompactionAlgorithm,
    /// Whether the cycle completed successfully.
    pub success: bool,
}
234
// `Default` simply delegates to `new()`.
impl Default for MemoryLayoutTracker {
    fn default() -> Self {
        Self::new()
    }
}
240
241impl MemoryLayoutTracker {
242 pub fn new() -> Self {
243 Self {
244 free_regions: BTreeMap::new(),
245 allocated_blocks: HashMap::new(),
246 fragmentation_cache: None,
247 cache_validity: Duration::from_millis(100),
248 }
249 }
250
251 pub fn calculate_fragmentation(&mut self) -> f64 {
253 let now = Instant::now();
254
255 if let Some((cached_frag, cache_time)) = self.fragmentation_cache {
257 if now.duration_since(cache_time) < self.cache_validity {
258 return cached_frag;
259 }
260 }
261
262 let fragmentation = if self.free_regions.is_empty() {
263 0.0
264 } else {
265 let total_free_space: usize = self.free_regions.values().map(|r| r.size).sum();
266 let largest_free_block = self
267 .free_regions
268 .values()
269 .map(|r| r.size)
270 .max()
271 .unwrap_or(0);
272
273 if total_free_space == 0 {
274 0.0
275 } else {
276 1.0 - (largest_free_block as f64 / total_free_space as f64)
277 }
278 };
279
280 self.fragmentation_cache = Some((fragmentation, now));
282 fragmentation
283 }
284
285 pub fn add_free_region(&mut self, address: usize, size: usize) {
287 let region = FreeRegion {
288 address,
289 size,
290 age: Duration::from_secs(0),
291 access_frequency: 0,
292 adjacent_to_allocated: self.is_adjacent_to_allocated(address, size),
293 };
294 self.free_regions.insert(address, region);
295 self.invalidate_cache();
296 }
297
298 pub fn add_allocated_block(&mut self, address: usize, size: usize, is_movable: bool) {
300 let block = AllocatedBlock {
301 address,
302 size,
303 allocation_time: Instant::now(),
304 last_access: None,
305 access_count: 0,
306 is_movable,
307 reference_count: 1,
308 };
309 self.allocated_blocks.insert(address, block);
310 self.invalidate_cache();
311 }
312
313 pub fn remove_free_region(&mut self, address: usize) -> Option<FreeRegion> {
315 self.invalidate_cache();
316 self.free_regions.remove(&address)
317 }
318
319 pub fn remove_allocated_block(&mut self, address: usize) -> Option<AllocatedBlock> {
321 self.invalidate_cache();
322 self.allocated_blocks.remove(&address)
323 }
324
325 pub fn get_total_free_space(&self) -> usize {
327 self.free_regions.values().map(|r| r.size).sum()
328 }
329
330 pub fn get_largest_free_block(&self) -> usize {
332 self.free_regions
333 .values()
334 .map(|r| r.size)
335 .max()
336 .unwrap_or(0)
337 }
338
339 pub fn get_movable_blocks(&self) -> Vec<&AllocatedBlock> {
341 self.allocated_blocks
342 .values()
343 .filter(|b| b.is_movable)
344 .collect()
345 }
346
347 fn is_adjacent_to_allocated(&self, address: usize, size: usize) -> bool {
349 let end_address = address + size;
350
351 for block in self.allocated_blocks.values() {
352 let block_end = block.address + block.size;
353
354 if block_end == address || block.address == end_address {
356 return true;
357 }
358 }
359
360 false
361 }
362
363 fn invalidate_cache(&mut self) {
365 self.fragmentation_cache = None;
366 }
367
368 pub fn coalesce_free_regions(&mut self) -> usize {
370 let mut coalesced_count = 0;
371 let mut regions_to_remove = Vec::new();
372 let mut regions_to_add = Vec::new();
373
374 let addresses: Vec<usize> = self.free_regions.keys().cloned().collect();
375
376 for &addr in &addresses {
377 if regions_to_remove.contains(&addr) {
378 continue;
379 }
380
381 if let Some(region) = self.free_regions.get(&addr) {
382 let end_addr = addr + region.size;
383
384 if let Some(next_region) = self.free_regions.get(&end_addr) {
386 let coalesced_region = FreeRegion {
388 address: addr,
389 size: region.size + next_region.size,
390 age: region.age.min(next_region.age),
391 access_frequency: region.access_frequency + next_region.access_frequency,
392 adjacent_to_allocated: region.adjacent_to_allocated
393 || next_region.adjacent_to_allocated,
394 };
395
396 regions_to_remove.push(addr);
397 regions_to_remove.push(end_addr);
398 regions_to_add.push((addr, coalesced_region));
399 coalesced_count += 1;
400 }
401 }
402 }
403
404 for addr in regions_to_remove {
406 self.free_regions.remove(&addr);
407 }
408
409 for (addr, region) in regions_to_add {
410 self.free_regions.insert(addr, region);
411 }
412
413 self.invalidate_cache();
414 coalesced_count
415 }
416}
417
/// Strategy that slides movable blocks toward the lowest free address.
pub struct SlidingCompactionStrategy {
    /// Cumulative statistics for this strategy instance.
    stats: CompactionStats,
}
422
// `Default` simply delegates to `new()`.
impl Default for SlidingCompactionStrategy {
    fn default() -> Self {
        Self::new()
    }
}
428
impl SlidingCompactionStrategy {
    /// Creates a strategy with zeroed statistics.
    pub fn new() -> Self {
        Self {
            stats: CompactionStats::default(),
        }
    }
}
436
437impl CompactionStrategy for SlidingCompactionStrategy {
438 fn name(&self) -> &str {
439 "SlidingCompaction"
440 }
441
442 fn can_handle(&self, layout: &MemoryLayoutTracker) -> bool {
443 !layout.get_movable_blocks().is_empty() && layout.get_total_free_space() > 0
444 }
445
446 fn estimate_benefit(&self, layout: &MemoryLayoutTracker) -> f64 {
447 let movable_blocks = layout.get_movable_blocks();
448 let total_free = layout.get_total_free_space();
449 let largest_free = layout.get_largest_free_block();
450
451 if total_free == 0 {
452 return 0.0;
453 }
454
455 let fragmentation_reduction = (total_free - largest_free) as f64 / total_free as f64;
457 let mobility_factor =
458 movable_blocks.len() as f64 / (layout.allocated_blocks.len() as f64 + 1.0);
459
460 fragmentation_reduction * mobility_factor
461 }
462
463 fn execute(
464 &mut self,
465 layout: &mut MemoryLayoutTracker,
466 ) -> Result<CompactionResult, DefragError> {
467 let start_time = Instant::now();
468 let initial_fragmentation = layout.calculate_fragmentation();
469
470 let movable_blocks: Vec<AllocatedBlock> =
471 layout.get_movable_blocks().into_iter().cloned().collect();
472
473 if movable_blocks.is_empty() {
474 return Err(DefragError::NoMovableBlocks);
475 }
476
477 let mut bytes_moved = 0;
478 let mut objects_relocated = 0;
479 let mut compaction_address = 0;
480
481 if let Some((&first_free_addr, _)) = layout.free_regions.iter().next() {
483 compaction_address = first_free_addr;
484 }
485
486 let mut sorted_blocks = movable_blocks;
488 sorted_blocks.sort_by_key(|b| b.address);
489
490 for block in sorted_blocks {
492 if block.address > compaction_address {
493 layout.remove_allocated_block(block.address);
495 layout.add_allocated_block(compaction_address, block.size, block.is_movable);
496
497 layout.add_free_region(block.address, block.size);
499
500 bytes_moved += block.size;
501 objects_relocated += 1;
502
503 compaction_address += block.size;
504 } else {
505 compaction_address = block.address + block.size;
506 }
507 }
508
509 layout.coalesce_free_regions();
511
512 let final_fragmentation = layout.calculate_fragmentation();
513 let fragmentation_reduction = initial_fragmentation - final_fragmentation;
514 let time_taken = start_time.elapsed();
515
516 self.stats.executions += 1;
518 self.stats.total_bytes_moved += bytes_moved as u64;
519 self.stats.total_objects_relocated += objects_relocated as u64;
520 self.stats.total_time += time_taken;
521
522 let efficiency = if bytes_moved > 0 {
523 fragmentation_reduction / (bytes_moved as f64 / 1024.0 / 1024.0) } else {
525 0.0
526 };
527
528 self.stats.average_efficiency =
529 (self.stats.average_efficiency * (self.stats.executions - 1) as f64 + efficiency)
530 / self.stats.executions as f64;
531 self.stats.success_rate = 1.0; Ok(CompactionResult {
534 bytes_moved,
535 objects_relocated,
536 fragmentation_reduction,
537 time_taken,
538 algorithm_used: CompactionAlgorithm::SlidingCompaction,
539 efficiency_score: efficiency,
540 })
541 }
542
543 fn get_statistics(&self) -> CompactionStats {
544 self.stats.clone()
545 }
546}
547
/// Strategy that packs movable blocks contiguously from the lowest
/// movable address using a compaction cursor.
pub struct TwoPointerCompactionStrategy {
    /// Cumulative statistics for this strategy instance.
    stats: CompactionStats,
}
552
// `Default` simply delegates to `new()`.
impl Default for TwoPointerCompactionStrategy {
    fn default() -> Self {
        Self::new()
    }
}
558
impl TwoPointerCompactionStrategy {
    /// Creates a strategy with zeroed statistics.
    pub fn new() -> Self {
        Self {
            stats: CompactionStats::default(),
        }
    }
}
566
567impl CompactionStrategy for TwoPointerCompactionStrategy {
568 fn name(&self) -> &str {
569 "TwoPointer"
570 }
571
572 fn can_handle(&self, layout: &MemoryLayoutTracker) -> bool {
573 layout.get_movable_blocks().len() >= 2 && layout.get_total_free_space() > 0
574 }
575
576 fn estimate_benefit(&self, layout: &MemoryLayoutTracker) -> f64 {
577 let movable_blocks = layout.get_movable_blocks();
578 let free_space = layout.get_total_free_space();
579
580 if movable_blocks.len() < 2 || free_space == 0 {
581 return 0.0;
582 }
583
584 let mut addresses: Vec<usize> = movable_blocks.iter().map(|b| b.address).collect();
586 addresses.sort();
587
588 let mut total_gaps = 0;
589 for i in 1..addresses.len() {
590 let gap = addresses[i] - addresses[i - 1];
591 if gap > movable_blocks[i - 1].size {
592 total_gaps += gap - movable_blocks[i - 1].size;
593 }
594 }
595
596 total_gaps as f64 / free_space as f64
597 }
598
599 fn execute(
600 &mut self,
601 layout: &mut MemoryLayoutTracker,
602 ) -> Result<CompactionResult, DefragError> {
603 let start_time = Instant::now();
604 let initial_fragmentation = layout.calculate_fragmentation();
605
606 let movable_blocks: Vec<AllocatedBlock> =
607 layout.get_movable_blocks().into_iter().cloned().collect();
608
609 if movable_blocks.len() < 2 {
610 return Err(DefragError::InsufficientBlocks);
611 }
612
613 let mut bytes_moved = 0;
614 let mut objects_relocated = 0;
615
616 let mut sorted_blocks = movable_blocks;
618 sorted_blocks.sort_by_key(|b| b.address);
619
620 let left_ptr = 0;
621 let mut compact_addr = sorted_blocks[0].address;
622
623 for block in &sorted_blocks {
625 if block.address != compact_addr {
626 layout.remove_allocated_block(block.address);
628 layout.add_allocated_block(compact_addr, block.size, block.is_movable);
629
630 layout.add_free_region(block.address, block.size);
632
633 bytes_moved += block.size;
634 objects_relocated += 1;
635 }
636
637 compact_addr += block.size;
638 }
639
640 layout.coalesce_free_regions();
642
643 let final_fragmentation = layout.calculate_fragmentation();
644 let fragmentation_reduction = initial_fragmentation - final_fragmentation;
645 let time_taken = start_time.elapsed();
646
647 self.stats.executions += 1;
649 self.stats.total_bytes_moved += bytes_moved as u64;
650 self.stats.total_objects_relocated += objects_relocated as u64;
651 self.stats.total_time += time_taken;
652
653 let efficiency = if bytes_moved > 0 {
654 fragmentation_reduction / (bytes_moved as f64 / 1024.0 / 1024.0)
655 } else {
656 0.0
657 };
658
659 self.stats.average_efficiency =
660 (self.stats.average_efficiency * (self.stats.executions - 1) as f64 + efficiency)
661 / self.stats.executions as f64;
662
663 Ok(CompactionResult {
664 bytes_moved,
665 objects_relocated,
666 fragmentation_reduction,
667 time_taken,
668 algorithm_used: CompactionAlgorithm::TwoPointer,
669 efficiency_score: efficiency,
670 })
671 }
672
673 fn get_statistics(&self) -> CompactionStats {
674 self.stats.clone()
675 }
676}
677
678impl DefragmentationEngine {
679 pub fn new(config: DefragConfig) -> Self {
680 let strategies: Vec<Box<dyn CompactionStrategy>> = vec![
681 Box::new(SlidingCompactionStrategy::new()),
682 Box::new(TwoPointerCompactionStrategy::new()),
683 ];
684
685 Self {
686 config,
687 stats: DefragStats::default(),
688 active_tasks: Vec::new(),
689 memory_layout: MemoryLayoutTracker::new(),
690 strategies: strategies
691 .into_iter()
692 .map(|s| s as Box<dyn CompactionStrategy>)
693 .collect(),
694 performance_history: VecDeque::with_capacity(1000),
695 }
696 }
697
698 pub fn should_defragment(&mut self) -> bool {
700 if !self.config.auto_defrag {
701 return false;
702 }
703
704 let current_fragmentation = self.memory_layout.calculate_fragmentation();
705 self.stats.current_fragmentation = current_fragmentation;
706
707 current_fragmentation > self.config.fragmentation_threshold
708 && self.memory_layout.get_total_free_space() >= self.config.min_free_space
709 }
710
711 pub fn defragment(&mut self) -> Result<CompactionResult, DefragError> {
713 let start_time = Instant::now();
714
715 if self
716 .active_tasks
717 .iter()
718 .any(|t| t.status == TaskStatus::Running)
719 {
720 return Err(DefragError::DefragmentationInProgress);
721 }
722
723 let strategy_index = self.select_best_strategy()?;
725 let strategy = &mut self.strategies[strategy_index];
726
727 let result = strategy.execute(&mut self.memory_layout)?;
729
730 self.stats.total_cycles += 1;
732 self.stats.total_bytes_moved += result.bytes_moved as u64;
733 self.stats.total_time_spent += result.time_taken;
734 self.stats.successful_cycles += 1;
735 self.stats.objects_relocated += result.objects_relocated as u64;
736 self.stats.average_fragmentation_reduction = (self.stats.average_fragmentation_reduction
737 * (self.stats.total_cycles - 1) as f64
738 + result.fragmentation_reduction)
739 / self.stats.total_cycles as f64;
740
741 let cycle_time = start_time.elapsed();
742 self.stats.average_cycle_time = Duration::from_nanos(
743 (self.stats.average_cycle_time.as_nanos() as u64 * (self.stats.total_cycles - 1)
744 + cycle_time.as_nanos() as u64)
745 / self.stats.total_cycles,
746 );
747
748 let performance = DefragPerformance {
750 timestamp: start_time,
751 fragmentation_before: self.stats.current_fragmentation,
752 fragmentation_after: self.memory_layout.calculate_fragmentation(),
753 time_taken: cycle_time,
754 bytes_moved: result.bytes_moved,
755 algorithm_used: result.algorithm_used.clone(),
756 success: true,
757 };
758
759 self.performance_history.push_back(performance);
760 if self.performance_history.len() > 1000 {
761 self.performance_history.pop_front();
762 }
763
764 Ok(result)
765 }
766
767 fn select_best_strategy(&mut self) -> Result<usize, DefragError> {
769 let mut best_index = 0;
770 let mut best_benefit = 0.0;
771
772 for (i, strategy) in self.strategies.iter().enumerate() {
773 if strategy.can_handle(&self.memory_layout) {
774 let benefit = strategy.estimate_benefit(&self.memory_layout);
775 if benefit > best_benefit {
776 best_benefit = benefit;
777 best_index = i;
778 }
779 }
780 }
781
782 if best_benefit == 0.0 {
783 return Err(DefragError::NoSuitableStrategy);
784 }
785
786 Ok(best_index)
787 }
788
789 pub fn create_task(
791 &mut self,
792 start_addr: usize,
793 size: usize,
794 algorithm: CompactionAlgorithm,
795 priority: TaskPriority,
796 ) -> u64 {
797 let task_id = self.active_tasks.len() as u64;
798 let task = DefragTask {
799 id: task_id,
800 start_addr,
801 size,
802 algorithm,
803 status: TaskStatus::Pending,
804 created_at: Instant::now(),
805 estimated_completion: None,
806 priority,
807 };
808
809 self.active_tasks.push(task);
810 task_id
811 }
812
813 pub fn get_stats(&self) -> &DefragStats {
815 &self.stats
816 }
817
818 pub fn get_performance_history(&self) -> &VecDeque<DefragPerformance> {
820 &self.performance_history
821 }
822
823 pub fn update_layout(
825 &mut self,
826 allocated_blocks: HashMap<usize, AllocatedBlock>,
827 free_regions: BTreeMap<usize, FreeRegion>,
828 ) {
829 self.memory_layout.allocated_blocks = allocated_blocks;
830 self.memory_layout.free_regions = free_regions;
831 self.memory_layout.invalidate_cache();
832 }
833
834 pub fn get_layout(&self) -> &MemoryLayoutTracker {
836 &self.memory_layout
837 }
838
839 pub fn reset(&mut self) {
841 self.stats = DefragStats::default();
842 self.active_tasks.clear();
843 self.memory_layout = MemoryLayoutTracker::new();
844 self.performance_history.clear();
845
846 for strategy in &mut self.strategies {
848 }
850 }
851}
852
// NOTE(review): the previous `unsafe impl Send` / `unsafe impl Sync` for
// `DefragmentationEngine` were removed as redundant. Every field is built
// from `Send + Sync` types — the strategy trait objects are
// `Box<dyn CompactionStrategy>` and `CompactionStrategy` is itself bounded
// by `Send + Sync` — so the compiler derives both marker traits
// automatically. The unblessed `unsafe impl`s would also have silently
// masked a real soundness error if a non-`Send` field were ever added.
861
/// Errors produced by the defragmentation engine and its strategies.
#[derive(Debug, Clone)]
pub enum DefragError {
    /// A cycle was requested while a task is already `Running`.
    DefragmentationInProgress,
    /// The layout contains no blocks marked movable.
    NoMovableBlocks,
    /// The chosen strategy needs more blocks than are available.
    InsufficientBlocks,
    /// No strategy reported a positive benefit for the current layout.
    NoSuitableStrategy,
    /// The layout bookkeeping is inconsistent.
    MemoryLayoutCorrupted,
    /// A cycle exceeded its time budget.
    TimeoutExceeded,
    /// Catch-all with a descriptive message.
    InternalError(String),
}
873
874impl std::fmt::Display for DefragError {
875 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
876 match self {
877 DefragError::DefragmentationInProgress => {
878 write!(f, "Defragmentation already in progress")
879 }
880 DefragError::NoMovableBlocks => write!(f, "No movable blocks available for compaction"),
881 DefragError::InsufficientBlocks => {
882 write!(f, "Insufficient blocks for compaction strategy")
883 }
884 DefragError::NoSuitableStrategy => {
885 write!(f, "No suitable compaction strategy available")
886 }
887 DefragError::MemoryLayoutCorrupted => write!(f, "Memory layout is corrupted"),
888 DefragError::TimeoutExceeded => write!(f, "Defragmentation timeout exceeded"),
889 DefragError::InternalError(msg) => write!(f, "Internal error: {}", msg),
890 }
891 }
892}
893
// Marker impl: the `Display` + `Debug` impls above satisfy `std::error::Error`.
impl std::error::Error for DefragError {}
895
/// Thread-safe facade exposing the engine behind an `Arc<Mutex<_>>`.
pub struct ThreadSafeDefragmentationEngine {
    /// The shared, lock-protected engine.
    engine: Arc<Mutex<DefragmentationEngine>>,
}
900
901impl ThreadSafeDefragmentationEngine {
902 pub fn new(config: DefragConfig) -> Self {
903 Self {
904 engine: Arc::new(Mutex::new(DefragmentationEngine::new(config))),
905 }
906 }
907
908 pub fn should_defragment(&self) -> bool {
909 let mut engine = self.engine.lock().unwrap();
910 engine.should_defragment()
911 }
912
913 pub fn defragment(&self) -> Result<CompactionResult, DefragError> {
914 let mut engine = self.engine.lock().unwrap();
915 engine.defragment()
916 }
917
918 pub fn get_stats(&self) -> DefragStats {
919 let engine = self.engine.lock().unwrap();
920 engine.get_stats().clone()
921 }
922
923 pub fn get_performance_history(&self) -> Vec<DefragPerformance> {
924 let engine = self.engine.lock().unwrap();
925 engine.get_performance_history().iter().cloned().collect()
926 }
927}
928
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_memory_layout_tracker() {
        let mut tracker = MemoryLayoutTracker::new();

        tracker.add_allocated_block(1000, 500, true);
        tracker.add_allocated_block(2000, 300, false);
        tracker.add_free_region(1500, 200);
        tracker.add_free_region(2500, 800);

        // Fragmentation is defined as a ratio and must stay in [0, 1].
        let fragmentation = tracker.calculate_fragmentation();
        assert!((0.0..=1.0).contains(&fragmentation));

        // 200 + 800 bytes free in total; 800 is the single largest region.
        assert_eq!(tracker.get_total_free_space(), 1000);
        assert_eq!(tracker.get_largest_free_block(), 800);
    }

    #[test]
    fn test_sliding_compaction_strategy() {
        let mut strategy = SlidingCompactionStrategy::new();
        let mut layout = MemoryLayoutTracker::new();

        // Two movable blocks separated by free gaps.
        layout.add_allocated_block(1000, 500, true);
        layout.add_free_region(1500, 200);
        layout.add_allocated_block(2000, 300, true);
        layout.add_free_region(2300, 500);

        assert!(strategy.can_handle(&layout));
        assert!(strategy.estimate_benefit(&layout) > 0.0);

        let result = strategy
            .execute(&mut layout)
            .expect("execute should succeed");
        assert!(result.bytes_moved > 0);
        assert!(result.objects_relocated > 0);
    }

    #[test]
    fn test_defragmentation_engine() {
        let mut engine = DefragmentationEngine::new(DefragConfig::default());

        let mut allocated_blocks = HashMap::new();
        allocated_blocks.insert(
            1000,
            AllocatedBlock {
                address: 1000,
                size: 500,
                allocation_time: Instant::now(),
                last_access: None,
                access_count: 0,
                is_movable: true,
                reference_count: 1,
            },
        );

        let mut free_regions = BTreeMap::new();
        free_regions.insert(
            1500,
            FreeRegion {
                address: 1500,
                size: 300,
                age: Duration::from_secs(10),
                access_frequency: 0,
                adjacent_to_allocated: true,
            },
        );

        engine.update_layout(allocated_blocks, free_regions);

        // Probing the trigger must not count as a defragmentation cycle.
        let _should_defrag = engine.should_defragment();
        assert_eq!(engine.get_stats().total_cycles, 0);
    }

    #[test]
    fn test_coalescing() {
        let mut tracker = MemoryLayoutTracker::new();

        // 1000..1500 and 1500..1800 touch; 2000..2200 is isolated.
        tracker.add_free_region(1000, 500);
        tracker.add_free_region(1500, 300);
        tracker.add_free_region(2000, 200);

        let coalesced = tracker.coalesce_free_regions();
        assert_eq!(coalesced, 1);
        assert_eq!(tracker.free_regions.len(), 2);
    }

    #[test]
    fn test_thread_safe_engine() {
        let engine = ThreadSafeDefragmentationEngine::new(DefragConfig::default());

        let _should_defrag = engine.should_defragment();
        assert_eq!(engine.get_stats().total_cycles, 0);
    }
}