use std::collections::BTreeMap;
/// A contiguous run of unused bytes inside the managed region.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FreeBlock {
    /// Byte offset of the free region within the managed buffer.
    pub offset: usize,
    /// Length of the free region in bytes.
    pub size: usize,
}
/// Summary of a single compaction pass.
#[derive(Debug, Clone, Default)]
pub struct DefragStats {
    /// Number of allocated blocks that were relocated.
    pub blocks_moved: usize,
    /// Total number of bytes copied while relocating blocks.
    pub bytes_compacted: usize,
    /// Fragmentation ratio measured before the pass
    /// (see `DefragPlanner::fragmentation_ratio`).
    pub fragmentation_before: f64,
    /// Fragmentation ratio after the pass; `0.0` once free space is one
    /// contiguous region.
    pub fragmentation_after: f64,
}
/// Tracks allocated and free regions of a fixed-capacity buffer and plans
/// the moves needed to pack all allocations to the front.
pub struct DefragPlanner {
    /// Total size, in bytes, of the managed region.
    total_capacity: usize,
    /// Live allocations: offset -> size, iterated in ascending offset order.
    allocated_blocks: BTreeMap<usize, usize>,
    /// Freed regions in the order they were reported; adjacent entries are
    /// NOT merged automatically (see `coalesce_free_blocks`).
    free_blocks: Vec<FreeBlock>,
}
impl DefragPlanner {
    /// Creates a planner for a region of `total_capacity` bytes with no
    /// recorded allocations or free blocks.
    pub fn new(total_capacity: usize) -> Self {
        Self {
            total_capacity,
            allocated_blocks: BTreeMap::new(),
            free_blocks: Vec::new(),
        }
    }

    /// Records an allocation of `size` bytes at `offset`.
    ///
    /// Bookkeeping only: the planner trusts the caller to report
    /// non-overlapping regions. Re-recording the same offset replaces the
    /// previously recorded size.
    pub fn record_alloc(&mut self, offset: usize, size: usize) {
        self.allocated_blocks.insert(offset, size);
    }

    /// Records that `size` bytes at `offset` were freed.
    ///
    /// NOTE(review): this does NOT remove the region from
    /// `allocated_blocks`; a caller that frees a whole allocation is
    /// expected to keep both views consistent itself (as the tests do).
    pub fn record_free(&mut self, offset: usize, size: usize) {
        self.free_blocks.push(FreeBlock { offset, size });
    }

    /// Fraction of free bytes that are NOT part of the single largest free
    /// block, in `[0.0, 1.0]`.
    ///
    /// `0.0` means all free space is contiguous (or there is none at all);
    /// values approaching `1.0` mean the free space is shattered into many
    /// small blocks.
    pub fn fragmentation_ratio(&self) -> f64 {
        let total_free: usize = self.free_blocks.iter().map(|b| b.size).sum();
        if total_free == 0 {
            return 0.0;
        }
        // `total_free > 0` guarantees at least one block exists, so `max()`
        // is `Some`; `unwrap_or(0)` is belt-and-braces only.
        let largest = self.free_blocks.iter().map(|b| b.size).max().unwrap_or(0);
        // `largest` is one of the summands of `total_free`, so this cannot
        // actually underflow; `saturating_sub` documents the intent.
        let non_contiguous = total_free.saturating_sub(largest);
        non_contiguous as f64 / total_free as f64
    }

    /// Plans a left-packing compaction.
    ///
    /// Returns `(from, to, size)` triples, one per block that must move, in
    /// ascending `from` order. Because blocks are visited in ascending
    /// offset order and `write_cursor` only ever trails the current offset,
    /// every move satisfies `to <= from` and the moves are safe to apply
    /// front-to-back without clobbering not-yet-moved blocks.
    pub fn plan_compaction(&self) -> Vec<(usize, usize, usize)> {
        let mut moves = Vec::new();
        let mut write_cursor: usize = 0;
        for (&offset, &size) in &self.allocated_blocks {
            if offset != write_cursor {
                moves.push((offset, write_cursor, size));
            }
            write_cursor += size;
        }
        moves
    }

    /// Applies the planned compaction to `buffer` and reports statistics.
    ///
    /// `buffer` is grown (zero-filled) to `total_capacity` if it is shorter,
    /// so every planned copy stays in bounds as long as recorded allocations
    /// lie within the capacity.
    ///
    /// This method takes `&self` and therefore does NOT update the
    /// planner's own bookkeeping; the caller is responsible for re-keying
    /// `allocated_blocks` / rebuilding `free_blocks` afterwards (see
    /// `OnlineDefragmenter::compact`).
    pub fn execute_compaction(&self, buffer: &mut Vec<u8>) -> DefragStats {
        let frag_before = self.fragmentation_ratio();
        if buffer.len() < self.total_capacity {
            buffer.resize(self.total_capacity, 0);
        }
        let moves = self.plan_compaction();
        let blocks_moved = moves.len();
        let bytes_compacted: usize = moves.iter().map(|&(_, _, size)| size).sum();
        for &(from, to, size) in &moves {
            // Moves arrive in ascending source order with `to <= from`, so
            // applying them in order never overwrites an unmoved block;
            // `copy_within` handles overlap within a single move.
            buffer.copy_within(from..from + size, to);
        }
        DefragStats {
            blocks_moved,
            bytes_compacted,
            fragmentation_before: frag_before,
            // A full left-pack leaves at most one contiguous free region, so
            // by this module's metric the post-compaction fragmentation is
            // zero (assuming the caller updates the tracking state to match).
            fragmentation_after: 0.0,
        }
    }

    /// Merges adjacent free blocks in place and returns how many merges
    /// were performed. Leaves the free list sorted by offset.
    pub fn coalesce_free_blocks(&mut self) -> usize {
        if self.free_blocks.is_empty() {
            return 0;
        }
        // Stable sort: duplicate offsets (caller error, but possible) keep
        // their insertion order instead of being shuffled arbitrarily.
        self.free_blocks.sort_by_key(|b| b.offset);
        let mut merged: Vec<FreeBlock> = Vec::with_capacity(self.free_blocks.len());
        let mut merge_count = 0usize;
        let mut current = self.free_blocks[0];
        for &block in &self.free_blocks[1..] {
            if current.offset + current.size == block.offset {
                // Exactly adjacent: grow the current run.
                current.size += block.size;
                merge_count += 1;
            } else {
                merged.push(current);
                current = block;
            }
        }
        merged.push(current);
        self.free_blocks = merged;
        merge_count
    }

    /// Total size, in bytes, of the managed region.
    pub fn total_capacity(&self) -> usize {
        self.total_capacity
    }

    /// Number of currently recorded allocations.
    pub fn allocated_block_count(&self) -> usize {
        self.allocated_blocks.len()
    }

    /// Number of recorded (possibly un-coalesced) free blocks.
    pub fn free_block_count(&self) -> usize {
        self.free_blocks.len()
    }

    /// Sum of the sizes of all recorded allocations.
    pub fn allocated_bytes(&self) -> usize {
        self.allocated_blocks.values().sum()
    }
}
/// Wraps a `DefragPlanner` with a fragmentation threshold so callers can
/// decide when a compaction pass is worthwhile.
pub struct OnlineDefragmenter {
    /// Underlying bookkeeping and move planning.
    planner: DefragPlanner,
    /// Fragmentation ratio above which `should_compact` returns true;
    /// clamped to `[0.0, 1.0]` at construction.
    threshold: f64,
    /// Number of compaction passes performed so far.
    compaction_count: usize,
}
impl OnlineDefragmenter {
    /// Builds a defragmenter over `capacity` bytes that recommends a
    /// compaction once fragmentation strictly exceeds `threshold`
    /// (clamped into `[0.0, 1.0]`).
    pub fn new(capacity: usize, threshold: f64) -> Self {
        Self {
            planner: DefragPlanner::new(capacity),
            threshold: threshold.clamp(0.0, 1.0),
            compaction_count: 0,
        }
    }

    /// Forwards an allocation event to the planner.
    pub fn on_alloc(&mut self, offset: usize, size: usize) {
        self.planner.record_alloc(offset, size);
    }

    /// Forwards a free event to the planner.
    pub fn on_free(&mut self, offset: usize, size: usize) {
        self.planner.record_free(offset, size);
    }

    /// True when the current fragmentation ratio is strictly above the
    /// configured threshold.
    pub fn should_compact(&self) -> bool {
        self.threshold < self.planner.fragmentation_ratio()
    }

    /// Runs a compaction pass over `buffer`, then rebuilds the planner's
    /// bookkeeping so all allocations occupy a packed prefix followed by a
    /// single free tail block (if any capacity remains).
    pub fn compact(&mut self, buffer: &mut Vec<u8>) -> DefragStats {
        let stats = self.planner.execute_compaction(buffer);
        self.compaction_count += 1;
        // Re-key every allocation to its packed position; `values()` yields
        // sizes in ascending-offset order, matching the applied moves.
        let mut next_offset = 0usize;
        let repacked: BTreeMap<usize, usize> = self
            .planner
            .allocated_blocks
            .values()
            .map(|&size| {
                let slot = (next_offset, size);
                next_offset += size;
                slot
            })
            .collect();
        self.planner.allocated_blocks = repacked;
        // Whatever capacity the packed prefix does not use is one
        // contiguous free tail; record it only if it is non-empty.
        self.planner.free_blocks.clear();
        let tail = self.planner.total_capacity().saturating_sub(next_offset);
        if tail > 0 {
            self.planner.free_blocks.push(FreeBlock {
                offset: next_offset,
                size: tail,
            });
        }
        stats
    }

    /// Number of compaction passes performed so far.
    pub fn compaction_count(&self) -> usize {
        self.compaction_count
    }

    /// Current fragmentation ratio, as reported by the planner.
    pub fn fragmentation_ratio(&self) -> f64 {
        self.planner.fragmentation_ratio()
    }

    /// Read-only access to the underlying planner.
    pub fn planner(&self) -> &DefragPlanner {
        &self.planner
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_defrag_planner_basic() {
        let mut p = DefragPlanner::new(768);
        for off in [0usize, 256, 512] {
            p.record_alloc(off, 256);
        }
        p.record_free(256, 256);
        // One free block is contiguous by definition.
        assert_eq!(
            p.fragmentation_ratio(),
            0.0,
            "single free block => no fragmentation"
        );
        p.record_free(0, 128);
        assert!(
            p.fragmentation_ratio() > 0.0,
            "two non-adjacent free blocks => fragmentation > 0"
        );
    }

    #[test]
    fn test_defrag_coalesce() {
        let mut p = DefragPlanner::new(1024);
        p.record_free(0, 256);
        p.record_free(256, 256);
        assert_eq!(p.coalesce_free_blocks(), 1, "exactly one merge expected");
        assert_eq!(p.free_block_count(), 1, "should have collapsed to 1 block");
        let merged = p.free_blocks[0];
        assert_eq!((merged.offset, merged.size), (0, 512));
    }

    #[test]
    fn test_defrag_coalesce_non_adjacent() {
        let mut p = DefragPlanner::new(1024);
        p.record_free(0, 128);
        p.record_free(256, 128);
        assert_eq!(
            p.coalesce_free_blocks(),
            0,
            "non-adjacent blocks should not merge"
        );
        assert_eq!(p.free_block_count(), 2);
    }

    #[test]
    fn test_defrag_compaction_plan() {
        let mut p = DefragPlanner::new(768);
        p.record_alloc(0, 256);
        p.record_alloc(512, 256);
        let plan = p.plan_compaction();
        assert_eq!(plan.len(), 1, "one move expected");
        // (from, to, size)
        assert_eq!(plan[0], (512, 256, 256));
    }

    #[test]
    fn test_defrag_compaction_plan_already_compact() {
        let mut p = DefragPlanner::new(512);
        p.record_alloc(0, 256);
        p.record_alloc(256, 256);
        assert!(
            p.plan_compaction().is_empty(),
            "already compact layout should produce no moves"
        );
    }

    #[test]
    fn test_defrag_execute() {
        let mut data = vec![0u8; 768];
        data[..256].fill(0xAA);
        data[512..].fill(0xBB);
        let mut p = DefragPlanner::new(768);
        p.record_alloc(0, 256);
        p.record_alloc(512, 256);
        let stats = p.execute_compaction(&mut data);
        assert_eq!(stats.blocks_moved, 1);
        assert_eq!(stats.bytes_compacted, 256);
        assert!(data[..256].iter().all(|&b| b == 0xAA));
        assert!(data[256..512].iter().all(|&b| b == 0xBB));
    }

    #[test]
    fn test_online_defrag_threshold() {
        let mut od = OnlineDefragmenter::new(1024, 0.3);
        let mut region = vec![0u8; 1024];
        for off in [0usize, 256, 512] {
            od.on_alloc(off, 256);
        }
        od.on_free(256, 256);
        od.on_free(0, 256);
        od.on_alloc(768, 128);
        od.on_free(768, 128);
        let _ = od.should_compact();
        assert_eq!(od.compaction_count(), 0);
        od.compact(&mut region);
        assert_eq!(
            od.compaction_count(),
            1,
            "compaction count should increment"
        );
    }

    #[test]
    fn test_online_defrag_no_compact_below_threshold() {
        let mut od = OnlineDefragmenter::new(1024, 0.99);
        od.on_alloc(0, 512);
        od.on_free(256, 256);
        assert!(
            !od.should_compact(),
            "single free block has 0 fragmentation; should not compact"
        );
    }
}