use crate::{Buffer, BufferDescriptor, Device};
use torsh_core::error::{Result, TorshError};
#[cfg(not(feature = "std"))]
use alloc::{boxed::Box, format, string::{String, ToString}, vec, vec::Vec};
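/// Device-level memory manager.
///
/// Implementations manage allocations for a single [`Device`]: buffer-level and
/// raw allocation, unified memory with prefetch/advice hints, memory statistics,
/// and defragmentation control.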
pub trait MemoryManager: Send + Sync {
fn allocate(&mut self, descriptor: &BufferDescriptor) -> Result<Buffer>;
fn deallocate(&mut self, buffer: &Buffer) -> Result<()>;
fn stats(&self) -> MemoryStats;
fn garbage_collect(&mut self) -> Result<usize>;
fn set_pool(&mut self, pool: Box<dyn MemoryPool>) -> Result<()>;
fn device(&self) -> &Device;
fn allocate_raw(&mut self, size: usize, alignment: usize) -> Result<*mut u8>;
fn deallocate_raw(&mut self, ptr: *mut u8, size: usize) -> Result<()>;
fn supports_unified_memory(&self) -> bool;
fn allocate_unified(&mut self, size: usize) -> Result<*mut u8>;
fn deallocate_unified(&mut self, ptr: *mut u8, size: usize) -> Result<()>;
fn prefetch_to_device(&self, ptr: *mut u8, size: usize) -> Result<()>;
fn prefetch_to_host(&self, ptr: *mut u8, size: usize) -> Result<()>;
fn set_memory_advice(&self, ptr: *mut u8, size: usize, advice: MemoryAdvice) -> Result<()>;
fn available_memory(&self) -> Result<usize>;
fn total_memory(&self) -> Result<usize>;
fn synchronize(&self) -> Result<()>;
fn defragment(&mut self) -> Result<DefragmentationResult>;
fn needs_defragmentation(&self) -> bool;
fn fragmentation_info(&self) -> FragmentationInfo;
fn compact_memory(&mut self) -> Result<CompactionResult>;
fn set_defragmentation_policy(&mut self, policy: DefragmentationPolicy);
}
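/// Raw memory pool backing a [`MemoryManager`].
///
/// A pool hands out pointers into a pre-allocated region, tracks free and
/// allocated blocks, and reports [`PoolStats`] and [`FragmentationInfo`].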
pub trait MemoryPool: Send + Sync {
fn allocate(&mut self, size: usize, alignment: usize) -> Result<*mut u8>;
fn deallocate(&mut self, ptr: *mut u8, size: usize) -> Result<()>;
fn stats(&self) -> PoolStats;
fn reset(&mut self) -> Result<()>;
fn capacity(&self) -> usize;
fn available(&self) -> usize;
fn defragment(&mut self) -> Result<DefragmentationResult>;
fn needs_defragmentation(&self) -> bool;
fn fragmentation_info(&self) -> FragmentationInfo;
fn compact(&mut self) -> Result<CompactionResult>;
}
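/// Aggregate allocation statistics reported by a [`MemoryManager`].
///
/// A minimal usage sketch (illustrative values, not compiled as a doc-test):
///
/// ```ignore
/// let stats = MemoryStats {
///     total_memory: 1_000,
///     allocated_memory: 300,
///     ..Default::default()
/// };
/// assert!((stats.utilization() - 30.0).abs() < 0.001);
/// assert!(!stats.is_under_pressure());
/// ```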
#[derive(Debug, Clone, Default)]
pub struct MemoryStats {
pub total_memory: usize,
pub allocated_memory: usize,
pub available_memory: usize,
pub peak_memory: usize,
pub active_allocations: usize,
pub total_allocations: usize,
pub total_deallocations: usize,
pub fragmentation: f32,
pub efficiency: f32,
}
impl MemoryStats {
pub fn utilization(&self) -> f32 {
if self.total_memory == 0 {
0.0
} else {
(self.allocated_memory as f32 / self.total_memory as f32) * 100.0
}
}
pub fn is_under_pressure(&self) -> bool {
self.utilization() > 90.0 || self.fragmentation > 0.5
}
}
#[derive(Debug, Clone, Default)]
pub struct PoolStats {
pub capacity: usize,
pub allocated: usize,
pub available: usize,
pub free_blocks: usize,
pub allocated_blocks: usize,
pub largest_free_block: usize,
pub smallest_free_block: usize,
pub average_free_block: usize,
}
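/// A free-list allocator over a caller-provided memory region.
///
/// The pool never touches the backing memory itself; it only tracks
/// `(offset, size)` pairs for free and allocated blocks and returns pointers
/// computed from `base_ptr`. The caller owns the region and must keep it alive
/// for the lifetime of the pool.
///
/// A minimal usage sketch (not compiled as a doc-test; assumes a `std`
/// allocator for the backing buffer and the [`MemoryPool`] trait in scope):
///
/// ```ignore
/// let layout = std::alloc::Layout::from_size_align(4096, 64).unwrap();
/// let base = unsafe { std::alloc::alloc(layout) };
/// let mut pool = FreeListPool::new(base, 4096);
/// let p = pool.allocate(256, 64).unwrap();
/// pool.deallocate(p, 256).unwrap();
/// assert_eq!(pool.available(), 4096);
/// unsafe { std::alloc::dealloc(base, layout) };
/// ```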
#[derive(Debug)]
pub struct FreeListPool {
base_ptr: *mut u8,
total_size: usize,
free_blocks: Vec<(usize, usize)>,
allocated_blocks: Vec<(usize, usize)>,
stats: MemoryStats,
}
impl FreeListPool {
pub fn new(base_ptr: *mut u8, total_size: usize) -> Self {
let mut pool = Self {
base_ptr,
total_size,
free_blocks: vec![(0, total_size)],
allocated_blocks: Vec::new(),
stats: MemoryStats::default(),
};
pool.update_stats();
pool
}
fn find_free_block(&self, size: usize, alignment: usize) -> Option<usize> {
self.find_free_block_with_strategy(size, alignment, AllocationStrategy::FirstFit)
}
fn find_free_block_with_strategy(
&self,
size: usize,
alignment: usize,
strategy: AllocationStrategy,
) -> Option<usize> {
match strategy {
AllocationStrategy::FirstFit => self
.free_blocks
.iter()
.enumerate()
.find(|(_, &(offset, block_size))| {
let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
let padding = aligned_offset - offset;
padding + size <= block_size
})
.map(|(idx, _)| idx),
AllocationStrategy::BestFit => {
let mut best_idx = None;
let mut best_size = usize::MAX;
for (idx, &(offset, block_size)) in self.free_blocks.iter().enumerate() {
let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
let padding = aligned_offset - offset;
if padding + size <= block_size && block_size < best_size {
best_idx = Some(idx);
best_size = block_size;
}
}
best_idx
}
AllocationStrategy::WorstFit => {
let mut worst_idx = None;
let mut worst_size = 0;
for (idx, &(offset, block_size)) in self.free_blocks.iter().enumerate() {
let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
let padding = aligned_offset - offset;
if padding + size <= block_size && block_size > worst_size {
worst_idx = Some(idx);
worst_size = block_size;
}
}
worst_idx
}
AllocationStrategy::NextFit => {
// No rolling cursor is tracked, so NextFit currently degrades to FirstFit.
self.find_free_block_with_strategy(size, alignment, AllocationStrategy::FirstFit)
}
}
}
fn update_stats(&mut self) {
let allocated: usize = self.allocated_blocks.iter().map(|(_, size)| size).sum();
let available: usize = self.free_blocks.iter().map(|(_, size)| size).sum();
self.stats.allocated_memory = allocated;
self.stats.available_memory = available;
self.stats.active_allocations = self.allocated_blocks.len();
self.stats.total_memory = self.total_size;
self.stats.efficiency = if self.total_size > 0 {
allocated as f32 / self.total_size as f32
} else {
0.0
};
self.stats.fragmentation = if available > 0 {
1.0 - (self
.free_blocks
.iter()
.map(|(_, size)| *size)
.max()
.unwrap_or(0) as f32
/ available as f32)
} else {
0.0
};
}
pub fn capacity(&self) -> usize {
self.total_size
}
fn coalesce_free_blocks(&mut self) {
if self.free_blocks.len() <= 1 {
return;
}
self.free_blocks.sort_by_key(|(offset, _)| *offset);
let mut i = 0;
while i < self.free_blocks.len().saturating_sub(1) {
let (offset1, size1) = self.free_blocks[i];
let (offset2, size2) = self.free_blocks[i + 1];
if offset1 + size1 == offset2 {
self.free_blocks[i] = (offset1, size1 + size2);
self.free_blocks.remove(i + 1);
} else {
i += 1;
}
}
}
pub fn detect_leaks(&self) -> Vec<LeakReport> {
let mut reports = Vec::new();
if self.allocated_blocks.len() > 1000 {
reports.push(LeakReport {
leak_type: LeakType::TooManyAllocations,
block_count: self.allocated_blocks.len(),
total_size: self.stats.allocated_memory,
severity: LeakSeverity::High,
description: format!(
"Too many active allocations: {}",
self.allocated_blocks.len()
),
});
}
for &(offset, size) in &self.allocated_blocks {
if size > self.total_size / 4 {
reports.push(LeakReport {
leak_type: LeakType::LargeAllocation,
block_count: 1,
total_size: size,
severity: LeakSeverity::Medium,
description: format!("Large allocation at offset {}: {} bytes", offset, size),
});
}
}
reports
}
pub fn validate_consistency(&self) -> Result<()> {
for i in 0..self.allocated_blocks.len() {
for j in (i + 1)..self.allocated_blocks.len() {
let (offset1, size1) = self.allocated_blocks[i];
let (offset2, size2) = self.allocated_blocks[j];
let end1 = offset1 + size1;
let end2 = offset2 + size2;
if offset1 < end2 && offset2 < end1 {
return Err(TorshError::AllocationError(format!(
"Overlapping allocations detected: [{}, {}) and [{}, {})",
offset1, end1, offset2, end2
)));
}
}
}
for i in 0..self.free_blocks.len() {
for j in (i + 1)..self.free_blocks.len() {
let (offset1, size1) = self.free_blocks[i];
let (offset2, size2) = self.free_blocks[j];
let end1 = offset1 + size1;
let end2 = offset2 + size2;
if offset1 < end2 && offset2 < end1 {
return Err(TorshError::AllocationError(format!(
"Overlapping free blocks detected: [{}, {}) and [{}, {})",
offset1, end1, offset2, end2
)));
}
}
}
for &(offset, size) in &self.allocated_blocks {
if offset + size > self.total_size {
return Err(TorshError::AllocationError(format!(
"Allocated block extends beyond pool: offset={}, size={}, pool_size={}",
offset, size, self.total_size
)));
}
}
for &(offset, size) in &self.free_blocks {
if offset + size > self.total_size {
return Err(TorshError::AllocationError(format!(
"Free block extends beyond pool: offset={}, size={}, pool_size={}",
offset, size, self.total_size
)));
}
}
Ok(())
}
}
impl MemoryPool for FreeListPool {
fn allocate(&mut self, size: usize, alignment: usize) -> Result<*mut u8> {
if size == 0 {
return Err(TorshError::InvalidArgument(
"Allocation size cannot be zero".to_string(),
));
}
if alignment == 0 || !alignment.is_power_of_two() {
return Err(TorshError::InvalidArgument(format!(
"Alignment must be a power of two and non-zero, got: {}",
alignment
)));
}
if size > self.total_size || alignment > self.total_size {
return Err(TorshError::AllocationError(format!(
"Requested size ({}) or alignment ({}) exceeds pool capacity ({})",
size, alignment, self.total_size
)));
}
if let Some(block_idx) = self.find_free_block(size, alignment) {
let (offset, block_size) = self.free_blocks[block_idx];
let aligned_offset = (offset + alignment - 1) & !(alignment - 1);
let padding = aligned_offset - offset;
let required_size = padding + size;
self.free_blocks.remove(block_idx);
if padding > 0 {
self.free_blocks.push((offset, padding));
}
if required_size < block_size {
let remaining_offset = offset + required_size;
let remaining_size = block_size - required_size;
self.free_blocks.push((remaining_offset, remaining_size));
}
self.allocated_blocks.push((aligned_offset, size));
self.update_stats();
Ok(unsafe { self.base_ptr.add(aligned_offset) })
} else {
Err(TorshError::AllocationError(format!(
"Out of memory: requested {} bytes, available memory is {} bytes",
size, self.stats.available_memory
)))
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
fn deallocate(&mut self, ptr: *mut u8, size: usize) -> Result<()> {
if ptr.is_null() {
return Err(TorshError::InvalidArgument(
"Cannot deallocate null pointer".to_string(),
));
}
if size == 0 {
return Err(TorshError::InvalidArgument(
"Cannot deallocate zero-sized block".to_string(),
));
}
if ptr < self.base_ptr || ptr >= unsafe { self.base_ptr.add(self.total_size) } {
return Err(TorshError::InvalidArgument(
"Pointer outside of memory pool range".to_string(),
));
}
let offset = unsafe { ptr.offset_from(self.base_ptr) } as usize;
if let Some(pos) = self
.allocated_blocks
.iter()
.position(|&(off, sz)| off == offset && sz == size)
{
self.allocated_blocks.remove(pos);
self.free_blocks.push((offset, size));
self.coalesce_free_blocks();
self.update_stats();
Ok(())
} else {
Err(TorshError::InvalidArgument(
"Invalid deallocation: block not found".to_string(),
))
}
}
fn stats(&self) -> PoolStats {
PoolStats {
capacity: self.total_size,
allocated: self.stats.allocated_memory,
available: self.stats.available_memory,
free_blocks: self.free_blocks.len(),
allocated_blocks: self.allocated_blocks.len(),
largest_free_block: self
.free_blocks
.iter()
.map(|(_, size)| *size)
.max()
.unwrap_or(0),
smallest_free_block: self
.free_blocks
.iter()
.map(|(_, size)| *size)
.min()
.unwrap_or(0),
average_free_block: if self.free_blocks.is_empty() {
0
} else {
self.stats.available_memory / self.free_blocks.len()
},
}
}
fn reset(&mut self) -> Result<()> {
self.free_blocks.clear();
self.allocated_blocks.clear();
self.free_blocks.push((0, self.total_size));
self.update_stats();
Ok(())
}
fn capacity(&self) -> usize {
self.total_size
}
fn available(&self) -> usize {
self.stats.available_memory
}
fn defragment(&mut self) -> Result<DefragmentationResult> {
let fragmentation_before = self.stats.fragmentation;
let efficiency_before = self.stats.efficiency;
// A free-list pool cannot relocate live allocations, so defragmentation is
// limited to coalescing adjacent free blocks.
self.coalesce_free_blocks();
self.update_stats();
Ok(DefragmentationResult {
blocks_moved: 0,
memory_compacted: 0,
duration_ms: 0.0,
fragmentation_before,
fragmentation_after: self.stats.fragmentation,
efficiency_improvement: self.stats.efficiency - efficiency_before,
success: true,
})
}
fn needs_defragmentation(&self) -> bool {
self.free_blocks.len() > 10
}
fn fragmentation_info(&self) -> FragmentationInfo {
let free_blocks = self.free_blocks.len();
let allocated_blocks = self.allocated_blocks.len();
let total_free = self.stats.available_memory;
let total_allocated = self.stats.allocated_memory;
let largest_free = self
.free_blocks
.iter()
.map(|(_, size)| *size)
.max()
.unwrap_or(0);
let smallest_free = self
.free_blocks
.iter()
.map(|(_, size)| *size)
.min()
.unwrap_or(0);
let average_free = if free_blocks > 0 {
total_free / free_blocks
} else {
0
};
let fragmentation = if self.capacity() > 0 {
free_blocks as f32 / (free_blocks + allocated_blocks) as f32
} else {
0.0
};
FragmentationInfo {
overall_fragmentation: fragmentation,
// Rough heuristic split of the measured fragmentation: scattered free blocks
// (external) dominate in a free-list pool, with a small share attributed to
// internal padding waste.
external_fragmentation: fragmentation * 0.8,
internal_fragmentation: fragmentation * 0.2,
free_blocks,
allocated_blocks,
largest_free_block: largest_free,
smallest_free_block: smallest_free,
average_free_block: average_free,
total_free_memory: total_free,
total_allocated_memory: total_allocated,
utilization_efficiency: if self.capacity() > 0 {
total_allocated as f32 / self.capacity() as f32
} else {
0.0
},
allocation_efficiency: if self.capacity() > 0 {
total_allocated as f32 / self.capacity() as f32
} else {
0.0
},
}
}
fn compact(&mut self) -> Result<CompactionResult> {
let free_blocks_before = self.free_blocks.len();
let largest_free_before = self
.free_blocks
.iter()
.map(|(_, size)| *size)
.max()
.unwrap_or(0);
self.coalesce_free_blocks();
let free_blocks_after = self.free_blocks.len();
Ok(CompactionResult {
allocations_moved: 0,
bytes_moved: 0,
duration_ms: 0.0,
largest_free_before,
largest_free_after: self
.free_blocks
.iter()
.map(|(_, size)| *size)
.max()
.unwrap_or(0),
free_blocks_before,
free_blocks_after,
success: true,
})
}
}
// SAFETY: `FreeListPool` only stores `base_ptr` for offset arithmetic and never
// dereferences it; all bookkeeping lives in owned `Vec`s and every mutating
// method takes `&mut self`, so cross-thread use requires external
// synchronization just like any other `&mut` access.
unsafe impl Send for FreeListPool {}
unsafe impl Sync for FreeListPool {}
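/// Block-selection strategies for pool allocation.
///
/// Note that [`FreeListPool`] keeps no rolling cursor, so `NextFit` currently
/// behaves identically to `FirstFit` there.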
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocationStrategy {
FirstFit,
BestFit,
WorstFit,
NextFit,
}
#[derive(Debug, Clone)]
pub struct AllocationHint {
pub lifetime: AllocationLifetime,
pub access_pattern: AccessPattern,
pub strategy: AllocationStrategy,
pub use_pool: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocationLifetime {
Temporary,
Short,
Medium,
Long,
Persistent,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessPattern {
Random,
Sequential,
ReadMostly,
WriteMostly,
Streaming,
}
impl Default for AllocationHint {
fn default() -> Self {
Self {
lifetime: AllocationLifetime::Medium,
access_pattern: AccessPattern::Random,
strategy: AllocationStrategy::FirstFit,
use_pool: true,
}
}
}
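/// Advice hints for unified-memory ranges, loosely mirroring CUDA's
/// `cudaMemAdvise` values; backends without unified memory may ignore them.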
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryAdvice {
SetPreferredLocation,
UnsetPreferredLocation,
SetAccessedBy,
UnsetAccessedBy,
SetReadMostly,
UnsetReadMostly,
}
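/// Factory for constructing a backend-specific [`MemoryManager`] for a given
/// [`Device`].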
pub trait MemoryManagerFactory: Send + Sync {
fn create_manager(&self, device: &Device) -> Result<Box<dyn MemoryManager>>;
fn backend_type(&self) -> crate::BackendType;
fn supports_device(&self, device: &Device) -> bool;
}
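/// Configuration for creating backend memory pools.
///
/// A builder-style sketch (not compiled as a doc-test):
///
/// ```ignore
/// let config = MemoryPoolConfig::new(256 * 1024 * 1024)
///     .with_max_size(1024 * 1024 * 1024)
///     .with_strategy(AllocationStrategy::BestFit)
///     .with_alignment(64);
/// assert_eq!(config.strategy, AllocationStrategy::BestFit);
/// ```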
#[derive(Debug, Clone)]
pub struct MemoryPoolConfig {
pub initial_size: usize,
pub max_size: Option<usize>,
pub growth_factor: f32,
pub strategy: AllocationStrategy,
pub enable_coalescing: bool,
pub min_block_size: usize,
pub alignment: usize,
pub numa_strategy: Option<crate::cpu::memory::NumaAllocationStrategy>,
}
impl Default for MemoryPoolConfig {
fn default() -> Self {
Self {
initial_size: 64 * 1024 * 1024, // 64 MiB
max_size: None,
growth_factor: 1.5,
strategy: AllocationStrategy::FirstFit,
enable_coalescing: true,
min_block_size: 256,
alignment: 16,
numa_strategy: None,
}
}
}
impl MemoryPoolConfig {
pub fn new(initial_size: usize) -> Self {
Self {
initial_size,
..Default::default()
}
}
pub fn with_max_size(mut self, max_size: usize) -> Self {
self.max_size = Some(max_size);
self
}
pub fn with_growth_factor(mut self, growth_factor: f32) -> Self {
self.growth_factor = growth_factor;
self
}
pub fn with_strategy(mut self, strategy: AllocationStrategy) -> Self {
self.strategy = strategy;
self
}
pub fn with_alignment(mut self, alignment: usize) -> Self {
self.alignment = alignment;
self
}
}
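/// Outcome of a defragmentation pass.
///
/// Worked example with illustrative numbers: a result with
/// `memory_compacted == 128` against a 1024-byte pool gives
/// `compaction_ratio(1024) == 0.125`.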
#[derive(Debug, Clone)]
pub struct DefragmentationResult {
pub blocks_moved: usize,
pub memory_compacted: usize,
pub duration_ms: f64,
pub fragmentation_before: f32,
pub fragmentation_after: f32,
pub efficiency_improvement: f32,
pub success: bool,
}
impl DefragmentationResult {
pub fn is_improvement_significant(&self) -> bool {
self.success && self.efficiency_improvement > 0.1
}
pub fn compaction_ratio(&self, total_memory: usize) -> f32 {
if total_memory == 0 {
0.0
} else {
self.memory_compacted as f32 / total_memory as f32
}
}
}
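/// Outcome of compacting a pool's free list.
///
/// For example, consolidating 8 free blocks down to 2 gives
/// `consolidation_improvement() == 0.75`.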
#[derive(Debug, Clone)]
pub struct CompactionResult {
pub allocations_moved: usize,
pub bytes_moved: usize,
pub duration_ms: f64,
pub largest_free_before: usize,
pub largest_free_after: usize,
pub free_blocks_before: usize,
pub free_blocks_after: usize,
pub success: bool,
}
impl CompactionResult {
pub fn consolidation_improvement(&self) -> f32 {
if self.free_blocks_before == 0 {
1.0
} else {
1.0 - (self.free_blocks_after as f32 / self.free_blocks_before as f32)
}
}
pub fn largest_block_improvement(&self) -> f32 {
if self.largest_free_before == 0 {
if self.largest_free_after > 0 {
f32::INFINITY
} else {
0.0
}
} else {
self.largest_free_after as f32 / self.largest_free_before as f32
}
}
}
#[derive(Debug, Clone, Default)]
pub struct FragmentationInfo {
pub overall_fragmentation: f32,
pub external_fragmentation: f32,
pub internal_fragmentation: f32,
pub free_blocks: usize,
pub allocated_blocks: usize,
pub largest_free_block: usize,
pub smallest_free_block: usize,
pub average_free_block: usize,
pub total_free_memory: usize,
pub total_allocated_memory: usize,
pub utilization_efficiency: f32,
pub allocation_efficiency: f32,
}
impl FragmentationInfo {
pub fn is_severely_fragmented(&self) -> bool {
self.overall_fragmentation > 0.7 || self.external_fragmentation > 0.6
}
pub fn would_benefit_from_defragmentation(&self) -> bool {
self.is_severely_fragmented()
|| (self.free_blocks > 10 && self.utilization_efficiency < 0.8)
}
pub fn severity_level(&self) -> FragmentationSeverity {
if self.overall_fragmentation < 0.2 {
FragmentationSeverity::Low
} else if self.overall_fragmentation < 0.5 {
FragmentationSeverity::Medium
} else if self.overall_fragmentation < 0.8 {
FragmentationSeverity::High
} else {
FragmentationSeverity::Critical
}
}
}
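/// Severity buckets produced by [`FragmentationInfo::severity_level`]: overall
/// fragmentation below 0.2 is `Low`, below 0.5 is `Medium`, below 0.8 is
/// `High`, and 0.8 or above is `Critical`.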
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum FragmentationSeverity {
Low,
Medium,
High,
Critical,
}
#[derive(Debug, Clone)]
pub struct DefragmentationPolicy {
pub auto_trigger_threshold: f32,
pub min_interval_ms: u64,
pub max_duration_ms: u64,
pub strategy: DefragmentationStrategy,
pub enable_background: bool,
pub priority: DefragmentationPriority,
pub pause_allocations: bool,
pub emergency_threshold: f32,
}
impl Default for DefragmentationPolicy {
fn default() -> Self {
Self {
auto_trigger_threshold: 0.6,
min_interval_ms: 10_000,
max_duration_ms: 5_000,
strategy: DefragmentationStrategy::Incremental,
enable_background: true,
priority: DefragmentationPriority::Low,
pause_allocations: false,
emergency_threshold: 0.9,
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DefragmentationStrategy {
FullCompaction,
Incremental,
SmallBlocksOnly,
LargeBlocksFirst,
CoalesceOnly,
Generational,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum DefragmentationPriority {
Low,
Normal,
High,
Critical,
}
#[derive(Debug, Clone)]
pub struct LeakReport {
pub leak_type: LeakType,
pub block_count: usize,
pub total_size: usize,
pub severity: LeakSeverity,
pub description: String,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LeakType {
TooManyAllocations,
LargeAllocation,
LongLivedAllocation,
Fragmentation,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum LeakSeverity {
Low,
Medium,
High,
Critical,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::device::{Device, DeviceInfo};
use torsh_core::device::DeviceType;
fn create_test_device() -> Device {
let info = DeviceInfo::default();
Device::new(0, DeviceType::Cpu, "Test CPU".to_string(), info)
}
#[test]
fn test_memory_stats_default() {
let stats = MemoryStats::default();
assert_eq!(stats.total_memory, 0);
assert_eq!(stats.allocated_memory, 0);
assert_eq!(stats.available_memory, 0);
assert_eq!(stats.peak_memory, 0);
assert_eq!(stats.active_allocations, 0);
assert_eq!(stats.total_allocations, 0);
assert_eq!(stats.total_deallocations, 0);
assert_eq!(stats.fragmentation, 0.0);
assert_eq!(stats.efficiency, 0.0);
}
#[test]
fn test_memory_stats_utilization() {
let mut stats = MemoryStats {
total_memory: 1000,
allocated_memory: 300,
..Default::default()
};
assert!((stats.utilization() - 30.0).abs() < 0.001);
stats.total_memory = 0;
assert_eq!(stats.utilization(), 0.0);
}
#[test]
fn test_memory_stats_pressure() {
let mut stats = MemoryStats {
total_memory: 1000,
allocated_memory: 850,
fragmentation: 0.3,
..Default::default()
};
assert!(!stats.is_under_pressure());
stats.allocated_memory = 950;
assert!(stats.is_under_pressure());
stats.allocated_memory = 500;
stats.fragmentation = 0.6;
assert!(stats.is_under_pressure());
}
#[test]
fn test_pool_stats_default() {
let stats = PoolStats::default();
assert_eq!(stats.capacity, 0);
assert_eq!(stats.allocated, 0);
assert_eq!(stats.available, 0);
assert_eq!(stats.free_blocks, 0);
assert_eq!(stats.allocated_blocks, 0);
assert_eq!(stats.largest_free_block, 0);
assert_eq!(stats.smallest_free_block, 0);
assert_eq!(stats.average_free_block, 0);
}
#[test]
fn test_allocation_strategy_variants() {
let strategies = [
AllocationStrategy::FirstFit,
AllocationStrategy::BestFit,
AllocationStrategy::WorstFit,
AllocationStrategy::NextFit,
];
for (i, strategy1) in strategies.iter().enumerate() {
for (j, strategy2) in strategies.iter().enumerate() {
if i != j {
assert_ne!(strategy1, strategy2);
}
}
}
}
#[test]
fn test_allocation_lifetime_variants() {
let lifetimes = [
AllocationLifetime::Temporary,
AllocationLifetime::Short,
AllocationLifetime::Medium,
AllocationLifetime::Long,
AllocationLifetime::Persistent,
];
for (i, lifetime1) in lifetimes.iter().enumerate() {
for (j, lifetime2) in lifetimes.iter().enumerate() {
if i != j {
assert_ne!(lifetime1, lifetime2);
}
}
}
}
#[test]
fn test_access_pattern_variants() {
let patterns = [
AccessPattern::Random,
AccessPattern::Sequential,
AccessPattern::ReadMostly,
AccessPattern::WriteMostly,
AccessPattern::Streaming,
];
for (i, pattern1) in patterns.iter().enumerate() {
for (j, pattern2) in patterns.iter().enumerate() {
if i != j {
assert_ne!(pattern1, pattern2);
}
}
}
}
#[test]
fn test_allocation_hint_default() {
let hint = AllocationHint::default();
assert_eq!(hint.lifetime, AllocationLifetime::Medium);
assert_eq!(hint.access_pattern, AccessPattern::Random);
assert_eq!(hint.strategy, AllocationStrategy::FirstFit);
assert!(hint.use_pool);
}
#[test]
fn test_memory_pool_config_default() {
let config = MemoryPoolConfig::default();
assert_eq!(config.initial_size, 64 * 1024 * 1024);
assert_eq!(config.max_size, None);
assert_eq!(config.growth_factor, 1.5);
assert_eq!(config.strategy, AllocationStrategy::FirstFit);
assert!(config.enable_coalescing);
assert_eq!(config.min_block_size, 256);
assert_eq!(config.alignment, 16);
}
#[test]
fn test_memory_pool_config_builder() {
let config = MemoryPoolConfig::new(128 * 1024 * 1024)
.with_max_size(1024 * 1024 * 1024)
.with_growth_factor(2.0)
.with_strategy(AllocationStrategy::BestFit)
.with_alignment(64);
assert_eq!(config.initial_size, 128 * 1024 * 1024);
assert_eq!(config.max_size, Some(1024 * 1024 * 1024));
assert_eq!(config.growth_factor, 2.0);
assert_eq!(config.strategy, AllocationStrategy::BestFit);
assert_eq!(config.alignment, 64);
}
#[test]
fn test_free_list_pool_creation() {
let _device = create_test_device();
let capacity = 1024 * 1024;
let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
assert!(!ptr.is_null());
let pool = FreeListPool::new(ptr, capacity);
assert_eq!(pool.capacity(), capacity);
assert_eq!(pool.available(), capacity);
let stats = pool.stats();
assert_eq!(stats.capacity, capacity);
assert_eq!(stats.available, capacity);
assert_eq!(stats.allocated, 0);
assert_eq!(stats.free_blocks, 1);
assert_eq!(stats.allocated_blocks, 0);
assert_eq!(stats.largest_free_block, capacity);
unsafe {
std::alloc::dealloc(ptr, layout);
}
}
#[test]
fn test_free_list_pool_allocation() {
let _device = create_test_device();
let capacity = 1024;
let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
assert!(!ptr.is_null());
let mut pool = FreeListPool::new(ptr, capacity);
let ptr1 = pool.allocate(256, 16);
assert!(ptr1.is_ok());
let stats = pool.stats();
assert_eq!(stats.allocated, 256);
assert!(stats.available < capacity);
assert_eq!(stats.allocated_blocks, 1);
let ptr2 = pool.allocate(128, 16);
assert!(ptr2.is_ok());
let stats = pool.stats();
assert_eq!(stats.allocated, 256 + 128);
assert_eq!(stats.allocated_blocks, 2);
let ptr3 = pool.allocate(1024, 16);
assert!(ptr3.is_err());
unsafe {
std::alloc::dealloc(ptr, layout);
}
}
#[test]
fn test_free_list_pool_deallocation() {
let _device = create_test_device();
let capacity = 1024;
let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
assert!(!ptr.is_null());
let mut pool = FreeListPool::new(ptr, capacity);
let ptr1 = pool.allocate(256, 16).unwrap();
let ptr2 = pool.allocate(128, 16).unwrap();
assert_eq!(pool.stats().allocated_blocks, 2);
let result = pool.deallocate(ptr1, 256);
assert!(result.is_ok());
let stats = pool.stats();
assert_eq!(stats.allocated, 128);
assert_eq!(stats.allocated_blocks, 1);
let result = pool.deallocate(ptr2, 128);
assert!(result.is_ok());
let stats = pool.stats();
assert_eq!(stats.allocated, 0);
assert_eq!(stats.allocated_blocks, 0);
unsafe {
std::alloc::dealloc(ptr, layout);
}
}
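// A hedged extra check (a sketch using the same sizes as the test above):
// after both blocks are freed, adjacent free blocks should be coalesced back
// into a single block spanning the whole pool.
#[test]
fn test_free_list_pool_coalescing() {
let capacity = 1024;
let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
assert!(!ptr.is_null());
let mut pool = FreeListPool::new(ptr, capacity);
let ptr1 = pool.allocate(256, 16).unwrap();
let ptr2 = pool.allocate(128, 16).unwrap();
pool.deallocate(ptr1, 256).unwrap();
pool.deallocate(ptr2, 128).unwrap();
let stats = pool.stats();
assert_eq!(stats.free_blocks, 1);
assert_eq!(stats.available, capacity);
assert_eq!(stats.largest_free_block, capacity);
unsafe {
std::alloc::dealloc(ptr, layout);
}
}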
#[test]
fn test_free_list_pool_reset() {
let _device = create_test_device();
let capacity = 1024;
let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
assert!(!ptr.is_null());
let mut pool = FreeListPool::new(ptr, capacity);
let _ptr1 = pool.allocate(256, 16).unwrap();
let _ptr2 = pool.allocate(128, 16).unwrap();
assert_eq!(pool.stats().allocated_blocks, 2);
let result = pool.reset();
assert!(result.is_ok());
let stats = pool.stats();
assert_eq!(stats.allocated, 0);
assert_eq!(stats.allocated_blocks, 0);
assert_eq!(stats.free_blocks, 1);
assert_eq!(stats.available, capacity);
assert_eq!(stats.largest_free_block, capacity);
unsafe {
std::alloc::dealloc(ptr, layout);
}
}
#[test]
fn test_free_list_pool_find_free_block() {
let _device = create_test_device();
let capacity = 1024;
let layout = std::alloc::Layout::from_size_align(capacity, 8).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
assert!(!ptr.is_null());
let pool = FreeListPool::new(ptr, capacity);
let block_idx = pool.find_free_block(256, 16);
assert!(block_idx.is_some());
assert_eq!(block_idx.unwrap(), 0);
let block_idx = pool.find_free_block(2048, 16);
assert!(block_idx.is_none());
unsafe {
std::alloc::dealloc(ptr, layout);
}
}
}