#![allow(dead_code)]
use crate::Device;
use parking_lot::Mutex;
use std::collections::{BTreeMap, HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Duration, Instant};
/// Metadata for a single tracked allocation.
/// NOTE(review): currently unused in this module (see `#![allow(dead_code)]`).
#[derive(Debug, Clone)]
pub struct AllocationInfo {
/// Start address of the allocation.
pub address: usize,
/// Whether the block is currently free.
pub is_free: bool,
/// Timestamp of the most recent access.
pub last_access: Instant,
}
/// Per-device memory-fragmentation tracker: maintains free-block maps,
/// derived fragmentation scores, an event log, compaction statistics, and
/// lock-protected advanced metrics plus a simple prediction model.
#[derive(Debug)]
pub struct FragmentationTracker {
/// Free blocks per device, keyed by block size -> count of blocks of that size.
pub free_blocks: HashMap<Device, BTreeMap<usize, usize>>,
/// Latest fragmentation score per device, in [0.0, 1.0].
pub fragmentation_scores: HashMap<Device, f64>,
/// Largest free block size observed per device (bytes).
pub largest_free_block: HashMap<Device, usize>,
/// Chronological log of recorded fragmentation events.
pub fragmentation_events: Vec<FragmentationEvent>,
/// Aggregated statistics about compaction runs.
pub compaction_stats: CompactionStats,
config: FragmentationConfig,
// Behind mutexes so `&self` methods can update them.
advanced_metrics: Arc<Mutex<AdvancedFragmentationMetrics>>,
prediction_model: Arc<Mutex<FragmentationPredictionModel>>,
}
/// One recorded fragmentation transition on a device, including the score
/// before/after, an inferred cause, assessed impact and surrounding context.
#[derive(Debug, Clone)]
pub struct FragmentationEvent {
pub timestamp: Instant,
pub device: Device,
/// Score before this event (see `record_fragmentation_event`).
pub fragmentation_before: f64,
/// Score after this event.
pub fragmentation_after: f64,
pub cause: FragmentationCause,
/// Recovery action taken, if any (currently always `None` at creation).
pub recovery_action: Option<FragmentationRecovery>,
pub impact: FragmentationImpact,
pub context: FragmentationContext,
}
/// Possible root causes of fragmentation, each with diagnostic payload.
#[derive(Debug, Clone, PartialEq)]
pub enum FragmentationCause {
/// Many differently-sized allocations interleaved in the same region.
MixedAllocationSizes {
size_variance: f64,
allocation_count: usize,
},
/// High churn of allocations and deallocations (rates are per second).
FrequentAllocDealloc {
alloc_frequency: f64,
dealloc_frequency: f64,
},
/// Old allocations pinning memory, possibly blocking compaction.
LongLivedAllocations {
allocation_age: Duration,
blocking_compaction: bool,
},
/// Alignment padding wasting a fraction of allocated memory.
MisalignedAllocations {
alignment_requirement: usize,
waste_percentage: f64,
},
/// A named pool overflowed by the given number of bytes.
PoolOverflow {
pool_name: String,
overflow_amount: usize,
},
/// Periodic allocation behavior driving fragmentation cycles.
TemporalPatterns {
pattern_type: String,
periodicity: Duration,
},
/// Allocations clustering around specific sizes.
SizeClustering {
cluster_sizes: Vec<usize>,
fragmentation_factor: f64,
},
}
/// Recovery actions that may be taken in response to fragmentation, with
/// their measured or expected outcome.
#[derive(Debug, Clone)]
pub enum FragmentationRecovery {
/// Physical compaction: relocating live blocks to coalesce free space.
Compaction {
blocks_moved: usize,
time_taken: Duration,
memory_recovered: usize,
success_rate: f64,
},
/// Restructuring memory pools (merge/split/rebalance).
PoolReorganization {
pools_affected: usize,
reorganization_type: PoolReorganizationType,
efficiency_improvement: f64,
},
/// Switching the allocation strategy.
StrategyChange {
old_strategy: AllocationStrategy,
new_strategy: AllocationStrategy,
expected_improvement: f64,
},
/// Defragmentation by one of several methods.
Defragmentation {
memory_recovered: usize,
defrag_method: DefragmentationMethod,
performance_impact: f64,
},
/// Garbage collection pass.
GarbageCollection {
memory_freed: usize,
gc_duration: Duration,
gc_type: GarbageCollectionType,
},
/// Growing a named pool to relieve pressure.
PoolExpansion {
pool_name: String,
expansion_size: usize,
fragmentation_reduction: f64,
},
}
/// Ways a set of memory pools can be restructured during recovery.
#[derive(Debug, Clone)]
pub enum PoolReorganizationType {
SizeClassRebalancing,
PoolMerging,
PoolSplitting,
PoolCoalescing,
}
/// Allocation strategies the allocator can switch between.
#[derive(Debug, Clone)]
pub enum AllocationStrategy {
FirstFit,
BestFit,
WorstFit,
NextFit,
BuddySystem,
SlabAllocator,
PoolAllocator,
StackAllocator,
}
/// Techniques for defragmenting memory.
#[derive(Debug, Clone)]
pub enum DefragmentationMethod {
CopyingGC,
MarkAndSweep,
Compaction,
Coalescing,
PoolReorganization,
}
/// Kinds of garbage-collection passes.
#[derive(Debug, Clone)]
pub enum GarbageCollectionType {
Minor,
Major,
Full,
Incremental,
Concurrent,
}
/// Assessed impact of a fragmentation level on memory and performance.
/// Values are heuristic estimates produced by `assess_fragmentation_impact`.
#[derive(Debug, Clone)]
pub struct FragmentationImpact {
/// 1.0 - fragmentation level; fraction of memory usable effectively.
pub memory_efficiency: f64,
/// Multiplicative slowdown factor for allocations (>= 1.0).
pub allocation_slowdown: f64,
pub cache_impact: CacheImpact,
pub bandwidth_impact: f64,
pub performance_score: f64,
pub future_impact: FutureImpactPrediction,
}
/// Estimated cache/TLB effects of fragmentation (all values heuristic).
#[derive(Debug, Clone)]
pub struct CacheImpact {
pub l1_hit_rate_reduction: f64,
pub l2_hit_rate_reduction: f64,
pub tlb_miss_increase: f64,
pub cache_line_efficiency: f64,
}
/// Predicted fragmentation levels at fixed horizons, plus a recommended
/// timeframe for corrective action.
#[derive(Debug, Clone)]
pub struct FutureImpactPrediction {
pub one_hour_prediction: f64,
pub one_day_prediction: f64,
/// Confidence in the prediction, in [0.0, 1.0].
pub confidence: f64,
pub action_timeline: ActionTimeline,
}
/// How urgently corrective action should be taken.
#[derive(Debug, Clone)]
pub enum ActionTimeline {
Immediate,
Within1Hour,
Within1Day,
Within1Week,
LongTerm,
}
/// System/workload context captured alongside a fragmentation event.
#[derive(Debug, Clone)]
pub struct FragmentationContext {
pub memory_pressure: f64,
pub concurrent_allocations: usize,
pub workload_type: WorkloadType,
pub allocation_pattern: AllocationPattern,
pub system_load: SystemLoad,
}
/// Coarse classification of the workload driving allocations.
#[derive(Debug, Clone)]
pub enum WorkloadType {
ComputeIntensive,
MemoryIntensive,
StreamingWorkload,
BatchProcessing,
InteractiveWorkload,
MachineLearning,
Unknown,
}
/// Coarse classification of the observed allocation pattern.
#[derive(Debug, Clone)]
pub enum AllocationPattern {
Sequential,
Random,
Clustered,
Periodic,
Bursty,
Streaming,
}
/// Snapshot of overall system load at event time.
/// NOTE(review): utilization fields appear to be percentages (0-100) as
/// populated by `create_fragmentation_context` — confirm units with callers.
#[derive(Debug, Clone)]
pub struct SystemLoad {
pub cpu_utilization: f64,
pub memory_utilization: f64,
pub io_pressure: f64,
pub active_threads: usize,
}
/// Running totals and derived averages for compaction activity.
/// Derived fields are refreshed by `update_compaction_averages`.
#[derive(Debug, Clone, Default)]
pub struct CompactionStats {
pub total_compactions: u64,
pub total_compaction_time: Duration,
pub total_memory_recovered: usize,
/// total_compaction_time / total_compactions.
pub average_compaction_time: Duration,
/// Bytes recovered per millisecond of compaction work.
pub compaction_efficiency: f64,
pub last_compaction: Option<Instant>,
/// NOTE(review): never updated anywhere in this module — verify intent.
pub success_rate: f64,
pub avg_memory_recovered: usize,
pub frequency_stats: CompactionFrequencyStats,
}
/// Compaction counts over recent time windows.
#[derive(Debug, Clone, Default)]
pub struct CompactionFrequencyStats {
pub last_hour: u32,
pub last_day: u32,
pub last_week: u32,
pub peak_frequency: u32,
}
/// Extended fragmentation metrics maintained by `update_advanced_metrics`.
#[derive(Debug, Default, Clone)]
pub struct AdvancedFragmentationMetrics {
/// Shannon entropy of the free-block size distribution (bits).
pub free_block_entropy: f64,
/// Currently set to the raw fragmentation score — confirm intended formula.
pub fowler_index: f64,
pub external_fragmentation: f64,
pub internal_fragmentation: f64,
pub spatial_fragmentation: f64,
pub temporal_patterns: TemporalFragmentationPatterns,
pub heat_map: FragmentationHeatMap,
pub predictive_metrics: PredictiveFragmentationMetrics,
}
/// Time-based fragmentation patterns (daily/weekly cycles, trend, seasons).
#[derive(Debug, Default, Clone)]
pub struct TemporalFragmentationPatterns {
pub daily_cycle: Vec<f64>,
pub weekly_pattern: Vec<f64>,
pub trend: FragmentationTrend,
pub seasonal_patterns: Vec<SeasonalPattern>,
}
/// Direction of fragmentation change over time.
#[derive(Debug, Clone)]
pub enum FragmentationTrend {
Increasing { rate: f64 },
Decreasing { rate: f64 },
Stable,
Cyclical { period: Duration },
}
impl Default for FragmentationTrend {
fn default() -> Self {
FragmentationTrend::Stable
}
}
/// A named periodic component of fragmentation behavior.
#[derive(Debug, Clone)]
pub struct SeasonalPattern {
pub name: String,
pub period: Duration,
pub amplitude: f64,
/// Phase offset within the period.
pub phase: Duration,
}
/// Spatial fragmentation map: (range start, range end, fragmentation score)
/// per address range, plus rendering data.
#[derive(Debug, Default, Clone)]
pub struct FragmentationHeatMap {
pub address_ranges: Vec<(usize, usize, f64)>,
pub resolution: usize,
pub last_update: Option<Instant>,
pub visualization_data: Vec<u8>,
}
/// Forward-looking fragmentation metrics.
#[derive(Debug, Default, Clone)]
pub struct PredictiveFragmentationMetrics {
/// Rate of change of the fragmentation score (score units per second).
pub fragmentation_velocity: f64,
pub fragmentation_acceleration: f64,
pub predicted_peak_time: Option<Instant>,
pub predicted_peak_level: f64,
pub risk_assessment: FragmentationRisk,
}
/// Risk level, ordered Low < Medium < High < Critical (derives Ord).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum FragmentationRisk {
Low,
Medium,
High,
Critical,
}
impl Default for FragmentationRisk {
fn default() -> Self {
FragmentationRisk::Low
}
}
/// Simple history-based model for forecasting fragmentation levels.
#[derive(Debug)]
pub struct FragmentationPredictionModel {
/// Bounded sliding window of observations (see `model_params.history_window`).
history: VecDeque<FragmentationDataPoint>,
model_params: PredictionModelParams,
accuracy_tracker: PredictionAccuracyTracker,
model_state: ModelState,
}
/// One observation fed into the prediction model.
#[derive(Debug, Clone)]
pub struct FragmentationDataPoint {
pub timestamp: Instant,
pub fragmentation_level: f64,
pub factors: FragmentationFactors,
pub system_state: SystemState,
}
/// Allocator activity factors associated with an observation.
#[derive(Debug, Clone)]
pub struct FragmentationFactors {
/// Allocations per second.
pub allocation_rate: f64,
/// Deallocations per second.
pub deallocation_rate: f64,
pub avg_allocation_size: usize,
pub size_variance: f64,
pub memory_pressure: f64,
}
/// System-wide memory state at observation time.
#[derive(Debug, Clone)]
pub struct SystemState {
pub total_memory_usage: usize,
pub available_memory: usize,
pub active_allocations: usize,
pub uptime: Duration,
}
/// Tunable parameters of the prediction model.
#[derive(Debug, Clone)]
struct PredictionModelParams {
/// Maximum number of data points retained in `history`.
history_window: usize,
prediction_horizon: Duration,
learning_rate: f64,
smoothing_factor: f64,
}
/// Accuracy bookkeeping for past predictions.
/// NOTE(review): fields are initialized but never updated in this module.
#[derive(Debug)]
struct PredictionAccuracyTracker {
correct_predictions: u64,
total_predictions: u64,
mean_absolute_error: f64,
root_mean_square_error: f64,
}
/// Internal learned state of the prediction model.
#[derive(Debug)]
struct ModelState {
weights: Vec<f64>,
bias: f64,
last_prediction: Option<Instant>,
/// Confidence in the model's output, in [0.0, 1.0].
confidence: f64,
}
/// Configuration thresholds and switches for the tracker.
#[derive(Debug, Clone)]
pub struct FragmentationConfig {
/// Score above which a fragmentation event is recorded.
pub alert_threshold: f64,
pub critical_threshold: f64,
/// Score above which automatic compaction may trigger.
pub compaction_threshold: f64,
pub auto_compaction: bool,
/// Maximum compactions per time unit — NOTE(review): not enforced anywhere
/// in this module; confirm whether enforcement lives elsewhere.
pub max_compaction_frequency: u32,
pub enable_prediction: bool,
pub metrics_interval: Duration,
pub history_retention: Duration,
}
impl FragmentationTracker {
/// Creates an empty tracker governed by `config`.
///
/// Advanced metrics and the prediction model live behind `Arc<Mutex<_>>` so
/// that `&self` methods can update them.
pub fn new(config: FragmentationConfig) -> Self {
Self {
free_blocks: HashMap::new(),
fragmentation_scores: HashMap::new(),
largest_free_block: HashMap::new(),
fragmentation_events: Vec::new(),
compaction_stats: CompactionStats::default(),
config,
advanced_metrics: Arc::new(Mutex::new(AdvancedFragmentationMetrics::default())),
prediction_model: Arc::new(Mutex::new(FragmentationPredictionModel::new())),
}
}
/// Replaces the free-block map for `device`, recomputes its fragmentation
/// score, records an alert event if the score exceeds the configured alert
/// threshold, refreshes advanced metrics, and optionally triggers automatic
/// compaction when the compaction threshold is exceeded.
pub fn update_free_blocks(&mut self, device: Device, free_blocks: BTreeMap<usize, usize>) {
    let fragmentation_score = self.calculate_fragmentation_score(&free_blocks);
    // BTreeMap keys are sorted ascending, so the last key is the largest size.
    let largest_block = free_blocks.keys().last().copied().unwrap_or(0);
    // BUGFIX: record the event BEFORE overwriting `fragmentation_scores`.
    // `record_fragmentation_event` reads the map to obtain the *previous*
    // score; inserting first made `fragmentation_before` always equal
    // `fragmentation_after` in every recorded event.
    if fragmentation_score > self.config.alert_threshold {
        self.record_fragmentation_event(device.clone(), fragmentation_score);
    }
    self.free_blocks.insert(device.clone(), free_blocks);
    self.fragmentation_scores
        .insert(device.clone(), fragmentation_score);
    self.largest_free_block
        .insert(device.clone(), largest_block);
    // Advanced metrics read the freshly inserted free-block map.
    self.update_advanced_metrics(device.clone(), fragmentation_score);
    if self.config.auto_compaction && fragmentation_score > self.config.compaction_threshold {
        self.trigger_automatic_compaction(device.clone());
    }
}
/// Scores fragmentation of a free-block map in [0.0, 1.0].
///
/// Blends two signals: how small the largest free block is relative to the
/// total free bytes (weight 0.7), and how many separate free blocks exist
/// (log-scaled, weight 0.3). Empty or all-zero maps score 0.0.
fn calculate_fragmentation_score(&self, free_blocks: &BTreeMap<usize, usize>) -> f64 {
    // Total free bytes across all size classes; covers the empty-map case too.
    let bytes_free: usize = free_blocks.iter().map(|(&size, &count)| size * count).sum();
    if bytes_free == 0 {
        return 0.0;
    }
    // Keys are sorted ascending, so the back of the iterator is the largest size.
    let largest = free_blocks.keys().next_back().copied().unwrap_or(0);
    let block_count: usize = free_blocks.values().sum();
    let size_component = 1.0 - (largest as f64 / bytes_free as f64);
    let count_component = (block_count as f64).log2() / 20.0;
    ((size_component * 0.7) + (count_component * 0.3)).min(1.0)
}
/// Appends a `FragmentationEvent` for `device` capturing the score
/// transition, inferred cause, impact assessment and context, then feeds the
/// new level into the prediction model.
///
/// NOTE(review): `fragmentation_before` is read from `fragmentation_scores`,
/// so it is only meaningful if the caller has not yet stored the new score
/// for this device — verify call ordering in `update_free_blocks`.
fn record_fragmentation_event(&mut self, device: Device, current_fragmentation: f64) {
let previous_fragmentation = self
.fragmentation_scores
.get(&device)
.copied()
.unwrap_or(0.0);
let cause = self.determine_fragmentation_cause(device.clone(), current_fragmentation);
let impact = self.assess_fragmentation_impact(device.clone(), current_fragmentation);
let context = self.create_fragmentation_context(device.clone());
let event = FragmentationEvent {
timestamp: Instant::now(),
device: device.clone(),
fragmentation_before: previous_fragmentation,
fragmentation_after: current_fragmentation,
cause,
// No recovery has happened yet at recording time.
recovery_action: None,
impact,
context,
};
self.fragmentation_events.push(event);
self.update_prediction_model(device, current_fragmentation);
}
/// Classifies the likely root cause of the observed fragmentation.
///
/// TODO: placeholder — always reports `MixedAllocationSizes` with fixed
/// figures; a real implementation should inspect per-device allocation
/// history and size distributions.
fn determine_fragmentation_cause(
&self,
_device: Device,
_fragmentation_level: f64,
) -> FragmentationCause {
FragmentationCause::MixedAllocationSizes {
size_variance: 0.8,
allocation_count: 1000,
}
}
/// Derives a heuristic impact assessment from a fragmentation level.
///
/// All components are simple linear functions of the level: efficiency
/// degrades 1:1, allocation slowdown rises up to 3x, and the action
/// timeline tightens as the level crosses 0.6 and 0.8.
fn assess_fragmentation_impact(
    &self,
    _device: Device,
    fragmentation_level: f64,
) -> FragmentationImpact {
    let frag = fragmentation_level;
    // Urgency buckets: > 0.8 act now, > 0.6 within the hour, else within a day.
    let action_timeline = if frag > 0.8 {
        ActionTimeline::Immediate
    } else if frag > 0.6 {
        ActionTimeline::Within1Hour
    } else {
        ActionTimeline::Within1Day
    };
    FragmentationImpact {
        memory_efficiency: 1.0 - frag,
        allocation_slowdown: 1.0 + (frag * 2.0),
        cache_impact: CacheImpact {
            l1_hit_rate_reduction: frag * 0.1,
            l2_hit_rate_reduction: frag * 0.15,
            tlb_miss_increase: frag * 0.2,
            cache_line_efficiency: 1.0 - (frag * 0.3),
        },
        bandwidth_impact: frag * 0.25,
        // Performance is efficiency scaled by a fixed 0.8 weight.
        performance_score: (1.0 - frag) * 0.8,
        future_impact: FutureImpactPrediction {
            // Pessimistic growth: +10% in an hour, +30% in a day, capped at 1.0.
            one_hour_prediction: (frag * 1.1).min(1.0),
            one_day_prediction: (frag * 1.3).min(1.0),
            confidence: 0.7,
            action_timeline,
        },
    }
}
/// Builds the contextual snapshot stored with a fragmentation event.
///
/// All values are heuristic proxies: memory pressure reuses the device's
/// last fragmentation score, workload type is guessed from how many events
/// have been logged, and the allocation pattern from free-list diversity.
fn create_fragmentation_context(&self, device: Device) -> FragmentationContext {
    // Proxy for memory pressure; 0.5 when the device has no recorded score.
    let memory_pressure = self
        .fragmentation_scores
        .get(&device)
        .copied()
        .unwrap_or(0.5);
    // Number of distinct free-block size classes; 10 when unknown.
    let concurrent_allocations = self.free_blocks.get(&device).map_or(10, |b| b.len());
    // Crude workload classification from the total event count so far.
    let event_count = self.fragmentation_events.len();
    let workload_type = if event_count > 10 {
        WorkloadType::StreamingWorkload
    } else if event_count > 5 {
        WorkloadType::BatchProcessing
    } else {
        WorkloadType::InteractiveWorkload
    };
    // Few size classes suggests sequential allocation; otherwise random.
    let allocation_pattern = match self.free_blocks.get(&device) {
        Some(blocks) if blocks.len() < 5 => AllocationPattern::Sequential,
        _ => AllocationPattern::Random,
    };
    let system_load = SystemLoad {
        cpu_utilization: memory_pressure * 100.0,
        memory_utilization: memory_pressure * 100.0,
        io_pressure: (memory_pressure * 50.0).min(100.0),
        active_threads: num_cpus::get(),
    };
    FragmentationContext {
        memory_pressure,
        concurrent_allocations,
        workload_type,
        allocation_pattern,
        system_load,
    }
}
/// Refreshes the lock-protected advanced metrics for `device` from the
/// latest fragmentation score, and reclassifies the overall risk level.
fn update_advanced_metrics(&self, device: Device, fragmentation_score: f64) {
    let mut metrics = self.advanced_metrics.lock();
    if let Some(blocks) = self.free_blocks.get(&device) {
        metrics.free_block_entropy = self.calculate_shannon_entropy(blocks);
    }
    // Fixed-split heuristics derived directly from the composite score.
    metrics.fowler_index = fragmentation_score;
    metrics.external_fragmentation = fragmentation_score * 0.7;
    metrics.internal_fragmentation = fragmentation_score * 0.3;
    metrics.spatial_fragmentation = fragmentation_score;
    let velocity = self.calculate_fragmentation_velocity(&device);
    metrics.predictive_metrics.fragmentation_velocity = velocity;
    // Risk rises with either a high absolute level or rapid change.
    let risk = if fragmentation_score > 0.8 || velocity > 0.1 {
        FragmentationRisk::Critical
    } else if fragmentation_score > 0.6 || velocity > 0.05 {
        FragmentationRisk::High
    } else if fragmentation_score > 0.4 || velocity > 0.02 {
        FragmentationRisk::Medium
    } else {
        FragmentationRisk::Low
    };
    metrics.predictive_metrics.risk_assessment = risk;
}
/// Shannon entropy (bits) of the free-block size distribution.
///
/// Higher entropy means free space is spread more evenly across size
/// classes; an empty map yields 0.0.
fn calculate_shannon_entropy(&self, free_blocks: &BTreeMap<usize, usize>) -> f64 {
    let total: usize = free_blocks.values().sum();
    if total == 0 {
        return 0.0;
    }
    free_blocks
        .values()
        .filter(|&&count| count > 0)
        .map(|&count| {
            let p = count as f64 / total as f64;
            -(p * p.log2())
        })
        .sum()
}
fn calculate_fragmentation_velocity(&self, device: &Device) -> f64 {
let recent_events: Vec<_> = self
.fragmentation_events
.iter()
.filter(|event| event.device == *device)
.rev()
.take(10)
.collect();
if recent_events.len() < 2 {
return 0.0; }
let mut total_change = 0.0;
let mut total_time = Duration::from_secs(0);
for i in 0..recent_events.len() - 1 {
let current = recent_events[i];
let previous = recent_events[i + 1];
let fragmentation_change =
(current.fragmentation_after - previous.fragmentation_after).abs();
let time_diff = current
.timestamp
.duration_since(previous.timestamp)
.as_secs_f64();
if time_diff > 0.0 {
total_change += fragmentation_change;
total_time += Duration::from_secs_f64(time_diff);
}
}
if total_time.as_secs_f64() > 0.0 {
total_change / total_time.as_secs_f64()
} else {
0.0
}
}
/// Runs a compaction pass for `device` and folds the result into
/// `compaction_stats`, keeping the derived averages in sync.
fn trigger_automatic_compaction(&mut self, device: Device) {
    let start_time = Instant::now();
    let (blocks_moved, memory_recovered) = self.perform_smart_compaction(&device);
    let compaction_time = start_time.elapsed();
    self.compaction_stats.total_compactions += 1;
    self.compaction_stats.total_compaction_time += compaction_time;
    self.compaction_stats.total_memory_recovered += memory_recovered;
    self.compaction_stats.last_compaction = Some(start_time);
    // BUGFIX: derived statistics (average time, average recovered bytes,
    // efficiency) were never refreshed — `update_compaction_averages`
    // existed but had no caller.
    self.update_compaction_averages();
    #[cfg(feature = "std")]
    if memory_recovered > 0 {
        println!(
            "Compaction completed: moved {} blocks, recovered {}KB",
            blocks_moved,
            memory_recovered / 1024,
        );
    }
    // Silence the unused-variable warning when the log line is compiled out.
    #[cfg(not(feature = "std"))]
    let _ = blocks_moved;
}
/// Heuristic compaction pass for `device`: every size class smaller than
/// 64 KiB with more than one free block is treated as coalescable into a
/// single consolidated block. Returns `(blocks_moved, memory_recovered)`.
///
/// NOTE(review): "memory recovered" is `size * (count - 1)` per class, i.e.
/// the bytes folded into the consolidated block — total free memory does not
/// actually change. Confirm this is the intended metric.
fn perform_smart_compaction(&mut self, device: &Device) -> (usize, usize) {
let mut blocks_moved = 0;
let mut memory_recovered = 0;
let mut largest_free_block_before = 0;
let mut _total_free_memory_before = 0;
// Snapshot pre-compaction totals (only the largest block is used today).
if let Some(device_free_blocks) = self.free_blocks.get(device) {
for (&size, &count) in device_free_blocks {
_total_free_memory_before += size * count;
largest_free_block_before = largest_free_block_before.max(size);
}
}
// Collect size classes considered fragmented: small (< 64 KiB) and duplicated.
let mut fragmented_ranges = Vec::new();
if let Some(device_free_blocks) = self.free_blocks.get(device) {
for (&size, &count) in device_free_blocks {
if size < 64 * 1024 && count > 1 {
fragmented_ranges.push((size, count));
}
}
}
let mut compacted_blocks = Vec::new();
for (size, count) in fragmented_ranges {
if count > 1 {
// Fold `count` blocks of `size` into one block of `size * count`.
let consolidated_size = size * count;
blocks_moved += count;
memory_recovered += consolidated_size - size; compacted_blocks.push((size, count));
}
}
// Currently a no-op placeholder — the free-block map is not mutated.
self.apply_compaction_moves(&compacted_blocks);
(blocks_moved, memory_recovered)
}
/// Whether an allocation of `size` bytes on `device` is worth relocating
/// during compaction. Each backend has a minimum size below which the copy
/// overhead outweighs the benefit.
fn can_move_allocation(&self, _address: usize, size: usize, device: &Device) -> bool {
    use torsh_core::device::DeviceType;
    // Per-backend minimum movable size, in bytes.
    let min_movable = match device.device_type() {
        DeviceType::Cpu => 4096,
        DeviceType::Cuda(_) => 1024 * 1024,
        DeviceType::Metal(_) => 512 * 1024,
        _ => 2 * 1024 * 1024,
    };
    size >= min_movable
}
/// Applies the planned consolidation moves to the allocator.
///
/// TODO: intentionally a no-op placeholder — `perform_smart_compaction`
/// only computes statistics until real block relocation is implemented.
fn apply_compaction_moves(&mut self, _compacted_blocks: &[(usize, usize)]) {
}
/// Largest free block size across every tracked device (0 if none tracked).
fn find_largest_free_block(&self) -> usize {
    self.largest_free_block
        .values()
        .copied()
        .max()
        .unwrap_or(0)
}
/// Recomputes the derived compaction statistics (average time, average
/// recovered bytes, and efficiency in bytes per millisecond) from the raw
/// running totals. No-op when no compaction has run yet.
fn update_compaction_averages(&mut self) {
    let runs = self.compaction_stats.total_compactions;
    if runs > 0 {
        // BUGFIX: `runs as u32` / `runs as usize` silently truncate a u64;
        // saturate instead so huge counts degrade gracefully.
        let divisor_u32 = u32::try_from(runs).unwrap_or(u32::MAX);
        let divisor_usize = usize::try_from(runs).unwrap_or(usize::MAX);
        self.compaction_stats.average_compaction_time =
            self.compaction_stats.total_compaction_time / divisor_u32;
        self.compaction_stats.avg_memory_recovered =
            self.compaction_stats.total_memory_recovered / divisor_usize;
        let total_ms = self.compaction_stats.total_compaction_time.as_millis() as f64;
        if total_ms > 0.0 {
            // Efficiency: bytes recovered per millisecond of compaction work.
            self.compaction_stats.compaction_efficiency =
                self.compaction_stats.total_memory_recovered as f64 / total_ms;
        }
    }
}
/// Feeds the latest fragmentation level into the prediction model as a new
/// data point.
///
/// TODO(review): the factor and system-state values below are hard-coded
/// placeholders until real allocator telemetry is wired in.
fn update_prediction_model(&self, _device: Device, fragmentation_level: f64) {
    let data_point = FragmentationDataPoint {
        timestamp: Instant::now(),
        fragmentation_level,
        factors: FragmentationFactors {
            allocation_rate: 100.0,
            deallocation_rate: 90.0,
            avg_allocation_size: 1024,
            size_variance: 0.5,
            memory_pressure: 0.6,
        },
        system_state: SystemState {
            total_memory_usage: 1024 * 1024 * 1024,
            available_memory: 256 * 1024 * 1024,
            active_allocations: 1000,
            uptime: Duration::from_secs(3600),
        },
    };
    self.prediction_model.lock().add_data_point(data_point);
}
/// External fragmentation for `device`: 1 - (largest free block / total
/// free bytes). Returns 0.0 for unknown devices or empty free lists.
fn calculate_external_fragmentation(&self, device: &Device) -> f64 {
    match self.free_blocks.get(device) {
        Some(blocks) if !blocks.is_empty() => {
            let total_free: usize = blocks.iter().map(|(&size, &count)| size * count).sum();
            if total_free == 0 {
                0.0
            } else {
                // Keys are sorted, so the back of the iterator is the max size.
                let largest = blocks.keys().next_back().copied().unwrap_or(0);
                1.0 - (largest as f64 / total_free as f64)
            }
        }
        _ => 0.0,
    }
}
/// Latest fragmentation score for `device`, or `None` if never updated.
pub fn get_fragmentation_score(&self, device: Device) -> Option<f64> {
self.fragmentation_scores.get(&device).copied()
}
/// Largest free block size (bytes) for `device`, or `None` if never updated.
pub fn get_largest_free_block(&self, device: Device) -> Option<usize> {
self.largest_free_block.get(&device).copied()
}
/// Events recorded within the last `since` duration, newest data only.
///
/// Uses `checked_sub` because `Instant::now() - since` panics when `since`
/// reaches before the earliest representable instant (e.g. a huge duration
/// early in process lifetime); in that case every event qualifies.
pub fn get_recent_events(&self, since: Duration) -> Vec<&FragmentationEvent> {
    match Instant::now().checked_sub(since) {
        Some(cutoff) => self
            .fragmentation_events
            .iter()
            .filter(|event| event.timestamp > cutoff)
            .collect(),
        // Window extends past the representable past: everything is recent.
        None => self.fragmentation_events.iter().collect(),
    }
}
/// Read-only view of the aggregated compaction statistics.
pub fn get_compaction_stats(&self) -> &CompactionStats {
&self.compaction_stats
}
/// Snapshot of the advanced metrics; clones under the lock so the caller
/// never holds the mutex.
pub fn get_advanced_metrics(&self) -> AdvancedFragmentationMetrics {
    self.advanced_metrics.lock().clone()
}
/// Forecasts the fragmentation level for `device` at `time_horizon` ahead;
/// `None` when the model lacks enough history.
pub fn predict_fragmentation(&self, device: Device, time_horizon: Duration) -> Option<f64> {
    self.prediction_model
        .lock()
        .predict_fragmentation(device, time_horizon)
}
/// Drops fragmentation events and prediction history older than `max_age`.
///
/// Uses `checked_sub` because `Instant::now() - max_age` panics when
/// `max_age` reaches before the earliest representable instant; in that
/// case nothing is old enough to drop.
pub fn cleanup_old_data(&mut self, max_age: Duration) {
    if let Some(cutoff) = Instant::now().checked_sub(max_age) {
        self.fragmentation_events
            .retain(|event| event.timestamp > cutoff);
        self.prediction_model.lock().cleanup_old_data(cutoff);
    }
}
}
impl FragmentationPredictionModel {
/// Creates an untrained model with default hyperparameters: a 1000-point
/// sliding window, a 24-hour prediction horizon, and neutral initial
/// weights/confidence.
fn new() -> Self {
Self {
history: VecDeque::new(),
model_params: PredictionModelParams {
history_window: 1000,
prediction_horizon: Duration::from_secs(24 * 60 * 60),
learning_rate: 0.01,
smoothing_factor: 0.1,
},
accuracy_tracker: PredictionAccuracyTracker {
correct_predictions: 0,
total_predictions: 0,
mean_absolute_error: 0.0,
root_mean_square_error: 0.0,
},
model_state: ModelState {
weights: vec![0.0; 10],
bias: 0.0,
last_prediction: None,
confidence: 0.5,
},
}
}
/// Appends an observation, trims the history to the configured sliding
/// window, and refits the model.
fn add_data_point(&mut self, data_point: FragmentationDataPoint) {
    self.history.push_back(data_point);
    let window = self.model_params.history_window;
    if self.history.len() > window {
        // Drop the oldest entries in one pass.
        let excess = self.history.len() - window;
        self.history.drain(..excess);
    }
    self.update_model();
}
/// Refits the model from the accumulated history.
///
/// TODO: placeholder — only bumps confidence once more than ten samples
/// exist; weights and bias are never trained yet.
fn update_model(&mut self) {
if self.history.len() > 10 {
self.model_state.confidence = 0.8;
}
}
/// Naive forecast: mean of the five most recent fragmentation levels,
/// inflated by 10% as a pessimistic trend guess and capped at 1.0.
/// Returns `None` until at least five observations exist. The device and
/// horizon are currently ignored.
fn predict_fragmentation(&self, _device: Device, _time_horizon: Duration) -> Option<f64> {
    if self.history.len() < 5 {
        return None;
    }
    let mut level_sum = 0.0;
    let mut samples = 0usize;
    for point in self.history.iter().rev().take(5) {
        level_sum += point.fragmentation_level;
        samples += 1;
    }
    let mean_level = level_sum / samples as f64;
    Some((mean_level * 1.1).min(1.0))
}
/// Drops history entries recorded at or before `cutoff`.
fn cleanup_old_data(&mut self, cutoff: Instant) {
self.history
.retain(|data_point| data_point.timestamp > cutoff);
}
}
impl Default for FragmentationConfig {
/// Conservative defaults: alert at 30% fragmentation, compact at 50%,
/// critical at 70%; metrics every minute, history kept for one week.
fn default() -> Self {
Self {
alert_threshold: 0.3,
critical_threshold: 0.7,
compaction_threshold: 0.5,
auto_compaction: true,
max_compaction_frequency: 10,
enable_prediction: true,
metrics_interval: Duration::from_secs(60),
// One week.
history_retention: Duration::from_secs(7 * 24 * 60 * 60),
}
}
}
impl std::fmt::Display for FragmentationCause {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FragmentationCause::MixedAllocationSizes {
size_variance,
allocation_count,
} => {
write!(
f,
"Mixed allocation sizes (variance: {:.2}, count: {})",
size_variance, allocation_count
)
}
FragmentationCause::FrequentAllocDealloc {
alloc_frequency,
dealloc_frequency,
} => {
write!(
f,
"Frequent alloc/dealloc (alloc: {:.1}/s, dealloc: {:.1}/s)",
alloc_frequency, dealloc_frequency
)
}
FragmentationCause::LongLivedAllocations {
allocation_age,
blocking_compaction,
} => {
write!(
f,
"Long-lived allocations (age: {:?}, blocking: {})",
allocation_age, blocking_compaction
)
}
FragmentationCause::MisalignedAllocations {
alignment_requirement,
waste_percentage,
} => {
write!(
f,
"Misaligned allocations (align: {} bytes, waste: {:.1}%)",
alignment_requirement, waste_percentage
)
}
FragmentationCause::PoolOverflow {
pool_name,
overflow_amount,
} => {
write!(
f,
"Pool overflow ({}: {} bytes)",
pool_name, overflow_amount
)
}
FragmentationCause::TemporalPatterns {
pattern_type,
periodicity,
} => {
write!(
f,
"Temporal patterns ({}, period: {:?})",
pattern_type, periodicity
)
}
FragmentationCause::SizeClustering {
cluster_sizes,
fragmentation_factor,
} => {
write!(
f,
"Size clustering ({:?}, factor: {:.2})",
cluster_sizes, fragmentation_factor
)
}
}
}
}
impl std::fmt::Display for FragmentationRisk {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FragmentationRisk::Low => write!(f, "Low"),
FragmentationRisk::Medium => write!(f, "Medium"),
FragmentationRisk::High => write!(f, "High"),
FragmentationRisk::Critical => write!(f, "Critical"),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A fresh tracker starts with no scores and zeroed compaction stats.
    #[test]
    fn test_fragmentation_tracker_creation() {
        let tracker = FragmentationTracker::new(FragmentationConfig::default());
        assert!(tracker.fragmentation_scores.is_empty());
        assert_eq!(tracker.compaction_stats.total_compactions, 0);
    }

    /// A mixed set of small free blocks scores between 0 and 1 exclusive.
    #[test]
    fn test_fragmentation_score_calculation() {
        let tracker = FragmentationTracker::new(FragmentationConfig::default());
        let mut blocks = BTreeMap::new();
        blocks.insert(1024, 5);
        blocks.insert(2048, 3);
        blocks.insert(4096, 1);
        let score = tracker.calculate_fragmentation_score(&blocks);
        assert!(score > 0.0 && score < 1.0);
    }

    /// Many small free blocks push the score past the alert threshold,
    /// which must produce at least one recorded event.
    #[test]
    fn test_fragmentation_event_recording() {
        let mut tracker = FragmentationTracker::new(FragmentationConfig::default());
        let device = Device::cpu().expect("Device should succeed");
        let mut blocks = BTreeMap::new();
        blocks.insert(1024, 100);
        tracker.update_free_blocks(device, blocks);
        assert!(!tracker.fragmentation_events.is_empty());
    }

    /// Three equally likely size classes give strictly positive entropy.
    #[test]
    fn test_shannon_entropy_calculation() {
        let tracker = FragmentationTracker::new(FragmentationConfig::default());
        let mut blocks = BTreeMap::new();
        blocks.insert(1024, 1);
        blocks.insert(2048, 1);
        blocks.insert(4096, 1);
        assert!(tracker.calculate_shannon_entropy(&blocks) > 0.0);
    }

    /// Derived averages follow directly from the raw totals.
    #[test]
    fn test_compaction_stats_update() {
        let mut tracker = FragmentationTracker::new(FragmentationConfig::default());
        tracker.compaction_stats.total_compactions = 1;
        tracker.compaction_stats.total_compaction_time = Duration::from_millis(100);
        tracker.compaction_stats.total_memory_recovered = 1024;
        tracker.update_compaction_averages();
        assert_eq!(
            tracker.compaction_stats.average_compaction_time,
            Duration::from_millis(100)
        );
        assert_eq!(tracker.compaction_stats.avg_memory_recovered, 1024);
        assert!(tracker.compaction_stats.compaction_efficiency > 0.0);
    }

    /// At level 0.7: efficiency 0.3, slowdown 2.4, action within the hour.
    #[test]
    fn test_fragmentation_impact_assessment() {
        let tracker = FragmentationTracker::new(FragmentationConfig::default());
        let impact = tracker.assess_fragmentation_impact(
            Device::cpu().expect("fragmentation impact assessment should succeed"),
            0.7,
        );
        assert!((impact.memory_efficiency - 0.3).abs() < 1e-10);
        assert!((impact.allocation_slowdown - 2.4).abs() < 1e-10);
        assert!(impact.performance_score < 1.0);
        assert!(matches!(
            impact.future_impact.action_timeline,
            ActionTimeline::Within1Hour
        ));
    }

    /// The model stores points but refuses to predict with fewer than five.
    #[test]
    fn test_prediction_model() {
        let mut model = FragmentationPredictionModel::new();
        let point = FragmentationDataPoint {
            timestamp: Instant::now(),
            fragmentation_level: 0.5,
            factors: FragmentationFactors {
                allocation_rate: 100.0,
                deallocation_rate: 90.0,
                avg_allocation_size: 1024,
                size_variance: 0.3,
                memory_pressure: 0.4,
            },
            system_state: SystemState {
                total_memory_usage: 1024 * 1024 * 1024,
                available_memory: 256 * 1024 * 1024,
                active_allocations: 1000,
                uptime: Duration::from_secs(3600),
            },
        };
        model.add_data_point(point);
        assert_eq!(model.history.len(), 1);
        let prediction = model.predict_fragmentation(
            Device::cpu().expect("fragmentation prediction should succeed"),
            Duration::from_secs(60 * 60),
        );
        assert!(prediction.is_none());
    }

    /// Risk levels derive `Ord`, so they compare in severity order.
    #[test]
    fn test_fragmentation_risk_ordering() {
        assert!(FragmentationRisk::Low < FragmentationRisk::Medium);
        assert!(FragmentationRisk::Medium < FragmentationRisk::High);
        assert!(FragmentationRisk::High < FragmentationRisk::Critical);
    }
}