use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, OnceLock};
use std::time::{Duration, Instant};
/// Lazily-initialized process-wide monitor registry. Populated by
/// `start_monitoring`; `stop_monitoring` disables it and clears the monitors
/// but leaves the cell itself initialized.
static GLOBAL_MONITOR: OnceLock<Arc<Mutex<GlobalMemoryMonitor>>> = OnceLock::new();
/// Process-wide registry of named [`MemoryMonitor`]s plus aggregate stats.
/// Stored behind `GLOBAL_MONITOR` and shared via `Arc<Mutex<_>>`.
#[derive(Debug)]
struct GlobalMemoryMonitor {
    /// Registered monitors, keyed by the name passed to `MemoryMonitor::new`.
    monitors: HashMap<String, Arc<Mutex<MemoryMonitor>>>,
    /// Aggregate allocation/deallocation statistics across all monitors.
    global_stats: GlobalMemoryStats,
    /// When false, `register_monitor` refuses new registrations.
    enabled: bool,
    /// Upper bound on the number of registered monitors.
    max_monitors: usize,
}
/// Aggregate memory statistics accumulated across every registered monitor.
#[derive(Debug, Clone)]
pub struct GlobalMemoryStats {
    /// Bytes currently believed allocated (allocations minus deallocations).
    pub total_allocated_bytes: usize,
    /// Highest value `total_allocated_bytes` has reached.
    pub peak_total_bytes: usize,
    /// Number of monitors currently registered.
    pub active_interpolators: usize,
    /// Lifetime count of tracked allocation events.
    pub total_allocations: u64,
    /// Lifetime count of tracked deallocation events.
    pub total_deallocations: u64,
    /// When these statistics started accumulating.
    pub monitoring_start: Instant,
}
impl Default for GlobalMemoryStats {
fn default() -> Self {
Self {
total_allocated_bytes: 0,
peak_total_bytes: 0,
active_interpolators: 0,
total_allocations: 0,
total_deallocations: 0,
monitoring_start: Instant::now(),
}
}
}
/// Per-component memory tracker: per-category totals, a bounded event
/// history, leak-detection bookkeeping, and derived performance metrics.
#[derive(Debug)]
pub struct MemoryMonitor {
    /// Monitor name (also used as the key in the global registry).
    name: String,
    /// Currently allocated bytes per category.
    allocations: HashMap<String, usize>,
    /// Recent allocation/deallocation events (trimmed to 10 000 entries on
    /// the allocation path).
    allocation_history: VecDeque<AllocationEvent>,
    /// Highest value `current_memory_bytes` has reached.
    peak_memory_bytes: usize,
    /// Bytes currently tracked as allocated.
    current_memory_bytes: usize,
    /// Counters and per-allocation records feeding the leak heuristics.
    leak_stats: LeakDetectionStats,
    /// Derived metrics (average allocation size, cache-hit ratio, ...).
    perf_metrics: MemoryPerformanceMetrics,
    /// When false, `track_*` calls are ignored.
    active: bool,
    /// Creation time of this monitor.
    created_at: Instant,
}
/// One tracked allocation or deallocation event.
#[derive(Debug, Clone)]
struct AllocationEvent {
    /// Whether this was an allocation or a deallocation.
    event_type: EventType,
    /// Size of the event in bytes.
    sizebytes: usize,
    /// Category label supplied by the caller.
    #[allow(dead_code)]
    category: String,
    /// When the event was recorded.
    #[allow(dead_code)]
    timestamp: Instant,
}
/// Discriminates the two kinds of tracked memory events.
#[derive(Debug, Clone, Copy, PartialEq)]
enum EventType {
    /// Memory was allocated.
    Allocation,
    /// Memory was released.
    Deallocation,
}
/// Bookkeeping used by the leak-detection heuristics.
#[derive(Debug, Clone)]
struct LeakDetectionStats {
    /// Lifetime number of allocations seen by this monitor.
    total_allocations: u64,
    /// Lifetime number of deallocations seen by this monitor.
    total_deallocations: u64,
    /// Currently unused; leak analysis derives the unmatched count on the fly.
    #[allow(dead_code)]
    unmatched_allocations: u64,
    /// Live allocations keyed "<category>_<seq>" -> (size, insertion time).
    long_lived_allocations: HashMap<String, (usize, Instant)>,
    /// Age beyond which a live allocation counts as long-lived.
    leak_detection_threshold: Duration,
}
impl Default for LeakDetectionStats {
fn default() -> Self {
Self {
total_allocations: 0,
total_deallocations: 0,
unmatched_allocations: 0,
long_lived_allocations: HashMap::new(),
leak_detection_threshold: Duration::from_secs(300), }
}
}
/// Derived performance metrics for a single monitor.
#[derive(Debug, Clone)]
struct MemoryPerformanceMetrics {
    /// Mean size in bytes of tracked allocations.
    avg_allocation_size: f64,
    /// Mean time between allocations (not yet computed anywhere in this file).
    #[allow(dead_code)]
    avg_allocation_interval: Duration,
    /// Fragmentation estimate (not yet computed anywhere in this file).
    #[allow(dead_code)]
    fragmentation_estimate: f64,
    /// Cache hit ratio in [0, 1]; currently set to a fixed placeholder value.
    cache_hit_ratio: f64,
    /// When these metrics were last refreshed.
    last_update: Instant,
}
impl Default for MemoryPerformanceMetrics {
fn default() -> Self {
Self {
avg_allocation_size: 0.0,
avg_allocation_interval: Duration::from_millis(0),
fragmentation_estimate: 0.0,
cache_hit_ratio: 0.0,
last_update: Instant::now(),
}
}
}
/// Point-in-time report produced by `MemoryMonitor::generate_report`.
#[derive(Debug, Clone)]
pub struct MemoryReport {
    /// Name of the monitor that produced this report.
    pub monitorname: String,
    /// Currently allocated bytes per category.
    pub current_allocations: HashMap<String, usize>,
    /// Peak concurrent memory usage in bytes.
    pub peak_memory_bytes: usize,
    /// Sum of allocation sizes still present in the (bounded) event history.
    pub total_allocated_bytes: usize,
    /// Heuristic leak signals.
    pub leak_indicators: LeakIndicators,
    /// Scores and overall grade.
    pub performance_summary: PerformanceSummary,
    /// Human-readable improvement suggestions.
    pub recommendations: Vec<String>,
    /// When the report was generated.
    pub generated_at: Instant,
}
/// Heuristic leak signals extracted from a monitor's bookkeeping.
#[derive(Debug, Clone)]
pub struct LeakIndicators {
    /// True when any of the signals below fired.
    pub has_potential_leaks: bool,
    /// Allocations without a matching deallocation.
    pub unmatched_allocations: u64,
    /// Bytes held longer than the leak-detection threshold.
    pub long_lived_memory_bytes: usize,
    /// Categories currently holding more than 1 MiB.
    pub suspicious_categories: Vec<String>,
    /// Combined severity in [0, 1] (0.0 = no leak signals).
    pub leak_severity: f64,
}
/// Per-monitor performance scores, each in [0, 1], plus an overall grade.
#[derive(Debug, Clone)]
pub struct PerformanceSummary {
    /// How far current usage sits below the peak (1.0 = everything freed).
    pub memory_efficiency_score: f64,
    /// Ratio of deallocations to allocations, capped at 1.0.
    pub allocation_pattern_score: f64,
    /// Cache hit ratio copied from the performance metrics.
    pub cache_utilization_score: f64,
    /// Grade derived from the mean of the three scores.
    pub overall_grade: PerformanceGrade,
}
/// Grade buckets for the mean performance score.
/// Note: the derived `Ord` follows declaration order, so `Excellent` compares
/// as the *smallest* variant.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum PerformanceGrade {
    /// Mean score >= 0.9.
    Excellent,
    /// Mean score >= 0.7.
    Good,
    /// Mean score >= 0.5.
    Fair,
    /// Mean score >= 0.3.
    Poor,
    /// Mean score < 0.3.
    Critical,
}
impl MemoryMonitor {
pub fn new(name: impl Into<String>) -> Self {
let name = name.into();
let monitor = Self {
name: name.clone(),
allocations: HashMap::new(),
allocation_history: VecDeque::new(),
peak_memory_bytes: 0,
current_memory_bytes: 0,
leak_stats: LeakDetectionStats::default(),
perf_metrics: MemoryPerformanceMetrics::default(),
active: true,
created_at: Instant::now(),
};
register_monitor(&name, monitor.clone());
monitor
}
pub fn track_allocation(&mut self, sizebytes: usize, category: impl Into<String>) {
if !self.active {
return;
}
let category = category.into();
let now = Instant::now();
*self.allocations.entry(category.clone()).or_insert(0) += sizebytes;
self.current_memory_bytes += sizebytes;
if self.current_memory_bytes > self.peak_memory_bytes {
self.peak_memory_bytes = self.current_memory_bytes;
}
let event = AllocationEvent {
event_type: EventType::Allocation,
sizebytes,
category: category.clone(),
timestamp: now,
};
self.allocation_history.push_back(event);
if self.allocation_history.len() > 10000 {
self.allocation_history.pop_front();
}
self.leak_stats.total_allocations += 1;
self.leak_stats.long_lived_allocations.insert(
format!("{}_{}", category, self.leak_stats.total_allocations),
(sizebytes, now),
);
self.update_performance_metrics();
update_global_stats(sizebytes, true);
}
pub fn track_deallocation(&mut self, sizebytes: usize, category: impl Into<String>) {
if !self.active {
return;
}
let category = category.into();
let now = Instant::now();
if let Some(current) = self.allocations.get_mut(&category) {
*current = current.saturating_sub(sizebytes);
if *current == 0 {
self.allocations.remove(&category);
}
}
self.current_memory_bytes = self.current_memory_bytes.saturating_sub(sizebytes);
let event = AllocationEvent {
event_type: EventType::Deallocation,
sizebytes,
category: category.clone(),
timestamp: now,
};
self.allocation_history.push_back(event);
self.leak_stats.total_deallocations += 1;
self.leak_stats
.long_lived_allocations
.retain(|k, _| !k.starts_with(&category));
self.update_performance_metrics();
update_global_stats(sizebytes, false);
}
pub fn generate_report(&self) -> MemoryReport {
let leak_indicators = self.analyze_leaks();
let performance_summary = self.analyze_performance();
let recommendations = self.generate_recommendations(&leak_indicators, &performance_summary);
MemoryReport {
monitorname: self.name.clone(),
current_allocations: self.allocations.clone(),
peak_memory_bytes: self.peak_memory_bytes,
total_allocated_bytes: self.calculate_total_allocated(),
leak_indicators,
performance_summary,
recommendations,
generated_at: Instant::now(),
}
}
fn analyze_leaks(&self) -> LeakIndicators {
let unmatched = self
.leak_stats
.total_allocations
.saturating_sub(self.leak_stats.total_deallocations);
let now = Instant::now();
let long_lived_memory: usize = self
.leak_stats
.long_lived_allocations
.values()
.filter(|(_, timestamp)| {
now.duration_since(*timestamp) > self.leak_stats.leak_detection_threshold
})
.map(|(size, _)| *size)
.sum();
let suspicious_categories: Vec<String> = self.allocations
.iter()
.filter(|(_, &size)| size > 1024 * 1024) .map(|(cat, _)| cat.clone())
.collect();
let has_potential_leaks =
unmatched > 0 || long_lived_memory > 0 || !suspicious_categories.is_empty();
let leak_severity = if has_potential_leaks {
let severity_factors = [
(unmatched as f64) / (self.leak_stats.total_allocations as f64).max(1.0),
(long_lived_memory as f64) / (self.peak_memory_bytes as f64).max(1.0),
(suspicious_categories.len() as f64) / 10.0, ];
severity_factors.iter().sum::<f64>() / severity_factors.len() as f64
} else {
0.0
};
LeakIndicators {
has_potential_leaks,
unmatched_allocations: unmatched,
long_lived_memory_bytes: long_lived_memory,
suspicious_categories,
leak_severity: leak_severity.min(1.0),
}
}
fn analyze_performance(&self) -> PerformanceSummary {
let memory_efficiency_score = if self.peak_memory_bytes > 0 {
1.0 - (self.current_memory_bytes as f64 / self.peak_memory_bytes as f64)
} else {
1.0
};
let allocation_pattern_score = if self.leak_stats.total_allocations > 0 {
let deallocation_ratio = self.leak_stats.total_deallocations as f64
/ self.leak_stats.total_allocations as f64;
deallocation_ratio.min(1.0)
} else {
1.0
};
let cache_utilization_score = self.perf_metrics.cache_hit_ratio;
let overall_score =
(memory_efficiency_score + allocation_pattern_score + cache_utilization_score) / 3.0;
let overall_grade = match overall_score {
s if s >= 0.9 => PerformanceGrade::Excellent,
s if s >= 0.7 => PerformanceGrade::Good,
s if s >= 0.5 => PerformanceGrade::Fair,
s if s >= 0.3 => PerformanceGrade::Poor,
_ => PerformanceGrade::Critical,
};
PerformanceSummary {
memory_efficiency_score,
allocation_pattern_score,
cache_utilization_score,
overall_grade,
}
}
fn generate_recommendations(
&self,
leak_indicators: &LeakIndicators,
performance: &PerformanceSummary,
) -> Vec<String> {
let mut recommendations = Vec::new();
if leak_indicators.has_potential_leaks {
recommendations
.push("Consider implementing explicit memory cleanup in destructor".to_string());
if leak_indicators.unmatched_allocations > 0 {
recommendations.push(format!(
"Found {} unmatched allocations - check for missing deallocations",
leak_indicators.unmatched_allocations
));
}
if leak_indicators.long_lived_memory_bytes > 1024 * 1024 {
recommendations.push(format!(
"Large amount of long-lived memory ({} MB) - consider periodic cleanup",
leak_indicators.long_lived_memory_bytes / (1024 * 1024)
));
}
}
if matches!(
performance.overall_grade,
PerformanceGrade::Fair | PerformanceGrade::Poor | PerformanceGrade::Critical
) {
recommendations.push("Memory performance can be improved".to_string());
if performance.memory_efficiency_score < 0.5 {
recommendations.push(
"High peak memory usage - consider processing data in chunks".to_string(),
);
}
if performance.cache_utilization_score < 0.3 {
recommendations.push(
"Low cache utilization - enable caching for repeated operations".to_string(),
);
}
}
#[cfg(target_pointer_width = "32")]
let high_memory_threshold = 256 * 1024 * 1024; #[cfg(target_pointer_width = "64")]
let high_memory_threshold = 1024 * 1024 * 1024;
if self.peak_memory_bytes > high_memory_threshold {
recommendations.push(
"Very high memory usage - consider using memory-efficient algorithms".to_string(),
);
}
recommendations
}
fn update_performance_metrics(&mut self) {
let now = Instant::now();
if self.leak_stats.total_allocations > 0 {
let total_size: usize = self
.allocation_history
.iter()
.filter(|e| e.event_type == EventType::Allocation)
.map(|e| e.sizebytes)
.sum();
self.perf_metrics.avg_allocation_size =
total_size as f64 / self.leak_stats.total_allocations as f64;
}
self.perf_metrics.cache_hit_ratio = 0.7;
self.perf_metrics.last_update = now;
}
fn calculate_total_allocated(&self) -> usize {
self.allocation_history
.iter()
.filter(|e| e.event_type == EventType::Allocation)
.map(|e| e.sizebytes)
.sum()
}
pub fn disable(&mut self) {
self.active = false;
}
pub fn is_active(&self) -> bool {
self.active
}
}
impl Clone for MemoryMonitor {
    /// Deep-copies the monitor's state but deliberately renames the copy to
    /// "<name>_clone" so the two instances remain distinguishable.
    ///
    /// NOTE(review): `MemoryMonitor::new` registers a clone globally, so the
    /// registered copy carries the "_clone" suffix internally while being
    /// keyed under the original name — confirm this is intended.
    fn clone(&self) -> Self {
        Self {
            name: format!("{}_clone", self.name),
            allocations: self.allocations.clone(),
            allocation_history: self.allocation_history.clone(),
            peak_memory_bytes: self.peak_memory_bytes,
            current_memory_bytes: self.current_memory_bytes,
            leak_stats: self.leak_stats.clone(),
            perf_metrics: self.perf_metrics.clone(),
            active: self.active,
            created_at: self.created_at,
        }
    }
}
impl MemoryReport {
    /// True when the leak analysis flagged any suspicious signal.
    pub fn has_potential_leaks(&self) -> bool {
        self.leak_indicators.has_potential_leaks
    }

    /// Overall performance grade computed when the report was generated.
    pub fn memory_efficiency_rating(&self) -> PerformanceGrade {
        self.performance_summary.overall_grade
    }

    /// One-line human-readable digest of the report.
    pub fn summary(&self) -> String {
        let current_kb = self.current_allocations.values().sum::<usize>() / 1024;
        let peak_kb = self.peak_memory_bytes / 1024;
        let leak_status = if self.has_potential_leaks() {
            "Detected"
        } else {
            "None"
        };
        format!(
            "Memory Report for '{}': Current: {} KB, Peak: {} KB, Grade: {:?}, Leaks: {}",
            self.monitorname,
            current_kb,
            peak_kb,
            self.performance_summary.overall_grade,
            leak_status
        )
    }
}
/// Enables global memory monitoring, initializing the registry on first call.
///
/// BUGFIX: the previous implementation used `OnceLock::set`, which silently
/// fails once the cell is initialized — so a start → stop → start sequence
/// left monitoring disabled forever. We now initialize with `get_or_init`
/// and explicitly re-enable on every call.
#[allow(dead_code)]
pub fn start_monitoring() {
    let monitor = GLOBAL_MONITOR.get_or_init(|| {
        Arc::new(Mutex::new(GlobalMemoryMonitor {
            monitors: HashMap::new(),
            global_stats: GlobalMemoryStats::default(),
            enabled: true,
            max_monitors: 100,
        }))
    });
    if let Ok(mut global) = monitor.lock() {
        global.enabled = true;
    }
}
/// Disables global monitoring and drops every registered monitor.
/// Safe to call when monitoring was never started.
#[allow(dead_code)]
pub fn stop_monitoring() {
    let Some(global_monitor) = GLOBAL_MONITOR.get() else {
        return;
    };
    if let Ok(mut state) = global_monitor.lock() {
        state.enabled = false;
        state.monitors.clear();
    }
}
/// Inserts `monitor` into the global registry under `name`, provided global
/// monitoring is enabled and the registry still has capacity.
#[allow(dead_code)]
fn register_monitor(name: &str, monitor: MemoryMonitor) {
    let Some(global_monitor) = GLOBAL_MONITOR.get() else {
        return;
    };
    let Ok(mut state) = global_monitor.lock() else {
        return;
    };
    if !state.enabled || state.monitors.len() >= state.max_monitors {
        return;
    }
    state
        .monitors
        .insert(name.to_string(), Arc::new(Mutex::new(monitor)));
    state.global_stats.active_interpolators = state.monitors.len();
}
/// Folds one allocation (or deallocation) of `sizebytes` into the
/// process-wide statistics, maintaining the running total and its peak.
#[allow(dead_code)]
fn update_global_stats(sizebytes: usize, isallocation: bool) {
    let Some(global_monitor) = GLOBAL_MONITOR.get() else {
        return;
    };
    let Ok(mut state) = global_monitor.lock() else {
        return;
    };
    let stats = &mut state.global_stats;
    if isallocation {
        stats.total_allocated_bytes += sizebytes;
        stats.total_allocations += 1;
        stats.peak_total_bytes = stats.peak_total_bytes.max(stats.total_allocated_bytes);
    } else {
        // Saturating so an over-reported free cannot underflow the total.
        stats.total_allocated_bytes = stats.total_allocated_bytes.saturating_sub(sizebytes);
        stats.total_deallocations += 1;
    }
}
/// Returns a snapshot of the global statistics, or `None` when monitoring
/// has never been started (or the lock is poisoned).
#[allow(dead_code)]
pub fn get_global_stats() -> Option<GlobalMemoryStats> {
    let guard = GLOBAL_MONITOR.get()?.lock().ok()?;
    Some(guard.global_stats.clone())
}
/// Generates a fresh report for the named monitor, or `None` if it is not
/// registered (or a lock is poisoned).
#[allow(dead_code)]
pub fn get_monitor_report(name: &str) -> Option<MemoryReport> {
    // Clone the Arc inside a scope so the registry lock is released before
    // the individual monitor is locked (same order as the original chain).
    let monitor = {
        let state = GLOBAL_MONITOR.get()?.lock().ok()?;
        state.monitors.get(name).cloned()?
    };
    let guard = monitor.lock().ok()?;
    Some(guard.generate_report())
}
/// Generates reports for every registered monitor; empty when monitoring is
/// not running. Monitors whose locks are poisoned are silently skipped.
#[allow(dead_code)]
pub fn get_all_reports() -> Vec<MemoryReport> {
    let Some(global_monitor) = GLOBAL_MONITOR.get() else {
        return Vec::new();
    };
    let Ok(state) = global_monitor.lock() else {
        return Vec::new();
    };
    let mut reports = Vec::with_capacity(state.monitors.len());
    for monitor in state.monitors.values() {
        if let Ok(guard) = monitor.lock() {
            reports.push(guard.generate_report());
        }
    }
    reports
}
/// Memory profiler specialized for stress testing: wraps a `MemoryMonitor`
/// and layers snapshots, spike detection, and pressure estimation on top.
#[derive(Debug)]
pub struct StressMemoryProfiler {
    /// Underlying per-category monitor.
    base_monitor: MemoryMonitor,
    /// Accumulated stress-specific metrics.
    stress_metrics: StressMemoryMetrics,
    /// Bounded history of memory snapshots.
    stress_history: VecDeque<MemorySnapshot>,
    /// Latest simulated system-pressure readings.
    pressure_indicators: MemoryPressureIndicators,
    /// Tunables controlling snapshot cadence and spike detection.
    stress_config: StressProfilingConfig,
}
/// Metrics accumulated while profiling under stress.
#[derive(Debug, Clone)]
pub struct StressMemoryMetrics {
    /// Largest observed memory growth rate, in bytes per second.
    pub max_growth_rate: f64,
    /// Allocations that crossed the configured spike threshold.
    pub allocation_spikes: Vec<AllocationSpike>,
    /// Fragmentation estimate under stress (never updated in this file).
    pub stress_fragmentation: f64,
    /// Extra bytes per thread attributed to concurrent operation.
    pub concurrent_overhead: f64,
    /// Expected/actual memory ratio for large datasets (higher is better).
    pub large_dataset_efficiency: f64,
    /// Seconds taken for memory to recover after stress ended.
    pub recovery_time_seconds: f64,
}
/// A single allocation that met or exceeded the spike threshold.
#[derive(Debug, Clone)]
pub struct AllocationSpike {
    /// When the spike was recorded.
    pub timestamp: Instant,
    /// Size of the spiking allocation in bytes.
    pub spike_size: usize,
    /// Spike duration (currently always zero; not measured).
    pub duration: Duration,
    /// Stress condition active when the spike occurred.
    pub stresscondition: String,
}
/// Point-in-time view of tracked memory during a stress run.
#[derive(Debug, Clone)]
pub struct MemorySnapshot {
    /// When the snapshot was taken.
    pub timestamp: Instant,
    /// Total bytes tracked by the base monitor at that moment.
    pub total_memory: usize,
    /// Per-category byte totals at that moment.
    pub category_breakdown: HashMap<String, usize>,
    /// System-pressure estimate in [0, 1] at that moment.
    pub system_pressure: f64,
    /// Stress conditions active when the snapshot was taken.
    pub active_stressconditions: Vec<String>,
}
/// Simulated system-memory pressure readings. These are derived from our own
/// usage against hard-coded totals, not read from the operating system.
#[derive(Debug, Clone)]
pub struct MemoryPressureIndicators {
    /// Our usage as a percentage of the assumed system memory (0-100).
    pub system_memory_utilization: f64,
    /// Assumed remaining system memory in bytes.
    pub available_memory: usize,
    /// Modeled allocation-failure rate (0.1 above 90% utilization, else 0).
    pub allocation_failure_rate: f64,
    /// GC frequency placeholder (never updated in this file).
    pub gc_frequency: f64,
    /// Swap utilization percentage placeholder (never updated in this file).
    pub swap_utilization: f64,
}
impl Default for MemoryPressureIndicators {
fn default() -> Self {
#[cfg(target_pointer_width = "32")]
let available_memory = 512 * 1024 * 1024; #[cfg(target_pointer_width = "64")]
let available_memory = 8usize * 1024 * 1024 * 1024;
Self {
system_memory_utilization: 0.0,
available_memory,
allocation_failure_rate: 0.0,
gc_frequency: 0.0,
swap_utilization: 0.0,
}
}
}
/// Tunables for stress profiling.
#[derive(Debug, Clone)]
pub struct StressProfilingConfig {
    /// Minimum time between automatic snapshots.
    pub snapshot_interval: Duration,
    /// Maximum snapshots retained before the oldest is dropped.
    pub max_snapshots: usize,
    /// Allocation size (bytes) at or above which a spike is recorded.
    pub spike_threshold: usize,
    /// Whether to estimate system memory pressure.
    pub monitor_system_pressure: bool,
    /// Whether to keep per-category breakdowns in snapshots.
    pub detailed_category_tracking: bool,
}
impl Default for StressProfilingConfig {
fn default() -> Self {
Self {
snapshot_interval: Duration::from_millis(100), max_snapshots: 10000, spike_threshold: 10 * 1024 * 1024, monitor_system_pressure: true,
detailed_category_tracking: true,
}
}
}
impl StressMemoryProfiler {
    /// Creates a profiler wrapping a `MemoryMonitor` named `name`, using
    /// `config` or the default configuration when `None`.
    pub fn new(name: impl Into<String>, config: Option<StressProfilingConfig>) -> Self {
        Self {
            base_monitor: MemoryMonitor::new(name),
            stress_metrics: StressMemoryMetrics {
                max_growth_rate: 0.0,
                allocation_spikes: Vec::new(),
                stress_fragmentation: 0.0,
                concurrent_overhead: 0.0,
                large_dataset_efficiency: 1.0,
                recovery_time_seconds: 0.0,
            },
            stress_history: VecDeque::new(),
            pressure_indicators: MemoryPressureIndicators::default(),
            stress_config: config.unwrap_or_default(),
        }
    }

    /// Begins profiling for the named stress condition: takes an initial
    /// snapshot and refreshes the system-pressure estimate.
    pub fn start_stress_profiling(&mut self, stresscondition: &str) {
        println!("Starting stress memory profiling for: {}", stresscondition);
        self.take_memory_snapshot(vec![stresscondition.to_string()]);
        self.update_system_pressure();
    }

    /// Tracks an allocation under stress: forwards to the base monitor,
    /// records a spike when `sizebytes` reaches the configured threshold,
    /// snapshots on the configured cadence, and updates the growth rate.
    pub fn track_stress_allocation(
        &mut self,
        sizebytes: usize,
        category: impl Into<String>,
        stresscondition: &str,
    ) {
        let category = category.into();
        self.base_monitor.track_allocation(sizebytes, &category);
        if sizebytes >= self.stress_config.spike_threshold {
            self.stress_metrics.allocation_spikes.push(AllocationSpike {
                timestamp: Instant::now(),
                spike_size: sizebytes,
                // Spike duration is not measured yet; recorded as zero.
                duration: Duration::from_millis(0),
                stresscondition: stresscondition.to_string(),
            });
        }
        if self.should_take_snapshot() {
            self.take_memory_snapshot(vec![stresscondition.to_string()]);
        }
        self.update_growth_rate();
    }

    /// Tracks a deallocation under stress and refreshes the growth rate.
    pub fn track_stress_deallocation(&mut self, sizebytes: usize, category: impl Into<String>) {
        self.base_monitor.track_deallocation(sizebytes, category);
        self.update_growth_rate();
    }

    /// Appends a snapshot of the current memory state, evicting the oldest
    /// once `max_snapshots` is exceeded.
    fn take_memory_snapshot(&mut self, active_stressconditions: Vec<String>) {
        let snapshot = MemorySnapshot {
            timestamp: Instant::now(),
            total_memory: self.base_monitor.current_memory_bytes,
            category_breakdown: self.base_monitor.allocations.clone(),
            system_pressure: self.calculate_system_pressure(),
            active_stressconditions,
        };
        self.stress_history.push_back(snapshot);
        if self.stress_history.len() > self.stress_config.max_snapshots {
            self.stress_history.pop_front();
        }
    }

    /// True when the configured interval has elapsed since the last snapshot
    /// (or when no snapshot exists yet).
    fn should_take_snapshot(&self) -> bool {
        match self.stress_history.back() {
            Some(last) => last.timestamp.elapsed() >= self.stress_config.snapshot_interval,
            None => true,
        }
    }

    /// Updates `max_growth_rate` (bytes/second, can be negative per sample)
    /// from the memory delta across the most recent (up to 10) snapshots.
    fn update_growth_rate(&mut self) {
        if self.stress_history.len() >= 2 {
            let recent_snapshots: Vec<_> = self.stress_history.iter().rev().take(10).collect();
            if recent_snapshots.len() >= 2 {
                // `rev()` puts the newest snapshot first.
                let latest = recent_snapshots[0];
                let previous = recent_snapshots[recent_snapshots.len() - 1];
                let memory_delta = latest.total_memory as i64 - previous.total_memory as i64;
                let time_delta = latest
                    .timestamp
                    .duration_since(previous.timestamp)
                    .as_secs_f64();
                if time_delta > 0.0 {
                    let growth_rate = memory_delta as f64 / time_delta;
                    self.stress_metrics.max_growth_rate =
                        self.stress_metrics.max_growth_rate.max(growth_rate);
                }
            }
        }
    }

    /// Recomputes the pressure indicators from our own usage against a
    /// hard-coded total-memory assumption (1 GiB on 32-bit, 16 GiB on 64-bit
    /// targets). NOTE(review): this is a simulation, not an OS reading.
    fn update_system_pressure(&mut self) {
        #[cfg(target_pointer_width = "32")]
        let total_system_memory: u64 = 1024 * 1024 * 1024;
        #[cfg(target_pointer_width = "64")]
        let total_system_memory: u64 = 16u64 * 1024 * 1024 * 1024;
        let our_usage = self.base_monitor.current_memory_bytes;
        self.pressure_indicators.system_memory_utilization =
            (our_usage as f64 / total_system_memory as f64 * 100.0).min(100.0);
        self.pressure_indicators.available_memory =
            (total_system_memory as usize).saturating_sub(our_usage);
        // Model a 10% allocation-failure rate once utilization exceeds 90%.
        self.pressure_indicators.allocation_failure_rate =
            if self.pressure_indicators.system_memory_utilization > 90.0 {
                0.1
            } else {
                0.0
            };
    }

    /// Mean of the normalized pressure factors (utilization, failure rate,
    /// swap), yielding a value in [0, 1].
    fn calculate_system_pressure(&self) -> f64 {
        let pressure_factors = [
            self.pressure_indicators.system_memory_utilization / 100.0,
            self.pressure_indicators.allocation_failure_rate,
            self.pressure_indicators.swap_utilization / 100.0,
        ];
        pressure_factors.iter().sum::<f64>() / pressure_factors.len() as f64
    }

    /// Records expected-vs-actual memory efficiency for a large dataset
    /// (values > 1.0 mean less memory was used than expected).
    pub fn analyze_large_dataset_efficiency(
        &mut self,
        dataset_size: usize,
        expected_memory: usize,
    ) {
        let actual_memory = self.base_monitor.current_memory_bytes;
        // `.max(1)` guards the division when nothing is currently allocated.
        self.stress_metrics.large_dataset_efficiency =
            expected_memory as f64 / actual_memory.max(1) as f64;
        println!(
            "Large dataset efficiency for {} elements: {:.2} (expected: {}MB, actual: {}MB)",
            dataset_size,
            self.stress_metrics.large_dataset_efficiency,
            expected_memory / (1024 * 1024),
            actual_memory / (1024 * 1024)
        );
    }

    /// Records the per-thread memory overhead of concurrent operation
    /// relative to `baseline_memory`.
    pub fn analyze_concurrent_overhead(
        &mut self,
        baseline_memory: usize,
        concurrent_threads: usize,
    ) {
        let current_memory = self.base_monitor.current_memory_bytes;
        let overhead = current_memory.saturating_sub(baseline_memory);
        self.stress_metrics.concurrent_overhead = overhead as f64 / concurrent_threads as f64;
        println!(
            "Concurrent access overhead: {:.1}KB per thread ({} threads)",
            self.stress_metrics.concurrent_overhead / 1024.0,
            concurrent_threads
        );
    }

    /// Records how long memory took to recover after the stress ended.
    pub fn measure_recovery_time(&mut self, stress_endtime: Instant) {
        let _recovery_start_memory = self.base_monitor.current_memory_bytes;
        let recovery_time = Instant::now().duration_since(stress_endtime);
        self.stress_metrics.recovery_time_seconds = recovery_time.as_secs_f64();
        // BUGFIX: the log message previously read "Memory recovery _time".
        println!(
            "Memory recovery time: {:.2}s",
            self.stress_metrics.recovery_time_seconds
        );
    }

    /// Assembles the full stress report from the base report plus the
    /// pressure, allocation-pattern, and performance analyses.
    pub fn generate_stress_report(&self) -> StressMemoryReport {
        let base_report = self.base_monitor.generate_report();
        let memory_pressure_analysis = self.analyze_memory_pressure();
        let allocation_pattern_analysis = self.analyze_allocation_patterns();
        let stress_performance_analysis = self.analyze_stress_performance();
        StressMemoryReport {
            base_report,
            stress_metrics: self.stress_metrics.clone(),
            memory_pressure_analysis,
            allocation_pattern_analysis,
            stress_performance_analysis,
            system_pressure: self.pressure_indicators.clone(),
            snapshot_count: self.stress_history.len(),
            stress_recommendations: self.generate_stress_recommendations(),
        }
    }

    /// Max/average pressure over the snapshot history; "spikes" are snapshots
    /// whose pressure exceeded 0.8.
    fn analyze_memory_pressure(&self) -> MemoryPressureAnalysis {
        let max_pressure = self
            .stress_history
            .iter()
            .map(|s| s.system_pressure)
            .fold(0.0, f64::max);
        let avg_pressure = if !self.stress_history.is_empty() {
            self.stress_history
                .iter()
                .map(|s| s.system_pressure)
                .sum::<f64>()
                / self.stress_history.len() as f64
        } else {
            0.0
        };
        let pressure_spikes = self
            .stress_history
            .iter()
            .filter(|s| s.system_pressure > 0.8)
            .count();
        MemoryPressureAnalysis {
            max_pressure,
            avg_pressure,
            pressure_spikes,
            critical_periods: pressure_spikes,
        }
    }

    /// Summarizes allocation spikes; regularity is 1/(1+variance) of the
    /// inter-spike intervals (1.0 = perfectly regular).
    fn analyze_allocation_patterns(&self) -> AllocationPatternAnalysis {
        let spike_count = self.stress_metrics.allocation_spikes.len();
        let total_spike_memory: usize = self
            .stress_metrics
            .allocation_spikes
            .iter()
            .map(|s| s.spike_size)
            .sum();
        let pattern_regularity = if spike_count > 1 {
            let intervals: Vec<_> = self
                .stress_metrics
                .allocation_spikes
                .windows(2)
                .map(|pair| {
                    pair[1]
                        .timestamp
                        .duration_since(pair[0].timestamp)
                        .as_secs_f64()
                })
                .collect();
            if !intervals.is_empty() {
                let mean_interval = intervals.iter().sum::<f64>() / intervals.len() as f64;
                let variance = intervals
                    .iter()
                    .map(|&x| (x - mean_interval).powi(2))
                    .sum::<f64>()
                    / intervals.len() as f64;
                // Low variance between spikes => high regularity.
                1.0 / (1.0 + variance)
            } else {
                1.0
            }
        } else {
            1.0
        };
        AllocationPatternAnalysis {
            spike_count,
            total_spike_memory,
            pattern_regularity,
            fragmentation_level: self.stress_metrics.stress_fragmentation,
        }
    }

    /// Packages the raw stress metrics together with an overall grade.
    fn analyze_stress_performance(&self) -> StressPerformanceAnalysis {
        StressPerformanceAnalysis {
            max_growth_rate: self.stress_metrics.max_growth_rate,
            concurrent_overhead: self.stress_metrics.concurrent_overhead,
            large_dataset_efficiency: self.stress_metrics.large_dataset_efficiency,
            recovery_time: self.stress_metrics.recovery_time_seconds,
            overall_stress_grade: self.calculate_stress_grade(),
        }
    }

    /// Grades stress behavior from four factors: growth rate and concurrent
    /// overhead below 1 MiB(/s), dataset efficiency (clamped to 1.0), and a
    /// recovery time under 10 s.
    fn calculate_stress_grade(&self) -> StressPerformanceGrade {
        let factors = [
            if self.stress_metrics.max_growth_rate < 1024.0 * 1024.0 {
                1.0
            } else {
                0.0
            },
            if self.stress_metrics.concurrent_overhead < 1024.0 * 1024.0 {
                1.0
            } else {
                0.0
            },
            self.stress_metrics.large_dataset_efficiency.min(1.0),
            if self.stress_metrics.recovery_time_seconds < 10.0 {
                1.0
            } else {
                0.0
            },
        ];
        let score = factors.iter().sum::<f64>() / factors.len() as f64;
        match score {
            s if s >= 0.9 => StressPerformanceGrade::Excellent,
            s if s >= 0.7 => StressPerformanceGrade::Good,
            s if s >= 0.5 => StressPerformanceGrade::Fair,
            s if s >= 0.3 => StressPerformanceGrade::Poor,
            _ => StressPerformanceGrade::Critical,
        }
    }

    /// Threshold-based textual recommendations derived from stress metrics.
    fn generate_stress_recommendations(&self) -> Vec<String> {
        let mut recommendations = Vec::new();
        if self.stress_metrics.max_growth_rate > 10.0 * 1024.0 * 1024.0 {
            recommendations
                .push("High memory growth rate detected - consider batch processing".to_string());
        }
        if self.stress_metrics.allocation_spikes.len() > 10 {
            recommendations
                .push("Frequent allocation spikes - implement memory pre-allocation".to_string());
        }
        if self.stress_metrics.concurrent_overhead > 5.0 * 1024.0 * 1024.0 {
            recommendations
                .push("High concurrent overhead - review thread-local memory usage".to_string());
        }
        if self.stress_metrics.large_dataset_efficiency < 0.7 {
            recommendations
                .push("Poor large dataset efficiency - optimize memory layout".to_string());
        }
        if self.stress_metrics.recovery_time_seconds > 30.0 {
            recommendations.push("Slow memory recovery - implement explicit cleanup".to_string());
        }
        recommendations
    }
}
/// Full report produced by `StressMemoryProfiler::generate_stress_report`.
#[derive(Debug, Clone)]
pub struct StressMemoryReport {
    /// Standard report from the underlying monitor.
    pub base_report: MemoryReport,
    /// Raw stress metrics at report time.
    pub stress_metrics: StressMemoryMetrics,
    /// Pressure statistics over the snapshot history.
    pub memory_pressure_analysis: MemoryPressureAnalysis,
    /// Spike counts and regularity analysis.
    pub allocation_pattern_analysis: AllocationPatternAnalysis,
    /// Graded stress-performance figures.
    pub stress_performance_analysis: StressPerformanceAnalysis,
    /// Latest pressure-indicator readings.
    pub system_pressure: MemoryPressureIndicators,
    /// Number of snapshots currently retained.
    pub snapshot_count: usize,
    /// Textual suggestions derived from the stress metrics.
    pub stress_recommendations: Vec<String>,
}
/// Pressure statistics computed over the snapshot history.
#[derive(Debug, Clone)]
pub struct MemoryPressureAnalysis {
    /// Highest snapshot pressure observed (in [0, 1]).
    pub max_pressure: f64,
    /// Mean snapshot pressure.
    pub avg_pressure: f64,
    /// Snapshots whose pressure exceeded 0.8.
    pub pressure_spikes: usize,
    /// Currently always equal to `pressure_spikes`.
    pub critical_periods: usize,
}
/// Summary of allocation-spike behavior.
#[derive(Debug, Clone)]
pub struct AllocationPatternAnalysis {
    /// Number of recorded spikes.
    pub spike_count: usize,
    /// Sum of all spike sizes in bytes.
    pub total_spike_memory: usize,
    /// 1/(1+variance) of inter-spike intervals; 1.0 = perfectly regular.
    pub pattern_regularity: f64,
    /// Copied from `stress_fragmentation` (never updated in this file).
    pub fragmentation_level: f64,
}
/// Graded stress-performance figures.
#[derive(Debug, Clone)]
pub struct StressPerformanceAnalysis {
    /// Largest observed growth rate (bytes/second).
    pub max_growth_rate: f64,
    /// Per-thread concurrent overhead in bytes.
    pub concurrent_overhead: f64,
    /// Expected/actual memory ratio for large datasets.
    pub large_dataset_efficiency: f64,
    /// Seconds for memory to recover after stress ended.
    pub recovery_time: f64,
    /// Overall grade from the four stress factors.
    pub overall_stress_grade: StressPerformanceGrade,
}
/// Grade buckets for the stress score (same thresholds as `PerformanceGrade`).
/// Note: the derived `Ord` follows declaration order, so `Excellent` compares
/// as the *smallest* variant.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum StressPerformanceGrade {
    /// Score >= 0.9.
    Excellent,
    /// Score >= 0.7.
    Good,
    /// Score >= 0.5.
    Fair,
    /// Score >= 0.3.
    Poor,
    /// Score < 0.3.
    Critical,
}
/// Creates a `StressMemoryProfiler` with the default configuration.
#[allow(dead_code)]
pub fn create_stress_profiler(name: impl Into<String>) -> StressMemoryProfiler {
    StressMemoryProfiler::new(name, None)
}
/// Creates a `StressMemoryProfiler` with an explicit configuration.
#[allow(dead_code)]
pub fn create_stress_profiler_with_config(
    name: impl Into<String>,
    config: StressProfilingConfig,
) -> StressMemoryProfiler {
    StressMemoryProfiler::new(name, Some(config))
}
#[cfg(test)]
mod tests {
    use super::*;

    // Basic accounting: current/peak byte counts track allocations and
    // deallocations exactly, and a balanced sequence reports no leaks.
    #[test]
    fn test_memory_monitor_basic() {
        let mut monitor = MemoryMonitor::new("test");
        monitor.track_allocation(1024, "matrix");
        monitor.track_allocation(512, "cache");
        assert_eq!(monitor.current_memory_bytes, 1536);
        assert_eq!(monitor.peak_memory_bytes, 1536);
        monitor.track_deallocation(512, "cache");
        assert_eq!(monitor.current_memory_bytes, 1024);
        monitor.track_deallocation(1024, "matrix");
        assert_eq!(monitor.current_memory_bytes, 0);
        let report = monitor.generate_report();
        assert!(!report.has_potential_leaks());
    }

    // An allocation with no matching deallocation must be flagged.
    #[test]
    fn test_leak_detection() {
        let mut monitor = MemoryMonitor::new("leak_test");
        monitor.track_allocation(2048, "leaked_memory");
        let report = monitor.generate_report();
        assert!(report.leak_indicators.unmatched_allocations > 0);
    }

    // Monitors created after start_monitoring register themselves globally.
    // NOTE(review): this relies on process-global state; tests running in
    // parallel may also register monitors, hence the ">= 2" lower bound.
    #[test]
    fn test_global_monitoring() {
        start_monitoring();
        let _monitor1 = MemoryMonitor::new("global_test_1");
        let _monitor2 = MemoryMonitor::new("global_test_2");
        let stats = get_global_stats().expect("Operation failed");
        assert!(
            stats.active_interpolators >= 2,
            "Expected at least 2 active interpolators, got {}",
            stats.active_interpolators
        );
        stop_monitoring();
    }

    // Above-threshold allocations should produce spikes and at least one
    // snapshot in the generated stress report.
    #[test]
    fn test_stress_profiler_basic() {
        let mut profiler = create_stress_profiler("stress_test");
        profiler.start_stress_profiling("large_dataset");
        profiler.track_stress_allocation(10 * 1024 * 1024, "large_matrix", "large_dataset");
        profiler.track_stress_allocation(5 * 1024 * 1024, "cache", "large_dataset");
        let report = profiler.generate_stress_report();
        assert!(report.stress_metrics.allocation_spikes.len() > 0);
        assert!(report.snapshot_count > 0);
    }

    // A single above-threshold allocation yields exactly one recorded spike
    // of the expected size.
    #[test]
    fn test_stress_allocation_spike_detection() {
        let mut profiler = create_stress_profiler("spike_test");
        profiler.track_stress_allocation(15 * 1024 * 1024, "spike", "stress_test");
        assert_eq!(profiler.stress_metrics.allocation_spikes.len(), 1);
        assert_eq!(
            profiler.stress_metrics.allocation_spikes[0].spike_size,
            15 * 1024 * 1024
        );
    }
}