use std::fmt;
use std::sync::RwLock;
use std::time::SystemTime;
/// Power-of-two allocation-size histogram.
///
/// Bucket 0 covers all sizes below 32 bytes (everything under 16 bytes is
/// folded into it); each subsequent bucket doubles the size range, with
/// bucket 31 open-ended at the top.
#[derive(Debug, Clone)]
pub struct AllocationHistogram {
    /// Per-bucket sample counts (saturating).
    buckets: [u64; 32],
    /// Total number of recorded samples (saturating).
    total_allocations: u64,
}

impl AllocationHistogram {
    /// Creates an empty histogram: every bucket zero, no samples.
    #[must_use]
    pub fn new() -> Self {
        let buckets = [0u64; 32];
        Self {
            buckets,
            total_allocations: 0,
        }
    }

    /// Maps an allocation size to its bucket index.
    ///
    /// Bucket index is the size's bit length minus 5 (so 16..32 -> 0,
    /// 32..64 -> 1, ...), clamped to the 0..=31 range at both ends.
    #[must_use]
    pub fn bucket_index(size: usize) -> usize {
        match size {
            // Tiny allocations (and 0) all share the first bucket.
            0..=15 => 0,
            _ => {
                let bit_length = (usize::BITS - size.leading_zeros()) as usize;
                bit_length.saturating_sub(5).min(31)
            }
        }
    }

    /// Adds one sample of `size` bytes. Counts saturate instead of wrapping.
    pub fn record(&mut self, size: usize) {
        let slot = &mut self.buckets[Self::bucket_index(size)];
        *slot = slot.saturating_add(1);
        self.total_allocations = self.total_allocations.saturating_add(1);
    }

    /// Returns the `[low, high)` byte range covered by bucket `index`
    /// (clamped to 31). The last bucket is open-ended: its high bound is
    /// reported as `usize::MAX`.
    #[must_use]
    pub fn bucket_range(index: usize) -> (usize, usize) {
        let i = index.min(31);
        let low = 1_usize << (i + 4);
        let high = if i == 31 {
            usize::MAX
        } else {
            1_usize << (i + 5)
        };
        (low, high)
    }

    /// Raw per-bucket counts, indexed as by [`Self::bucket_index`].
    #[must_use]
    pub fn bucket_counts(&self) -> &[u64; 32] {
        &self.buckets
    }

    /// Total number of samples recorded so far.
    #[must_use]
    pub fn total_allocations(&self) -> u64 {
        self.total_allocations
    }

    /// Approximate `p`-th percentile (0..=100, clamped) of recorded sizes,
    /// reported as the lower bound of the bucket containing that percentile.
    /// Returns 0 when the histogram is empty.
    #[must_use]
    pub fn percentile(&self, p: f64) -> usize {
        if self.total_allocations == 0 {
            return 0;
        }
        let fraction = p.clamp(0.0, 100.0) / 100.0;
        // Rank of the target sample, at least 1 so p=0 maps to the first one.
        let target = ((fraction * self.total_allocations as f64).ceil() as u64).max(1);
        let mut seen: u64 = 0;
        let hit = self
            .buckets
            .iter()
            .position(|&count| {
                seen = seen.saturating_add(count);
                seen >= target
            })
            // Unreachable in practice (cumulative sum reaches the total),
            // but fall back to the last bucket defensively.
            .unwrap_or(31);
        Self::bucket_range(hit).0
    }

    /// Convenience wrapper: the 50th percentile of recorded sizes.
    #[must_use]
    pub fn median(&self) -> usize {
        self.percentile(50.0)
    }
}
impl Default for AllocationHistogram {
    /// Equivalent to [`AllocationHistogram::new`]: an empty histogram.
    fn default() -> Self {
        Self::new()
    }
}
/// Summary of a pool's free-space fragmentation.
#[derive(Debug, Clone, Default)]
pub struct FragmentationMetrics {
    /// Total bytes held in free blocks.
    pub total_free_bytes: usize,
    /// Size in bytes of the single largest free block.
    pub largest_free_block: usize,
    /// Number of distinct free blocks.
    pub free_block_count: u32,
}

impl FragmentationMetrics {
    /// Bundles the three raw measurements into a metrics value.
    #[must_use]
    pub fn new(total_free: usize, largest_block: usize, block_count: u32) -> Self {
        Self {
            free_block_count: block_count,
            largest_free_block: largest_block,
            total_free_bytes: total_free,
        }
    }

    /// Fragmentation measure: 0.0 when all free space is one contiguous
    /// block (or there is no free space at all), approaching 1.0 as free
    /// space splinters into many small blocks. Assumes
    /// `largest_free_block <= total_free_bytes`.
    #[must_use]
    pub fn fragmentation_ratio(&self) -> f64 {
        match self.total_free_bytes {
            0 => 0.0,
            total => 1.0 - self.largest_free_block as f64 / total as f64,
        }
    }

    /// Mean free-block size in bytes (integer division); 0 when there are
    /// no free blocks.
    #[must_use]
    pub fn average_free_block_size(&self) -> usize {
        let blocks = self.free_block_count as usize;
        if blocks == 0 {
            0
        } else {
            self.total_free_bytes / blocks
        }
    }
}
/// Point-in-time snapshot of pool statistics, produced by
/// `PoolStatsTracker::snapshot`; renders human-readably via `Display`.
#[derive(Debug, Clone, Default)]
pub struct PoolReport {
    /// Bytes outstanding (allocated minus freed) at snapshot time.
    pub allocated_bytes: usize,
    /// High-water mark of outstanding bytes.
    pub peak_bytes: usize,
    /// Total allocations recorded.
    pub allocation_count: u64,
    /// Total frees recorded.
    pub free_count: u64,
    /// Free-space fragmentation metrics. `snapshot` fills this with
    /// defaults; callers with allocator knowledge may populate it.
    pub fragmentation: FragmentationMetrics,
    /// Allocation-size distribution.
    pub histogram: AllocationHistogram,
    /// Snapshot time in nanoseconds since the Unix epoch (0 on clock error).
    pub timestamp_ns: u64,
}
impl fmt::Display for PoolReport {
    /// Renders the report as a multi-line, human-readable block: headline
    /// counters, fragmentation section, then only the non-empty histogram
    /// buckets. No trailing newline after the closing rule.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let active = self.allocation_count.saturating_sub(self.free_count);
        writeln!(f, "=== OxiCUDA Pool Report ===")?;
        writeln!(f, "Allocated: {} bytes", self.allocated_bytes)?;
        writeln!(f, "Peak: {} bytes", self.peak_bytes)?;
        writeln!(f, "Allocs: {}", self.allocation_count)?;
        writeln!(f, "Frees: {}", self.free_count)?;
        writeln!(f, "Active: {active}")?;
        writeln!(f, "--- Fragmentation ---")?;
        writeln!(f, "Free bytes: {}", self.fragmentation.total_free_bytes)?;
        writeln!(f, "Largest blk: {}", self.fragmentation.largest_free_block)?;
        writeln!(f, "Free blocks: {}", self.fragmentation.free_block_count)?;
        writeln!(f, "Frag ratio: {:.4}", self.fragmentation.fragmentation_ratio())?;
        writeln!(f, "--- Histogram ---")?;
        for (i, count) in self.histogram.bucket_counts().iter().copied().enumerate() {
            if count == 0 {
                continue;
            }
            let (lo, hi) = AllocationHistogram::bucket_range(i);
            // The last bucket is open-ended, so print it without an upper bound.
            if i == 31 {
                writeln!(f, "[{lo}+): {count}")?;
            } else {
                writeln!(f, "[{lo}, {hi}): {count}")?;
            }
        }
        writeln!(f, "Median alloc size: {} bytes", self.histogram.median())?;
        writeln!(f, "Timestamp: {} ns", self.timestamp_ns)?;
        write!(f, "===========================")
    }
}
/// Thread-safe allocation-statistics tracker for a memory pool.
///
/// All state lives behind an internal `RwLock`, so every method takes
/// `&self` and the tracker can be shared across threads (e.g. in an `Arc`).
#[derive(Debug)]
pub struct PoolStatsTracker {
    inner: RwLock<TrackerInner>,
}
/// Mutable statistics state guarded by `PoolStatsTracker`'s lock.
#[derive(Debug, Clone)]
struct TrackerInner {
    /// Bytes currently outstanding (allocations minus frees, saturating).
    allocated_bytes: usize,
    /// Highest value `allocated_bytes` has reached.
    peak_bytes: usize,
    /// Total allocations recorded.
    allocation_count: u64,
    /// Total frees recorded.
    free_count: u64,
    /// Size distribution of recorded allocations.
    histogram: AllocationHistogram,
}
impl TrackerInner {
fn new() -> Self {
Self {
allocated_bytes: 0,
peak_bytes: 0,
allocation_count: 0,
free_count: 0,
histogram: AllocationHistogram::new(),
}
}
}
impl PoolStatsTracker {
#[must_use]
pub fn new() -> Self {
Self {
inner: RwLock::new(TrackerInner::new()),
}
}
pub fn record_alloc(&self, size: usize) {
if let Ok(mut guard) = self.inner.write() {
guard.allocated_bytes = guard.allocated_bytes.saturating_add(size);
if guard.allocated_bytes > guard.peak_bytes {
guard.peak_bytes = guard.allocated_bytes;
}
guard.allocation_count = guard.allocation_count.saturating_add(1);
guard.histogram.record(size);
}
}
pub fn record_free(&self, size: usize) {
if let Ok(mut guard) = self.inner.write() {
guard.allocated_bytes = guard.allocated_bytes.saturating_sub(size);
guard.free_count = guard.free_count.saturating_add(1);
}
}
#[must_use]
pub fn snapshot(&self) -> PoolReport {
let timestamp_ns = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.map(|d| d.as_nanos() as u64)
.unwrap_or(0);
if let Ok(guard) = self.inner.read() {
PoolReport {
allocated_bytes: guard.allocated_bytes,
peak_bytes: guard.peak_bytes,
allocation_count: guard.allocation_count,
free_count: guard.free_count,
fragmentation: FragmentationMetrics::default(),
histogram: guard.histogram.clone(),
timestamp_ns,
}
} else {
PoolReport {
timestamp_ns,
..PoolReport::default()
}
}
}
pub fn reset(&self) {
if let Ok(mut guard) = self.inner.write() {
*guard = TrackerInner::new();
}
}
#[must_use]
pub fn current_allocated(&self) -> usize {
self.inner.read().map(|g| g.allocated_bytes).unwrap_or(0)
}
#[must_use]
pub fn peak_allocated(&self) -> usize {
self.inner.read().map(|g| g.peak_bytes).unwrap_or(0)
}
pub fn trim(&self) -> usize {
if let Ok(mut guard) = self.inner.write() {
let freed = guard.allocated_bytes;
guard.allocated_bytes = 0;
freed
} else {
0
}
}
#[must_use]
pub fn is_fully_trimmed(&self) -> bool {
self.current_allocated() == 0
}
#[must_use]
pub fn has_leaks(&self) -> bool {
self.current_allocated() > 0
}
}
impl Default for PoolStatsTracker {
    /// Equivalent to [`PoolStatsTracker::new`]: a tracker with zeroed stats.
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;

    // --- AllocationHistogram::bucket_index: power-of-two boundaries ---

    #[test]
    fn histogram_bucket_index_zero() {
        assert_eq!(AllocationHistogram::bucket_index(0), 0);
    }

    #[test]
    fn histogram_bucket_index_one() {
        assert_eq!(AllocationHistogram::bucket_index(1), 0);
    }

    #[test]
    fn histogram_bucket_index_sixteen() {
        assert_eq!(AllocationHistogram::bucket_index(16), 0);
    }

    #[test]
    fn histogram_bucket_index_thirty_two() {
        assert_eq!(AllocationHistogram::bucket_index(32), 1);
    }

    #[test]
    fn histogram_bucket_index_sixty_four() {
        assert_eq!(AllocationHistogram::bucket_index(64), 2);
    }

    #[test]
    fn histogram_bucket_index_1024() {
        assert_eq!(AllocationHistogram::bucket_index(1024), 6);
    }

    #[test]
    fn histogram_bucket_index_1mb() {
        assert_eq!(AllocationHistogram::bucket_index(1 << 20), 16);
    }

    #[test]
    fn histogram_bucket_index_1gb() {
        assert_eq!(AllocationHistogram::bucket_index(1 << 30), 26);
    }

    // --- Histogram recording, ranges, and percentiles ---

    #[test]
    fn histogram_record_and_retrieval() {
        let mut hist = AllocationHistogram::new();
        hist.record(64);
        hist.record(64);
        hist.record(128);
        assert_eq!(hist.total_allocations(), 3);
        assert_eq!(hist.bucket_counts()[2], 2);
        assert_eq!(hist.bucket_counts()[3], 1);
    }

    #[test]
    fn histogram_bucket_range() {
        let (lo, hi) = AllocationHistogram::bucket_range(0);
        assert_eq!(lo, 16);
        assert_eq!(hi, 32);
        let (lo, hi) = AllocationHistogram::bucket_range(6);
        assert_eq!(lo, 1024);
        assert_eq!(hi, 2048);
        // Last bucket is open-ended.
        let (lo, hi) = AllocationHistogram::bucket_range(31);
        assert_eq!(lo, 1 << 35);
        assert_eq!(hi, usize::MAX);
    }

    #[test]
    fn histogram_percentile_empty() {
        let hist = AllocationHistogram::new();
        assert_eq!(hist.percentile(50.0), 0);
    }

    #[test]
    fn histogram_percentile_single_bucket() {
        let mut hist = AllocationHistogram::new();
        for _ in 0..100 {
            hist.record(256);
        }
        // Percentiles report the bucket's lower bound (256 for 256..512).
        assert_eq!(hist.percentile(0.0), 256);
        assert_eq!(hist.percentile(50.0), 256);
        assert_eq!(hist.percentile(100.0), 256);
    }

    #[test]
    fn histogram_percentile_two_buckets() {
        let mut hist = AllocationHistogram::new();
        for _ in 0..30 {
            hist.record(64);
        }
        for _ in 0..70 {
            hist.record(1024);
        }
        // The 30/70 split puts the crossover exactly between p=30 and p=31.
        assert_eq!(hist.percentile(30.0), 64);
        assert_eq!(hist.percentile(31.0), 1024);
        assert_eq!(hist.median(), 1024);
    }

    // --- FragmentationMetrics ---

    #[test]
    fn fragmentation_no_fragmentation() {
        let m = FragmentationMetrics::new(1024, 1024, 1);
        let ratio = m.fragmentation_ratio();
        assert!((ratio - 0.0).abs() < f64::EPSILON);
    }

    #[test]
    fn fragmentation_high_fragmentation() {
        let m = FragmentationMetrics::new(1000, 100, 10);
        let ratio = m.fragmentation_ratio();
        assert!((ratio - 0.9).abs() < f64::EPSILON);
    }

    #[test]
    fn fragmentation_zero_free() {
        let m = FragmentationMetrics::new(0, 0, 0);
        assert!((m.fragmentation_ratio() - 0.0).abs() < f64::EPSILON);
        assert_eq!(m.average_free_block_size(), 0);
    }

    #[test]
    fn fragmentation_average_block_size() {
        let m = FragmentationMetrics::new(1000, 500, 4);
        assert_eq!(m.average_free_block_size(), 250);
    }

    // --- PoolStatsTracker counters, snapshot, reset ---

    #[test]
    fn tracker_alloc_free_sequence() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(1024);
        tracker.record_alloc(2048);
        assert_eq!(tracker.current_allocated(), 3072);
        tracker.record_free(1024);
        assert_eq!(tracker.current_allocated(), 2048);
    }

    #[test]
    fn tracker_peak_tracking() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(1000);
        tracker.record_alloc(2000);
        tracker.record_free(2000);
        // Peak reflects the high-water mark, not the current value.
        assert_eq!(tracker.current_allocated(), 1000);
        assert_eq!(tracker.peak_allocated(), 3000);
    }

    #[test]
    fn tracker_snapshot() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(512);
        tracker.record_alloc(1024);
        tracker.record_free(512);
        let report = tracker.snapshot();
        assert_eq!(report.allocated_bytes, 1024);
        assert_eq!(report.peak_bytes, 1536);
        assert_eq!(report.allocation_count, 2);
        assert_eq!(report.free_count, 1);
        assert!(report.timestamp_ns > 0);
    }

    #[test]
    fn tracker_reset() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(4096);
        tracker.record_alloc(8192);
        tracker.reset();
        assert_eq!(tracker.current_allocated(), 0);
        assert_eq!(tracker.peak_allocated(), 0);
        let report = tracker.snapshot();
        assert_eq!(report.allocation_count, 0);
        assert_eq!(report.free_count, 0);
    }

    // 8 threads x (100 allocs, 50 frees) of 64 bytes each:
    // 800 allocs, 400 frees, 400 * 64 = 25600 bytes outstanding.
    #[test]
    fn tracker_thread_safety() {
        let tracker = Arc::new(PoolStatsTracker::new());
        let mut handles = Vec::new();
        for _ in 0..8 {
            let t = Arc::clone(&tracker);
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    t.record_alloc(64);
                }
                for _ in 0..50 {
                    t.record_free(64);
                }
            }));
        }
        for h in handles {
            h.join().expect("thread panicked");
        }
        let report = tracker.snapshot();
        assert_eq!(report.allocation_count, 800);
        assert_eq!(report.free_count, 400);
        assert_eq!(tracker.current_allocated(), 25600);
    }

    #[test]
    fn display_formatting() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(256);
        tracker.record_alloc(1024);
        let report = tracker.snapshot();
        let text = format!("{report}");
        assert!(text.contains("OxiCUDA Pool Report"));
        assert!(text.contains("Allocated:"));
        assert!(text.contains("Peak:"));
        assert!(text.contains("Histogram"));
        assert!(text.contains("Median alloc size:"));
    }

    #[test]
    fn pool_report_default() {
        let report = PoolReport::default();
        assert_eq!(report.allocated_bytes, 0);
        assert_eq!(report.peak_bytes, 0);
        assert_eq!(report.allocation_count, 0);
        assert_eq!(report.free_count, 0);
        assert_eq!(report.timestamp_ns, 0);
    }

    // --- trim / leak detection ---

    #[test]
    fn pool_trim_after_all_frees_is_clean() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(4096);
        tracker.record_free(4096);
        assert_eq!(
            tracker.current_allocated(),
            0,
            "after freeing all, allocated should be 0"
        );
        let freed = tracker.trim();
        assert_eq!(freed, 0, "trim on fully-freed pool returns 0");
        assert!(
            tracker.is_fully_trimmed(),
            "after trim, pool should be fully trimmed"
        );
        assert!(
            !tracker.has_leaks(),
            "no leaks after complete alloc/free cycle"
        );
    }

    #[test]
    fn pool_trim_outstanding_bytes() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(8192);
        assert_eq!(tracker.current_allocated(), 8192);
        assert!(tracker.has_leaks(), "8K outstanding → has leaks");
        let freed = tracker.trim();
        assert_eq!(freed, 8192, "trim should return the outstanding 8K");
        assert!(tracker.is_fully_trimmed(), "after trim, fully trimmed");
        assert!(!tracker.has_leaks(), "no leaks after trim");
    }

    #[test]
    fn pool_trim_partial_free_still_has_leaks() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(4096);
        tracker.record_alloc(2048);
        tracker.record_free(4096);
        assert_eq!(tracker.current_allocated(), 2048, "2K still outstanding");
        assert!(
            tracker.has_leaks(),
            "2K outstanding after partial free → has leaks"
        );
        assert!(
            !tracker.is_fully_trimmed(),
            "not fully trimmed while 2K outstanding"
        );
        let freed = tracker.trim();
        assert_eq!(freed, 2048, "trim releases the remaining 2K");
        assert!(tracker.is_fully_trimmed(), "fully trimmed after trim()");
    }

    #[test]
    fn pool_trim_empty_tracker_is_clean() {
        let tracker = PoolStatsTracker::new();
        assert!(tracker.is_fully_trimmed(), "fresh tracker is fully trimmed");
        assert!(!tracker.has_leaks(), "fresh tracker has no leaks");
        let freed = tracker.trim();
        assert_eq!(freed, 0, "trim on empty tracker returns 0");
    }

    // NOTE(review): despite the name, this exercises only the synchronous
    // tracker API — there is no async code in this file. Consider renaming
    // (e.g. `tracker_roundtrip_smoke`).
    #[test]
    fn alloc_async_api_exists() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(1024);
        assert_eq!(tracker.current_allocated(), 1024);
        tracker.record_free(1024);
        assert_eq!(tracker.current_allocated(), 0);
        let freed = tracker.trim();
        assert_eq!(freed, 0);
        assert!(tracker.is_fully_trimmed());
    }
}