1use std::fmt;
12use std::sync::RwLock;
13use std::time::SystemTime;
14
/// Log2-bucketed histogram of allocation request sizes.
///
/// Thirty-two buckets; bucket `i` counts sizes in `[2^(i+4), 2^(i+5))`, so
/// the first bucket starts at 16 bytes (requests smaller than 16 bytes also
/// land in bucket 0) and bucket 31 is open-ended.
#[derive(Debug, Clone)]
pub struct AllocationHistogram {
    buckets: [u64; 32],
    total_allocations: u64,
}

impl AllocationHistogram {
    /// Creates an empty histogram with all buckets zeroed.
    #[must_use]
    pub fn new() -> Self {
        Self {
            buckets: [0; 32],
            total_allocations: 0,
        }
    }

    /// Maps an allocation size to its bucket index (`0..=31`).
    ///
    /// Sizes below 16 bytes share bucket 0; sizes of 2^35 bytes and above
    /// are clamped into the final bucket.
    #[must_use]
    pub fn bucket_index(size: usize) -> usize {
        if size < 16 {
            return 0;
        }
        // floor(log2(size)); size >= 16 guarantees at least one set bit,
        // so the subtraction below cannot underflow.
        let log2 = (usize::BITS - 1 - size.leading_zeros()) as usize;
        log2.saturating_sub(4).min(31)
    }

    /// Counts one allocation of `size` bytes (saturating, never overflows).
    pub fn record(&mut self, size: usize) {
        let slot = &mut self.buckets[Self::bucket_index(size)];
        *slot = slot.saturating_add(1);
        self.total_allocations = self.total_allocations.saturating_add(1);
    }

    /// Returns the `[low, high)` byte range covered by bucket `index`.
    ///
    /// Indices above 31 are clamped. The final bucket is open-ended and
    /// reports `usize::MAX` as its exclusive upper bound.
    #[must_use]
    pub fn bucket_range(index: usize) -> (usize, usize) {
        let i = index.min(31);
        let low = 1_usize << (i + 4);
        if i == 31 {
            (low, usize::MAX)
        } else {
            (low, 1_usize << (i + 5))
        }
    }

    /// Raw per-bucket allocation counts.
    #[must_use]
    pub fn bucket_counts(&self) -> &[u64; 32] {
        &self.buckets
    }

    /// Total number of allocations recorded so far.
    #[must_use]
    pub fn total_allocations(&self) -> u64 {
        self.total_allocations
    }

    /// Approximates the `p`-th percentile (0.0..=100.0) of allocation sizes.
    ///
    /// Returns the lower edge of the first bucket whose cumulative count
    /// reaches the target rank, or 0 for an empty histogram. `p` is clamped
    /// into range, and the rank is at least 1 so `p = 0.0` still selects the
    /// smallest occupied bucket.
    #[must_use]
    pub fn percentile(&self, p: f64) -> usize {
        if self.total_allocations == 0 {
            return 0;
        }

        let fraction = p.clamp(0.0, 100.0) / 100.0;
        let rank = ((fraction * self.total_allocations as f64).ceil() as u64).max(1);

        let mut seen: u64 = 0;
        let bucket = self
            .buckets
            .iter()
            .position(|&count| {
                seen = seen.saturating_add(count);
                seen >= rank
            })
            .unwrap_or(31);
        Self::bucket_range(bucket).0
    }

    /// Median (50th-percentile) allocation size.
    #[must_use]
    pub fn median(&self) -> usize {
        self.percentile(50.0)
    }
}

impl Default for AllocationHistogram {
    /// Equivalent to [`AllocationHistogram::new`].
    fn default() -> Self {
        AllocationHistogram::new()
    }
}
140
/// Free-list shape metrics used to estimate external fragmentation.
#[derive(Debug, Clone, Default)]
pub struct FragmentationMetrics {
    /// Total bytes sitting on the free list.
    pub total_free_bytes: usize,
    /// Size in bytes of the single largest contiguous free block.
    pub largest_free_block: usize,
    /// Number of distinct free blocks.
    pub free_block_count: u32,
}

impl FragmentationMetrics {
    /// Builds metrics from raw free-list measurements.
    #[must_use]
    pub fn new(total_free: usize, largest_block: usize, block_count: u32) -> Self {
        Self {
            total_free_bytes: total_free,
            largest_free_block: largest_block,
            free_block_count: block_count,
        }
    }

    /// External fragmentation ratio in `[0.0, 1.0]`.
    ///
    /// 0.0 means all free memory is one contiguous block; values approaching
    /// 1.0 mean free memory is shattered into many small blocks. Returns 0.0
    /// when there is no free memory at all. The result is clamped so that
    /// inconsistent inputs (a "largest block" reported larger than the free
    /// total) cannot yield a nonsensical negative ratio.
    #[must_use]
    pub fn fragmentation_ratio(&self) -> f64 {
        if self.total_free_bytes == 0 {
            return 0.0;
        }
        let ratio = 1.0 - (self.largest_free_block as f64 / self.total_free_bytes as f64);
        ratio.clamp(0.0, 1.0)
    }

    /// Mean free-block size in bytes (0 when there are no free blocks).
    #[must_use]
    pub fn average_free_block_size(&self) -> usize {
        if self.free_block_count == 0 {
            return 0;
        }
        self.total_free_bytes / self.free_block_count as usize
    }
}
191
/// Point-in-time snapshot of pool statistics, as produced by
/// [`PoolStatsTracker::snapshot`].
#[derive(Debug, Clone, Default)]
pub struct PoolReport {
    /// Bytes currently accounted as allocated.
    pub allocated_bytes: usize,
    /// High-water mark of allocated bytes.
    pub peak_bytes: usize,
    /// Total allocations recorded.
    pub allocation_count: u64,
    /// Total frees recorded.
    pub free_count: u64,
    /// Free-list fragmentation metrics. `snapshot` fills this with
    /// `FragmentationMetrics::default()` — it does not track the free list.
    pub fragmentation: FragmentationMetrics,
    /// Histogram of allocation request sizes.
    pub histogram: AllocationHistogram,
    /// Snapshot time in nanoseconds since the UNIX epoch (0 if unavailable).
    pub timestamp_ns: u64,
}
214
215impl fmt::Display for PoolReport {
216 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
217 writeln!(f, "=== OxiCUDA Pool Report ===")?;
218 writeln!(f, "Allocated: {} bytes", self.allocated_bytes)?;
219 writeln!(f, "Peak: {} bytes", self.peak_bytes)?;
220 writeln!(f, "Allocs: {}", self.allocation_count)?;
221 writeln!(f, "Frees: {}", self.free_count)?;
222 writeln!(
223 f,
224 "Active: {}",
225 self.allocation_count.saturating_sub(self.free_count)
226 )?;
227 writeln!(f, "--- Fragmentation ---")?;
228 writeln!(f, "Free bytes: {}", self.fragmentation.total_free_bytes)?;
229 writeln!(f, "Largest blk: {}", self.fragmentation.largest_free_block)?;
230 writeln!(f, "Free blocks: {}", self.fragmentation.free_block_count)?;
231 writeln!(
232 f,
233 "Frag ratio: {:.4}",
234 self.fragmentation.fragmentation_ratio()
235 )?;
236 writeln!(f, "--- Histogram ---")?;
237 for (i, &count) in self.histogram.bucket_counts().iter().enumerate() {
238 if count > 0 {
239 let (lo, hi) = AllocationHistogram::bucket_range(i);
240 if i == 31 {
241 writeln!(f, "[{lo}+): {count}")?;
242 } else {
243 writeln!(f, "[{lo}, {hi}): {count}")?;
244 }
245 }
246 }
247 writeln!(f, "Median alloc size: {} bytes", self.histogram.median())?;
248 writeln!(f, "Timestamp: {} ns", self.timestamp_ns)?;
249 write!(f, "===========================")?;
250 Ok(())
251 }
252}
253
/// Thread-safe allocation-statistics tracker for a memory pool.
///
/// All counters live behind a single [`RwLock`], so each update is applied
/// atomically as a unit and a shared reference (e.g. behind an `Arc`) can be
/// used concurrently from many threads.
#[derive(Debug)]
pub struct PoolStatsTracker {
    inner: RwLock<TrackerInner>,
}
266
/// Lock-protected mutable state of [`PoolStatsTracker`].
#[derive(Debug, Clone)]
struct TrackerInner {
    // Bytes currently outstanding (allocs minus frees, saturating at 0).
    allocated_bytes: usize,
    // Highest value `allocated_bytes` has reached since construction/reset.
    peak_bytes: usize,
    // Total number of recorded allocations.
    allocation_count: u64,
    // Total number of recorded frees.
    free_count: u64,
    // Size distribution of recorded allocations.
    histogram: AllocationHistogram,
}
275
276impl TrackerInner {
277 fn new() -> Self {
278 Self {
279 allocated_bytes: 0,
280 peak_bytes: 0,
281 allocation_count: 0,
282 free_count: 0,
283 histogram: AllocationHistogram::new(),
284 }
285 }
286}
287
288impl PoolStatsTracker {
289 #[must_use]
291 pub fn new() -> Self {
292 Self {
293 inner: RwLock::new(TrackerInner::new()),
294 }
295 }
296
297 pub fn record_alloc(&self, size: usize) {
302 if let Ok(mut guard) = self.inner.write() {
303 guard.allocated_bytes = guard.allocated_bytes.saturating_add(size);
304 if guard.allocated_bytes > guard.peak_bytes {
305 guard.peak_bytes = guard.allocated_bytes;
306 }
307 guard.allocation_count = guard.allocation_count.saturating_add(1);
308 guard.histogram.record(size);
309 }
310 }
311
312 pub fn record_free(&self, size: usize) {
316 if let Ok(mut guard) = self.inner.write() {
317 guard.allocated_bytes = guard.allocated_bytes.saturating_sub(size);
318 guard.free_count = guard.free_count.saturating_add(1);
319 }
320 }
321
322 #[must_use]
327 pub fn snapshot(&self) -> PoolReport {
328 let timestamp_ns = SystemTime::now()
329 .duration_since(SystemTime::UNIX_EPOCH)
330 .map(|d| d.as_nanos() as u64)
331 .unwrap_or(0);
332
333 if let Ok(guard) = self.inner.read() {
334 PoolReport {
335 allocated_bytes: guard.allocated_bytes,
336 peak_bytes: guard.peak_bytes,
337 allocation_count: guard.allocation_count,
338 free_count: guard.free_count,
339 fragmentation: FragmentationMetrics::default(),
340 histogram: guard.histogram.clone(),
341 timestamp_ns,
342 }
343 } else {
344 PoolReport {
345 timestamp_ns,
346 ..PoolReport::default()
347 }
348 }
349 }
350
351 pub fn reset(&self) {
353 if let Ok(mut guard) = self.inner.write() {
354 *guard = TrackerInner::new();
355 }
356 }
357
358 #[must_use]
360 pub fn current_allocated(&self) -> usize {
361 self.inner.read().map(|g| g.allocated_bytes).unwrap_or(0)
362 }
363
364 #[must_use]
366 pub fn peak_allocated(&self) -> usize {
367 self.inner.read().map(|g| g.peak_bytes).unwrap_or(0)
368 }
369
370 pub fn trim(&self) -> usize {
382 if let Ok(mut guard) = self.inner.write() {
383 let freed = guard.allocated_bytes;
384 guard.allocated_bytes = 0;
385 freed
386 } else {
387 0
388 }
389 }
390
391 #[must_use]
396 pub fn is_fully_trimmed(&self) -> bool {
397 self.current_allocated() == 0
398 }
399
400 #[must_use]
404 pub fn has_leaks(&self) -> bool {
405 self.current_allocated() > 0
406 }
407}
408
409impl Default for PoolStatsTracker {
410 fn default() -> Self {
411 Self::new()
412 }
413}
414
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;

    // ---- AllocationHistogram::bucket_index --------------------------------
    // Bucket i covers [2^(i+4), 2^(i+5)); everything below 16 bytes lands in
    // bucket 0.

    #[test]
    fn histogram_bucket_index_zero() {
        assert_eq!(AllocationHistogram::bucket_index(0), 0);
    }

    #[test]
    fn histogram_bucket_index_one() {
        assert_eq!(AllocationHistogram::bucket_index(1), 0);
    }

    // 16 is the lower edge of bucket 0.
    #[test]
    fn histogram_bucket_index_sixteen() {
        assert_eq!(AllocationHistogram::bucket_index(16), 0);
    }

    #[test]
    fn histogram_bucket_index_thirty_two() {
        assert_eq!(AllocationHistogram::bucket_index(32), 1);
    }

    #[test]
    fn histogram_bucket_index_sixty_four() {
        assert_eq!(AllocationHistogram::bucket_index(64), 2);
    }

    #[test]
    fn histogram_bucket_index_1024() {
        assert_eq!(AllocationHistogram::bucket_index(1024), 6);
    }

    #[test]
    fn histogram_bucket_index_1mb() {
        assert_eq!(AllocationHistogram::bucket_index(1 << 20), 16);
    }

    #[test]
    fn histogram_bucket_index_1gb() {
        assert_eq!(AllocationHistogram::bucket_index(1 << 30), 26);
    }

    // 64 falls in bucket 2, 128 in bucket 3.
    #[test]
    fn histogram_record_and_retrieval() {
        let mut hist = AllocationHistogram::new();
        hist.record(64);
        hist.record(64);
        hist.record(128);
        assert_eq!(hist.total_allocations(), 3);
        assert_eq!(hist.bucket_counts()[2], 2);
        assert_eq!(hist.bucket_counts()[3], 1);
    }

    #[test]
    fn histogram_bucket_range() {
        let (lo, hi) = AllocationHistogram::bucket_range(0);
        assert_eq!(lo, 16);
        assert_eq!(hi, 32);

        let (lo, hi) = AllocationHistogram::bucket_range(6);
        assert_eq!(lo, 1024);
        assert_eq!(hi, 2048);

        // The final bucket is open-ended (upper bound reported as usize::MAX).
        let (lo, hi) = AllocationHistogram::bucket_range(31);
        assert_eq!(lo, 1 << 35);
        assert_eq!(hi, usize::MAX);
    }

    #[test]
    fn histogram_percentile_empty() {
        let hist = AllocationHistogram::new();
        assert_eq!(hist.percentile(50.0), 0);
    }

    // Percentile reports the lower edge of the bucket containing the target
    // rank, so every percentile of a single-bucket histogram is that edge.
    #[test]
    fn histogram_percentile_single_bucket() {
        let mut hist = AllocationHistogram::new();
        for _ in 0..100 {
            hist.record(256);
        }
        assert_eq!(hist.percentile(0.0), 256);
        assert_eq!(hist.percentile(50.0), 256);
        assert_eq!(hist.percentile(100.0), 256);
    }

    // 30% of samples sit in the 64-byte bucket: p30 still hits it, p31
    // spills into the 1024-byte bucket.
    #[test]
    fn histogram_percentile_two_buckets() {
        let mut hist = AllocationHistogram::new();
        for _ in 0..30 {
            hist.record(64);
        }
        for _ in 0..70 {
            hist.record(1024);
        }
        assert_eq!(hist.percentile(30.0), 64);
        assert_eq!(hist.percentile(31.0), 1024);
        assert_eq!(hist.median(), 1024);
    }

    // ---- FragmentationMetrics ---------------------------------------------

    // One block holding all free bytes => no fragmentation.
    #[test]
    fn fragmentation_no_fragmentation() {
        let m = FragmentationMetrics::new(1024, 1024, 1);
        let ratio = m.fragmentation_ratio();
        assert!((ratio - 0.0).abs() < f64::EPSILON);
    }

    // Largest block is 10% of free memory => ratio 0.9.
    #[test]
    fn fragmentation_high_fragmentation() {
        let m = FragmentationMetrics::new(1000, 100, 10);
        let ratio = m.fragmentation_ratio();
        assert!((ratio - 0.9).abs() < f64::EPSILON);
    }

    // No free memory at all must not divide by zero.
    #[test]
    fn fragmentation_zero_free() {
        let m = FragmentationMetrics::new(0, 0, 0);
        assert!((m.fragmentation_ratio() - 0.0).abs() < f64::EPSILON);
        assert_eq!(m.average_free_block_size(), 0);
    }

    #[test]
    fn fragmentation_average_block_size() {
        let m = FragmentationMetrics::new(1000, 500, 4);
        assert_eq!(m.average_free_block_size(), 250);
    }

    // ---- PoolStatsTracker -------------------------------------------------

    #[test]
    fn tracker_alloc_free_sequence() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(1024);
        tracker.record_alloc(2048);
        assert_eq!(tracker.current_allocated(), 3072);
        tracker.record_free(1024);
        assert_eq!(tracker.current_allocated(), 2048);
    }

    // Peak is a high-water mark: it must survive subsequent frees.
    #[test]
    fn tracker_peak_tracking() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(1000);
        tracker.record_alloc(2000);
        tracker.record_free(2000);
        assert_eq!(tracker.current_allocated(), 1000);
        assert_eq!(tracker.peak_allocated(), 3000);
    }

    #[test]
    fn tracker_snapshot() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(512);
        tracker.record_alloc(1024);
        tracker.record_free(512);

        let report = tracker.snapshot();
        assert_eq!(report.allocated_bytes, 1024);
        assert_eq!(report.peak_bytes, 1536);
        assert_eq!(report.allocation_count, 2);
        assert_eq!(report.free_count, 1);
        assert!(report.timestamp_ns > 0);
    }

    // Reset clears counters, peak, and the histogram alike.
    #[test]
    fn tracker_reset() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(4096);
        tracker.record_alloc(8192);
        tracker.reset();
        assert_eq!(tracker.current_allocated(), 0);
        assert_eq!(tracker.peak_allocated(), 0);
        let report = tracker.snapshot();
        assert_eq!(report.allocation_count, 0);
        assert_eq!(report.free_count, 0);
    }

    // 8 threads x (100 allocs of 64B, 50 frees of 64B):
    // 800 allocs, 400 frees, 400 * 64 = 25600 bytes outstanding.
    #[test]
    fn tracker_thread_safety() {
        let tracker = Arc::new(PoolStatsTracker::new());
        let mut handles = Vec::new();

        for _ in 0..8 {
            let t = Arc::clone(&tracker);
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    t.record_alloc(64);
                }
                for _ in 0..50 {
                    t.record_free(64);
                }
            }));
        }

        for h in handles {
            h.join().expect("thread panicked");
        }

        let report = tracker.snapshot();
        assert_eq!(report.allocation_count, 800);
        assert_eq!(report.free_count, 400);
        assert_eq!(tracker.current_allocated(), 25600);
    }

    // Smoke-test the Display impl via key substrings, not exact layout.
    #[test]
    fn display_formatting() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(256);
        tracker.record_alloc(1024);
        let report = tracker.snapshot();
        let text = format!("{report}");
        assert!(text.contains("OxiCUDA Pool Report"));
        assert!(text.contains("Allocated:"));
        assert!(text.contains("Peak:"));
        assert!(text.contains("Histogram"));
        assert!(text.contains("Median alloc size:"));
    }

    #[test]
    fn pool_report_default() {
        let report = PoolReport::default();
        assert_eq!(report.allocated_bytes, 0);
        assert_eq!(report.peak_bytes, 0);
        assert_eq!(report.allocation_count, 0);
        assert_eq!(report.free_count, 0);
        assert_eq!(report.timestamp_ns, 0);
    }

    // ---- trim / leak-detection helpers ------------------------------------

    #[test]
    fn pool_trim_after_all_frees_is_clean() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(4096);
        tracker.record_free(4096);

        assert_eq!(
            tracker.current_allocated(),
            0,
            "after freeing all, allocated should be 0"
        );

        let freed = tracker.trim();
        assert_eq!(freed, 0, "trim on fully-freed pool returns 0");
        assert!(
            tracker.is_fully_trimmed(),
            "after trim, pool should be fully trimmed"
        );
        assert!(
            !tracker.has_leaks(),
            "no leaks after complete alloc/free cycle"
        );
    }

    #[test]
    fn pool_trim_outstanding_bytes() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(8192);

        assert_eq!(tracker.current_allocated(), 8192);
        assert!(tracker.has_leaks(), "8K outstanding → has leaks");

        let freed = tracker.trim();
        assert_eq!(freed, 8192, "trim should return the outstanding 8K");
        assert!(tracker.is_fully_trimmed(), "after trim, fully trimmed");
        assert!(!tracker.has_leaks(), "no leaks after trim");
    }

    #[test]
    fn pool_trim_partial_free_still_has_leaks() {
        let tracker = PoolStatsTracker::new();
        tracker.record_alloc(4096);
        tracker.record_alloc(2048);
        tracker.record_free(4096);

        assert_eq!(tracker.current_allocated(), 2048, "2K still outstanding");
        assert!(
            tracker.has_leaks(),
            "2K outstanding after partial free → has leaks"
        );
        assert!(
            !tracker.is_fully_trimmed(),
            "not fully trimmed while 2K outstanding"
        );

        let freed = tracker.trim();
        assert_eq!(freed, 2048, "trim releases the remaining 2K");
        assert!(tracker.is_fully_trimmed(), "fully trimmed after trim()");
    }

    #[test]
    fn pool_trim_empty_tracker_is_clean() {
        let tracker = PoolStatsTracker::new();
        assert!(tracker.is_fully_trimmed(), "fresh tracker is fully trimmed");
        assert!(!tracker.has_leaks(), "fresh tracker has no leaks");
        let freed = tracker.trim();
        assert_eq!(freed, 0, "trim on empty tracker returns 0");
    }

    // NOTE(review): despite the name, no async API is exercised here — only
    // the synchronous tracker methods. Presumably a placeholder for a future
    // alloc_async entry point; confirm intent and rename or extend.
    #[test]
    fn alloc_async_api_exists() {
        let tracker = PoolStatsTracker::new();

        tracker.record_alloc(1024);
        assert_eq!(tracker.current_allocated(), 1024);

        tracker.record_free(1024);
        assert_eq!(tracker.current_allocated(), 0);

        let freed = tracker.trim();
        assert_eq!(freed, 0);
        assert!(tracker.is_fully_trimmed());
    }
}