// fluxbench_core/allocator.rs

1//! Global Allocator Interceptor
2//!
3//! Tracks heap allocations during benchmark execution.
4
5use std::alloc::{GlobalAlloc, Layout, System};
6use std::sync::atomic::{AtomicU64, Ordering};
7
/// Total bytes handed out since the last reset.
///
/// NOTE: despite the benchmark-per-iteration usage, these are process-wide
/// global atomics shared by every thread, not thread-locals.
static ALLOCATED_BYTES: AtomicU64 = AtomicU64::new(0);
/// Number of successful allocation events since the last reset.
static ALLOCATION_COUNT: AtomicU64 = AtomicU64::new(0);

/// Record one successful allocation event of `bytes` bytes.
fn record_alloc(bytes: u64) {
    ALLOCATED_BYTES.fetch_add(bytes, Ordering::Relaxed);
    ALLOCATION_COUNT.fetch_add(1, Ordering::Relaxed);
}

/// Allocator that delegates to [`System`] while counting allocation traffic.
///
/// End users install it in the benchmarked binary via
/// `#[global_allocator] static ALLOC: TrackingAllocator = TrackingAllocator;`.
pub struct TrackingAllocator;

unsafe impl GlobalAlloc for TrackingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // SAFETY: same contract as `System.alloc`; `layout` is caller-supplied.
        let ptr = unsafe { System.alloc(layout) };
        if !ptr.is_null() {
            record_alloc(layout.size() as u64);
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // Counters are cumulative totals for the measured interval, so frees
        // are deliberately not subtracted here.
        // SAFETY: `ptr`/`layout` were produced by a matching allocation call.
        unsafe { System.dealloc(ptr, layout) };
    }

    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        // SAFETY: same contract as `System.alloc_zeroed`.
        let ptr = unsafe { System.alloc_zeroed(layout) };
        if !ptr.is_null() {
            record_alloc(layout.size() as u64);
        }
        ptr
    }

    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // SAFETY: allocator contract — `ptr`/`layout` come from a prior
        // allocation and `new_size` is the caller's requested size.
        let new_ptr = unsafe { System.realloc(ptr, layout, new_size) };
        if !new_ptr.is_null() {
            // Only growth contributes bytes; shrinking leaves the running
            // total alone (saturating_sub yields 0, so fetch_add is a no-op).
            let grown = new_size.saturating_sub(layout.size()) as u64;
            ALLOCATED_BYTES.fetch_add(grown, Ordering::Relaxed);
            ALLOCATION_COUNT.fetch_add(1, Ordering::Relaxed);
        }
        new_ptr
    }
}
55
56/// Get current allocation statistics
57pub fn current_allocation() -> (u64, u64) {
58    (
59        ALLOCATED_BYTES.load(Ordering::Relaxed),
60        ALLOCATION_COUNT.load(Ordering::Relaxed),
61    )
62}
63
64/// Reset allocation counters (call before each iteration)
65pub fn reset_allocation_counter() {
66    ALLOCATED_BYTES.store(0, Ordering::Relaxed);
67    ALLOCATION_COUNT.store(0, Ordering::Relaxed);
68}
69
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::Ordering;

    // The counters are process-wide globals and the default test harness runs
    // #[test] fns on multiple threads, so splitting counter manipulation
    // across separate tests makes them race each other and flake. All
    // read/reset behavior is therefore exercised in ONE sequential test.
    #[test]
    fn test_counter_read_and_reset() {
        // current_allocation() must report exactly what the atomics hold.
        ALLOCATED_BYTES.store(2048, Ordering::Relaxed);
        ALLOCATION_COUNT.store(10, Ordering::Relaxed);

        let (bytes, count) = current_allocation();
        assert_eq!(bytes, 2048);
        assert_eq!(count, 10);

        // reset_allocation_counter() must zero both counters.
        reset_allocation_counter();

        let (bytes, count) = current_allocation();
        assert_eq!(bytes, 0);
        assert_eq!(count, 0);
    }

    // Note: Full allocation tracking test requires TrackingAllocator
    // to be installed as #[global_allocator] in the binary crate.
    // This is done by end users, not the library.
}