tracking_allocator/allocator.rs
use std::alloc::{handle_alloc_error, GlobalAlloc, Layout, System};

use crate::token::try_with_suspended_allocation_group;
use crate::{get_global_tracker, AllocationGroupId};

/// Tracking allocator implementation.
///
/// This allocator must be installed via `#[global_allocator]` in order to take effect. More
/// information on using this allocator can be found in the examples, or directly in the standard
/// library docs for [`GlobalAlloc`].
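///
/// # Examples
///
/// A minimal sketch of installing the allocator (this assumes the crate is published as
/// `tracking_allocator` and re-exports `Allocator` from its root, per the module path above):
///
/// ```ignore
/// use std::alloc::System;
///
/// use tracking_allocator::Allocator;
///
/// // Route every heap allocation in the program through the tracking allocator,
/// // which wraps the system allocator here.
/// #[global_allocator]
/// static GLOBAL: Allocator<System> = Allocator::system();
/// ```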
pub struct Allocator<A> {
    inner: A,
}

impl<A> Allocator<A> {
    /// Creates a new `Allocator` that wraps another allocator.
    #[must_use]
    pub const fn from_allocator(allocator: A) -> Self {
        Self { inner: allocator }
    }
}

impl Allocator<System> {
    /// Creates a new `Allocator` that wraps the system allocator.
    #[must_use]
    pub const fn system() -> Allocator<System> {
        Self::from_allocator(System)
    }
}

impl<A: GlobalAlloc> Allocator<A> {
    unsafe fn get_wrapped_allocation(
        &self,
        object_layout: Layout,
    ) -> (*mut usize, *mut u8, Layout) {
        // Allocate our wrapped layout and make sure the allocation succeeded.
        let (actual_layout, offset_to_object) = get_wrapped_layout(object_layout);
        let actual_ptr = self.inner.alloc(actual_layout);
        if actual_ptr.is_null() {
            handle_alloc_error(actual_layout);
        }

        // Zero out the group ID field to make sure it's in the `None` state.
        //
        // SAFETY: We know that `actual_ptr` is at least aligned enough for casting it to `*mut usize` as the layout for
        // the allocation backing this pointer ensures the first field in the layout is `usize`.
        #[allow(clippy::cast_ptr_alignment)]
        let group_id_ptr = actual_ptr.cast::<usize>();
        group_id_ptr.write(0);

        // SAFETY: If the allocation succeeded and `actual_ptr` is valid, then it must be valid to advance by
        // `offset_to_object` as it would land within the allocation.
        let object_ptr = actual_ptr.wrapping_add(offset_to_object);

        (group_id_ptr, object_ptr, actual_layout)
    }
}

impl Default for Allocator<System> {
    fn default() -> Self {
        Self::from_allocator(System)
    }
}

unsafe impl<A: GlobalAlloc> GlobalAlloc for Allocator<A> {
    #[track_caller]
    unsafe fn alloc(&self, object_layout: Layout) -> *mut u8 {
        let (group_id_ptr, object_ptr, wrapped_layout) = self.get_wrapped_allocation(object_layout);
        let object_addr = object_ptr as usize;
        let object_size = object_layout.size();
        let wrapped_size = wrapped_layout.size();

        if let Some(tracker) = get_global_tracker() {
            try_with_suspended_allocation_group(
                #[inline(always)]
                |group_id| {
                    // We only set the group ID in the wrapper header when we're actually tracking the
                    // allocation: when the pointer comes back to us during deallocation, a zero header
                    // lets us skip any further checks entirely.
                    //
                    // If the allocation was never tracked, tracking its deallocation would only produce
                    // incorrect numbers, even under a rule like "always attribute allocations to the
                    // root allocation group by default".
                    group_id_ptr.write(group_id.as_usize().get());
                    tracker.allocated(object_addr, object_size, wrapped_size, group_id);
                },
            );
        }

        object_ptr
    }

    #[track_caller]
    unsafe fn dealloc(&self, object_ptr: *mut u8, object_layout: Layout) {
        // Regenerate the wrapped layout so we know where we have to look, as the pointer we originally handed
        // out relates to the requested layout, not the wrapped layout that was actually allocated.
        let (wrapped_layout, offset_to_object) = get_wrapped_layout(object_layout);

        // SAFETY: We only ever return pointers to the actual requested object layout, not our wrapped layout. Since
        // global allocators cannot be changed at runtime, we know that if we're here, then the given pointer, and the
        // allocation it refers to, was allocated by us. Thus, since we wrap _all_ allocations, we know that this object
        // pointer can be safely subtracted by `offset_to_object` to get back to the group ID field in our wrapper.
        let actual_ptr = object_ptr.wrapping_sub(offset_to_object);

        // SAFETY: We know that `actual_ptr` is at least aligned enough for casting it to `*mut usize` as the layout for
        // the allocation backing this pointer ensures the first field in the layout is `usize`.
        #[allow(clippy::cast_ptr_alignment)]
        let raw_group_id = actual_ptr.cast::<usize>().read();

        // Deallocate before tracking, just to make sure we're reclaiming memory as soon as possible.
        self.inner.dealloc(actual_ptr, wrapped_layout);

        let object_addr = object_ptr as usize;
        let object_size = object_layout.size();
        let wrapped_size = wrapped_layout.size();

        if let Some(tracker) = get_global_tracker() {
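            // A zero group ID means this allocation was never tracked: we zeroed the header at
            // allocation time and only overwrote it when a tracker and group were present, so
            // `from_raw` returns `None` here and we skip the deallocation callback entirely.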
            if let Some(source_group_id) = AllocationGroupId::from_raw(raw_group_id) {
                try_with_suspended_allocation_group(
                    #[inline(always)]
                    |current_group_id| {
                        tracker.deallocated(
                            object_addr,
                            object_size,
                            wrapped_size,
                            source_group_id,
                            current_group_id,
                        );
                    },
                );
            }
        }
    }
}

fn get_wrapped_layout(object_layout: Layout) -> (Layout, usize) {
    static HEADER_LAYOUT: Layout = Layout::new::<usize>();

    // We generate a new allocation layout that gives us a location to store the active allocation group ID ahead
    // of the requested allocation, which lets us always attempt to retrieve it on the deallocation path. We'll
    // always set this to zero, and conditionally update it to the actual allocation group ID if tracking is enabled.
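    //
    // As a schematic illustration (not to scale; `Layout::extend` computes the real offset,
    // which is the header size plus any padding the object's alignment requires):
    //
    //   actual_ptr                object_ptr = actual_ptr + offset_to_object
    //   |                         |
    //   v                         v
    //   +------------------+-----+------------------------+-------------+
    //   | group ID (usize) | pad | object (object_layout) | end padding |
    //   +------------------+-----+------------------------+-------------+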
    let (actual_layout, offset_to_object) = HEADER_LAYOUT
        .extend(object_layout)
        .expect("wrapping requested layout resulted in overflow");
    let actual_layout = actual_layout.pad_to_align();

    (actual_layout, offset_to_object)
}
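
// A small illustrative sanity check of the layout arithmetic above. The exact sizes are
// platform-dependent, so the assertions are written in terms of `usize` rather than
// hard-coded byte counts.
#[cfg(test)]
mod tests {
    use std::alloc::Layout;
    use std::mem::{align_of, size_of};

    use super::get_wrapped_layout;

    #[test]
    fn wrapped_layout_places_object_after_header() {
        // For a one-byte object, the object lands immediately after the `usize`
        // header, and the overall layout is padded back out to `usize` alignment.
        let (wrapped, offset_to_object) = get_wrapped_layout(Layout::new::<u8>());
        assert_eq!(offset_to_object, size_of::<usize>());
        assert_eq!(wrapped.align(), align_of::<usize>());
        assert_eq!(wrapped.size(), 2 * size_of::<usize>());
    }
}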