tracking_allocator/token.rs
use std::{
    cell::RefCell,
    num::NonZeroUsize,
    sync::atomic::{AtomicUsize, Ordering},
};

use crate::{stack::GroupStack, util::PhantomNotSend};

thread_local! {
    /// The stack of active allocation groups for the current thread.
    ///
    /// Any allocations which occur on this thread will be associated with whichever group is at
    /// the top of the stack at the time of the allocation.
    pub(crate) static LOCAL_ALLOCATION_GROUP_STACK: RefCell<GroupStack> =
        RefCell::new(GroupStack::new());
}

fn push_group_to_stack(group: AllocationGroupId) {
    LOCAL_ALLOCATION_GROUP_STACK.with(|stack| stack.borrow_mut().push(group));
}

fn pop_group_from_stack() -> AllocationGroupId {
    LOCAL_ALLOCATION_GROUP_STACK.with(|stack| stack.borrow_mut().pop())
}

/// The identifier that uniquely identifies an allocation group.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct AllocationGroupId(NonZeroUsize);

impl AllocationGroupId {
    /// Attempts to create an `AllocationGroupId` from a raw `usize`.
    ///
    /// If the raw value is zero, `None` is returned.
    pub(crate) fn from_raw(id: usize) -> Option<Self> {
        NonZeroUsize::new(id).map(Self)
    }
}

impl AllocationGroupId {
    /// The group ID used for allocations which are not made within a registered allocation group.
    pub const ROOT: Self = Self(unsafe { NonZeroUsize::new_unchecked(1) });

    /// Gets the integer representation of this group ID.
    #[must_use]
    pub const fn as_usize(&self) -> NonZeroUsize {
        self.0
    }

    fn register() -> Option<AllocationGroupId> {
        // Group IDs are handed out from a single monotonically increasing counter, starting just
        // above `ROOT`.
        static GROUP_ID: AtomicUsize = AtomicUsize::new(AllocationGroupId::ROOT.0.get() + 1);
        static HIGHEST_GROUP_ID: AtomicUsize =
            AtomicUsize::new(AllocationGroupId::ROOT.0.get() + 1);

        let group_id = GROUP_ID.fetch_add(1, Ordering::Relaxed);
        let highest_group_id = HIGHEST_GROUP_ID.fetch_max(group_id, Ordering::AcqRel);

        if group_id >= highest_group_id {
            // The counter is still increasing, so this ID has not been handed out before.
            let group_id = NonZeroUsize::new(group_id).expect("bug: GROUP_ID overflowed");
            Some(AllocationGroupId(group_id))
        } else {
            // The counter wrapped around: the identifier space is exhausted.
            None
        }
    }
}

/// A token that allows controlling when an allocation group is active or inactive.
///
/// Allocation groups represent the core mechanism for categorizing allocation activity, where the group must be active
/// for (de)allocation events to be attributed to it. Practically speaking, allocation groups are simply an internal
/// identifier that is used to identify the "owner" of an allocation.
///
/// ## Usage
///
/// In order for an allocation group to be attached to an allocation, it must be "entered." [`AllocationGroupToken`]
/// functions similarly to something like a mutex, where "entering" the token consumes the token and provides a guard:
/// [`AllocationGuard`]. This guard is tied to the allocation group being active: if the guard is dropped, or if it is
/// exited manually, the allocation group is no longer active.
///
/// [`AllocationGuard`] also tracks if another allocation group was active prior to entering, and ensures it is set back
/// as the active allocation group when the guard is dropped. This allows allocation groups to be nested within each
/// other.
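///
/// A minimal usage sketch (illustrative only; it assumes the crate's tracking allocator and a
/// tracker have been installed elsewhere):
///
/// ```no_run
/// use tracking_allocator::AllocationGroupToken;
///
/// // Register a new allocation group and activate it for a scope.
/// let mut token = AllocationGroupToken::register().expect("failed to register allocation group");
/// let guard = token.enter();
///
/// // Allocations made here are attributed to this group...
/// let scratch = vec![0u8; 64];
/// drop(scratch);
///
/// // ...until the guard is exited (or simply dropped).
/// guard.exit();
/// ```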
pub struct AllocationGroupToken(AllocationGroupId);

impl AllocationGroupToken {
    /// Registers an allocation group token.
    ///
    /// Allocation groups use an internal identifier that is incremented atomically and monotonically when registration
    /// occurs. This identifier thus has a limit based on the pointer size of the architecture: on 32-bit systems,
    /// roughly 2^32 allocation groups can be registered before the identifier space is exhausted, while on 64-bit
    /// systems the limit is roughly 2^64.
    ///
    /// If the number of registered allocation groups exceeds the limit, `None` is returned. This is a permanent state
    /// until the application exits. Otherwise, `Some` is returned.
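    ///
    /// A minimal sketch of handling registration (illustrative only):
    ///
    /// ```no_run
    /// use tracking_allocator::AllocationGroupToken;
    ///
    /// match AllocationGroupToken::register() {
    ///     Some(mut token) => {
    ///         // Enter the group when tracked work should begin.
    ///         let _guard = token.enter();
    ///     }
    ///     None => {
    ///         // The identifier space is exhausted; allocations fall back to the root group.
    ///     }
    /// }
    /// ```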
    pub fn register() -> Option<AllocationGroupToken> {
        AllocationGroupId::register().map(AllocationGroupToken)
    }

    /// Gets the ID associated with this allocation group.
    #[must_use]
    pub fn id(&self) -> AllocationGroupId {
        self.0.clone()
    }

    #[cfg(feature = "tracing-compat")]
    pub(crate) fn into_unsafe(self) -> UnsafeAllocationGroupToken {
        UnsafeAllocationGroupToken::new(self.0)
    }

    /// Enters the allocation group, marking it as the active allocation group on this thread.
    ///
    /// If another allocation group is currently active, it is replaced, and restored either when this allocation guard
    /// is dropped, or when [`AllocationGuard::exit`] is called.
    pub fn enter(&mut self) -> AllocationGuard<'_> {
        AllocationGuard::enter(self)
    }
}

#[cfg(feature = "tracing-compat")]
#[cfg_attr(docsrs, doc(cfg(feature = "tracing-compat")))]
impl AllocationGroupToken {
    /// Attaches this allocation group to a tracing [`Span`][tracing::Span].
    ///
    /// When the span is entered or exited, the allocation group will also transition from inactive to active, and vice
    /// versa. In effect, all allocations that occur while the span is entered will be associated with the allocation
    /// group.
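    ///
    /// A hedged sketch of the intended flow (marked `ignore`; it assumes the crate's tracing layer
    /// has been installed in the subscriber so that span enter/exit is propagated):
    ///
    /// ```ignore
    /// let span = tracing::info_span!("tracked_work");
    /// token.attach_to_span(&span);
    ///
    /// // While the span is entered, allocations are attributed to the attached group.
    /// let _entered = span.enter();
    /// let buffer = Vec::<u8>::with_capacity(1024);
    /// drop(buffer);
    /// ```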
    pub fn attach_to_span(self, span: &tracing::Span) {
        use crate::tracing::WithAllocationGroup;

        let mut unsafe_token = Some(self.into_unsafe());

        tracing::dispatcher::get_default(move |dispatch| {
            if let Some(id) = span.id() {
                if let Some(ctx) = dispatch.downcast_ref::<WithAllocationGroup>() {
                    let unsafe_token = unsafe_token.take().expect("token already consumed");
                    ctx.with_allocation_group(dispatch, &id, unsafe_token);
                }
            }
        });
    }
}

/// Guard that updates the current thread to track allocations for the associated allocation group.
///
/// ## Drop behavior
///
/// This guard has a [`Drop`] implementation that resets the active allocation group back to the last previously active
/// allocation group. Calling [`exit`][exit] is generally preferred for being explicit about when the allocation group
/// begins and ends, though.
///
/// ## Moving across threads
///
/// [`AllocationGuard`] is specifically marked as `!Send` as the active allocation group is tracked at a per-thread
/// level. If you acquire an `AllocationGuard` and need to resume computation on another thread, such as across an
/// await point or when simply sending objects to another thread, you must first [`exit`][exit] the guard and move the
/// resulting [`AllocationGroupToken`]. Once on the new thread, you can then reacquire the guard.
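///
/// A hedged sketch of that hand-off (illustrative only):
///
/// ```no_run
/// use tracking_allocator::AllocationGroupToken;
///
/// let mut token = AllocationGroupToken::register().expect("failed to register allocation group");
/// let guard = token.enter();
/// // ... tracked work on this thread ...
/// guard.exit();
///
/// // With the guard exited, the token can be moved to another thread and re-entered there.
/// let handle = std::thread::spawn(move || {
///     let mut token = token;
///     let _guard = token.enter();
///     // ... tracked work resumes on the new thread ...
/// });
/// handle.join().unwrap();
/// ```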
///
/// [exit]: AllocationGuard::exit
pub struct AllocationGuard<'token> {
    token: &'token mut AllocationGroupToken,

    /// ```compile_fail
    /// use tracking_allocator::AllocationGuard;
    /// trait AssertSend: Send {}
    ///
    /// impl AssertSend for AllocationGuard<'_> {}
    /// ```
    _ns: PhantomNotSend,
}

impl<'token> AllocationGuard<'token> {
    pub(crate) fn enter(token: &'token mut AllocationGroupToken) -> Self {
        // Push this group onto the stack.
        push_group_to_stack(token.id());

        Self {
            token,
            _ns: PhantomNotSend::default(),
        }
    }

    fn exit_inner(&mut self) {
        // Pop this group from the stack, restoring whichever group was active before it.
        #[allow(unused_variables)]
        let current = pop_group_from_stack();
        debug_assert_eq!(
            current,
            self.token.id(),
            "popped group from stack but got unexpected group"
        );
    }

    /// Exits the allocation group, restoring the previously active allocation group on this thread.
    pub fn exit(mut self) {
        self.exit_inner();

        // Skip the `Drop` impl so the group is not popped from the stack a second time.
        std::mem::forget(self);
    }
}

impl<'token> Drop for AllocationGuard<'token> {
    fn drop(&mut self) {
        self.exit_inner();
    }
}

/// Unmanaged allocation group token used specifically with `tracing`.
///
/// ## Safety
///
/// While users would normally work directly with [`AllocationGroupToken`] and [`AllocationGuard`], we cannot store
/// [`AllocationGuard`] in span data as it is `!Send`, and tracing spans can be sent across threads.
///
/// However, `tracing` itself employs a guard for entering spans. The guard is `!Send`, which ensures that the guard
/// cannot be sent across threads. Since the same guard is used to know when a span has been exited, `tracing` ensures
/// that between a span being entered and exited, it cannot move threads.
///
/// Thus, we build off of that invariant, and use this stripped down token to manually enter and exit the allocation
/// group in a specialized `tracing_subscriber` layer that we control.
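///
/// A hedged sketch of how such a layer is expected to drive this token (illustrative only; the
/// `span_token` lookup is a stand-in for however the layer stores the token alongside the span):
///
/// ```ignore
/// fn on_enter(&self, id: &span::Id, ctx: Context<'_, S>) {
///     if let Some(token) = span_token(ctx, id) {
///         // Mark the attached allocation group as active for this thread.
///         token.enter();
///     }
/// }
///
/// fn on_exit(&self, id: &span::Id, ctx: Context<'_, S>) {
///     if let Some(token) = span_token(ctx, id) {
///         // Restore whichever allocation group was previously active.
///         token.exit();
///     }
/// }
/// ```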
#[cfg(feature = "tracing-compat")]
pub(crate) struct UnsafeAllocationGroupToken {
    id: AllocationGroupId,
}

#[cfg(feature = "tracing-compat")]
impl UnsafeAllocationGroupToken {
    /// Creates a new `UnsafeAllocationGroupToken`.
    pub fn new(id: AllocationGroupId) -> Self {
        Self { id }
    }

    /// Enters the allocation group, marking it as the active allocation group on this thread.
    ///
    /// If another allocation group is currently active, it is replaced, and restored when [`exit`][Self::exit] is
    /// called.
    ///
    /// Functionally equivalent to [`AllocationGroupToken::enter`].
    pub fn enter(&mut self) {
        push_group_to_stack(self.id.clone());
    }

    /// Exits the allocation group, restoring the previously active allocation group on this thread.
    ///
    /// Functionally equivalent to [`AllocationGuard::exit`].
    pub fn exit(&mut self) {
        #[allow(unused_variables)]
        let current = pop_group_from_stack();
        debug_assert_eq!(
            current, self.id,
            "popped group from stack but got unexpected group"
        );
    }
}

/// Calls `f` after suspending the active allocation group, if it was not already suspended.
///
/// If the active allocation group is not currently suspended, then `f` is called, after suspending it, with the ID of
/// the suspended allocation group. If any other call to `try_with_suspended_allocation_group` happens while this
/// method call is on the stack, the `f` passed to those calls will not be invoked.
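///
/// A hedged sketch of the intended call pattern from the allocator hooks (`record_allocation` and
/// `object_size` are illustrative names, not items from this crate):
///
/// ```ignore
/// try_with_suspended_allocation_group(|group_id| {
///     // Runs only if no other tracking call is already active on this thread, so the tracker
///     // itself cannot trigger reentrant tracking.
///     record_allocation(group_id, object_size);
/// });
/// ```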
#[inline(always)]
pub(crate) fn try_with_suspended_allocation_group<F>(f: F)
where
    F: FnOnce(AllocationGroupId),
{
    let _ = LOCAL_ALLOCATION_GROUP_STACK.try_with(
        #[inline(always)]
        |stack| {
            // The crux of avoiding reentrancy is `RefCell::try_borrow_mut`, which allows callers to skip trying to run
            // `f` if they cannot mutably borrow the local allocation group stack. As `try_borrow_mut` will only let one
            // mutable borrow happen at a time, the tracker logic is never reentrant.
            if let Ok(stack) = stack.try_borrow_mut() {
                f(stack.current());
            }
        },
    );
}

/// Calls `f` after suspending the active allocation group.
///
/// In contrast to `try_with_suspended_allocation_group`, this method will always call `f` after attempting to suspend
/// the active allocation group, even if it was already suspended.
///
/// In practice, this method is primarily useful for "run this function and don't track any allocations at all" while
/// `try_with_suspended_allocation_group` is primarily useful for "run this function if nobody else is tracking
/// allocations right now".
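///
/// A hedged sketch of the intended usage (`build_report` is an illustrative name, not an item from
/// this crate):
///
/// ```ignore
/// // Run the closure with allocation tracking suppressed for its entire duration, regardless of
/// // whether tracking was already suspended on this thread.
/// let report = with_suspended_allocation_group(|| build_report());
/// ```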
#[inline(always)]
pub(crate) fn with_suspended_allocation_group<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    LOCAL_ALLOCATION_GROUP_STACK.with(
        #[inline(always)]
        |stack| {
            // The crux of avoiding reentrancy is `RefCell::try_borrow_mut`, as `try_borrow_mut` will only let one
            // mutable borrow happen at a time. As we simply want to ensure that the allocation group is suspended, we
            // don't care what the return value is: calling `try_borrow_mut` and holding on to the result until the end
            // of the scope is sufficient to either suspend the allocation group or know that it's already suspended and
            // will stay that way until we're done in this method.
            let _result = stack.try_borrow_mut();
            f()
        },
    )
}