// re_memory/accounting_allocator.rs
use std::sync::LazyLock;
4use std::sync::atomic::Ordering::Relaxed;
5use std::sync::atomic::{AtomicBool, AtomicUsize};
6
7use parking_lot::Mutex;
8
9use crate::CountAndSize;
10use crate::allocation_tracker::{AllocationTracker, CallstackStatistics, PtrHash};
11
/// Tracker for big allocations (at or above the `medium_size` threshold).
/// Stochastic rate 1, i.e. every such allocation is recorded.
static BIG_ALLOCATION_TRACKER: LazyLock<Mutex<AllocationTracker>> =
    LazyLock::new(|| Mutex::new(AllocationTracker::with_stochastic_rate(1)));
17
/// Tracker for medium-sized allocations (between `small_size` and `medium_size`).
/// Stochastic rate 64 — presumably samples roughly 1 in 64 allocations;
/// exact semantics live in `AllocationTracker::with_stochastic_rate`.
static MEDIUM_ALLOCATION_TRACKER: LazyLock<Mutex<AllocationTracker>> =
    LazyLock::new(|| Mutex::new(AllocationTracker::with_stochastic_rate(64)));
21
thread_local! {
    // Re-entrancy guard: set while the current thread is inside one of the
    // allocation trackers (or gathering stats from them). Any allocations made
    // while this is set are attributed to `GLOBAL.overhead` instead of being
    // recursively tracked — see `note_alloc`/`note_dealloc`.
    static IS_THREAD_IN_ALLOCATION_TRACKER: std::cell::Cell<bool> = const { std::cell::Cell::new(false) };
}
29
/// An allocation count plus total byte size, each updated atomically.
struct AtomicCountAndSize {
    /// Number of allocations.
    pub count: AtomicUsize,

    /// Total size of the allocations, in bytes.
    pub size: AtomicUsize,
}
39
40impl AtomicCountAndSize {
41 pub const fn zero() -> Self {
42 Self {
43 count: AtomicUsize::new(0),
44 size: AtomicUsize::new(0),
45 }
46 }
47
48 fn load(&self) -> CountAndSize {
49 CountAndSize {
50 count: self.count.load(Relaxed),
51 size: self.size.load(Relaxed),
52 }
53 }
54
55 fn add(&self, size: usize) {
57 self.count.fetch_add(1, Relaxed);
58 self.size.fetch_add(size, Relaxed);
59 }
60
61 fn sub(&self, size: usize) {
63 self.count.fetch_sub(1, Relaxed);
64 self.size.fetch_sub(size, Relaxed);
65 }
66}
67
/// All process-wide allocation bookkeeping, updated by the accounting allocator.
struct GlobalStats {
    /// Everything currently allocated (count and bytes), tracked or not.
    pub live: AtomicCountAndSize,

    /// Whether callstack tracking is enabled — see `set_tracking_callstacks`.
    pub track_callstacks: AtomicBool,

    /// Allocations strictly smaller than this are not tracked at all.
    pub small_size: AtomicUsize,

    /// Allocations in `small_size..medium_size` are tracked stochastically;
    /// allocations at or above this are fully tracked.
    pub medium_size: AtomicUsize,

    /// Allocations below `small_size` (only counted while tracking is on).
    pub untracked: AtomicCountAndSize,

    /// Medium-sized allocations, sampled stochastically.
    pub stochastically_tracked: AtomicCountAndSize,

    /// Big allocations, every one tracked.
    pub fully_tracked: AtomicCountAndSize,

    /// Allocations made by the trackers themselves (re-entrant calls).
    pub overhead: AtomicCountAndSize,
}
95
/// The process-wide allocation statistics.
/// The size thresholds here mirror the values in `TrackingOptions::default()`.
static GLOBAL: GlobalStats = GlobalStats {
    live: AtomicCountAndSize::zero(),
    track_callstacks: AtomicBool::new(false),
    small_size: AtomicUsize::new(1024),
    medium_size: AtomicUsize::new(1024 * 1024),
    untracked: AtomicCountAndSize::zero(),
    stochastically_tracked: AtomicCountAndSize::zero(),
    fully_tracked: AtomicCountAndSize::zero(),
    overhead: AtomicCountAndSize::zero(),
};
106
/// Size thresholds controlling how allocations are tracked.
#[derive(Clone, Copy, Debug)]
pub struct TrackingOptions {
    /// Allocations strictly smaller than this are not tracked.
    pub small_size: usize,

    /// Allocations in `small_size..medium_size` are tracked stochastically;
    /// allocations at or above this are fully tracked.
    pub medium_size: usize,
}
121
122impl Default for TrackingOptions {
123 fn default() -> Self {
124 Self {
125 small_size: 1024,
126 medium_size: 1024 * 1024,
127 }
128 }
129}
130
131pub fn global_allocs() -> Option<CountAndSize> {
138 let count_and_size = GLOBAL.live.load();
139 (count_and_size.count > 0).then_some(count_and_size)
140}
141
/// Is callstack tracking of allocations currently turned on?
pub fn is_tracking_callstacks() -> bool {
    GLOBAL.track_callstacks.load(Relaxed)
}
146
147pub fn set_tracking_options(options: TrackingOptions) {
149 let TrackingOptions {
150 small_size,
151 medium_size,
152 } = options;
153
154 GLOBAL.small_size.store(small_size, Relaxed);
155 GLOBAL.medium_size.store(medium_size, Relaxed);
156}
157
158pub fn set_tracking_callstacks(track: bool) {
166 let did_track = GLOBAL.track_callstacks.swap(track, Relaxed);
167 if !did_track && track {
168 re_log::info!("Turning on stochastic tracking of all allocations");
169 }
170}
171
172#[cfg(not(target_arch = "wasm32"))]
178pub fn turn_on_tracking_if_env_var(env_var: &str) {
179 if std::env::var(env_var).is_ok() {
180 set_tracking_callstacks(true);
181 re_log::info!("{env_var} found - turning on tracking of all large allocations");
182 }
183}
184
/// How many of the top callstacks to report from each tracker; also the inline
/// capacity of the `SmallVec` used to gather them without heap-allocating.
const MAX_CALLSTACKS: usize = 32;
189
/// A snapshot of the allocation-tracking statistics, returned by [`tracking_stats`].
#[derive(Debug)]
pub struct TrackingStatistics {
    /// Allocations smaller than this size were not tracked.
    pub track_size_threshold: usize,

    /// Count/size of allocations below the tracking threshold.
    pub untracked: CountAndSize,

    /// Count/size of medium allocations, sampled stochastically.
    pub stochastically_tracked: CountAndSize,

    /// Count/size of big allocations, all tracked.
    pub fully_tracked: CountAndSize,

    /// Count/size of the trackers' own (re-entrant) allocations.
    pub overhead: CountAndSize,

    /// The callstacks with the most extant allocated memory, biggest first.
    pub top_callstacks: Vec<CallstackStatistics>,
}
213
214pub fn tracking_stats() -> Option<TrackingStatistics> {
220 fn tracker_stats(
223 allocation_tracker: &AllocationTracker,
224 ) -> smallvec::SmallVec<[CallstackStatistics; MAX_CALLSTACKS]> {
225 let top_callstacks: smallvec::SmallVec<[CallstackStatistics; MAX_CALLSTACKS]> =
226 allocation_tracker
227 .top_callstacks(MAX_CALLSTACKS)
228 .into_iter()
229 .collect();
230 assert!(
231 !top_callstacks.spilled(),
232 "We shouldn't leak any allocations"
233 );
234 top_callstacks
235 }
236
237 GLOBAL.track_callstacks.load(Relaxed).then(|| {
238 IS_THREAD_IN_ALLOCATION_TRACKER.with(|is_thread_in_allocation_tracker| {
239 is_thread_in_allocation_tracker.set(true);
241 let mut top_big_callstacks = tracker_stats(&BIG_ALLOCATION_TRACKER.lock());
242 let mut top_medium_callstacks = tracker_stats(&MEDIUM_ALLOCATION_TRACKER.lock());
243 is_thread_in_allocation_tracker.set(false);
244
245 let mut top_callstacks: Vec<_> = top_big_callstacks
246 .drain(..)
247 .chain(top_medium_callstacks.drain(..))
248 .collect();
249
250 #[expect(clippy::cast_possible_wrap)]
251 top_callstacks.sort_by_key(|c| -(c.extant.size as i64));
252
253 TrackingStatistics {
254 track_size_threshold: GLOBAL.small_size.load(Relaxed),
255 untracked: GLOBAL.untracked.load(),
256 stochastically_tracked: GLOBAL.stochastically_tracked.load(),
257 fully_tracked: GLOBAL.fully_tracked.load(),
258 overhead: GLOBAL.overhead.load(),
259 top_callstacks,
260 }
261 })
262 })
263}
264
/// A global allocator wrapper that keeps track of allocation counts, sizes,
/// and (optionally) callstacks, delegating the actual work to `InnerAllocator`.
#[derive(Default)]
pub struct AccountingAllocator<InnerAllocator> {
    // The allocator that does the real (de)allocation.
    allocator: InnerAllocator,
}
283
284impl<InnerAllocator> AccountingAllocator<InnerAllocator> {
285 pub const fn new(allocator: InnerAllocator) -> Self {
286 Self { allocator }
287 }
288}
289
290#[expect(unsafe_code)]
291unsafe impl<InnerAllocator: std::alloc::GlobalAlloc> std::alloc::GlobalAlloc
294 for AccountingAllocator<InnerAllocator>
295{
296 unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
297 let ptr = unsafe { self.allocator.alloc(layout) };
300
301 note_alloc(ptr, layout.size());
302
303 ptr
304 }
305
306 unsafe fn alloc_zeroed(&self, layout: std::alloc::Layout) -> *mut u8 {
307 let ptr = unsafe { self.allocator.alloc_zeroed(layout) };
310
311 note_alloc(ptr, layout.size());
312
313 ptr
314 }
315
316 unsafe fn dealloc(&self, ptr: *mut u8, layout: std::alloc::Layout) {
317 unsafe { self.allocator.dealloc(ptr, layout) };
320
321 note_dealloc(ptr, layout.size());
322 }
323
324 unsafe fn realloc(
325 &self,
326 old_ptr: *mut u8,
327 layout: std::alloc::Layout,
328 new_size: usize,
329 ) -> *mut u8 {
330 note_dealloc(old_ptr, layout.size());
331
332 let new_ptr = unsafe { self.allocator.realloc(old_ptr, layout, new_size) };
335
336 note_alloc(new_ptr, new_size);
337
338 new_ptr
339 }
340}
341
/// Record an allocation of `size` bytes at `ptr` in the global statistics
/// and, when enabled, in the appropriate callstack tracker.
#[inline]
fn note_alloc(ptr: *mut u8, size: usize) {
    // Always counted, even when callstack tracking is off.
    GLOBAL.live.add(size);

    if GLOBAL.track_callstacks.load(Relaxed) {
        if size < GLOBAL.small_size.load(Relaxed) {
            // Too small to be worth a callstack — just count it.
            GLOBAL.untracked.add(size);
        } else {
            IS_THREAD_IN_ALLOCATION_TRACKER.with(|is_thread_in_allocation_tracker| {
                if !is_thread_in_allocation_tracker.get() {
                    // Set the re-entrancy guard: the trackers below may themselves
                    // allocate, which would re-enter this function on this thread.
                    is_thread_in_allocation_tracker.set(true);

                    let ptr_hash = PtrHash::new(ptr);
                    if size < GLOBAL.medium_size.load(Relaxed) {
                        GLOBAL.stochastically_tracked.add(size);
                        MEDIUM_ALLOCATION_TRACKER.lock().on_alloc(ptr_hash, size);
                    } else {
                        GLOBAL.fully_tracked.add(size);
                        BIG_ALLOCATION_TRACKER.lock().on_alloc(ptr_hash, size);
                    }

                    is_thread_in_allocation_tracker.set(false);
                } else {
                    // Re-entrant call: this allocation was made by a tracker itself.
                    GLOBAL.overhead.add(size);
                }
            });
        }
    }
}
376
377#[inline]
378fn note_dealloc(ptr: *mut u8, size: usize) {
379 GLOBAL.live.sub(size);
380
381 if GLOBAL.track_callstacks.load(Relaxed) {
382 if size < GLOBAL.small_size.load(Relaxed) {
383 GLOBAL.untracked.sub(size);
385 } else {
386 IS_THREAD_IN_ALLOCATION_TRACKER.with(|is_thread_in_allocation_tracker| {
389 if !is_thread_in_allocation_tracker.get() {
390 is_thread_in_allocation_tracker.set(true);
391
392 let ptr_hash = PtrHash::new(ptr);
393 if size < GLOBAL.medium_size.load(Relaxed) {
394 GLOBAL.stochastically_tracked.sub(size);
395 MEDIUM_ALLOCATION_TRACKER.lock().on_dealloc(ptr_hash, size);
396 } else {
397 GLOBAL.fully_tracked.sub(size);
398 BIG_ALLOCATION_TRACKER.lock().on_dealloc(ptr_hash, size);
399 }
400
401 is_thread_in_allocation_tracker.set(false);
402 } else {
403 GLOBAL.overhead.sub(size);
405 }
406 });
407 }
408 }
409}