// re_memory/accounting_allocator.rs
//
// A `GlobalAlloc` wrapper that counts all live allocations and, when enabled,
// tracks the callstacks of medium and big allocations.

use std::sync::{
    LazyLock,
    atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed},
};

use parking_lot::Mutex;

use crate::{
    CountAndSize,
    allocation_tracker::{AllocationTracker, CallstackStatistics, PtrHash},
};

/// Tracks the callstacks of all big allocations (those at or above the medium-size threshold).
static BIG_ALLOCATION_TRACKER: LazyLock<Mutex<AllocationTracker>> =
    LazyLock::new(|| Mutex::new(AllocationTracker::with_stochastic_rate(1)));

/// Stochastically samples the callstacks of medium-sized allocations (stochastic rate of 64).
static MEDIUM_ALLOCATION_TRACKER: LazyLock<Mutex<AllocationTracker>> =
    LazyLock::new(|| Mutex::new(AllocationTracker::with_stochastic_rate(64)));

thread_local! {
    /// Re-entrancy guard: set while the current thread is inside an allocation tracker,
    /// so that the tracker's own allocations are counted as overhead instead of being tracked.
    static IS_THREAD_IN_ALLOCATION_TRACKER: std::cell::Cell<bool> = const { std::cell::Cell::new(false) };
}

/// A count and size (in bytes) of allocations, updatable atomically.
struct AtomicCountAndSize {
    /// Number of allocations.
    pub count: AtomicUsize,

    /// Number of bytes.
    pub size: AtomicUsize,
}

impl AtomicCountAndSize {
    pub const fn zero() -> Self {
        Self {
            count: AtomicUsize::new(0),
            size: AtomicUsize::new(0),
        }
    }

    fn load(&self) -> CountAndSize {
        CountAndSize {
            count: self.count.load(Relaxed),
            size: self.size.load(Relaxed),
        }
    }

    /// Add an allocation of the given size.
    fn add(&self, size: usize) {
        self.count.fetch_add(1, Relaxed);
        self.size.fetch_add(size, Relaxed);
    }

    /// Remove an allocation of the given size.
    fn sub(&self, size: usize) {
        self.count.fetch_sub(1, Relaxed);
        self.size.fetch_sub(size, Relaxed);
    }
}

struct GlobalStats {
    /// All extant allocations, regardless of size.
    pub live: AtomicCountAndSize,

    /// Are we doing (comparatively expensive) callstack tracking?
    pub track_callstacks: AtomicBool,

    /// Allocations smaller than this are left untracked.
    pub small_size: AtomicUsize,

    /// Allocations at least `small_size` but smaller than this are tracked stochastically;
    /// anything bigger is fully tracked.
    pub medium_size: AtomicUsize,

    /// Allocations too small to be tracked.
    pub untracked: AtomicCountAndSize,

    /// Medium-sized allocations, tracked by stochastic sampling.
    pub stochastically_tracked: AtomicCountAndSize,

    /// Big allocations, all of which are tracked.
    pub fully_tracked: AtomicCountAndSize,

    /// Allocations made by the allocation trackers themselves.
    pub overhead: AtomicCountAndSize,
}

static GLOBAL: GlobalStats = GlobalStats {
    live: AtomicCountAndSize::zero(),
    track_callstacks: AtomicBool::new(false),
    small_size: AtomicUsize::new(1024),
    medium_size: AtomicUsize::new(1024 * 1024),
    untracked: AtomicCountAndSize::zero(),
    stochastically_tracked: AtomicCountAndSize::zero(),
    fully_tracked: AtomicCountAndSize::zero(),
    overhead: AtomicCountAndSize::zero(),
};

/// Thresholds controlling which allocations get their callstacks tracked.
#[derive(Clone, Copy, Debug)]
pub struct TrackingOptions {
    /// Allocations smaller than this are left untracked.
    pub small_size: usize,

    /// Allocations at least `small_size` but smaller than this are tracked stochastically;
    /// anything bigger is fully tracked.
    pub medium_size: usize,
}

impl Default for TrackingOptions {
    fn default() -> Self {
        Self {
            small_size: 1024,
            medium_size: 1024 * 1024,
        }
    }
}

/// Total number of live allocations and their total size in bytes,
/// or `None` if nothing has been counted (e.g. the accounting allocator is not in use).
pub fn global_allocs() -> Option<CountAndSize> {
    let count_and_size = GLOBAL.live.load();
    (count_and_size.count > 0).then_some(count_and_size)
}

/// Are we currently tracking the callstacks of allocations?
pub fn is_tracking_callstacks() -> bool {
    GLOBAL.track_callstacks.load(Relaxed)
}

/// Configure the size thresholds used when callstack tracking is enabled.
pub fn set_tracking_options(options: TrackingOptions) {
    let TrackingOptions {
        small_size,
        medium_size,
    } = options;

    GLOBAL.small_size.store(small_size, Relaxed);
    GLOBAL.medium_size.store(medium_size, Relaxed);
}

/// Turn callstack tracking of allocations on or off.
pub fn set_tracking_callstacks(track: bool) {
    GLOBAL.track_callstacks.store(track, Relaxed);
    if track {
        re_log::info!("Turning on stochastic tracking of all allocations");
    }
}

/// Turn on callstack tracking of allocations if the given environment variable is set.
#[cfg(not(target_arch = "wasm32"))]
pub fn turn_on_tracking_if_env_var(env_var: &str) {
    if std::env::var(env_var).is_ok() {
        set_tracking_callstacks(true);
        re_log::info!("{env_var} found - turning on tracking of all large allocations");
    }
}
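
// Usage sketch (illustrative only, not part of the original module): a binary would
// typically enable tracking early in `main`, either unconditionally or gated on an
// environment variable. The variable name below is a made-up example, and the module
// path assumes this file lives at `re_memory::accounting_allocator`:
//
//     re_memory::accounting_allocator::set_tracking_callstacks(true);
//     // …or only when e.g. `TRACK_ALLOCATIONS` is set in the environment:
//     re_memory::accounting_allocator::turn_on_tracking_if_env_var("TRACK_ALLOCATIONS");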

/// The maximum number of callstacks to gather from each allocation tracker.
const MAX_CALLSTACKS: usize = 128;

/// A snapshot of the allocation-tracking statistics.
#[derive(Debug)]
pub struct TrackingStatistics {
    /// Allocations smaller than this many bytes are not tracked.
    pub track_size_threshold: usize,

    /// Allocations smaller than [`Self::track_size_threshold`].
    pub untracked: CountAndSize,

    /// Medium-sized allocations, tracked by stochastic sampling.
    pub stochastically_tracked: CountAndSize,

    /// Big allocations, all of which are tracked.
    pub fully_tracked: CountAndSize,

    /// Allocations made by the allocation trackers themselves.
    pub overhead: CountAndSize,

    /// The callstacks responsible for the most allocated bytes, biggest first.
    pub top_callstacks: Vec<CallstackStatistics>,
}

/// Gather statistics from the live allocation tracking, if enabled.
///
/// Returns `None` unless callstack tracking has been turned on
/// with [`set_tracking_callstacks`] or [`turn_on_tracking_if_env_var`].
pub fn tracking_stats() -> Option<TrackingStatistics> {
    fn tracker_stats(
        allocation_tracker: &AllocationTracker,
    ) -> smallvec::SmallVec<[CallstackStatistics; MAX_CALLSTACKS]> {
        let top_callstacks: smallvec::SmallVec<[CallstackStatistics; MAX_CALLSTACKS]> =
            allocation_tracker
                .top_callstacks(MAX_CALLSTACKS)
                .into_iter()
                .collect();
        assert!(
            !top_callstacks.spilled(),
            "We shouldn't leak any allocations"
        );
        top_callstacks
    }

    GLOBAL.track_callstacks.load(Relaxed).then(|| {
        IS_THREAD_IN_ALLOCATION_TRACKER.with(|is_thread_in_allocation_tracker| {
            // Flag this thread so that allocations made while gathering the stats
            // are counted as overhead rather than being tracked recursively.
            is_thread_in_allocation_tracker.set(true);
            let mut top_big_callstacks = tracker_stats(&BIG_ALLOCATION_TRACKER.lock());
            let mut top_medium_callstacks = tracker_stats(&MEDIUM_ALLOCATION_TRACKER.lock());
            is_thread_in_allocation_tracker.set(false);

            let mut top_callstacks: Vec<_> = top_big_callstacks
                .drain(..)
                .chain(top_medium_callstacks.drain(..))
                .collect();

            // Biggest first:
            #[expect(clippy::cast_possible_wrap)]
            top_callstacks.sort_by_key(|c| -(c.extant.size as i64));

            TrackingStatistics {
                track_size_threshold: GLOBAL.small_size.load(Relaxed),
                untracked: GLOBAL.untracked.load(),
                stochastically_tracked: GLOBAL.stochastically_tracked.load(),
                fully_tracked: GLOBAL.fully_tracked.load(),
                overhead: GLOBAL.overhead.load(),
                top_callstacks,
            }
        })
    })
}
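
// Usage sketch (illustrative only, not part of the original module): once tracking is
// enabled and the accounting allocator is installed as the global allocator, a minimal
// report of the largest offenders could look like this. Only `extant.size` is used here,
// since that is the field this module itself relies on when sorting:
//
//     if let Some(stats) = tracking_stats() {
//         for callstack in stats.top_callstacks.iter().take(10) {
//             println!("{} bytes still allocated", callstack.extant.size);
//         }
//     }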

/// A global allocator wrapper that keeps track of allocations.
///
/// It forwards every call to the wrapped `InnerAllocator` and records the
/// allocation in the global statistics (and, optionally, the callstack trackers).
#[derive(Default)]
pub struct AccountingAllocator<InnerAllocator> {
    allocator: InnerAllocator,
}

impl<InnerAllocator> AccountingAllocator<InnerAllocator> {
    pub const fn new(allocator: InnerAllocator) -> Self {
        Self { allocator }
    }
}
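
// Installation sketch (illustrative only, not part of the original module): the wrapper
// only sees allocations once it is registered as the global allocator, which a binary
// would normally do at its crate root. The path below assumes this file lives at
// `re_memory::accounting_allocator`; adjust it (or use a re-export) as appropriate:
//
//     #[global_allocator]
//     static GLOBAL_ALLOCATOR: re_memory::accounting_allocator::AccountingAllocator<std::alloc::System> =
//         re_memory::accounting_allocator::AccountingAllocator::new(std::alloc::System);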

#[expect(unsafe_code)]
unsafe impl<InnerAllocator: std::alloc::GlobalAlloc> std::alloc::GlobalAlloc
    for AccountingAllocator<InnerAllocator>
{
    unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
        // SAFETY: just deferring to the inner allocator.
        let ptr = unsafe { self.allocator.alloc(layout) };

        note_alloc(ptr, layout.size());

        ptr
    }

    unsafe fn alloc_zeroed(&self, layout: std::alloc::Layout) -> *mut u8 {
        // SAFETY: just deferring to the inner allocator.
        let ptr = unsafe { self.allocator.alloc_zeroed(layout) };

        note_alloc(ptr, layout.size());

        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: std::alloc::Layout) {
        // SAFETY: just deferring to the inner allocator.
        unsafe { self.allocator.dealloc(ptr, layout) };

        note_dealloc(ptr, layout.size());
    }

    unsafe fn realloc(
        &self,
        old_ptr: *mut u8,
        layout: std::alloc::Layout,
        new_size: usize,
    ) -> *mut u8 {
        note_dealloc(old_ptr, layout.size());

        // SAFETY: just deferring to the inner allocator.
        let new_ptr = unsafe { self.allocator.realloc(old_ptr, layout, new_size) };

        note_alloc(new_ptr, new_size);

        new_ptr
    }
}

#[inline]
fn note_alloc(ptr: *mut u8, size: usize) {
    GLOBAL.live.add(size);

    if GLOBAL.track_callstacks.load(Relaxed) {
        if size < GLOBAL.small_size.load(Relaxed) {
            // Too small to be worth tracking a callstack for.
            GLOBAL.untracked.add(size);
        } else {
            // Big enough to track - but don't track allocations made by the trackers themselves.
            IS_THREAD_IN_ALLOCATION_TRACKER.with(|is_thread_in_allocation_tracker| {
                if !is_thread_in_allocation_tracker.get() {
                    is_thread_in_allocation_tracker.set(true);

                    let ptr_hash = PtrHash::new(ptr);
                    if size < GLOBAL.medium_size.load(Relaxed) {
                        GLOBAL.stochastically_tracked.add(size);
                        MEDIUM_ALLOCATION_TRACKER.lock().on_alloc(ptr_hash, size);
                    } else {
                        GLOBAL.fully_tracked.add(size);
                        BIG_ALLOCATION_TRACKER.lock().on_alloc(ptr_hash, size);
                    }

                    is_thread_in_allocation_tracker.set(false);
                } else {
                    // An allocation made by one of the allocation trackers.
                    GLOBAL.overhead.add(size);
                }
            });
        }
    }
}

#[inline]
fn note_dealloc(ptr: *mut u8, size: usize) {
    GLOBAL.live.sub(size);

    if GLOBAL.track_callstacks.load(Relaxed) {
        if size < GLOBAL.small_size.load(Relaxed) {
            // Too small to have been tracked.
            GLOBAL.untracked.sub(size);
        } else {
            // Don't recurse into the trackers for their own deallocations.
            IS_THREAD_IN_ALLOCATION_TRACKER.with(|is_thread_in_allocation_tracker| {
                if !is_thread_in_allocation_tracker.get() {
                    is_thread_in_allocation_tracker.set(true);

                    let ptr_hash = PtrHash::new(ptr);
                    if size < GLOBAL.medium_size.load(Relaxed) {
                        GLOBAL.stochastically_tracked.sub(size);
                        MEDIUM_ALLOCATION_TRACKER.lock().on_dealloc(ptr_hash, size);
                    } else {
                        GLOBAL.fully_tracked.sub(size);
                        BIG_ALLOCATION_TRACKER.lock().on_dealloc(ptr_hash, size);
                    }

                    is_thread_in_allocation_tracker.set(false);
                } else {
                    // A deallocation made by one of the allocation trackers.
                    GLOBAL.overhead.sub(size);
                }
            });
        }
    }
}