alloc_tracker/thread_span.rs
//! Thread-local allocation tracking span.

use std::marker::PhantomData;
use std::sync::{Arc, Mutex};

use crate::allocator::get_or_init_thread_counters;
use crate::{ERR_POISONED_LOCK, Operation, OperationMetrics};

/// A span that tracks the allocations made between its creation and drop.
///
/// Only allocations made by the current thread are tracked.
///
/// # Examples
///
/// ```
/// use alloc_tracker::{Allocator, Operation, Session};
///
/// #[global_allocator]
/// static ALLOCATOR: Allocator<std::alloc::System> = Allocator::system();
///
/// let session = Session::new();
/// let mean_calc = session.operation("test");
/// {
///     let _span = mean_calc.measure_thread();
///     // Perform some operation that allocates memory
///     let _data = String::from("Hello, world!");
/// } // Thread allocation is automatically tracked and recorded here
/// ```
#[derive(Debug)]
#[must_use = "Measurements are taken between creation and drop"]
pub struct ThreadSpan {
    metrics: Arc<Mutex<OperationMetrics>>,
    start_bytes: u64,
    start_count: u64,
    iterations: u64,

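    // The raw-pointer PhantomData makes this type neither Send nor Sync, so the
    // span cannot leave the thread whose allocation counters it snapshots.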
    _single_threaded: PhantomData<*const ()>,
}

impl ThreadSpan {
    pub(crate) fn new(operation: &Operation, iterations: u64) -> Self {
        assert!(iterations != 0);

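        // Snapshot the thread-local counters now; `Drop` computes the deltas from these.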
        let counters = get_or_init_thread_counters();
        let start_bytes = counters.bytes();
        let start_count = counters.count();

        Self {
            metrics: operation.metrics(),
            start_bytes,
            start_count,
            iterations,
            _single_threaded: PhantomData,
        }
    }

    /// Sets the number of iterations for this span.
    ///
    /// This allows you to specify how many iterations this span represents,
    /// which is used to calculate the mean allocation per iteration when the span is dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use alloc_tracker::{Allocator, Session};
    ///
    /// #[global_allocator]
    /// static ALLOCATOR: Allocator<std::alloc::System> = Allocator::system();
    ///
    /// let session = Session::new();
    /// let operation = session.operation("batch_work");
    /// {
    ///     let _span = operation.measure_thread().iterations(1000);
    ///     for _ in 0..1000 {
    ///         // Perform the same operation 1000 times
    ///         let _data = vec![42];
    ///     }
    /// } // Total allocation is measured once and divided by 1000
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `iterations` is zero.
    pub fn iterations(mut self, iterations: u64) -> Self {
        assert!(iterations != 0, "Iterations cannot be zero");
        self.iterations = iterations;
        self
    }

    /// Calculates the allocation deltas since this span was created.
    #[must_use]
    #[cfg_attr(test, mutants::skip)] // Mutating the `> 1` guard (e.g. to `!= 1`) does not change behavior because dividing by 1 is a no-op, so surviving mutants here are intentional.
    fn to_deltas(&self) -> (u64, u64) {
        let counters = get_or_init_thread_counters();
        let current_bytes = counters.bytes();
        let current_count = counters.count();

        let total_bytes_delta = current_bytes
            .checked_sub(self.start_bytes)
            .expect("thread bytes allocated could not possibly decrease");

        let total_count_delta = current_count
            .checked_sub(self.start_count)
            .expect("thread allocations count could not possibly decrease");

        if self.iterations > 1 {
            // Divide total allocation by iterations to get per-iteration allocation
            let bytes_delta = total_bytes_delta
                .checked_div(self.iterations)
                .expect("guarded by if condition");
            let count_delta = total_count_delta
                .checked_div(self.iterations)
                .expect("guarded by if condition");
            (bytes_delta, count_delta)
        } else {
            (total_bytes_delta, total_count_delta)
        }
    }
}

impl Drop for ThreadSpan {
    fn drop(&mut self) {
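        // Record the measured deltas in the operation's shared metrics.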
        let (bytes_delta, count_delta) = self.to_deltas();
        let mut data = self.metrics.lock().expect(ERR_POISONED_LOCK);
        data.add_iterations(bytes_delta, count_delta, self.iterations);
    }
}

#[cfg(test)]
#[cfg_attr(coverage_nightly, coverage(off))]
mod tests {
    use super::*;

    // Static assertions for thread safety:
    // ThreadSpan should NOT be Send or Sync due to PhantomData<*const ()>.
    static_assertions::assert_not_impl_all!(ThreadSpan: Send);
    static_assertions::assert_not_impl_all!(ThreadSpan: Sync);
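
    // A minimal test sketch for the documented zero-iterations panic. It
    // assumes, as the doc examples above do, that `crate::Session::new()`,
    // `Session::operation()`, and `Operation::measure_thread()` are usable
    // from an in-crate unit test; adjust the setup if that assumption does
    // not hold for this crate's test configuration.
    #[test]
    #[should_panic(expected = "Iterations cannot be zero")]
    fn iterations_zero_panics() {
        let session = crate::Session::new();
        let operation = session.operation("zero_iterations");
        let _span = operation.measure_thread().iterations(0);
    }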
}