1#![crate_name = "heapmon"]
2
3use core::fmt::{self, Debug, Formatter};
4
5use std::alloc::{GlobalAlloc, Layout, System};
6use std::cell::RefCell;
7use std::cmp::{self, Ordering};
8use std::collections::HashMap;
9use std::fs::{self, File};
10use std::sync::Mutex;
11use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed};
12use std::{thread, time::{self, UNIX_EPOCH}};
13
14use regex::Regex;
15
16use core::ffi::c_void;
17
18use backtrace;
19use once_cell::sync::Lazy;
20
21use log::*;
22
23pub mod util;
24
thread_local! {
    // Per-thread re-entrancy guard. Set while the monitor's own bookkeeping
    // runs inside alloc/dealloc so that allocations made by the bookkeeping
    // itself (HashMap growth, backtrace capture, ...) are not recorded and
    // cannot recurse back into the monitor.
    static ENTERED: RefCell<bool> = RefCell::new(false);
}
28
/// Number of innermost frames dropped when a backtrace is captured in
/// `allocated` — presumably the allocator/monitor's own frames; confirm
/// against the actual call depth if symbolised output looks truncated.
const NSKIP: usize = 8;

/// A captured backtrace: raw instruction-pointer values, one per frame.
type BacktraceRec = Vec<usize>;
// Newtype wrapper so a borrowed BacktraceRec can be Debug-printed with
// symbol resolution instead of as raw addresses.
struct DebugBacktraceRec<'a>(pub &'a BacktraceRec);
33impl<'a> Debug for DebugBacktraceRec<'a> {
34 fn fmt(&self, f: &mut Formatter) -> fmt::Result {
35 f.debug_list()
36 .entries(self.0.iter().filter(|ip| **ip != 0).map(|ip| {
37 let mut dbgstr = "".to_string();
38 backtrace::resolve(*ip as *mut c_void, |s| dbgstr = format!("{:?}", s));
39 dbgstr
40 }))
41 .finish()
42 }
43}
44
// One tracked allocation, as recorded by HeapMon::allocated.
#[derive(Clone, Eq, PartialEq)]
struct AllocRec {
    // Requested size in bytes (Layout::size at alloc time).
    size: usize,
    // Monotonic allocation id; smaller means allocated earlier.
    seqno: usize,
    // Raw instruction pointers captured at the allocation site.
    bt: BacktraceRec,
}
51
impl Debug for AllocRec {
    // Debug output with the backtrace routed through DebugBacktraceRec so
    // frames print symbolised rather than as raw pointer values.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        f.debug_struct("AllocRec")
            .field("size", &self.size)
            .field("seqno", &self.seqno)
            .field("bt", &DebugBacktraceRec(&self.bt))
            .finish()
    }
}
61
// Aggregate of all AllocRecs sharing the same (backtrace, size) key.
#[derive(Clone, Debug)]
struct SummaryRec {
    // Number of allocations folded into this group.
    count: usize,
    // Representative record; its seqno is the minimum over the group
    // (see HeapMon::aggregate_record).
    #[allow(unused)]
    arec: AllocRec,
}
68
/// Sort order for the summary records printed by `HeapMon::dump`.
#[derive(Copy, Clone, Debug)]
pub enum SummaryOrder {
    /// Ascending by the earliest seqno in each summary group.
    FirstSeen,
    /// Descending by total bytes (count * size); ties break by seqno.
    MemoryUsed,
}
74
75fn first_seen(a: &SummaryRec, b: &SummaryRec) -> Ordering {
76 (a.arec.seqno).cmp(&(b.arec.seqno))
77}
78
79fn memory_used(a: &SummaryRec, b: &SummaryRec) -> Ordering {
80 let asz = a.count * a.arec.size;
81 let bsz = b.count * b.arec.size;
82 if asz != bsz {
83 asz.cmp(&bsz).reverse()
84 } else {
85 (a.arec.seqno).cmp(&(b.arec.seqno))
86 }
87}
88
89impl SummaryRec {
90 fn format_output(&self) -> String {
91 let mut output = String::new();
92 output += &format!("\n{} bytes = {} blocks * {} bytes, min_seqno {}",
93 pretty_print_usize(self.count * self.arec.size),
94 pretty_print_usize(self.count),
95 pretty_print_usize(self.arec.size),
96 self.arec.seqno);
97 let mut ndx = 0;
98 for ip in self.arec.bt.iter().filter(|ip| **ip != 0) {
99 backtrace::resolve(*ip as *mut c_void, |s| {
100 output += &format!("\n {:>2}: {}", ndx, s.name().unwrap());
101 if let Some(filename) = s.filename() {
102 output += &format!("\n {}:{}",
103 filename.display(), s.lineno().unwrap_or(0));
104 }
105 });
106 ndx += 1;
107 }
108 output
109 }
110}
111
/// A `GlobalAlloc` wrapper that records outstanding allocations together
/// with their backtraces, so peak heap usage can be attributed to call sites.
pub struct HeapMon<A> {
    // The real allocator every request is forwarded to.
    inner: A,
    // When false, alloc/dealloc pass straight through with no bookkeeping.
    is_enabled: AtomicBool,
    // Monotonic id handed to each tracked allocation.
    seqno: AtomicUsize,
    // Regex filters; summaries whose text matches are suppressed in dump().
    filters: Mutex<Vec<Regex>>,
    // Live allocations keyed by pointer address.
    outstanding: Lazy<Mutex<HashMap<usize, AllocRec>>>,
    // Current tracked heap size in bytes.
    heapsz: AtomicUsize,
    // When true, records freed after the peak are retained in `history`.
    is_peakhold: AtomicBool,
    // (seqno, heapsz) at the high-water mark.
    peak: Lazy<Mutex<(usize, usize)>>,
    // Records freed since the current peak (peakhold mode only).
    history: Lazy<Mutex<Vec<AllocRec>>>,
}
123
124impl<A: Sync> HeapMon<A> {
125 pub const fn from_allocator(allocator: A) -> Self {
127 Self {
128 inner: allocator,
129 is_enabled: AtomicBool::new(false),
130 seqno: AtomicUsize::new(0),
131 filters: Mutex::new(Vec::new()),
132 outstanding: Lazy::new(|| Mutex::new(HashMap::new())),
133 heapsz: AtomicUsize::new(0),
134 is_peakhold: AtomicBool::new(false),
135 peak: Lazy::new(|| Mutex::new((0, 0))),
136 history: Lazy::new(|| Mutex::new(Vec::new())),
137 }
138 }
139
140 pub fn filter(&self, regexstr: &str) {
142 self.filters.lock().unwrap().push(Regex::new(regexstr).expect("regex"));
143 }
144
145 pub fn enable(&self) {
147 info!("heapmon::enable at seqno {}, heapsz {}",
148 self.seqno.load(Relaxed), self.heapsz.load(Relaxed));
149 self.is_enabled.store(true, Relaxed);
150 }
151
152 pub fn disable(&self) -> (usize, usize) {
154 self.is_enabled.store(false, Relaxed);
155 let peak = self.peak.lock().unwrap();
156 let heapsz = self.heapsz.load(Relaxed);
157 info!("heapmon::disable at seqno {}, heapsz {}, peakseqno {}, peakheapsz {}",
158 self.seqno.load(Relaxed), heapsz, peak.0, peak.1,
159 );
160 (heapsz, peak.1)
161 }
162
163 pub fn peakhold(&self) {
165 info!("heapmon::peakhold at seqno {}, heapsz {}",
166 self.seqno.load(Relaxed), self.heapsz.load(Relaxed));
167 self.is_peakhold.store(true, Relaxed);
168 self.is_enabled.store(true, Relaxed);
169 }
170
171 pub fn reset(&self) {
173 info!("heapmon::reset");
174 self.is_enabled.store(false, Relaxed);
175 self.seqno.store(0, Relaxed);
176 self.outstanding.lock().unwrap().clear();
177 self.heapsz.store(0, Relaxed);
178 self.is_peakhold.store(false, Relaxed);
179 *self.peak.lock().unwrap() = (0, 0);
180 self.history.lock().unwrap().clear();
181 }
182
183 fn aggregate_record(rollup: &mut HashMap<(BacktraceRec, usize), SummaryRec>,
184 arec: &AllocRec) {
185 let key = (arec.bt.clone(), arec.size);
186 if !rollup.contains_key(&key) {
187 rollup.insert(key.clone(), SummaryRec { count: 0, arec: arec.clone() });
188 }
189 let srec = rollup.get_mut(&key).unwrap();
190 srec.count += 1;
191 srec.arec.seqno = cmp::min(srec.arec.seqno, arec.seqno);
192 }
193
194 pub fn dump(&self, summary_order: SummaryOrder, thresh: usize, label: &str) {
196 info!("heapmon::dump starting");
197 let peak = self.peak.lock().unwrap();
198 let is_peakhold = self.is_peakhold.load(Relaxed);
199 let mut rollup: HashMap<(BacktraceRec, usize), SummaryRec> = HashMap::new();
200
201 for (_ptr, arec) in self.outstanding.lock().unwrap().iter() {
203 if !is_peakhold || arec.seqno <= peak.0 {
204 Self::aggregate_record(&mut rollup, arec);
205 }
206 }
207
208 for arec in self.history.lock().unwrap().iter() {
210 if arec.seqno <= peak.0 {
211 Self::aggregate_record(&mut rollup, arec);
212 }
213 }
214
215 let mut summary: Vec<SummaryRec> = rollup.iter().map(|(_k, srec)| srec.clone()).collect();
216 summary.sort_by(match summary_order {
217 SummaryOrder::FirstSeen => first_seen,
218 SummaryOrder::MemoryUsed => memory_used,
219 });
220 let mut outsz = 0;
221 let mut outbuf = String::new();
222 'summaries: for srec in summary.iter() {
223 let outstr = srec.format_output();
224 for regex in self.filters.lock().unwrap().iter() {
225 if regex.is_match(&outstr) {
226 continue 'summaries;
227 }
228 }
229 outsz += srec.count * srec.arec.size;
230 outbuf.push_str(&outstr);
231 outbuf.push_str("\n");
232 }
233 if outsz > thresh {
234 info!("Filtered peak size for {} is {}", label, pretty_print_usize(outsz));
235 info!("{}", outbuf);
236 }
237 info!("heapmon::dump finished");
238 }
239
240 fn allocated(&self, layout: Layout, ptr: *mut u8) {
241 let sz = layout.size();
242 let seqno = self.seqno.fetch_add(1, Relaxed);
243 let heapsz = self.heapsz.fetch_add(sz, Relaxed) + sz;
244 let mut arec = AllocRec { size: sz, seqno, bt: Vec::with_capacity(32) };
245 let mut ndx = 0;
246 backtrace::trace(|frame| {
247 if ndx >= NSKIP {
248 arec.bt.push(frame.ip() as usize);
249 }
250 ndx += 1;
251 true
252 });
253 assert_eq!(self.outstanding.lock().unwrap().insert(ptr as usize, arec), None);
254 let mut peak = self.peak.lock().unwrap();
255 if heapsz > peak.1 {
256 *peak = (seqno, heapsz);
257 self.history.lock().unwrap().clear();
258 }
259 }
260
261 fn deallocated(&self, layout: Layout, ptr: *mut u8) {
262 if let Some(arec) = self.outstanding.lock().unwrap().remove(&(ptr as usize)) {
264 let _heapsz = self.heapsz.fetch_sub(layout.size(), Relaxed) - layout.size();
265 if self.is_peakhold.load(Relaxed) {
266 self.history.lock().unwrap().push(arec);
267 }
268 }
269 }
270}
271
272pub fn watch<A: Sync>(cp: &str, heapmon: &'static HeapMon<A>, summary_order: SummaryOrder) {
274 info!("touch the {} file to enable, touch again to disable and dump", cp);
275 let mut watching = false; let ctlpath = cp.to_string().clone();
277 ctlfile_create(&ctlpath);
278 let mut last_mtime = ctlfile_mtime(&ctlpath);
279 thread::spawn(move || {
280 loop {
281 thread::sleep(time::Duration::from_millis(1000));
282 let mtime = ctlfile_mtime(&ctlpath);
283 if mtime > last_mtime {
284 last_mtime = mtime;
286 if !watching {
287 heapmon.reset();
288 heapmon.enable();
289 } else {
290 heapmon.disable();
291 heapmon.dump(summary_order, 0, "watch");
292 }
293 watching = !watching;
294 }
295 }
296 });
297}
298
impl HeapMon<System> {
    /// Convenience constructor: a monitor wrapping the operating system's
    /// default allocator. `const` so it can back a `#[global_allocator]`.
    pub const fn system() -> HeapMon<System> {
        Self::from_allocator(System)
    }
}
305
/// Creates (or truncates) the control file used to toggle monitoring.
///
/// Panics on failure, naming the path — the old bare `expect("File::create")`
/// gave no clue which file could not be created.
fn ctlfile_create(ctlpath: &str) {
    File::create(ctlpath)
        .unwrap_or_else(|e| panic!("File::create({}): {}", ctlpath, e));
}
309
/// Returns the control file's modification time as whole seconds since the
/// Unix epoch. Panics if the file cannot be stat'ed or its mtime read.
fn ctlfile_mtime(ctlpath: &str) -> u64 {
    let meta = fs::metadata(ctlpath).unwrap();
    let mtime = meta.modified().unwrap();
    let since_epoch = mtime.duration_since(UNIX_EPOCH).unwrap();
    since_epoch.as_secs()
}
319
320
// SAFETY: every request is forwarded verbatim to `inner`, which upholds the
// GlobalAlloc contract; the monitor only observes pointers and layouts.
unsafe impl<A: GlobalAlloc + Sync> GlobalAlloc for HeapMon<A> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Allocate first, then (if enabled) record the result.
        // NOTE(review): a failed allocation (null ptr) is still recorded in
        // `outstanding` — confirm whether that is intended.
        let ptr = self.inner.alloc(layout);
        if self.is_enabled.load(Relaxed) {
            ENTERED.with(|e| {
                // Re-entrancy guard: allocated() itself allocates (HashMap
                // insert, backtrace Vec), which would recurse here without it.
                if !*e.borrow() {
                    *e.borrow_mut() = true;
                    self.allocated(layout, ptr);
                    *e.borrow_mut() = false;
                }
            })
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // Record the free before releasing the memory, so bookkeeping never
        // races with a reuse of the same address.
        if self.is_enabled.load(Relaxed) {
            ENTERED.with(|e| {
                // Same re-entrancy guard as alloc().
                if !*e.borrow() {
                    *e.borrow_mut() = true;
                    self.deallocated(layout, ptr);
                    *e.borrow_mut() = false;
                }
            })
        }
        self.inner.dealloc(ptr, layout)
    }
}
349
/// Formats `i` with `_` separators every three digits,
/// e.g. `1234567` -> `"1_234_567"`.
fn pretty_print_usize(i: usize) -> String {
    let digits = i.to_string();
    // Room for the digits plus one separator per three-digit group, so the
    // repeated front-inserts never reallocate.
    let mut s = String::with_capacity(digits.len() + digits.len() / 3);
    for (idx, val) in digits.chars().rev().enumerate() {
        if idx != 0 && idx % 3 == 0 {
            s.insert(0, '_');
        }
        s.insert(0, val);
    }
    // The old `format!("{}", s)` re-allocated a copy of an already-final String.
    s
}