tree_sitter_cli/fuzz/
allocations.rs1use std::{
2 collections::HashMap,
3 os::raw::c_void,
4 sync::{
5 atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
6 Mutex,
7 },
8};
9
10#[ctor::ctor]
11unsafe fn initialize_allocation_recording() {
12 tree_sitter::set_allocator(
13 Some(ts_record_malloc),
14 Some(ts_record_calloc),
15 Some(ts_record_realloc),
16 Some(ts_record_free),
17 );
18}
19
/// Newtype that lets a raw allocator pointer serve as a hash-map key.
#[derive(Debug, PartialEq, Eq, Hash)]
struct Allocation(*const c_void);

// SAFETY: the wrapped pointer is only compared and hashed by address — it is
// never dereferenced — so sending/sharing the wrapper across threads is sound.
unsafe impl Send for Allocation {}
unsafe impl Sync for Allocation {}
24
25#[derive(Default)]
26struct AllocationRecorder {
27 enabled: AtomicBool,
28 allocation_count: AtomicUsize,
29 outstanding_allocations: Mutex<HashMap<Allocation, usize>>,
30}
31
32thread_local! {
33 static RECORDER: AllocationRecorder = AllocationRecorder::default();
34}
35
// Bind straight to the C allocator: the wrappers below must return memory
// that the tree-sitter C core can legitimately hand back to `free`.
extern "C" {
    fn malloc(size: usize) -> *mut c_void;
    fn calloc(count: usize, size: usize) -> *mut c_void;
    fn realloc(ptr: *mut c_void, size: usize) -> *mut c_void;
    fn free(ptr: *mut c_void);
}
42
43pub fn record<T>(f: impl FnOnce() -> T) -> T {
44 record_checked(f).unwrap()
45}
46
47pub fn record_checked<T>(f: impl FnOnce() -> T) -> Result<T, String> {
48 RECORDER.with(|recorder| {
49 recorder.enabled.store(true, SeqCst);
50 recorder.allocation_count.store(0, SeqCst);
51 recorder.outstanding_allocations.lock().unwrap().clear();
52 });
53
54 let value = f();
55
56 let outstanding_allocation_indices = RECORDER.with(|recorder| {
57 recorder.enabled.store(false, SeqCst);
58 recorder.allocation_count.store(0, SeqCst);
59 recorder
60 .outstanding_allocations
61 .lock()
62 .unwrap()
63 .drain()
64 .map(|e| e.1)
65 .collect::<Vec<_>>()
66 });
67 if !outstanding_allocation_indices.is_empty() {
68 return Err(format!(
69 "Leaked allocation indices: {outstanding_allocation_indices:?}",
70 ));
71 }
72 Ok(value)
73}
74
75fn record_alloc(ptr: *mut c_void) {
76 RECORDER.with(|recorder| {
77 if recorder.enabled.load(SeqCst) {
78 let count = recorder.allocation_count.fetch_add(1, SeqCst);
79 recorder
80 .outstanding_allocations
81 .lock()
82 .unwrap()
83 .insert(Allocation(ptr), count);
84 }
85 });
86}
87
88fn record_dealloc(ptr: *mut c_void) {
89 RECORDER.with(|recorder| {
90 if recorder.enabled.load(SeqCst) {
91 recorder
92 .outstanding_allocations
93 .lock()
94 .unwrap()
95 .remove(&Allocation(ptr));
96 }
97 });
98}
99
100#[must_use]
105pub unsafe extern "C" fn ts_record_malloc(size: usize) -> *mut c_void {
106 let result = malloc(size);
107 record_alloc(result);
108 result
109}
110
111#[must_use]
116pub unsafe extern "C" fn ts_record_calloc(count: usize, size: usize) -> *mut c_void {
117 let result = calloc(count, size);
118 record_alloc(result);
119 result
120}
121
122#[must_use]
127pub unsafe extern "C" fn ts_record_realloc(ptr: *mut c_void, size: usize) -> *mut c_void {
128 let result = realloc(ptr, size);
129 if ptr.is_null() {
130 record_alloc(result);
131 } else if !core::ptr::eq(ptr, result) {
132 record_dealloc(ptr);
133 record_alloc(result);
134 }
135 result
136}
137
138pub unsafe extern "C" fn ts_record_free(ptr: *mut c_void) {
143 record_dealloc(ptr);
144 free(ptr);
145}