cesium_allocator/allocator.rs
use std::ffi::{
    c_char,
    c_void,
};

use cesium_libmimalloc_sys as mi;
use cesium_libmimalloc_sys::allocator::mi_free;
use mi::{
    heap::*,
    mi_block_visit_fun,
};

/// A general-purpose memory allocator. It's recommended to use the Allocator
/// Pool to manage allocator instances. Be aware that the `*mut u8` return
/// types are type-friendly wrappers on top of [`c_void`](std::ffi::c_void),
/// which is just a `void*` in C.
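///
/// # Example
///
/// A minimal sketch of standalone use, assuming this module is exposed as
/// `cesium_allocator::allocator` (in practice, instances would normally be
/// obtained from the allocator pool):
///
/// ```
/// use cesium_allocator::allocator::Allocator;
///
/// // Wraps the process-wide default mimalloc heap and uses id 0.
/// let alloc = Allocator::default();
/// assert_eq!(alloc.id(), 0);
///
/// // All allocations made through `alloc` come from that heap.
/// let p = alloc.malloc(64);
/// assert!(!p.is_null());
/// alloc.free(p);
/// ```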
pub struct Allocator {
    id: u32,
    heap: *mut mi_heap_t,
}

impl Default for Allocator {
    /// Create an allocator that uses the default heap.
    ///
    /// Note: if called multiple times, every instance references the same
    /// underlying default heap; no additional heaps are created.
    fn default() -> Self {
        Allocator {
            id: 0,
            heap: unsafe { mi_heap_get_default() },
        }
    }
}

impl Allocator {
    /// Create an allocator with the given `id`, backed by an existing heap.
    pub fn new(id: u32, heap: *mut mi_heap_t) -> Self {
        Allocator { id, heap }
    }

    /// Returns this allocator's identifier.
    pub fn id(&self) -> u32 {
        self.id
    }

    /// Release outstanding resources in a specific heap.
    pub fn collect(&self, force: bool) {
        unsafe {
            mi_heap_collect(self.heap, force);
        }
    }

    /// Allocate `size` bytes.
    ///
    /// Returns a pointer to the allocated memory, or null if out of memory.
    /// Returns a unique pointer if called with a `size` of 0.
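    ///
    /// # Example
    ///
    /// A minimal sketch (same crate-path assumption as the type-level
    /// example):
    ///
    /// ```
    /// use cesium_allocator::allocator::Allocator;
    ///
    /// let alloc = Allocator::default();
    /// let p = alloc.malloc(16);
    /// assert!(!p.is_null());
    ///
    /// // The memory is uninitialized, so write before reading.
    /// unsafe {
    ///     p.write(42);
    ///     assert_eq!(p.read(), 42);
    /// }
    ///
    /// alloc.free(p);
    /// ```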
    pub fn malloc(&self, size: usize) -> *mut u8 {
        unsafe { mi_heap_malloc(self.heap, size) as *mut u8 }
    }

    /// Free previously allocated memory.
    ///
    /// `p` must be null or a pointer previously returned by one of the
    /// allocation methods.
    pub fn free(&self, p: *mut u8) {
        unsafe { mi_free(p as *mut c_void) }
    }

    /// Allocate zero-initialized `size` bytes.
    ///
    /// Returns a pointer to newly allocated zero-initialized memory, or null if
    /// out of memory.
    pub fn zalloc(&self, size: usize) -> *mut u8 {
        unsafe { mi_heap_zalloc(self.heap, size) as *mut u8 }
    }

    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory.
    ///
    /// All items are initialized to zero.
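    ///
    /// # Example
    ///
    /// A sketch of allocating a zero-initialized array of `u64`-sized slots
    /// (same crate-path assumption as the type-level example):
    ///
    /// ```
    /// use cesium_allocator::allocator::Allocator;
    ///
    /// let alloc = Allocator::default();
    /// let p = alloc.calloc(4, std::mem::size_of::<u64>());
    /// assert!(!p.is_null());
    ///
    /// // Every byte of the 4 * 8 byte block starts out zeroed.
    /// for i in 0..4 * std::mem::size_of::<u64>() {
    ///     assert_eq!(unsafe { *p.add(i) }, 0);
    /// }
    ///
    /// alloc.free(p);
    /// ```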
    pub fn calloc(&self, count: usize, size: usize) -> *mut u8 {
        unsafe { mi_heap_calloc(self.heap, count, size) as *mut u8 }
    }

    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory,
    /// otherwise returns the same as [`malloc(count *
    /// size)`](Allocator::malloc). Equivalent to [`calloc`](Allocator::calloc),
    /// but returns uninitialized (and not zeroed) bytes.
    pub fn mallocn(&self, count: usize, size: usize) -> *mut u8 {
        unsafe { mi_heap_mallocn(self.heap, count, size) as *mut u8 }
    }

    /// Allocate an object of no more than [`MI_SMALL_SIZE_MAX`] bytes.
    ///
    /// Does not check that `size` is indeed small.
    ///
    /// Note: Currently [`malloc`](Allocator::malloc) checks if `size` is small
    /// and calls this at runtime if so, so it's only worth using if you know
    /// the size is small for certain.
    pub fn malloc_small(&self, size: usize) -> *mut u8 {
        unsafe { mi_heap_malloc_small(self.heap, size) as *mut u8 }
    }

    /// Re-allocate memory to `newsize` bytes.
    ///
    /// Returns a pointer to the re-allocated memory, or null if out of memory.
    /// If null is returned, the pointer `p` is not freed. Otherwise the
    /// original pointer is either freed or returned as the reallocated result
    /// (in case it fits in-place with the new size).
    ///
    /// If `p` is null, it behaves as [`malloc(newsize)`](Allocator::malloc).
    /// If `newsize` is larger than the original size allocated for `p`, the
    /// bytes past the original size are uninitialized.
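    ///
    /// # Example
    ///
    /// A sketch of growing a buffer (same crate-path assumption as the
    /// type-level example):
    ///
    /// ```
    /// use cesium_allocator::allocator::Allocator;
    ///
    /// let alloc = Allocator::default();
    /// let p = alloc.malloc(8);
    /// unsafe { p.write_bytes(7, 8) };
    ///
    /// // Grow to 32 bytes; the first 8 bytes keep their contents, the rest is
    /// // uninitialized. `p` must not be used again after this call.
    /// let q = alloc.realloc(p, 32);
    /// assert!(!q.is_null());
    /// assert_eq!(unsafe { q.read() }, 7);
    ///
    /// alloc.free(q);
    /// ```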
    pub fn realloc(&self, p: *mut u8, newsize: usize) -> *mut u8 {
        unsafe { mi_heap_realloc(self.heap, p as *mut c_void, newsize) as *mut u8 }
    }

    /// Re-allocate memory to `count` elements of `size` bytes.
    ///
    /// The realloc equivalent of the [`mallocn`](Allocator::mallocn) interface.
    /// Returns `null` if `count * size` overflows or on out-of-memory,
    /// otherwise returns the same as [`realloc(p, count *
    /// size)`](Allocator::realloc).
    pub fn reallocn(&self, p: *mut u8, count: usize, size: usize) -> *mut u8 {
        unsafe { mi_heap_reallocn(self.heap, p as *mut c_void, count, size) as *mut u8 }
    }

    /// Re-allocate memory to `newsize` bytes.
    ///
    /// This differs from [`realloc`](Allocator::realloc) in that on failure,
    /// `p` is freed.
    pub fn reallocf(&self, p: *mut u8, newsize: usize) -> *mut u8 {
        unsafe { mi_heap_reallocf(self.heap, p as *mut c_void, newsize) as *mut u8 }
    }

    /// Allocate and duplicate a nul-terminated C string. Because `c_char` may
    /// be either `i8` or `u8` depending on the platform, the original type is
    /// left unwrapped.
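    ///
    /// # Example
    ///
    /// A sketch of duplicating a C string and reading it back (same crate-path
    /// assumption as the type-level example):
    ///
    /// ```
    /// use std::ffi::{CStr, CString};
    ///
    /// use cesium_allocator::allocator::Allocator;
    ///
    /// let alloc = Allocator::default();
    /// let src = CString::new("cesium").unwrap();
    ///
    /// let dup = alloc.strdup(src.as_ptr());
    /// assert!(!dup.is_null());
    /// assert_eq!(unsafe { CStr::from_ptr(dup) }.to_str(), Ok("cesium"));
    ///
    /// alloc.free(dup as *mut u8);
    /// ```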
    pub fn strdup(&self, s: *const c_char) -> *mut c_char {
        unsafe { mi_heap_strdup(self.heap, s) }
    }

    /// Allocate and duplicate a nul-terminated C string, up to `n` bytes.
    /// Because `c_char` may be either `i8` or `u8` depending on the platform,
    /// the original type is left unwrapped.
    pub fn strndup(&self, s: *const c_char, n: usize) -> *mut c_char {
        unsafe { mi_heap_strndup(self.heap, s, n) }
    }

    /// Resolve a file path name, producing a C string which can be passed to
    /// [`free`](Allocator::free).
    ///
    /// `resolved_name` should be null, but can also point to a buffer of at
    /// least `PATH_MAX` bytes.
    ///
    /// If successful, returns a pointer to the resolved absolute file name, or
    /// `null` on failure (with `errno` set to the error code).
    ///
    /// If `resolved_name` was `null`, the returned result should be freed with
    /// [`free`](Allocator::free).
    ///
    /// This is rarely useful in FFI code, but is included mostly for
    /// completeness.
    pub fn realpath(&self, fname: *const c_char, resolved_name: *mut c_char) -> *mut c_char {
        unsafe { mi_heap_realpath(self.heap, fname, resolved_name) }
    }

    /// Allocate `size` bytes aligned by `alignment`.
    ///
    /// Returns a pointer to the allocated memory, or null if out of memory.
    ///
    /// Returns a unique pointer if called with a `size` of 0.
    pub fn malloc_aligned(&self, size: usize, alignment: usize) -> *mut u8 {
        unsafe { mi_heap_malloc_aligned(self.heap, size, alignment) as *mut u8 }
    }

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`.
    ///
    /// Note that the resulting pointer itself is not aligned by the alignment,
    /// but after `offset` bytes it will be. This can be useful for allocating
    /// data with an inline header, where the data has a specific alignment
    /// requirement.
    ///
    /// Specifically, if `p` is the returned pointer, `p.add(offset)` is aligned
    /// to `alignment`.
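    ///
    /// # Example
    ///
    /// A sketch of an allocation with a 16-byte inline header followed by a
    /// 64-byte-aligned payload (same crate-path assumption as the type-level
    /// example):
    ///
    /// ```
    /// use cesium_allocator::allocator::Allocator;
    ///
    /// let alloc = Allocator::default();
    /// let p = alloc.malloc_aligned_at(16 + 256, 64, 16);
    /// assert!(!p.is_null());
    ///
    /// // The payload that starts after the 16-byte header is 64-byte aligned.
    /// assert_eq!(unsafe { p.add(16) } as usize % 64, 0);
    ///
    /// alloc.free(p);
    /// ```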
    pub fn malloc_aligned_at(&self, size: usize, alignment: usize, offset: usize) -> *mut u8 {
        unsafe { mi_heap_malloc_aligned_at(self.heap, size, alignment, offset) as *mut u8 }
    }

    /// Allocate `size` bytes aligned by `alignment`, initialized to zero.
    ///
    /// Returns a pointer to the allocated memory, or null if out of memory.
    ///
    /// Returns a unique pointer if called with a `size` of 0.
    pub fn zalloc_aligned(&self, size: usize, alignment: usize) -> *mut u8 {
        unsafe { mi_heap_zalloc_aligned(self.heap, size, alignment) as *mut u8 }
    }

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`,
    /// zero-initialized.
    ///
    /// This is a [`zalloc`](Allocator::zalloc) equivalent of
    /// [`malloc_aligned_at`](Allocator::malloc_aligned_at).
    pub fn zalloc_aligned_at(&self, size: usize, alignment: usize, offset: usize) -> *mut u8 {
        unsafe { mi_heap_zalloc_aligned_at(self.heap, size, alignment, offset) as *mut u8 }
    }

    /// Allocate `size * count` bytes aligned by `alignment`.
    ///
    /// Returns a pointer to the allocated memory, or null if out of memory or
    /// if `size * count` overflows.
    ///
    /// Returns a unique pointer if called with a `size * count` of 0.
    pub fn calloc_aligned(&self, count: usize, size: usize, alignment: usize) -> *mut u8 {
        unsafe { mi_heap_calloc_aligned(self.heap, count, size, alignment) as *mut u8 }
    }

    /// Allocate `size * count` bytes aligned by `alignment` at a specified
    /// `offset`, zero-initialized.
    ///
    /// This is a [`calloc`](Allocator::calloc) equivalent of
    /// [`malloc_aligned_at`](Allocator::malloc_aligned_at).
    pub fn calloc_aligned_at(
        &self,
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut u8 {
        unsafe { mi_heap_calloc_aligned_at(self.heap, count, size, alignment, offset) as *mut u8 }
    }

    /// Re-allocate memory to `new_size` bytes, aligned by `alignment`.
    ///
    /// Returns a pointer to the allocated memory, or null if out of memory. If
    /// null is returned, the pointer `p` is not freed. Otherwise the original
    /// pointer is either freed or returned as the reallocated result (in case
    /// it fits in-place with the new size).
    ///
    /// If `p` is null, it behaves as
    /// [`malloc_aligned`](Allocator::malloc_aligned). If `new_size` is larger
    /// than the original `size` allocated for `p`, the bytes after `size` are
    /// uninitialized.
    pub fn realloc_aligned(&self, p: *mut u8, new_size: usize, alignment: usize) -> *mut u8 {
        unsafe {
            mi_heap_realloc_aligned(self.heap, p as *mut c_void, new_size, alignment) as *mut u8
        }
    }

    /// Re-allocate memory to `newsize` bytes aligned by `alignment` at a
    /// specified `offset`.
    ///
    /// This is a [`realloc`](Allocator::realloc) equivalent of
    /// [`malloc_aligned_at`](Allocator::malloc_aligned_at).
    pub fn realloc_aligned_at(
        &self,
        p: *mut u8,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut u8 {
        unsafe {
            mi_heap_realloc_aligned_at(self.heap, p as *mut c_void, newsize, alignment, offset)
                as *mut u8
        }
    }

    /// Zero initialized [re-allocation](Allocator::realloc).
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`calloc`](Allocator::calloc),
    /// [`zalloc`](Allocator::zalloc),
    /// [`zalloc_aligned`](Allocator::zalloc_aligned), ...
    pub fn rezalloc(&self, p: *mut u8, newsize: usize) -> *mut u8 {
        unsafe { mi_heap_rezalloc(self.heap, p as *mut c_void, newsize) as *mut u8 }
    }

    /// Zero initialized [re-allocation](Allocator::realloc), following `calloc`
    /// parameter conventions.
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`calloc`](Allocator::calloc),
    /// [`zalloc`](Allocator::zalloc),
    /// [`zalloc_aligned`](Allocator::zalloc_aligned), ...
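    ///
    /// # Example
    ///
    /// A sketch of growing a zero-initialized array (same crate-path assumption
    /// as the type-level example):
    ///
    /// ```
    /// use cesium_allocator::allocator::Allocator;
    ///
    /// let alloc = Allocator::default();
    /// let p = alloc.calloc(4, 8);
    ///
    /// // Grow from 4 to 8 items of 8 bytes; the newly added bytes are zeroed.
    /// let q = alloc.recalloc(p, 8, 8);
    /// assert!(!q.is_null());
    /// assert_eq!(unsafe { q.add(4 * 8).read() }, 0);
    ///
    /// alloc.free(q);
    /// ```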
    pub fn recalloc(&self, p: *mut u8, newcount: usize, size: usize) -> *mut u8 {
        unsafe { mi_heap_recalloc(self.heap, p as *mut c_void, newcount, size) as *mut u8 }
    }

    /// Aligned version of [`rezalloc`](Allocator::rezalloc).
    pub fn rezalloc_aligned(&self, p: *mut u8, newsize: usize, alignment: usize) -> *mut u8 {
        unsafe {
            mi_heap_rezalloc_aligned(self.heap, p as *mut c_void, newsize, alignment) as *mut u8
        }
    }

    /// Offset-aligned version of [`rezalloc`](Allocator::rezalloc).
    pub fn rezalloc_aligned_at(
        &self,
        p: *mut u8,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut u8 {
        unsafe {
            mi_heap_rezalloc_aligned_at(self.heap, p as *mut c_void, newsize, alignment, offset)
                as *mut u8
        }
    }

    /// Aligned version of [`recalloc`](Allocator::recalloc).
    pub fn recalloc_aligned(
        &self,
        p: *mut u8,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut u8 {
        unsafe {
            mi_heap_recalloc_aligned(self.heap, p as *mut c_void, newcount, size, alignment)
                as *mut u8
        }
    }

    /// Offset-aligned version of [`recalloc`](Allocator::recalloc).
    pub fn recalloc_aligned_at(
        &self,
        p: *mut u8,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut u8 {
        unsafe {
            mi_heap_recalloc_aligned_at(
                self.heap,
                p as *mut c_void,
                newcount,
                size,
                alignment,
                offset,
            ) as *mut u8
        }
    }

    /// Does a heap contain a pointer to a previously allocated block?
    ///
    /// `p` must be a pointer to a previously allocated block (in any heap) --
    /// it cannot be some random pointer!
    ///
    /// Returns `true` if the block pointed to by `p` is in the `heap`.
    ///
    /// See [`check_owned`](Allocator::check_owned).
    pub fn contains_block(&self, p: *const u8) -> bool {
        unsafe { mi_heap_contains_block(self.heap, p as *const c_void) }
    }

    /// Check safely if any pointer is part of a heap.
    ///
    /// `p` may be any pointer -- it is not required to have been allocated by
    /// the given heap or any other known heap. Returns `true` if `p` points to
    /// a block in the given heap, `false` otherwise.
    ///
    /// Note: this is an expensive function, linear in the pages in the heap.
    ///
    /// See [`contains_block`](Allocator::contains_block).
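    ///
    /// # Example
    ///
    /// A sketch (same crate-path assumption as the type-level example):
    ///
    /// ```
    /// use cesium_allocator::allocator::Allocator;
    ///
    /// let alloc = Allocator::default();
    /// let p = alloc.malloc(32);
    ///
    /// // A block allocated from this heap is owned by it; an unrelated
    /// // pointer such as a stack address is not.
    /// assert!(alloc.check_owned(p));
    /// let local = 0u8;
    /// assert!(!alloc.check_owned(&local as *const u8));
    ///
    /// alloc.free(p);
    /// ```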
    pub fn check_owned(&self, p: *const u8) -> bool {
        unsafe { mi_heap_check_owned(self.heap, p as *const c_void) }
    }

    /// Visit all areas and blocks in `heap`.
    ///
    /// If `visit_all_blocks` is false, the `visitor` is only called once for
    /// every heap area. If it's true, the `visitor` is also called for every
    /// allocated block inside every area (with `!block.is_null()`). Return
    /// `false` from the `visitor` to return early.
    ///
    /// `arg` is an extra argument passed into the `visitor`.
    ///
    /// Returns `true` if all areas and blocks were visited.
    ///
    /// Passing a `None` visitor is allowed, and is a no-op.
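    ///
    /// # Example
    ///
    /// A minimal sketch that only exercises the `None` no-op path (same
    /// crate-path assumption as the type-level example):
    ///
    /// ```
    /// use cesium_allocator::allocator::Allocator;
    ///
    /// let alloc = Allocator::default();
    ///
    /// // With no visitor the call does nothing and simply returns.
    /// alloc.visit_blocks(false, None, std::ptr::null_mut());
    /// ```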
    pub fn visit_blocks(
        &self,
        visit_all_blocks: bool,
        visitor: mi_block_visit_fun,
        arg: *mut u8,
    ) -> bool {
        unsafe { mi_heap_visit_blocks(self.heap, visit_all_blocks, visitor, arg as *mut c_void) }
    }
}