// libmimalloc_sys/extended.rs
1#![allow(nonstandard_style)]
2
3use core::ffi::c_void;
4
5use cty::{c_char, c_int, c_long, c_ulonglong};
6
/// The maximum number of bytes which may be used as an argument to a function
/// in the `_small` family ([`mi_malloc_small`], [`mi_zalloc_small`], etc).
///
/// Equal to 128 machine words (pointer-sized units).
pub const MI_SMALL_SIZE_MAX: usize = core::mem::size_of::<*mut c_void>() * 128;
10
11extern "C" {
12 /// Allocate `count` items of `size` length each.
13 ///
14 /// Returns `null` if `count * size` overflows or on out-of-memory.
15 ///
16 /// All items are initialized to zero.
17 pub fn mi_calloc(count: usize, size: usize) -> *mut c_void;
18
19 /// Allocate `count` items of `size` length each.
20 ///
21 /// Returns `null` if `count * size` overflows or on out-of-memory,
22 /// otherwise returns the same as [`mi_malloc(count *
23 /// size)`](crate::mi_malloc).
24 /// Equivalent to [`mi_calloc`], but returns uninitialized (and not zeroed)
25 /// bytes.
26 pub fn mi_mallocn(count: usize, size: usize) -> *mut c_void;
27
28 /// Re-allocate memory to `count` elements of `size` bytes.
29 ///
30 /// The realloc equivalent of the [`mi_mallocn`] interface. Returns `null`
31 /// if `count * size` overflows or on out-of-memory, otherwise returns the
32 /// same as [`mi_realloc(p, count * size)`](crate::mi_realloc).
33 pub fn mi_reallocn(p: *mut c_void, count: usize, size: usize) -> *mut c_void;
34
35 /// Try to re-allocate memory to `newsize` bytes _in place_.
36 ///
37 /// Returns null on out-of-memory or if the memory could not be expanded in
38 /// place. On success, returns the same pointer as `p`.
39 ///
40 /// If `newsize` is larger than the original `size` allocated for `p`, the
41 /// bytes after `size` are uninitialized.
42 ///
43 /// If null is returned, the original pointer is not freed.
44 ///
45 /// Note: Conceptually, this is a realloc-like which returns null if it
46 /// would be forced to reallocate memory and copy. In practice it's
47 /// equivalent testing against [`mi_usable_size`](crate::mi_usable_size).
48 pub fn mi_expand(p: *mut c_void, newsize: usize) -> *mut c_void;
49
50 /// Re-allocate memory to `newsize` bytes.
51 ///
52 /// This differs from [`mi_realloc`](crate::mi_realloc) in that on failure,
53 /// `p` is freed.
54 pub fn mi_reallocf(p: *mut c_void, newsize: usize) -> *mut c_void;
55
56 /// Allocate and duplicate a nul-terminated C string.
57 ///
58 /// This can be useful for Rust code when interacting with the FFI.
59 pub fn mi_strdup(s: *const c_char) -> *mut c_char;
60
61 /// Allocate and duplicate a nul-terminated C string, up to `n` bytes.
62 ///
63 /// This can be useful for Rust code when interacting with the FFI.
64 pub fn mi_strndup(s: *const c_char, n: usize) -> *mut c_char;
65
66 /// Resolve a file path name, producing a `C` string which can be passed to
67 /// [`mi_free`](crate::mi_free).
68 ///
69 /// `resolved_name` should be null, but can also point to a buffer of at
70 /// least `PATH_MAX` bytes.
71 ///
72 /// If successful, returns a pointer to the resolved absolute file name, or
73 /// `null` on failure (with `errno` set to the error code).
74 ///
75 /// If `resolved_name` was `null`, the returned result should be freed with
76 /// [`mi_free`](crate::mi_free).
77 ///
78 /// This can rarely be useful in FFI code, but is mostly included for
79 /// completeness.
80 pub fn mi_realpath(fname: *const c_char, resolved_name: *mut c_char) -> *mut c_char;
81
82 /// Allocate `size * count` bytes aligned by `alignment`.
83 ///
84 /// Return pointer to the allocated memory or null if out of memory or if
85 /// `size * count` overflows.
86 ///
87 /// Returns a unique pointer if called with `size * count` 0.
88 pub fn mi_calloc_aligned(count: usize, size: usize, alignment: usize) -> *mut c_void;
89
90 /// Allocate `size` bytes aligned by `alignment` at a specified `offset`.
91 ///
92 /// Note that the resulting pointer itself is not aligned by the alignment,
93 /// but after `offset` bytes it will be. This can be useful for allocating
94 /// data with an inline header, where the data has a specific alignment
95 /// requirement.
96 ///
97 /// Specifically, if `p` is the returned pointer `p.add(offset)` is aligned
98 /// to `alignment`.
99 pub fn mi_malloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;
100
101 /// Allocate `size` bytes aligned by `alignment` at a specified `offset`,
102 /// zero-initialized.
103 ///
104 /// This is a [`mi_zalloc`](crate::mi_zalloc) equivalent of [`mi_malloc_aligned_at`].
105 pub fn mi_zalloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;
106
107 /// Allocate `size` of bytes aligned by `alignment` and place the address of the
108 /// allocated memory to `ptr`.
109 ///
110 /// Returns zero on success, invalid argument for invalid alignment, or out-of-memory.
111 pub fn mi_posix_memalign(ptr: *mut *mut c_void, alignment: usize, size: usize) -> c_int;
112
113 /// Allocate `size` bytes aligned by `alignment` with alignment as the first
114 /// parameter.
115 ///
116 /// Return pointer to the allocated memory or null if out of memory.
117 pub fn mi_aligned_alloc(alignment: usize, size: usize) -> *mut c_void;
118
119 /// Allocate `size * count` bytes aligned by `alignment` at a specified
120 /// `offset`, zero-initialized.
121 ///
122 /// This is a [`calloc`](crate::mi_calloc) equivalent of [`mi_malloc_aligned_at`].
123 pub fn mi_calloc_aligned_at(
124 count: usize,
125 size: usize,
126 alignment: usize,
127 offset: usize,
128 ) -> *mut c_void;
129
130 /// Re-allocate memory to `newsize` bytes aligned by `alignment` at a
131 /// specified `offset`.
132 ///
133 /// This is a [`realloc`](crate::mi_realloc) equivalent of [`mi_malloc_aligned_at`].
134 pub fn mi_realloc_aligned_at(
135 p: *mut c_void,
136 newsize: usize,
137 alignment: usize,
138 offset: usize,
139 ) -> *mut c_void;
140
141 /// Zero initialized [re-allocation](crate::mi_realloc).
142 ///
143 /// In general, only valid on memory originally allocated by zero
144 /// initialization: [`mi_calloc`](crate::mi_calloc),
145 /// [`mi_zalloc`](crate::mi_zalloc),
146 /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
147 pub fn mi_rezalloc(p: *mut c_void, newsize: usize) -> *mut c_void;
148
149 /// Zero initialized [re-allocation](crate::mi_realloc), following `calloc`
150 /// paramater conventions.
151 ///
152 /// In general, only valid on memory originally allocated by zero
153 /// initialization: [`mi_calloc`](crate::mi_calloc),
154 /// [`mi_zalloc`](crate::mi_zalloc),
155 /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
156 pub fn mi_recalloc(p: *mut c_void, newcount: usize, size: usize) -> *mut c_void;
157
158 /// Aligned version of [`mi_rezalloc`].
159 pub fn mi_rezalloc_aligned(p: *mut c_void, newsize: usize, alignment: usize) -> *mut c_void;
160
161 /// Offset-aligned version of [`mi_rezalloc`].
162 pub fn mi_rezalloc_aligned_at(
163 p: *mut c_void,
164 newsize: usize,
165 alignment: usize,
166 offset: usize,
167 ) -> *mut c_void;
168
169 /// Aligned version of [`mi_recalloc`].
170 pub fn mi_recalloc_aligned(
171 p: *mut c_void,
172 newcount: usize,
173 size: usize,
174 alignment: usize,
175 ) -> *mut c_void;
176
177 /// Offset-aligned version of [`mi_recalloc`].
178 pub fn mi_recalloc_aligned_at(
179 p: *mut c_void,
180 newcount: usize,
181 size: usize,
182 alignment: usize,
183 offset: usize,
184 ) -> *mut c_void;
185
186 /// Allocate an object of no more than [`MI_SMALL_SIZE_MAX`] bytes.
187 ///
188 /// Does not check that `size` is indeed small.
189 ///
190 /// Note: Currently [`mi_malloc`](crate::mi_malloc) checks if `size` is
191 /// small and calls this if
192 /// so at runtime, so its' only worth using if you know for certain.
193 pub fn mi_malloc_small(size: usize) -> *mut c_void;
194
195 /// Allocate an zero-initialized object of no more than
196 /// [`MI_SMALL_SIZE_MAX`] bytes.
197 ///
198 /// Does not check that `size` is indeed small.
199 ///
200 /// Note: Currently [`mi_zalloc`](crate::mi_zalloc) checks if `size` is
201 /// small and calls this if so at runtime, so its' only worth using if you
202 /// know for certain.
203 pub fn mi_zalloc_small(size: usize) -> *mut c_void;
204
205 /// Free a small object. Only use to free objects from [`mi_malloc_small`]
206 /// or [`mi_zalloc_small`]. Potentially a tiny bit faster than [`mi_free`](crate::mi_free).
207 pub fn mi_free_small(p: *mut c_void);
208
209 /// Return the available bytes in a memory block.
210 ///
211 /// The returned size can be used to call `mi_expand` successfully.
212 pub fn mi_usable_size(p: *const c_void) -> usize;
213
214 /// Return the used allocation size.
215 ///
216 /// Returns the size `n` that will be allocated, where `n >= size`.
217 ///
218 /// Generally, `mi_usable_size(mi_malloc(size)) == mi_good_size(size)`. This
219 /// can be used to reduce internal wasted space when allocating buffers for
220 /// example.
221 ///
222 /// See [`mi_usable_size`](crate::mi_usable_size).
223 pub fn mi_good_size(size: usize) -> usize;
224
225 /// Eagerly free memory.
226 ///
227 /// If `force` is true, aggressively return memory to the OS (can be
228 /// expensive!)
229 ///
230 /// Regular code should not have to call this function. It can be beneficial
231 /// in very narrow circumstances; in particular, when a long running thread
232 /// allocates a lot of blocks that are freed by other threads it may improve
233 /// resource usage by calling this every once in a while.
234 pub fn mi_collect(force: bool);
235
236 /// Checked free: If `p` came from mimalloc's heap (as decided by
237 /// [`mi_is_in_heap_region`]), this is [`mi_free(p)`](crate::mi_free), but
238 /// otherwise it is a no-op.
239 pub fn mi_cfree(p: *mut c_void);
240
241 /// Returns true if this is a pointer into a memory region that has been
242 /// reserved by the mimalloc heap.
243 ///
244 /// This function is described by the mimalloc documentation as "relatively
245 /// fast".
246 ///
247 /// See also [`mi_heap_check_owned`], which is (much) slower and slightly
248 /// more precise, but only concerns a single `mi_heap`.
249 pub fn mi_is_in_heap_region(p: *const c_void) -> bool;
250
251 /// Layout-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
252 /// the size and alignment as well.
253 ///
254 /// Note: unlike some allocators that require this information for
255 /// performance, mimalloc doesn't need it (as of the current version,
256 /// v2.0.0), and so it currently implements this as a (debug) assertion that
257 /// verifies that `p` is actually aligned to `alignment` and is usable for
258 /// at least `size` bytes, before delegating to `mi_free`.
259 ///
260 /// However, currently there's no way to have this crate enable mimalloc's
261 /// debug assertions, so these checks aren't particularly useful.
262 ///
263 /// Note: It's legal to pass null to this function, and you are not required
264 /// to use this to deallocate memory from an aligned allocation function.
265 pub fn mi_free_size_aligned(p: *mut c_void, size: usize, alignment: usize);
266
267 /// Size-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
268 /// the size and alignment as well.
269 ///
270 /// Note: unlike some allocators that require this information for
271 /// performance, mimalloc doesn't need it (as of the current version,
272 /// v2.0.0), and so it currently implements this as a (debug) assertion that
273 /// verifies that `p` is actually aligned to `alignment` and is usable for
274 /// at least `size` bytes, before delegating to `mi_free`.
275 ///
276 /// However, currently there's no way to have this crate enable mimalloc's
277 /// debug assertions, so these checks aren't particularly useful.
278 ///
279 /// Note: It's legal to pass null to this function.
280 pub fn mi_free_size(p: *mut c_void, size: usize);
281
282 /// Alignment-aware deallocation: Like [`mi_free`](crate::mi_free), but
283 /// accepts the size and alignment as well.
284 ///
285 /// Note: unlike some allocators that require this information for
286 /// performance, mimalloc doesn't need it (as of the current version,
287 /// v2.0.0), and so it currently implements this as a (debug) assertion that
288 /// verifies that `p` is actually aligned to `alignment` and is usable for
289 /// at least `size` bytes, before delegating to `mi_free`.
290 ///
291 /// However, currently there's no way to have this crate enable mimalloc's
292 /// debug assertions, so these checks aren't particularly useful.
293 ///
294 /// Note: It's legal to pass null to this function.
295 pub fn mi_free_aligned(p: *mut c_void, alignment: usize);
296
297 /// Print the main statistics.
298 ///
299 /// Ignores the passed in argument, and outputs to the registered output
300 /// function or stderr by default.
301 ///
302 /// Most detailed when using a debug build.
303 pub fn mi_stats_print(_: *mut c_void);
304
305 /// Print the main statistics.
306 ///
307 /// Pass `None` for `out` to use the default. If `out` is provided, `arc` is
308 /// passed as it's second parameter.
309 ///
310 /// Most detailed when using a debug build.
311 pub fn mi_stats_print_out(out: mi_output_fun, arg: *mut c_void);
312
313 /// Reset statistics.
314 ///
315 /// Note: This function is thread safe.
316 pub fn mi_stats_reset();
317
318 /// Merge thread local statistics with the main statistics and reset.
319 ///
320 /// Note: This function is thread safe.
321 #[cfg(feature = "v2")]
322 pub fn mi_stats_merge();
323
324 /// Return the mimalloc version number.
325 ///
326 /// For example version 1.6.3 would return the number `163`.
327 pub fn mi_version() -> c_int;
328
329 /// Initialize mimalloc on a thread.
330 ///
331 /// Should not be used as on most systems (pthreads, windows) this is done
332 /// automatically.
333 pub fn mi_thread_init();
334
335 /// Initialize the process.
336 ///
337 /// Should not be used on most systems, as it's called by thread_init or the
338 /// process loader.
339 pub fn mi_process_init();
340
341 /// Return process information (time and memory usage). All parameters are
342 /// optional (nullable) out-params:
343 ///
344 /// | Parameter | Description |
345 /// | :- | :- |
346 /// | `elapsed_msecs` | Elapsed wall-clock time of the process in milli-seconds. |
347 /// | `user_msecs` | User time in milli-seconds (as the sum over all threads). |
348 /// | `system_msecs` | System time in milli-seconds. |
349 /// | `current_rss` | Current working set size (touched pages). |
350 /// | `peak_rss` | Peak working set size (touched pages). |
351 /// | `current_commit` | Current committed memory (backed by the page file). |
352 /// | `peak_commit` | Peak committed memory (backed by the page file). |
353 /// | `page_faults` | Count of hard page faults. |
354 ///
355 /// The `current_rss` is precise on Windows and MacOSX; other systems
356 /// estimate this using `current_commit`. The `commit` is precise on Windows
357 /// but estimated on other systems as the amount of read/write accessible
358 /// memory reserved by mimalloc.
359 pub fn mi_process_info(
360 elapsed_msecs: *mut usize,
361 user_msecs: *mut usize,
362 system_msecs: *mut usize,
363 current_rss: *mut usize,
364 peak_rss: *mut usize,
365 current_commit: *mut usize,
366 peak_commit: *mut usize,
367 page_faults: *mut usize,
368 );
369
370 /// Uninitialize mimalloc on a thread.
371 ///
372 /// Should not be used as on most systems (pthreads, windows) this is done
373 /// automatically. Ensures that any memory that is not freed yet (but will
374 /// be freed by other threads in the future) is properly handled.
375 ///
376 /// Note: This function is thread safe.
377 pub fn mi_thread_done();
378
379 /// Print out heap statistics for this thread.
380 ///
381 /// Pass `None` for `out` to use the default. If `out` is provided, `arc` is
382 /// passed as it's second parameter
383 ///
384 /// Most detailed when using a debug build.
385 ///
386 /// Note: This function is thread safe.
387 pub fn mi_thread_stats_print_out(out: mi_output_fun, arg: *mut c_void);
388
389 /// Register an output function.
390 ///
391 /// - `out` The output function, use `None` to output to stderr.
392 /// - `arg` Argument that will be passed on to the output function.
393 ///
394 /// The `out` function is called to output any information from mimalloc,
395 /// like verbose or warning messages.
396 ///
397 /// Note: This function is thread safe.
398 pub fn mi_register_output(out: mi_output_fun, arg: *mut c_void);
399
400 /// Register a deferred free function.
401 ///
402 /// - `deferred_free` Address of a deferred free-ing function or `None` to
403 /// unregister.
404 /// - `arg` Argument that will be passed on to the deferred free function.
405 ///
406 /// Some runtime systems use deferred free-ing, for example when using
407 /// reference counting to limit the worst case free time.
408 ///
409 /// Such systems can register (re-entrant) deferred free function to free
410 /// more memory on demand.
411 ///
412 /// - When the `force` parameter is `true` all possible memory should be
413 /// freed.
414 ///
415 /// - The per-thread `heartbeat` parameter is monotonically increasing and
416 /// guaranteed to be deterministic if the program allocates
417 /// deterministically.
418 ///
419 /// - The `deferred_free` function is guaranteed to be called
420 /// deterministically after some number of allocations (regardless of
421 /// freeing or available free memory).
422 ///
423 /// At most one `deferred_free` function can be active.
424 ///
425 /// Note: This function is thread safe.
426 pub fn mi_register_deferred_free(out: mi_deferred_free_fun, arg: *mut c_void);
427
428 /// Register an error callback function.
429 ///
430 /// The `errfun` function is called on an error in mimalloc after emitting
431 /// an error message (through the output function).
432 ///
433 /// It as always legal to just return from the `errfun` function in which
434 /// case allocation functions generally return null or ignore the condition.
435 ///
436 /// The default function only calls abort() when compiled in secure mode
437 /// with an `EFAULT` error. The possible error codes are:
438 ///
439 /// - `EAGAIN` (11): Double free was detected (only in debug and secure
440 /// mode).
441 /// - `EFAULT` (14): Corrupted free list or meta-data was detected (only in
442 /// debug and secure mode).
443 /// - `ENOMEM` (12): Not enough memory available to satisfy the request.
444 /// - `EOVERFLOW` (75): Too large a request, for example in `mi_calloc`, the
445 /// `count` and `size` parameters are too large.
446 /// - `EINVAL` (22): Trying to free or re-allocate an invalid pointer.
447 ///
448 /// Note: This function is thread safe.
449 pub fn mi_register_error(out: mi_error_fun, arg: *mut c_void);
450}
451
/// An output callback. Must be thread-safe.
///
/// See [`mi_stats_print_out`], [`mi_thread_stats_print_out`],
/// [`mi_register_output`].
pub type mi_output_fun = Option<unsafe extern "C" fn(msg: *const c_char, arg: *mut c_void)>;

/// Type of deferred free functions. Must be thread-safe.
///
/// - `force`: If true, all outstanding items should be freed.
/// - `heartbeat`: A monotonically increasing count.
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_deferred_free`].
pub type mi_deferred_free_fun =
    Option<unsafe extern "C" fn(force: bool, heartbeat: c_ulonglong, arg: *mut c_void)>;

/// Type of error callback functions. Must be thread-safe.
///
/// - `err`: Error code (see [`mi_register_error`] for a list).
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_error`].
pub type mi_error_fun = Option<unsafe extern "C" fn(code: c_int, arg: *mut c_void)>;
474
/// Runtime options. All options are false by default.
///
/// Passed to the `mi_option_*` getter/setter functions below.
pub type mi_option_t = c_int;

#[cfg(all(feature = "arena", feature = "v2"))]
/// Arena id (an integer handle under the v2 API).
pub type mi_arena_id_t = c_int;

#[cfg(all(feature = "arena", not(feature = "v2")))]
/// Arena id (an opaque pointer handle when not using the v2 API).
pub type mi_arena_id_t = *mut c_void;
485
// Note: mimalloc doc website seems to have the order of show_stats and
// show_errors reversed as of 1.6.3, however what I have here is correct:
// https://github.com/microsoft/mimalloc/issues/266#issuecomment-653822341

/// Print error messages to `stderr`.
pub const mi_option_show_errors: mi_option_t = 0;

/// Print statistics to `stderr` when the program is done.
pub const mi_option_show_stats: mi_option_t = 1;

/// Print verbose messages to `stderr`.
pub const mi_option_verbose: mi_option_t = 2;

/// ### The following options are experimental
///
/// Option (experimental) Use large OS pages (2MiB in size) if possible.
///
/// Use large OS pages (2MiB) when available; for some workloads this can
/// significantly improve performance. Use `mi_option_verbose` to check if
/// the large OS pages are enabled -- usually one needs to explicitly allow
/// large OS pages (as on Windows and Linux). However, sometimes the OS is
/// very slow to reserve contiguous physical memory for large OS pages so
/// use with care on systems that can have fragmented memory (for that
/// reason, we generally recommend to use `mi_option_reserve_huge_os_pages`
/// instead whenever possible).
pub const mi_option_large_os_pages: mi_option_t = 6;

/// Option (experimental) The number of huge OS pages (1GiB in size) to reserve at the start of the program.
///
/// This reserves the huge pages at startup and sometimes this can give a large (latency) performance
/// improvement on big workloads. Usually it is better to not use MIMALLOC_LARGE_OS_PAGES in
/// combination with this setting. Just like large OS pages, use with care as reserving contiguous
/// physical memory can take a long time when memory is fragmented (but reserving the huge pages is
/// done at startup only once). Note that we usually need to explicitly enable huge OS pages (as on
/// Windows and Linux)). With huge OS pages, it may be beneficial to set the setting
/// mi_option_eager_commit_delay=N (N is 1 by default) to delay the initial N segments (of 4MiB) of
/// a thread to not allocate in the huge OS pages; this prevents threads that are short lived and
/// allocate just a little to take up space in the huge OS page area (which cannot be reset).
pub const mi_option_reserve_huge_os_pages: mi_option_t = 7;

/// Option (experimental) Reserve huge OS pages at node N.
///
/// The huge pages are usually allocated evenly among NUMA nodes.
/// You can use mi_option_reserve_huge_os_pages_at=N where `N` is the numa node (starting at 0) to allocate all
/// the huge pages at a specific numa node instead.
pub const mi_option_reserve_huge_os_pages_at: mi_option_t = 8;

/// Option (experimental) Reserve specified amount of OS memory at startup, e.g. "1g" or "512m".
pub const mi_option_reserve_os_memory: mi_option_t = 9;

#[cfg(feature = "v2")]
/// Option (experimental) the first N segments per thread are not eagerly committed (=1).
pub const mi_option_eager_commit_delay: mi_option_t = 14;

/// Option (experimental) Pretend there are at most N NUMA nodes; Use 0 to use the actual detected NUMA nodes at runtime.
pub const mi_option_use_numa_nodes: mi_option_t = 16;

/// Option (experimental) If set to 1, do not use OS memory for allocation (but only pre-reserved arenas)
pub const mi_option_limit_os_alloc: mi_option_t = 17;

/// Option (experimental) OS tag to assign to mimalloc'd memory
pub const mi_option_os_tag: mi_option_t = 18;

/// Option (experimental) Maximum number of error messages to emit.
pub const mi_option_max_errors: mi_option_t = 19;

/// Option (experimental) Maximum number of warning messages to emit.
pub const mi_option_max_warnings: mi_option_t = 20;

#[cfg(feature = "v2")]
/// Option (experimental)
pub const mi_option_max_segment_reclaim: mi_option_t = 21;

/// Last option (when the `v2` feature is enabled).
#[cfg(feature = "v2")]
pub const _mi_option_last: mi_option_t = 38;
/// Last option (when the `v2` feature is not enabled).
#[cfg(not(feature = "v2"))]
pub const _mi_option_last: mi_option_t = 46;
564
extern "C" {
    // Note: mi_option_{enable,disable} aren't exposed because they're redundant
    // and because of https://github.com/microsoft/mimalloc/issues/266.

    /// Returns true if the provided option is enabled.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_is_enabled(option: mi_option_t) -> bool;

    /// Enable or disable the given option.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled(option: mi_option_t, enable: bool);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], enables or disables the option. If it has,
    /// this function does nothing.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled_default(option: mi_option_t, enable: bool);

    /// Returns the value of the provided option.
    ///
    /// The value of boolean options is 1 or 0, however experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons,
    /// however you can still use them as arguments to this and other
    /// `mi_option_` functions if needed, see the mimalloc documentation for
    /// details: <https://microsoft.github.io/mimalloc/group__options.html>
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_get(option: mi_option_t) -> c_long;

    /// Set the option to the given value.
    ///
    /// The value of boolean options is 1 or 0, however experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons,
    /// however you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set(option: mi_option_t, value: c_long);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], sets the option to the given value. If it
    /// has, this function does nothing.
    ///
    /// The value of boolean options is 1 or 0, however experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons,
    /// however you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_default(option: mi_option_t, value: c_long);
}
628
/// First-class heaps that can be destroyed in one go.
///
/// Note: The pointers allocated out of a heap can be freed using
/// [`mi_free`](crate::mi_free) -- there is no `mi_heap_free`.
///
/// # Example
///
/// ```
/// use libmimalloc_sys as mi;
/// unsafe {
///     let h = mi::mi_heap_new();
///     assert!(!h.is_null());
///     let p = mi::mi_heap_malloc(h, 50);
///     assert!(!p.is_null());
///
///     // use p...
///     mi::mi_free(p);
///
///     // Clean up the heap. Note that pointers allocated from `h`
///     // are *not* invalidated by `mi_heap_delete`. You would have
///     // to use (the very dangerous) `mi_heap_destroy` for that
///     // behavior.
///     mi::mi_heap_delete(h);
/// }
/// ```
pub enum mi_heap_t {}
655
/// An area of heap space contains blocks of a single size.
///
/// The bytes in freed blocks are `committed - used`.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct mi_heap_area_t {
    /// Start of the area containing heap blocks.
    pub blocks: *mut c_void,
    /// Bytes reserved for this area.
    pub reserved: usize,
    /// Current committed bytes of this area.
    pub committed: usize,
    /// Bytes in use by allocated blocks.
    pub used: usize,
    /// Size in bytes of one block.
    pub block_size: usize,
    /// Size in bytes of a full block including padding and metadata.
    pub full_block_size: usize,
    /// Heap tag associated with this area (not available in v3).
    #[cfg(feature = "v2")]
    pub heap_tag: i32,
    /// Reserved / internal (replaces `heap_tag` in v3).
    #[cfg(not(feature = "v2"))]
    pub reserved1: *mut c_void,
}
681
/// Visitor function passed to [`mi_heap_visit_blocks`].
///
/// Should return `true` to continue, and `false` to stop visiting (i.e. break).
///
/// This function is always first called for every `area` with `block` as a null
/// pointer. If `visit_all_blocks` was `true`, the function is then called for
/// every allocated block in that area.
pub type mi_block_visit_fun = Option<
    unsafe extern "C" fn(
        heap: *const mi_heap_t,
        area: *const mi_heap_area_t,
        block: *mut c_void,
        block_size: usize,
        arg: *mut c_void,
    ) -> bool,
>;
698
699extern "C" {
700 /// Create a new heap that can be used for allocation.
701 pub fn mi_heap_new() -> *mut mi_heap_t;
702
703 /// Delete a previously allocated heap.
704 ///
705 /// This will release resources and migrate any still allocated blocks in
706 /// this heap (efficienty) to the default heap.
707 ///
708 /// If `heap` is the default heap, the default heap is set to the backing
709 /// heap.
710 pub fn mi_heap_delete(heap: *mut mi_heap_t);
711
712 /// Destroy a heap, freeing all its still allocated blocks.
713 ///
714 /// Use with care as this will free all blocks still allocated in the heap.
715 /// However, this can be a very efficient way to free all heap memory in one
716 /// go.
717 ///
718 /// If `heap` is the default heap, the default heap is set to the backing
719 /// heap.
720 pub fn mi_heap_destroy(heap: *mut mi_heap_t);
721
722 /// Set the default heap to use for [`mi_malloc`](crate::mi_malloc) et al.
723 ///
724 /// Returns the previous default heap.
725 #[cfg(feature = "v2")]
726 pub fn mi_heap_set_default(heap: *mut mi_heap_t) -> *mut mi_heap_t;
727
728 /// Get the default heap that is used for [`mi_malloc`](crate::mi_malloc) et al.
729 #[cfg(feature = "v2")]
730 pub fn mi_heap_get_default() -> *mut mi_heap_t;
731
732 /// Get the backing heap.
733 ///
734 /// The _backing_ heap is the initial default heap for a thread and always
735 /// available for allocations. It cannot be destroyed or deleted except by
736 /// exiting the thread.
737 #[cfg(feature = "v2")]
738 pub fn mi_heap_get_backing() -> *mut mi_heap_t;
739
740 /// Release outstanding resources in a specific heap.
741 ///
742 /// See also [`mi_collect`].
743 pub fn mi_heap_collect(heap: *mut mi_heap_t, force: bool);
744
    /// Equivalent to [`mi_malloc`](crate::mi_malloc), but allocates out of the
    /// specific heap instead of the default.
    ///
    /// Returns null on out-of-memory.
    pub fn mi_heap_malloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_zalloc`](crate::mi_zalloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_calloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_calloc(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_mallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_mallocn(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_malloc_small`], but allocates out of the specific
    /// heap instead of the default.
    ///
    /// `size` must be smaller or equal to [`MI_SMALL_SIZE_MAX`].
    pub fn mi_heap_malloc_small(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_small`], but allocates out of the specific
    /// heap instead of the default.
    ///
    /// `size` must be smaller or equal to [`MI_SMALL_SIZE_MAX`].
    pub fn mi_heap_zalloc_small(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_realloc`](crate::mi_realloc), but allocates out of
    /// the specific heap instead of the default.
    pub fn mi_heap_realloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_reallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocn(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        count: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_reallocf`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocf(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_strdup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strdup(heap: *mut mi_heap_t, s: *const c_char) -> *mut c_char;

    /// Equivalent to [`mi_strndup`], but allocates out of the specific heap
    /// instead of the default. Duplicates at most `n` bytes of `s`.
    pub fn mi_heap_strndup(heap: *mut mi_heap_t, s: *const c_char, n: usize) -> *mut c_char;

    /// Equivalent to [`mi_realpath`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_realpath(
        heap: *mut mi_heap_t,
        fname: *const c_char,
        resolved_name: *mut c_char,
    ) -> *mut c_char;
805
    /// Equivalent to [`mi_malloc_aligned`](crate::mi_malloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    ///
    /// Note: `alignment` must be a power of two.
    pub fn mi_heap_malloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_malloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_calloc_aligned(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_calloc_aligned_at(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned`](crate::mi_realloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_realloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_realloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_rezalloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_recalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_recalloc(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_rezalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_rezalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;
930
    /// Does a heap contain a pointer to a previously allocated block?
    ///
    /// `p` must be a pointer to a previously allocated block (in any heap) -- it cannot be some
    /// random pointer!
    ///
    /// Returns `true` if the block pointed to by `p` is in the `heap`.
    ///
    /// See [`mi_heap_check_owned`].
    #[cfg(feature = "v2")]
    pub fn mi_heap_contains_block(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of a heap.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// given heap or any other mimalloc heap. Returns `true` if `p` points to a
    /// block in the given heap, `false` otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`], and
    /// [`mi_is_in_heap_region`].
    #[cfg(feature = "v2")]
    pub fn mi_heap_check_owned(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of the default heap of this thread.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// default heap for this thread, or any other mimalloc heap. Returns `true`
    /// if `p` points to a block in the default heap, `false` otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`]
    pub fn mi_check_owned(p: *const c_void) -> bool;
965
    /// Visit all areas and blocks in `heap`.
    ///
    /// If `visit_all_blocks` is false, the `visitor` is only called once for
    /// every heap area. If it's true, the `visitor` is also called for every
    /// allocated block inside every area (with `!block.is_null()`). Return
    /// `false` from the `visitor` to return early (return `true` to keep
    /// visiting).
    ///
    /// `arg` is an extra argument passed into the `visitor`.
    ///
    /// Returns `true` if all areas and blocks were visited.
    ///
    /// Passing a `None` visitor is allowed, and is a no-op.
    #[cfg(feature = "v2")]
    pub fn mi_heap_visit_blocks(
        heap: *const mi_heap_t,
        visit_all_blocks: bool,
        visitor: mi_block_visit_fun,
        arg: *mut c_void,
    ) -> bool;

    /// Visit all areas and blocks in `heap`.
    ///
    /// If `visit_all_blocks` is false, the `visitor` is only called once for
    /// every heap area. If it's true, the `visitor` is also called for every
    /// allocated block inside every area (with `!block.is_null()`). Return
    /// `false` from the `visitor` to return early (return `true` to keep
    /// visiting).
    ///
    /// `arg` is an extra argument passed into the `visitor`.
    ///
    /// Returns `true` if all areas and blocks were visited.
    ///
    /// Passing a `None` visitor is allowed, and is a no-op.
    ///
    /// Note: in v3 the `heap` parameter is non-const compared to v2.
    #[cfg(not(feature = "v2"))]
    pub fn mi_heap_visit_blocks(
        heap: *mut mi_heap_t,
        visit_all_blocks: bool,
        visitor: mi_block_visit_fun,
        arg: *mut c_void,
    ) -> bool;
1007
    /// Create a heap that only allocates in the specified arena
    #[cfg(feature = "arena")]
    pub fn mi_heap_new_in_arena(arena_id: mi_arena_id_t) -> *mut mi_heap_t;

    /// Reserve OS memory for use by mimalloc. Reserved areas are used
    /// before allocating from the OS again. By reserving a large area upfront,
    /// allocation can be more efficient, and can be better managed on systems
    /// without `mmap`/`VirtualAlloc` (like WASM for example).
    ///
    /// - `size` The size to reserve.
    /// - `commit` Commit the memory upfront.
    /// - `allow_large` Allow large OS pages (2MiB) to be used?
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena_id if successful.
    ///
    /// Returns 0 if successful, and an error code otherwise (e.g. `ENOMEM`)
    #[cfg(feature = "arena")]
    pub fn mi_reserve_os_memory_ex(
        size: usize,
        commit: bool,
        allow_large: bool,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> c_int;
1032
    /// Check if the heap page containing `p` is under-utilized.
    ///
    /// `perc_threshold` is presumably a utilization percentage in `0..=100`
    /// below which the page counts as under-utilized -- TODO confirm against
    /// the mimalloc source.
    ///
    /// # Safety
    /// Assumes the page belonging to `p` is only accessed by the calling thread.
    pub fn mi_unsafe_heap_page_is_under_utilized(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        perc_threshold: usize,
    ) -> bool;

    /// Return the minimum size for an arena (v3 only).
    #[cfg(feature = "v3")]
    pub fn mi_arena_min_size() -> usize;

    /// Equivalent to [`mi_heap_zalloc_small`], but for a thread-local heap (`theap`) in v3.
    ///
    /// `size` must be smaller or equal to [`MI_SMALL_SIZE_MAX`].
    #[cfg(feature = "v3")]
    pub fn mi_theap_zalloc_small(theap: *mut mi_heap_t, size: usize) -> *mut c_void;
1052
    /// Manage a particular memory area for use by mimalloc.
    /// This is just like `mi_reserve_os_memory_ex` except that the area should already be
    /// allocated in some manner and available for use by mimalloc.
    ///
    /// # Safety
    /// mimalloc will likely segfault when allocating from the arena if the arena `start` & `size`
    /// aren't aligned with mimalloc's `MI_SEGMENT_ALIGN` (e.g. 32MB on x86_64 machines).
    ///
    /// - `start` Start of the memory area
    /// - `size` The size of the memory area. Must be larger than `MI_ARENA_BLOCK_SIZE` (e.g. 64MB
    ///   on x86_64 machines).
    /// - `is_committed` Set true if the memory range is already committed.
    /// - `is_large` Set true if the memory range consists of large OS pages, or if the memory should
    ///   not be decommitted or protected (like rdma etc.).
    /// - `is_zero` Set true if the memory range consists only of zeros.
    /// - `numa_node` Possible associated numa node or `-1`.
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena_id if successful.
    ///
    /// Returns `true` if arena was successfully allocated
    #[cfg(feature = "arena")]
    pub fn mi_manage_os_memory_ex(
        start: *const c_void,
        size: usize,
        is_committed: bool,
        is_large: bool,
        is_zero: bool,
        numa_node: c_int,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> bool;
1084}
1085
#[cfg(test)]
mod tests {
    use super::*;

    /// Allocating a block must yield a usable size of at least the requested
    /// size. The original test dereferenced the result without a null check
    /// and leaked the allocation; both are fixed here.
    #[test]
    fn it_calculates_usable_size() {
        let ptr = unsafe { mi_malloc(32) } as *mut u8;
        // A 32-byte allocation should never fail under normal conditions;
        // guard anyway so mi_usable_size isn't queried on null.
        assert!(!ptr.is_null(), "mi_malloc(32) returned null");
        let usable_size = unsafe { mi_usable_size(ptr as *mut c_void) };
        assert!(
            usable_size >= 32,
            "usable_size should at least equal to the allocated size"
        );
        // Release the block so the test itself doesn't leak.
        unsafe { mi_free(ptr as *mut c_void) };
    }

    /// Options default to 0 (off); setting each to 1 must be observable
    /// through `mi_option_get`. Note: this mutates process-global mimalloc
    /// state and does not restore it.
    #[test]
    fn runtime_stable_option() {
        unsafe {
            assert_eq!(mi_option_get(mi_option_show_errors), 0);
            mi_option_set(mi_option_show_errors, 1);
            assert_eq!(mi_option_get(mi_option_show_errors), 1);

            assert_eq!(mi_option_get(mi_option_show_stats), 0);
            mi_option_set(mi_option_show_stats, 1);
            assert_eq!(mi_option_get(mi_option_show_stats), 1);

            assert_eq!(mi_option_get(mi_option_verbose), 0);
            mi_option_set(mi_option_verbose, 1);
            assert_eq!(mi_option_get(mi_option_verbose), 1);
        }
    }
}