libmimalloc_sys2/
extended.rs

1#![allow(nonstandard_style)]
2
3use core::ffi::c_void;
4
5use cty::{c_char, c_int, c_long, c_ulonglong};
6
/// Upper bound, in bytes, on the `size` argument accepted by the `_small`
/// family of allocation functions ([`mi_malloc_small`], [`mi_zalloc_small`],
/// etc). Equal to 128 machine words.
pub const MI_SMALL_SIZE_MAX: usize = core::mem::size_of::<*mut c_void>() * 128;
10
11extern "C" {
12    /// Allocate `count` items of `size` length each.
13    ///
14    /// Returns `null` if `count * size` overflows or on out-of-memory.
15    ///
16    /// All items are initialized to zero.
17    pub fn mi_calloc(count: usize, size: usize) -> *mut c_void;
18
19    /// Allocate `count` items of `size` length each.
20    ///
21    /// Returns `null` if `count * size` overflows or on out-of-memory,
22    /// otherwise returns the same as [`mi_malloc(count *
23    /// size)`](crate::mi_malloc).
24    /// Equivalent to [`mi_calloc`], but returns uninitialized (and not zeroed)
25    /// bytes.
26    pub fn mi_mallocn(count: usize, size: usize) -> *mut c_void;
27
28    /// Re-allocate memory to `count` elements of `size` bytes.
29    ///
30    /// The realloc equivalent of the [`mi_mallocn`] interface. Returns `null`
31    /// if `count * size` overflows or on out-of-memory, otherwise returns the
32    /// same as [`mi_realloc(p, count * size)`](crate::mi_realloc).
33    pub fn mi_reallocn(p: *mut c_void, count: usize, size: usize) -> *mut c_void;
34
35    /// Try to re-allocate memory to `newsize` bytes _in place_.
36    ///
37    /// Returns null on out-of-memory or if the memory could not be expanded in
38    /// place. On success, returns the same pointer as `p`.
39    ///
40    /// If `newsize` is larger than the original `size` allocated for `p`, the
41    /// bytes after `size` are uninitialized.
42    ///
43    /// If null is returned, the original pointer is not freed.
44    ///
45    /// Note: Conceptually, this is a realloc-like which returns null if it
46    /// would be forced to reallocate memory and copy. In practice it's
47    /// equivalent testing against [`mi_usable_size`](crate::mi_usable_size).
48    pub fn mi_expand(p: *mut c_void, newsize: usize) -> *mut c_void;
49
50    /// Re-allocate memory to `newsize` bytes.
51    ///
52    /// This differs from [`mi_realloc`](crate::mi_realloc) in that on failure,
53    /// `p` is freed.
54    pub fn mi_reallocf(p: *mut c_void, newsize: usize) -> *mut c_void;
55
56    /// Allocate and duplicate a nul-terminated C string.
57    ///
58    /// This can be useful for Rust code when interacting with the FFI.
59    pub fn mi_strdup(s: *const c_char) -> *mut c_char;
60
61    /// Allocate and duplicate a nul-terminated C string, up to `n` bytes.
62    ///
63    /// This can be useful for Rust code when interacting with the FFI.
64    pub fn mi_strndup(s: *const c_char, n: usize) -> *mut c_char;
65
66    /// Resolve a file path name, producing a `C` string which can be passed to
67    /// [`mi_free`](crate::mi_free).
68    ///
69    /// `resolved_name` should be null, but can also point to a buffer of at
70    /// least `PATH_MAX` bytes.
71    ///
72    /// If successful, returns a pointer to the resolved absolute file name, or
73    /// `null` on failure (with `errno` set to the error code).
74    ///
75    /// If `resolved_name` was `null`, the returned result should be freed with
76    /// [`mi_free`](crate::mi_free).
77    ///
78    /// This can rarely be useful in FFI code, but is mostly included for
79    /// completeness.
80    pub fn mi_realpath(fname: *const c_char, resolved_name: *mut c_char) -> *mut c_char;
81
82    /// Allocate `size * count` bytes aligned by `alignment`.
83    ///
84    /// Return pointer to the allocated memory or null if out of memory or if
85    /// `size * count` overflows.
86    ///
87    /// Returns a unique pointer if called with `size * count` 0.
88    pub fn mi_calloc_aligned(count: usize, size: usize, alignment: usize) -> *mut c_void;
89
90    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`.
91    ///
92    /// Note that the resulting pointer itself is not aligned by the alignment,
93    /// but after `offset` bytes it will be. This can be useful for allocating
94    /// data with an inline header, where the data has a specific alignment
95    /// requirement.
96    ///
97    /// Specifically, if `p` is the returned pointer `p.add(offset)` is aligned
98    /// to `alignment`.
99    pub fn mi_malloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;
100
101    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`,
102    /// zero-initialized.
103    ///
104    /// This is a [`mi_zalloc`](crate::mi_zalloc) equivalent of [`mi_malloc_aligned_at`].
105    pub fn mi_zalloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;
106
107    /// Allocate `size` of bytes aligned by `alignment` and place the address of the
108    /// allocated memory to `ptr`.
109    ///
110    /// Returns zero on success, invalid argument for invalid alignment, or out-of-memory.
111    pub fn mi_posix_memalign(ptr: *mut *mut c_void, alignment: usize, size: usize) -> c_int;
112
113    /// Allocate `size` bytes aligned by `alignment` with alignment as the first
114    /// parameter.
115    ///
116    /// Return pointer to the allocated memory or null if out of memory.
117    pub fn mi_aligned_alloc(alignment: usize, size: usize) -> *mut c_void;
118
119    /// Allocate `size * count` bytes aligned by `alignment` at a specified
120    /// `offset`, zero-initialized.
121    ///
122    /// This is a [`calloc`](crate::mi_calloc) equivalent of [`mi_malloc_aligned_at`].
123    pub fn mi_calloc_aligned_at(
124        count: usize,
125        size: usize,
126        alignment: usize,
127        offset: usize,
128    ) -> *mut c_void;
129
130    /// Re-allocate memory to `newsize` bytes aligned by `alignment` at a
131    /// specified `offset`.
132    ///
133    /// This is a [`realloc`](crate::mi_realloc) equivalent of [`mi_malloc_aligned_at`].
134    pub fn mi_realloc_aligned_at(
135        p: *mut c_void,
136        newsize: usize,
137        alignment: usize,
138        offset: usize,
139    ) -> *mut c_void;
140
141    /// Zero initialized [re-allocation](crate::mi_realloc).
142    ///
143    /// In general, only valid on memory originally allocated by zero
144    /// initialization: [`mi_calloc`](crate::mi_calloc),
145    /// [`mi_zalloc`](crate::mi_zalloc),
146    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
147    pub fn mi_rezalloc(p: *mut c_void, newsize: usize) -> *mut c_void;
148
149    /// Zero initialized [re-allocation](crate::mi_realloc), following `calloc`
150    /// paramater conventions.
151    ///
152    /// In general, only valid on memory originally allocated by zero
153    /// initialization: [`mi_calloc`](crate::mi_calloc),
154    /// [`mi_zalloc`](crate::mi_zalloc),
155    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
156    pub fn mi_recalloc(p: *mut c_void, newcount: usize, size: usize) -> *mut c_void;
157
158    /// Aligned version of [`mi_rezalloc`].
159    pub fn mi_rezalloc_aligned(p: *mut c_void, newsize: usize, alignment: usize) -> *mut c_void;
160
161    /// Offset-aligned version of [`mi_rezalloc`].
162    pub fn mi_rezalloc_aligned_at(
163        p: *mut c_void,
164        newsize: usize,
165        alignment: usize,
166        offset: usize,
167    ) -> *mut c_void;
168
169    /// Aligned version of [`mi_recalloc`].
170    pub fn mi_recalloc_aligned(
171        p: *mut c_void,
172        newcount: usize,
173        size: usize,
174        alignment: usize,
175    ) -> *mut c_void;
176
177    /// Offset-aligned version of [`mi_recalloc`].
178    pub fn mi_recalloc_aligned_at(
179        p: *mut c_void,
180        newcount: usize,
181        size: usize,
182        alignment: usize,
183        offset: usize,
184    ) -> *mut c_void;
185
186    /// Allocate an object of no more than [`MI_SMALL_SIZE_MAX`] bytes.
187    ///
188    /// Does not check that `size` is indeed small.
189    ///
190    /// Note: Currently [`mi_malloc`](crate::mi_malloc) checks if `size` is
191    /// small and calls this if
192    /// so at runtime, so its' only worth using if you know for certain.
193    pub fn mi_malloc_small(size: usize) -> *mut c_void;
194
195    /// Allocate an zero-initialized object of no more than
196    /// [`MI_SMALL_SIZE_MAX`] bytes.
197    ///
198    /// Does not check that `size` is indeed small.
199    ///
200    /// Note: Currently [`mi_zalloc`](crate::mi_zalloc) checks if `size` is
201    /// small and calls this if so at runtime, so its' only worth using if you
202    /// know for certain.
203    pub fn mi_zalloc_small(size: usize) -> *mut c_void;
204
205    /// Return the available bytes in a memory block.
206    ///
207    /// The returned size can be used to call `mi_expand` successfully.
208    pub fn mi_usable_size(p: *const c_void) -> usize;
209
210    /// Return the used allocation size.
211    ///
212    /// Returns the size `n` that will be allocated, where `n >= size`.
213    ///
214    /// Generally, `mi_usable_size(mi_malloc(size)) == mi_good_size(size)`. This
215    /// can be used to reduce internal wasted space when allocating buffers for
216    /// example.
217    ///
218    /// See [`mi_usable_size`](crate::mi_usable_size).
219    pub fn mi_good_size(size: usize) -> usize;
220
221    /// Eagerly free memory.
222    ///
223    /// If `force` is true, aggressively return memory to the OS (can be
224    /// expensive!)
225    ///
226    /// Regular code should not have to call this function. It can be beneficial
227    /// in very narrow circumstances; in particular, when a long running thread
228    /// allocates a lot of blocks that are freed by other threads it may improve
229    /// resource usage by calling this every once in a while.
230    pub fn mi_collect(force: bool);
231
232    /// Checked free: If `p` came from mimalloc's heap (as decided by
233    /// [`mi_is_in_heap_region`]), this is [`mi_free(p)`](crate::mi_free), but
234    /// otherwise it is a no-op.
235    pub fn mi_cfree(p: *mut c_void);
236
237    /// Returns true if this is a pointer into a memory region that has been
238    /// reserved by the mimalloc heap.
239    ///
240    /// This function is described by the mimalloc documentation as "relatively
241    /// fast".
242    ///
243    /// See also [`mi_heap_check_owned`], which is (much) slower and slightly
244    /// more precise, but only concerns a single `mi_heap`.
245    pub fn mi_is_in_heap_region(p: *const c_void) -> bool;
246
247    /// Layout-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
248    /// the size and alignment as well.
249    ///
250    /// Note: unlike some allocators that require this information for
251    /// performance, mimalloc doesn't need it (as of the current version,
252    /// v2.0.0), and so it currently implements this as a (debug) assertion that
253    /// verifies that `p` is actually aligned to `alignment` and is usable for
254    /// at least `size` bytes, before delegating to `mi_free`.
255    ///
256    /// However, currently there's no way to have this crate enable mimalloc's
257    /// debug assertions, so these checks aren't particularly useful.
258    ///
259    /// Note: It's legal to pass null to this function, and you are not required
260    /// to use this to deallocate memory from an aligned allocation function.
261    pub fn mi_free_size_aligned(p: *mut c_void, size: usize, alignment: usize);
262
263    /// Size-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
264    /// the size and alignment as well.
265    ///
266    /// Note: unlike some allocators that require this information for
267    /// performance, mimalloc doesn't need it (as of the current version,
268    /// v2.0.0), and so it currently implements this as a (debug) assertion that
269    /// verifies that `p` is actually aligned to `alignment` and is usable for
270    /// at least `size` bytes, before delegating to `mi_free`.
271    ///
272    /// However, currently there's no way to have this crate enable mimalloc's
273    /// debug assertions, so these checks aren't particularly useful.
274    ///
275    /// Note: It's legal to pass null to this function.
276    pub fn mi_free_size(p: *mut c_void, size: usize);
277
278    /// Alignment-aware deallocation: Like [`mi_free`](crate::mi_free), but
279    /// accepts the size and alignment as well.
280    ///
281    /// Note: unlike some allocators that require this information for
282    /// performance, mimalloc doesn't need it (as of the current version,
283    /// v2.0.0), and so it currently implements this as a (debug) assertion that
284    /// verifies that `p` is actually aligned to `alignment` and is usable for
285    /// at least `size` bytes, before delegating to `mi_free`.
286    ///
287    /// However, currently there's no way to have this crate enable mimalloc's
288    /// debug assertions, so these checks aren't particularly useful.
289    ///
290    /// Note: It's legal to pass null to this function.
291    pub fn mi_free_aligned(p: *mut c_void, alignment: usize);
292
293    /// Print the main statistics.
294    ///
295    /// Ignores the passed in argument, and outputs to the registered output
296    /// function or stderr by default.
297    ///
298    /// Most detailed when using a debug build.
299    pub fn mi_stats_print(_: *mut c_void);
300
301    /// Print the main statistics.
302    ///
303    /// Pass `None` for `out` to use the default. If `out` is provided, `arc` is
304    /// passed as it's second parameter.
305    ///
306    /// Most detailed when using a debug build.
307    pub fn mi_stats_print_out(out: mi_output_fun, arg: *mut c_void);
308
309    /// Reset statistics.
310    ///
311    /// Note: This function is thread safe.
312    pub fn mi_stats_reset();
313
314    /// Merge thread local statistics with the main statistics and reset.
315    ///
316    /// Note: This function is thread safe.
317    pub fn mi_stats_merge();
318
319    /// Return the mimalloc version number.
320    ///
321    /// For example version 1.6.3 would return the number `163`.
322    pub fn mi_version() -> c_int;
323
324    /// Initialize mimalloc on a thread.
325    ///
326    /// Should not be used as on most systems (pthreads, windows) this is done
327    /// automatically.
328    pub fn mi_thread_init();
329
330    /// Initialize the process.
331    ///
332    /// Should not be used on most systems, as it's called by thread_init or the
333    /// process loader.
334    pub fn mi_process_init();
335
336    /// Return process information (time and memory usage). All parameters are
337    /// optional (nullable) out-params:
338    ///
339    /// | Parameter        | Description |
340    /// | :-               | :- |
341    /// | `elapsed_msecs`  | Elapsed wall-clock time of the process in milli-seconds. |
342    /// | `user_msecs`     | User time in milli-seconds (as the sum over all threads). |
343    /// | `system_msecs`   | System time in milli-seconds. |
344    /// | `current_rss`    | Current working set size (touched pages). |
345    /// | `peak_rss`       | Peak working set size (touched pages). |
346    /// | `current_commit` | Current committed memory (backed by the page file). |
347    /// | `peak_commit`    | Peak committed memory (backed by the page file). |
348    /// | `page_faults`    | Count of hard page faults. |
349    ///
350    /// The `current_rss` is precise on Windows and MacOSX; other systems
351    /// estimate this using `current_commit`. The `commit` is precise on Windows
352    /// but estimated on other systems as the amount of read/write accessible
353    /// memory reserved by mimalloc.
354    pub fn mi_process_info(
355        elapsed_msecs: *mut usize,
356        user_msecs: *mut usize,
357        system_msecs: *mut usize,
358        current_rss: *mut usize,
359        peak_rss: *mut usize,
360        current_commit: *mut usize,
361        peak_commit: *mut usize,
362        page_faults: *mut usize,
363    );
364
365    /// Uninitialize mimalloc on a thread.
366    ///
367    /// Should not be used as on most systems (pthreads, windows) this is done
368    /// automatically. Ensures that any memory that is not freed yet (but will
369    /// be freed by other threads in the future) is properly handled.
370    ///
371    /// Note: This function is thread safe.
372    pub fn mi_thread_done();
373
374    /// Print out heap statistics for this thread.
375    ///
376    /// Pass `None` for `out` to use the default. If `out` is provided, `arc` is
377    /// passed as it's second parameter
378    ///
379    /// Most detailed when using a debug build.
380    ///
381    /// Note: This function is thread safe.
382    pub fn mi_thread_stats_print_out(out: mi_output_fun, arg: *mut c_void);
383
384    /// Register an output function.
385    ///
386    /// - `out` The output function, use `None` to output to stderr.
387    /// - `arg` Argument that will be passed on to the output function.
388    ///
389    /// The `out` function is called to output any information from mimalloc,
390    /// like verbose or warning messages.
391    ///
392    /// Note: This function is thread safe.
393    pub fn mi_register_output(out: mi_output_fun, arg: *mut c_void);
394
395    /// Register a deferred free function.
396    ///
397    /// - `deferred_free` Address of a deferred free-ing function or `None` to
398    ///   unregister.
399    /// - `arg` Argument that will be passed on to the deferred free function.
400    ///
401    /// Some runtime systems use deferred free-ing, for example when using
402    /// reference counting to limit the worst case free time.
403    ///
404    /// Such systems can register (re-entrant) deferred free function to free
405    /// more memory on demand.
406    ///
407    /// - When the `force` parameter is `true` all possible memory should be
408    ///   freed.
409    ///
410    /// - The per-thread `heartbeat` parameter is monotonically increasing and
411    ///   guaranteed to be deterministic if the program allocates
412    ///   deterministically.
413    ///
414    /// - The `deferred_free` function is guaranteed to be called
415    ///   deterministically after some number of allocations (regardless of
416    ///   freeing or available free memory).
417    ///
418    /// At most one `deferred_free` function can be active.
419    ///
420    /// Note: This function is thread safe.
421    pub fn mi_register_deferred_free(out: mi_deferred_free_fun, arg: *mut c_void);
422
423    /// Register an error callback function.
424    ///
425    /// The `errfun` function is called on an error in mimalloc after emitting
426    /// an error message (through the output function).
427    ///
428    /// It as always legal to just return from the `errfun` function in which
429    /// case allocation functions generally return null or ignore the condition.
430    ///
431    /// The default function only calls abort() when compiled in secure mode
432    /// with an `EFAULT` error. The possible error codes are:
433    ///
434    /// - `EAGAIN` (11): Double free was detected (only in debug and secure
435    ///   mode).
436    /// - `EFAULT` (14): Corrupted free list or meta-data was detected (only in
437    ///   debug and secure mode).
438    /// - `ENOMEM` (12): Not enough memory available to satisfy the request.
439    /// - `EOVERFLOW` (75): Too large a request, for example in `mi_calloc`, the
440    ///   `count` and `size` parameters are too large.
441    /// - `EINVAL` (22): Trying to free or re-allocate an invalid pointer.
442    ///
443    /// Note: This function is thread safe.
444    pub fn mi_register_error(out: mi_error_fun, arg: *mut c_void);
445}
446
/// An output callback. Must be thread-safe.
///
/// - `msg`: nul-terminated message to output.
/// - `arg`: the argument that was provided when the callback was registered.
///
/// See [`mi_stats_print_out`], [`mi_thread_stats_print_out`], [`mi_register_output`].
pub type mi_output_fun = Option<unsafe extern "C" fn(msg: *const c_char, arg: *mut c_void)>;
451
/// Type of deferred free functions. Must be thread-safe.
///
/// - `force`: If true, all outstanding items should be freed.
/// - `heartbeat`: A monotonically increasing count.
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_deferred_free`].
pub type mi_deferred_free_fun =
    Option<unsafe extern "C" fn(force: bool, heartbeat: c_ulonglong, arg: *mut c_void)>;
461
/// Type of error callback functions. Must be thread-safe.
///
/// - `code`: Error code (see [`mi_register_error`] for a list).
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_error`].
pub type mi_error_fun = Option<unsafe extern "C" fn(code: c_int, arg: *mut c_void)>;
469
/// Runtime options, passed to the `mi_option_*` family of functions.
/// All boolean options are false by default.
pub type mi_option_t = c_int;
472
#[cfg(feature = "arena")]
/// Arena id (only available with the `arena` feature enabled).
pub type mi_arena_id_t = c_int;
476
// Note: mimalloc doc website seems to have the order of show_stats and
// show_errors reversed as of 1.6.3, however what I have here is correct:
// https://github.com/microsoft/mimalloc/issues/266#issuecomment-653822341

/// Print error messages to `stderr`.
pub const mi_option_show_errors: mi_option_t = 0;

/// Print statistics to `stderr` when the program is done.
pub const mi_option_show_stats: mi_option_t = 1;

/// Print verbose messages to `stderr`.
pub const mi_option_verbose: mi_option_t = 2;

/// ### The following options are experimental
///
/// Option (experimental) Use large OS pages (2MiB in size) if possible.
///
/// Use large OS pages (2MiB) when available; for some workloads this can
/// significantly improve performance. Use mi_option_verbose to check if
/// the large OS pages are enabled -- usually one needs to explicitly allow
/// large OS pages (as on Windows and Linux). However, sometimes the OS is
/// very slow to reserve contiguous physical memory for large OS pages so
/// use with care on systems that can have fragmented memory (for that
/// reason, we generally recommend to use mi_option_reserve_huge_os_pages
/// instead whenever possible).
pub const mi_option_large_os_pages: mi_option_t = 6;

/// Option (experimental) The number of huge OS pages (1GiB in size) to reserve at the start of the program.
///
/// This reserves the huge pages at startup and sometimes this can give a large (latency) performance
/// improvement on big workloads. Usually it is better to not use MIMALLOC_LARGE_OS_PAGES in
/// combination with this setting. Just like large OS pages, use with care as reserving contiguous
/// physical memory can take a long time when memory is fragmented (but reserving the huge pages is
/// done at startup only once). Note that we usually need to explicitly enable huge OS pages (as on
/// Windows and Linux). With huge OS pages, it may be beneficial to set the setting
/// mi_option_eager_commit_delay=N (N is 1 by default) to delay the initial N segments (of 4MiB) of
/// a thread to not allocate in the huge OS pages; this prevents threads that are short lived and
/// allocate just a little to take up space in the huge OS page area (which cannot be reset).
pub const mi_option_reserve_huge_os_pages: mi_option_t = 7;

/// Option (experimental) Reserve huge OS pages at node N.
///
/// The huge pages are usually allocated evenly among NUMA nodes.
/// You can use mi_option_reserve_huge_os_pages_at=N where `N` is the numa node (starting at 0) to allocate all
/// the huge pages at a specific numa node instead.
pub const mi_option_reserve_huge_os_pages_at: mi_option_t = 8;

/// Option (experimental) Reserve specified amount of OS memory at startup, e.g. "1g" or "512m".
pub const mi_option_reserve_os_memory: mi_option_t = 9;

/// Option (experimental) the first N segments per thread are not eagerly committed (=1).
pub const mi_option_eager_commit_delay: mi_option_t = 14;

/// Option (experimental) Pretend there are at most N NUMA nodes; Use 0 to use the actual detected NUMA nodes at runtime.
pub const mi_option_use_numa_nodes: mi_option_t = 16;

/// Option (experimental) If set to 1, do not use OS memory for allocation (but only pre-reserved arenas)
pub const mi_option_limit_os_alloc: mi_option_t = 17;

/// Option (experimental) OS tag to assign to mimalloc'd memory
pub const mi_option_os_tag: mi_option_t = 18;

/// Option (experimental) Maximum number of error messages to print.
pub const mi_option_max_errors: mi_option_t = 19;

/// Option (experimental) Maximum number of warning messages to print.
pub const mi_option_max_warnings: mi_option_t = 20;

/// Option (experimental)
pub const mi_option_max_segment_reclaim: mi_option_t = 21;

/// Option (experimental)
pub const mi_option_destroy_on_exit: mi_option_t = 22;

/// Option (experimental)
pub const mi_option_arena_reserve: mi_option_t = 23;

/// Option (experimental)
pub const mi_option_arena_purge_mult: mi_option_t = 24;

/// Option (experimental)
pub const mi_option_purge_extend_delay: mi_option_t = 25;

/// Option (experimental)
pub const mi_option_abandoned_reclaim_on_free: mi_option_t = 26;

/// Option (experimental)
pub const mi_option_disallow_arena_alloc: mi_option_t = 27;

/// Option (experimental)
pub const mi_option_retry_on_oom: mi_option_t = 28;

/// Option (experimental)
pub const mi_option_visit_abandoned: mi_option_t = 29;

/// Option (experimental)
pub const mi_option_guarded_min: mi_option_t = 30;

/// Option (experimental)
pub const mi_option_guarded_max: mi_option_t = 31;

/// Option (experimental)
pub const mi_option_guarded_precise: mi_option_t = 32;

/// Option (experimental)
pub const mi_option_guarded_sample_rate: mi_option_t = 33;

/// Option (experimental)
pub const mi_option_guarded_sample_seed: mi_option_t = 34;

/// Option (experimental)
pub const mi_option_target_segments_per_thread: mi_option_t = 35;

/// Option (experimental)
pub const mi_option_generic_collect: mi_option_t = 36;

/// Last option (one past the largest valid option value).
pub const _mi_option_last: mi_option_t = 37;
595
extern "C" {
    // Note: mi_option_{enable,disable} aren't exposed because they're redundant
    // and because of https://github.com/microsoft/mimalloc/issues/266.

    /// Returns true if the provided option is enabled.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_is_enabled(option: mi_option_t) -> bool;

    /// Enable or disable the given option.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled(option: mi_option_t, enable: bool);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], enables or disables the option. If it has,
    /// this function does nothing.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled_default(option: mi_option_t, enable: bool);

    /// Returns the value of the provided option.
    ///
    /// The value of boolean options is 1 or 0, however experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons,
    /// however you can still use them as arguments to this and other
    /// `mi_option_` functions if needed, see the mimalloc documentation for
    /// details: <https://microsoft.github.io/mimalloc/group__options.html>
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_get(option: mi_option_t) -> c_long;

    /// Set the option to the given value.
    ///
    /// The value of boolean options is 1 or 0, however experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons,
    /// however you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set(option: mi_option_t, value: c_long);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], sets the option to the given value. If it
    /// has, this function does nothing.
    ///
    /// The value of boolean options is 1 or 0, however experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons,
    /// however you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_default(option: mi_option_t, value: c_long);
}
659
/// First-class heaps that can be destroyed in one go.
///
/// Note: The pointers allocated out of a heap can be freed using
/// [`mi_free`](crate::mi_free) -- there is no `mi_heap_free`.
///
/// # Example
///
/// ```
/// use libmimalloc_sys as mi;
/// unsafe {
///     let h = mi::mi_heap_new();
///     assert!(!h.is_null());
///     let p = mi::mi_heap_malloc(h, 50);
///     assert!(!p.is_null());
///
///     // use p...
///     mi::mi_free(p);
///
///     // Clean up the heap. Note that pointers allocated from `h`
///     // are *not* invalidated by `mi_heap_delete`. You would have
///     // to use (the very dangerous) `mi_heap_destroy` for that
///     // behavior.
///     mi::mi_heap_delete(h);
/// }
/// ```
pub enum mi_heap_t {}
686
/// An area of heap space that contains blocks of a single size.
///
/// The bytes in freed blocks are `committed - used`.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct mi_heap_area_t {
    /// Start of the area containing heap blocks.
    pub blocks: *mut c_void,
    /// Bytes reserved for this area.
    pub reserved: usize,
    /// Current committed bytes of this area.
    pub committed: usize,
    /// Bytes in use by allocated blocks.
    pub used: usize,
    /// Size in bytes of one block.
    pub block_size: usize,
    /// Size in bytes of a full block including padding and metadata.
    pub full_block_size: usize,
    /// Heap tag associated with this area (see `mi_heap_new_ex`).
    pub heap_tag: c_int,
}
708
/// Visitor function passed to [`mi_heap_visit_blocks`].
///
/// Should return `true` to continue, and `false` to stop visiting (i.e. break).
///
/// This function is always first called for every `area` with `block` as a null
/// pointer. If `visit_all_blocks` was `true`, the function is then called for
/// every allocated block in that area.
///
/// - `heap`: the heap being visited.
/// - `area`: the current [`mi_heap_area_t`] being visited.
/// - `block`: pointer to the current block, or null for the per-area call.
/// - `block_size`: size in bytes of the blocks in this area.
/// - `arg`: caller-provided state, passed through unchanged.
pub type mi_block_visit_fun = Option<
    unsafe extern "C" fn(
        heap: *const mi_heap_t,
        area: *const mi_heap_area_t,
        block: *mut c_void,
        block_size: usize,
        arg: *mut c_void,
    ) -> bool,
>;
725
extern "C" {
    /// Create a new heap that can be used for allocation.
    pub fn mi_heap_new() -> *mut mi_heap_t;

    /// Delete a previously allocated heap.
    ///
    /// This will release resources and migrate any still allocated blocks in
    /// this heap (efficiently) to the default heap.
    ///
    /// If `heap` is the default heap, the default heap is set to the backing
    /// heap.
    pub fn mi_heap_delete(heap: *mut mi_heap_t);

    /// Destroy a heap, freeing all its still allocated blocks.
    ///
    /// Use with care as this will free all blocks still allocated in the heap.
    /// However, this can be a very efficient way to free all heap memory in one
    /// go.
    ///
    /// If `heap` is the default heap, the default heap is set to the backing
    /// heap.
    pub fn mi_heap_destroy(heap: *mut mi_heap_t);

    /// Set the default heap to use for [`mi_malloc`](crate::mi_malloc) et al.
    ///
    /// Returns the previous default heap.
    pub fn mi_heap_set_default(heap: *mut mi_heap_t) -> *mut mi_heap_t;

    /// Get the default heap that is used for [`mi_malloc`](crate::mi_malloc) et al.
    pub fn mi_heap_get_default() -> *mut mi_heap_t;

    /// Get the backing heap.
    ///
    /// The _backing_ heap is the initial default heap for a thread and always
    /// available for allocations. It cannot be destroyed or deleted except by
    /// exiting the thread.
    pub fn mi_heap_get_backing() -> *mut mi_heap_t;

    /// Release outstanding resources in a specific heap.
    ///
    /// See also [`mi_collect`].
    pub fn mi_heap_collect(heap: *mut mi_heap_t, force: bool);

    /// Equivalent to [`mi_malloc`](crate::mi_malloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_zalloc`](crate::mi_zalloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_calloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_calloc(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_mallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_mallocn(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_malloc_small`], but allocates out of the specific
    /// heap instead of the default.
    ///
    /// `size` must be smaller or equal to [`MI_SMALL_SIZE_MAX`].
    pub fn mi_heap_malloc_small(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_realloc`](crate::mi_realloc), but allocates out of
    /// the specific heap instead of the default.
    pub fn mi_heap_realloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_reallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocn(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        count: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_reallocf`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocf(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_strdup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strdup(heap: *mut mi_heap_t, s: *const c_char) -> *mut c_char;

    /// Equivalent to [`mi_strndup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strndup(heap: *mut mi_heap_t, s: *const c_char, n: usize) -> *mut c_char;

    /// Equivalent to [`mi_realpath`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_realpath(
        heap: *mut mi_heap_t,
        fname: *const c_char,
        resolved_name: *mut c_char,
    ) -> *mut c_char;

    /// Equivalent to [`mi_malloc_aligned`](crate::mi_malloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_malloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_malloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_calloc_aligned(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_calloc_aligned_at(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned`](crate::mi_realloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_realloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_realloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_rezalloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_recalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_recalloc(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_rezalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_rezalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Does a heap contain a pointer to a previously allocated block?
    ///
    /// `p` must be a pointer to a previously allocated block (in any heap) -- it cannot be some
    /// random pointer!
    ///
    /// Returns `true` if the block pointed to by `p` is in the `heap`.
    ///
    /// See [`mi_heap_check_owned`].
    pub fn mi_heap_contains_block(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of a heap.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// given heap or any other mimalloc heap. Returns `true` if `p` points to a
    /// block in the given heap, false otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`], and
    /// [`mi_is_in_heap_region`]
    pub fn mi_heap_check_owned(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of the default heap of this thread.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// default heap for this thread, or any other mimalloc heap. Returns `true`
    /// if `p` points to a block in the default heap, false otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`]
    pub fn mi_check_owned(p: *const c_void) -> bool;

    /// Visit all areas and blocks in `heap`.
    ///
    /// If `visit_all_blocks` is false, the `visitor` is only called once for
    /// every heap area. If it's true, the `visitor` is also called for every
    /// allocated block inside every area (with `!block.is_null()`). Return
    /// `false` from the `visitor` to return early.
    ///
    /// `arg` is an extra argument passed into the `visitor`.
    ///
    /// Returns `true` if all areas and blocks were visited.
    ///
    /// Passing a `None` visitor is allowed, and is a no-op.
    pub fn mi_heap_visit_blocks(
        heap: *const mi_heap_t,
        visit_all_blocks: bool,
        visitor: mi_block_visit_fun,
        arg: *mut c_void,
    ) -> bool;

    #[cfg(feature = "arena")]
    /// Create a heap that only allocates in the specified arena
    pub fn mi_heap_new_in_arena(arena_id: mi_arena_id_t) -> *mut mi_heap_t;

    #[cfg(feature = "arena")]
    /// Reserve OS memory for use by mimalloc. Reserved areas are used
    /// before allocating from the OS again. By reserving a large area upfront,
    /// allocation can be more efficient, and can be better managed on systems
    /// without `mmap`/`VirtualAlloc` (like WASM for example).
    ///
    /// - `size` The size to reserve.
    /// - `commit` Commit the memory upfront.
    /// - `allow_large` Allow large OS pages (2MiB) to be used?
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena_id if successful.
    ///
    /// Returns 0 if successful, and an error code otherwise (e.g. `ENOMEM`)
    pub fn mi_reserve_os_memory_ex(
        size: usize,
        commit: bool,
        allow_large: bool,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> c_int;

    #[cfg(feature = "arena")]
    /// Manage a particular memory area for use by mimalloc.
    /// This is just like `mi_reserve_os_memory_ex` except that the area should already be
    /// allocated in some manner and available for use by mimalloc.
    ///
    /// # Safety
    /// mimalloc will likely segfault when allocating from the arena if the arena `start` & `size`
    /// aren't aligned with mimalloc's `MI_SEGMENT_ALIGN` (e.g. 32MB on x86_64 machines).
    ///
    /// - `start` Start of the memory area
    /// - `size` The size of the memory area. Must be larger than `MI_ARENA_BLOCK_SIZE` (e.g. 64MB
    ///          on x86_64 machines).
    /// - `is_committed` Set true if the memory range is already committed.
    /// - `is_large` Set true if the memory range consists of large OS pages, or if the memory
    ///              should not be decommitted or protected (like rdma etc.).
    /// - `is_zero` Set true if the memory range consists only of zeros.
    /// - `numa_node` Possible associated numa node or `-1`.
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena_id if successful.
    ///
    /// Returns `true` if arena was successfully allocated
    pub fn mi_manage_os_memory_ex(
        start: *const c_void,
        size: usize,
        is_committed: bool,
        is_large: bool,
        is_zero: bool,
        numa_node: c_int,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> bool;
}
1058
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{mi_free, mi_malloc};

    /// `mi_usable_size` must report at least the number of bytes requested
    /// from `mi_malloc`.
    #[test]
    fn it_calculates_usable_size() {
        unsafe {
            let ptr = mi_malloc(32);
            // A null return would make mi_usable_size report 0 and produce a
            // confusing assertion failure; check allocation success explicitly.
            assert!(!ptr.is_null(), "mi_malloc(32) returned null");
            let usable_size = mi_usable_size(ptr);
            assert!(
                usable_size >= 32,
                "usable_size should at least equal to the allocated size"
            );
            // Release the block so the test does not leak the allocation.
            mi_free(ptr);
        }
    }

    /// Options must read back the value written via `mi_option_set`.
    ///
    /// NOTE(review): the initial `== 0` assertions assume the defaults have
    /// not been changed by the environment (e.g. `MIMALLOC_SHOW_ERRORS`) or
    /// by another test running earlier in this process.
    #[test]
    fn runtime_stable_option() {
        unsafe {
            assert_eq!(mi_option_get(mi_option_show_errors), 0);
            mi_option_set(mi_option_show_errors, 1);
            assert_eq!(mi_option_get(mi_option_show_errors), 1);

            assert_eq!(mi_option_get(mi_option_show_stats), 0);
            mi_option_set(mi_option_show_stats, 1);
            assert_eq!(mi_option_get(mi_option_show_stats), 1);

            assert_eq!(mi_option_get(mi_option_verbose), 0);
            mi_option_set(mi_option_verbose, 1);
            assert_eq!(mi_option_get(mi_option_verbose), 1);
        }
    }
}