libmimalloc_sys2/extended.rs

#![allow(nonstandard_style)]

use core::ffi::c_void;

use cty::{c_char, c_int, c_long, c_ulonglong};

/// The maximum number of bytes which may be used as an argument to a function
/// in the `_small` family ([`mi_malloc_small`], [`mi_zalloc_small`], etc).
pub const MI_SMALL_SIZE_MAX: usize = 128 * core::mem::size_of::<*mut c_void>();
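// Note (illustrative, not from the upstream docs): on a typical 64-bit target
// this works out to `128 * 8 = 1024` bytes, so e.g. a 256-byte request is
// safely within the `_small` limit:
//
//     assert_eq!(MI_SMALL_SIZE_MAX, 1024); // 64-bit targets only
//     let p = unsafe { mi_malloc_small(256) };
//     assert!(!p.is_null());
//     unsafe { crate::mi_free(p) };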

extern "C" {
    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory.
    ///
    /// All items are initialized to zero.
    pub fn mi_calloc(count: usize, size: usize) -> *mut c_void;
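    // Illustrative sketch (not part of the upstream docs): allocate a zeroed
    // array of 16 `u32`s and release it with `crate::mi_free`.
    //
    //     let p = unsafe { mi_calloc(16, core::mem::size_of::<u32>()) } as *mut u32;
    //     assert!(!p.is_null());
    //     assert_eq!(unsafe { *p.add(15) }, 0); // every item starts out zeroed
    //     unsafe { crate::mi_free(p as *mut c_void) };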

    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory,
    /// otherwise returns the same as
    /// [`mi_malloc(count * size)`](crate::mi_malloc).
    /// Equivalent to [`mi_calloc`], but returns uninitialized (and not zeroed)
    /// bytes.
    pub fn mi_mallocn(count: usize, size: usize) -> *mut c_void;

    /// Re-allocate memory to `count` elements of `size` bytes.
    ///
    /// The realloc equivalent of the [`mi_mallocn`] interface. Returns `null`
    /// if `count * size` overflows or on out-of-memory, otherwise returns the
    /// same as [`mi_realloc(p, count * size)`](crate::mi_realloc).
    pub fn mi_reallocn(p: *mut c_void, count: usize, size: usize) -> *mut c_void;

    /// Try to re-allocate memory to `newsize` bytes _in place_.
    ///
    /// Returns null on out-of-memory or if the memory could not be expanded in
    /// place. On success, returns the same pointer as `p`.
    ///
    /// If `newsize` is larger than the original `size` allocated for `p`, the
    /// bytes after `size` are uninitialized.
    ///
    /// If null is returned, the original pointer is not freed.
    ///
    /// Note: Conceptually, this is a realloc-like function which returns null
    /// if it would be forced to reallocate memory and copy. In practice it's
    /// equivalent to testing against [`mi_usable_size`](crate::mi_usable_size).
    pub fn mi_expand(p: *mut c_void, newsize: usize) -> *mut c_void;
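    // Illustrative sketch (not part of the upstream docs): growing in place is
    // guaranteed to succeed up to `mi_usable_size(p)` and may return null past
    // that point, in which case `p` is untouched and must still be freed.
    //
    //     let p = unsafe { crate::mi_malloc(100) };
    //     let usable = unsafe { mi_usable_size(p) };
    //     assert_eq!(unsafe { mi_expand(p, usable) }, p); // within usable size: succeeds
    //     let _maybe = unsafe { mi_expand(p, usable + 1) }; // may be null
    //     unsafe { crate::mi_free(p) };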

    /// Re-allocate memory to `newsize` bytes.
    ///
    /// This differs from [`mi_realloc`](crate::mi_realloc) in that on failure,
    /// `p` is freed.
    pub fn mi_reallocf(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Allocate and duplicate a nul-terminated C string.
    ///
    /// This can be useful for Rust code when interacting with the FFI.
    pub fn mi_strdup(s: *const c_char) -> *mut c_char;

    /// Allocate and duplicate a nul-terminated C string, up to `n` bytes.
    ///
    /// This can be useful for Rust code when interacting with the FFI.
    pub fn mi_strndup(s: *const c_char, n: usize) -> *mut c_char;

    /// Resolve a file path name, producing a `C` string which can be passed to
    /// [`mi_free`](crate::mi_free).
    ///
    /// `resolved_name` should be null, but can also point to a buffer of at
    /// least `PATH_MAX` bytes.
    ///
    /// If successful, returns a pointer to the resolved absolute file name, or
    /// `null` on failure (with `errno` set to the error code).
    ///
    /// If `resolved_name` was `null`, the returned result should be freed with
    /// [`mi_free`](crate::mi_free).
    ///
    /// This is rarely useful in FFI code, but is included mostly for
    /// completeness.
    pub fn mi_realpath(fname: *const c_char, resolved_name: *mut c_char) -> *mut c_char;

    /// Allocate `size * count` bytes aligned by `alignment`.
    ///
    /// Returns a pointer to the allocated memory, or null if out of memory or
    /// if `size * count` overflows.
    ///
    /// Returns a unique pointer if called with a `size * count` of 0.
    pub fn mi_calloc_aligned(count: usize, size: usize, alignment: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`.
    ///
    /// Note that the resulting pointer itself is not aligned by the alignment,
    /// but after `offset` bytes it will be. This can be useful for allocating
    /// data with an inline header, where the data has a specific alignment
    /// requirement.
    ///
    /// Specifically, if `p` is the returned pointer, `p.add(offset)` is aligned
    /// to `alignment`.
    pub fn mi_malloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;
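    // Illustrative sketch (not part of the upstream docs): a 16-byte inline
    // header followed by a payload that must be 64-byte aligned. The returned
    // pointer itself is not 64-byte aligned, but `p.add(16)` is.
    //
    //     let p = unsafe { mi_malloc_aligned_at(16 + 512, 64, 16) } as *mut u8;
    //     assert!(!p.is_null());
    //     assert_eq!(unsafe { p.add(16) } as usize % 64, 0);
    //     unsafe { crate::mi_free(p as *mut c_void) };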

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`,
    /// zero-initialized.
    ///
    /// This is the [`mi_zalloc`](crate::mi_zalloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_zalloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment` and store the address of
    /// the allocated memory in `ptr`.
    ///
    /// Returns zero on success, `EINVAL` for an invalid `alignment`, or
    /// `ENOMEM` when out of memory.
    pub fn mi_posix_memalign(ptr: *mut *mut c_void, alignment: usize, size: usize) -> c_int;
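    // Illustrative sketch (not part of the upstream docs) of the out-parameter
    // convention:
    //
    //     let mut p: *mut c_void = core::ptr::null_mut();
    //     let rc = unsafe { mi_posix_memalign(&mut p, 64, 1024) };
    //     assert_eq!(rc, 0);
    //     assert_eq!(p as usize % 64, 0);
    //     unsafe { crate::mi_free(p) };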

    /// Allocate `size` bytes aligned by `alignment`, with the alignment as the
    /// first parameter.
    ///
    /// Returns a pointer to the allocated memory, or null if out of memory.
    pub fn mi_aligned_alloc(alignment: usize, size: usize) -> *mut c_void;

    /// Allocate `size * count` bytes aligned by `alignment` at a specified
    /// `offset`, zero-initialized.
    ///
    /// This is the [`mi_calloc`](crate::mi_calloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_calloc_aligned_at(
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Re-allocate memory to `newsize` bytes aligned by `alignment` at a
    /// specified `offset`.
    ///
    /// This is the [`mi_realloc`](crate::mi_realloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_realloc_aligned_at(
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Zero-initialized [re-allocation](crate::mi_realloc).
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`mi_calloc`](crate::mi_calloc),
    /// [`mi_zalloc`](crate::mi_zalloc),
    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
    pub fn mi_rezalloc(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Zero-initialized [re-allocation](crate::mi_realloc), following `calloc`
    /// parameter conventions.
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`mi_calloc`](crate::mi_calloc),
    /// [`mi_zalloc`](crate::mi_zalloc),
    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
    pub fn mi_recalloc(p: *mut c_void, newcount: usize, size: usize) -> *mut c_void;

    /// Aligned version of [`mi_rezalloc`].
    pub fn mi_rezalloc_aligned(p: *mut c_void, newsize: usize, alignment: usize) -> *mut c_void;

    /// Offset-aligned version of [`mi_rezalloc`].
    pub fn mi_rezalloc_aligned_at(
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Aligned version of [`mi_recalloc`].
    pub fn mi_recalloc_aligned(
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Offset-aligned version of [`mi_recalloc`].
    pub fn mi_recalloc_aligned_at(
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Allocate an object of no more than [`MI_SMALL_SIZE_MAX`] bytes.
    ///
    /// Does not check that `size` is indeed small.
    ///
    /// Note: Currently [`mi_malloc`](crate::mi_malloc) checks at runtime if
    /// `size` is small and calls this if so, so it's only worth using if you
    /// know for certain that `size` is small.
    pub fn mi_malloc_small(size: usize) -> *mut c_void;

    /// Allocate a zero-initialized object of no more than
    /// [`MI_SMALL_SIZE_MAX`] bytes.
    ///
    /// Does not check that `size` is indeed small.
    ///
    /// Note: Currently [`mi_zalloc`](crate::mi_zalloc) checks at runtime if
    /// `size` is small and calls this if so, so it's only worth using if you
    /// know for certain that `size` is small.
    pub fn mi_zalloc_small(size: usize) -> *mut c_void;

    /// Return the available bytes in a memory block.
    ///
    /// The returned size can be used to call `mi_expand` successfully.
    pub fn mi_usable_size(p: *const c_void) -> usize;

    /// Return the used allocation size.
    ///
    /// Returns the size `n` that will be allocated, where `n >= size`.
    ///
    /// Generally, `mi_usable_size(mi_malloc(size)) == mi_good_size(size)`. This
    /// can be used to reduce internal wasted space when allocating buffers, for
    /// example.
    ///
    /// See [`mi_usable_size`](crate::mi_usable_size).
    pub fn mi_good_size(size: usize) -> usize;
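    // Illustrative sketch (not part of the upstream docs): rounding a requested
    // capacity up to an allocation size that wastes no internal space.
    //
    //     let want = 100;
    //     let cap = unsafe { mi_good_size(want) }; // cap >= want
    //     let p = unsafe { crate::mi_malloc(cap) };
    //     assert!(unsafe { mi_usable_size(p) } >= cap);
    //     unsafe { crate::mi_free(p) };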

    /// Eagerly free memory.
    ///
    /// If `force` is true, aggressively return memory to the OS (can be
    /// expensive!).
    ///
    /// Regular code should not have to call this function. It can be beneficial
    /// in very narrow circumstances; in particular, when a long-running thread
    /// allocates a lot of blocks that are freed by other threads, it may improve
    /// resource usage by calling this every once in a while.
    pub fn mi_collect(force: bool);

    /// Checked free: If `p` came from mimalloc's heap (as decided by
    /// [`mi_is_in_heap_region`]), this is [`mi_free(p)`](crate::mi_free), but
    /// otherwise it is a no-op.
    pub fn mi_cfree(p: *mut c_void);

    /// Returns true if this is a pointer into a memory region that has been
    /// reserved by the mimalloc heap.
    ///
    /// This function is described by the mimalloc documentation as "relatively
    /// fast".
    ///
    /// See also [`mi_heap_check_owned`], which is (much) slower and slightly
    /// more precise, but only concerns a single `mi_heap`.
    pub fn mi_is_in_heap_region(p: *const c_void) -> bool;

    /// Layout-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
    /// the size and alignment as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is actually aligned to `alignment` and is usable for
    /// at least `size` bytes, before delegating to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function, and you are not required
    /// to use this to deallocate memory from an aligned allocation function.
    pub fn mi_free_size_aligned(p: *mut c_void, size: usize, alignment: usize);

    /// Size-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
    /// the size as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is usable for at least `size` bytes, before delegating
    /// to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function.
    pub fn mi_free_size(p: *mut c_void, size: usize);

    /// Alignment-aware deallocation: Like [`mi_free`](crate::mi_free), but
    /// accepts the alignment as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is actually aligned to `alignment`, before delegating
    /// to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function.
    pub fn mi_free_aligned(p: *mut c_void, alignment: usize);

    /// Print the main statistics.
    ///
    /// Ignores the passed in argument, and outputs to the registered output
    /// function or stderr by default.
    ///
    /// Most detailed when using a debug build.
    pub fn mi_stats_print(_: *mut c_void);

    /// Print the main statistics.
    ///
    /// Pass `None` for `out` to use the default. If `out` is provided, `arg` is
    /// passed as its second parameter.
    ///
    /// Most detailed when using a debug build.
    pub fn mi_stats_print_out(out: mi_output_fun, arg: *mut c_void);

    /// Reset statistics.
    ///
    /// Note: This function is thread safe.
    pub fn mi_stats_reset();

    /// Merge thread local statistics with the main statistics and reset.
    ///
    /// Note: This function is thread safe.
    pub fn mi_stats_merge();

    /// Return the mimalloc version number.
    ///
    /// For example version 1.6.3 would return the number `163`.
    pub fn mi_version() -> c_int;

    /// Initialize mimalloc on a thread.
    ///
    /// Should not be used, as on most systems (pthreads, Windows) this is done
    /// automatically.
    pub fn mi_thread_init();

    /// Initialize the process.
    ///
    /// Should not be used on most systems, as it's called by `mi_thread_init`
    /// or the process loader.
    pub fn mi_process_init();

    /// Return process information (time and memory usage). All parameters are
    /// optional (nullable) out-params:
    ///
    /// | Parameter        | Description |
    /// | :-               | :- |
    /// | `elapsed_msecs`  | Elapsed wall-clock time of the process in milliseconds. |
    /// | `user_msecs`     | User time in milliseconds (as the sum over all threads). |
    /// | `system_msecs`   | System time in milliseconds. |
    /// | `current_rss`    | Current working set size (touched pages). |
    /// | `peak_rss`       | Peak working set size (touched pages). |
    /// | `current_commit` | Current committed memory (backed by the page file). |
    /// | `peak_commit`    | Peak committed memory (backed by the page file). |
    /// | `page_faults`    | Count of hard page faults. |
    ///
    /// The `current_rss` is precise on Windows and MacOSX; other systems
    /// estimate this using `current_commit`. The `commit` is precise on Windows
    /// but estimated on other systems as the amount of read/write accessible
    /// memory reserved by mimalloc.
    pub fn mi_process_info(
        elapsed_msecs: *mut usize,
        user_msecs: *mut usize,
        system_msecs: *mut usize,
        current_rss: *mut usize,
        peak_rss: *mut usize,
        current_commit: *mut usize,
        peak_commit: *mut usize,
        page_faults: *mut usize,
    );
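    // Illustrative sketch (not part of the upstream docs): every out-parameter
    // is optional, so fields you don't need can be passed as null.
    //
    //     let (mut current_rss, mut peak_rss) = (0usize, 0usize);
    //     unsafe {
    //         mi_process_info(
    //             core::ptr::null_mut(), // elapsed_msecs
    //             core::ptr::null_mut(), // user_msecs
    //             core::ptr::null_mut(), // system_msecs
    //             &mut current_rss,
    //             &mut peak_rss,
    //             core::ptr::null_mut(), // current_commit
    //             core::ptr::null_mut(), // peak_commit
    //             core::ptr::null_mut(), // page_faults
    //         );
    //     }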

    /// Uninitialize mimalloc on a thread.
    ///
    /// Should not be used, as on most systems (pthreads, Windows) this is done
    /// automatically. Ensures that any memory that is not freed yet (but will
    /// be freed by other threads in the future) is properly handled.
    ///
    /// Note: This function is thread safe.
    pub fn mi_thread_done();

    /// Print out heap statistics for this thread.
    ///
    /// Pass `None` for `out` to use the default. If `out` is provided, `arg` is
    /// passed as its second parameter.
    ///
    /// Most detailed when using a debug build.
    ///
    /// Note: This function is thread safe.
    pub fn mi_thread_stats_print_out(out: mi_output_fun, arg: *mut c_void);

    /// Register an output function.
    ///
    /// - `out`: The output function; use `None` to output to stderr.
    /// - `arg`: Argument that will be passed on to the output function.
    ///
    /// The `out` function is called to output any information from mimalloc,
    /// like verbose or warning messages.
    ///
    /// Note: This function is thread safe.
    pub fn mi_register_output(out: mi_output_fun, arg: *mut c_void);
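    // Illustrative sketch (the callback name is made up for this example):
    // routing mimalloc's diagnostic messages through a custom `extern "C"` hook.
    //
    //     unsafe extern "C" fn my_output(msg: *const c_char, _arg: *mut c_void) {
    //         // `msg` is a nul-terminated C string; forward it to a logger, stderr, etc.
    //         let _ = msg;
    //     }
    //     unsafe { mi_register_output(Some(my_output), core::ptr::null_mut()) };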

    /// Register a deferred free function.
    ///
    /// - `deferred_free`: Address of a deferred free-ing function, or `None` to
    ///   unregister.
    /// - `arg`: Argument that will be passed on to the deferred free function.
    ///
    /// Some runtime systems use deferred free-ing, for example when using
    /// reference counting to limit the worst case free time.
    ///
    /// Such systems can register a (re-entrant) deferred free function to free
    /// more memory on demand.
    ///
    /// - When the `force` parameter is `true`, all possible memory should be
    ///   freed.
    ///
    /// - The per-thread `heartbeat` parameter is monotonically increasing and
    ///   guaranteed to be deterministic if the program allocates
    ///   deterministically.
    ///
    /// - The `deferred_free` function is guaranteed to be called
    ///   deterministically after some number of allocations (regardless of
    ///   freeing or available free memory).
    ///
    /// At most one `deferred_free` function can be active.
    ///
    /// Note: This function is thread safe.
    pub fn mi_register_deferred_free(out: mi_deferred_free_fun, arg: *mut c_void);
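    // Illustrative sketch (the callback name is made up for this example): a
    // deferred-free hook that drains an application-side free list when
    // mimalloc asks for memory back.
    //
    //     unsafe extern "C" fn my_deferred_free(
    //         force: bool,
    //         _heartbeat: c_ulonglong,
    //         _arg: *mut c_void,
    //     ) {
    //         // free a few queued blocks here; free everything when `force` is true
    //         let _ = force;
    //     }
    //     unsafe { mi_register_deferred_free(Some(my_deferred_free), core::ptr::null_mut()) };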

    /// Register an error callback function.
    ///
    /// The `errfun` function is called on an error in mimalloc after emitting
    /// an error message (through the output function).
    ///
    /// It is always legal to just return from the `errfun` function, in which
    /// case allocation functions generally return null or ignore the condition.
    ///
    /// The default function only calls `abort()` when compiled in secure mode
    /// with an `EFAULT` error. The possible error codes are:
    ///
    /// - `EAGAIN` (11): Double free was detected (only in debug and secure
    ///   mode).
    /// - `EFAULT` (14): Corrupted free list or meta-data was detected (only in
    ///   debug and secure mode).
    /// - `ENOMEM` (12): Not enough memory available to satisfy the request.
    /// - `EOVERFLOW` (75): Too large a request; for example, when the `count`
    ///   and `size` parameters of `mi_calloc` are too large.
    /// - `EINVAL` (22): Trying to free or re-allocate an invalid pointer.
    ///
    /// Note: This function is thread safe.
    pub fn mi_register_error(out: mi_error_fun, arg: *mut c_void);
}

/// An output callback. Must be thread-safe.
///
/// See [`mi_stats_print_out`], [`mi_thread_stats_print_out`], and
/// [`mi_register_output`].
pub type mi_output_fun = Option<unsafe extern "C" fn(msg: *const c_char, arg: *mut c_void)>;

/// Type of deferred free functions. Must be thread-safe.
///
/// - `force`: If true, all outstanding items should be freed.
/// - `heartbeat`: A monotonically increasing count.
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_deferred_free`].
pub type mi_deferred_free_fun =
    Option<unsafe extern "C" fn(force: bool, heartbeat: c_ulonglong, arg: *mut c_void)>;

/// Type of error callback functions. Must be thread-safe.
///
/// - `code`: Error code (see [`mi_register_error`] for a list).
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_error`].
pub type mi_error_fun = Option<unsafe extern "C" fn(code: c_int, arg: *mut c_void)>;

/// Runtime options. All options are false by default.
pub type mi_option_t = c_int;

#[cfg(feature = "arena")]
/// Arena id.
pub type mi_arena_id_t = c_int;

// Note: mimalloc doc website seems to have the order of show_stats and
// show_errors reversed as of 1.6.3, however what I have here is correct:
// https://github.com/microsoft/mimalloc/issues/266#issuecomment-653822341

/// Print error messages to `stderr`.
pub const mi_option_show_errors: mi_option_t = 0;

/// Print statistics to `stderr` when the program is done.
pub const mi_option_show_stats: mi_option_t = 1;

/// Print verbose messages to `stderr`.
pub const mi_option_verbose: mi_option_t = 2;

/// ### The following options are experimental
///
/// Option (experimental): Use large OS pages (2MiB in size) if possible.
///
/// Use large OS pages (2MiB) when available; for some workloads this can
/// significantly improve performance. Use [`mi_option_verbose`] to check if
/// the large OS pages are enabled -- usually one needs to explicitly allow
/// large OS pages (as on Windows and Linux). However, sometimes the OS is
/// very slow to reserve contiguous physical memory for large OS pages, so
/// use with care on systems that can have fragmented memory (for that
/// reason, we generally recommend using [`mi_option_reserve_huge_os_pages`]
/// instead whenever possible).
pub const mi_option_large_os_pages: mi_option_t = 6;

/// Option (experimental): The number of huge OS pages (1GiB in size) to
/// reserve at the start of the program.
///
/// This reserves the huge pages at startup and sometimes this can give a large
/// (latency) performance improvement on big workloads. Usually it is better to
/// not use `MIMALLOC_LARGE_OS_PAGES` in combination with this setting. Just
/// like large OS pages, use with care as reserving contiguous physical memory
/// can take a long time when memory is fragmented (but reserving the huge
/// pages is done at startup only once). Note that we usually need to
/// explicitly enable huge OS pages (as on Windows and Linux). With huge OS
/// pages, it may be beneficial to set `mi_option_eager_commit_delay=N` (N is 1
/// by default) to delay the initial N segments (of 4MiB) of a thread to not
/// allocate in the huge OS pages; this prevents threads that are short lived
/// and allocate just a little from taking up space in the huge OS page area
/// (which cannot be reset).
pub const mi_option_reserve_huge_os_pages: mi_option_t = 7;

/// Option (experimental): Reserve huge OS pages at node N.
///
/// The huge pages are usually allocated evenly among NUMA nodes.
/// You can use `mi_option_reserve_huge_os_pages_at=N`, where `N` is the NUMA
/// node (starting at 0), to allocate all the huge pages at a specific NUMA
/// node instead.
pub const mi_option_reserve_huge_os_pages_at: mi_option_t = 8;

/// Option (experimental): Reserve a specified amount of OS memory at startup,
/// e.g. "1g" or "512m".
pub const mi_option_reserve_os_memory: mi_option_t = 9;

/// Option (experimental): The first N segments per thread are not eagerly
/// committed (N is 1 by default).
pub const mi_option_eager_commit_delay: mi_option_t = 14;

/// Option (experimental): Pretend there are at most N NUMA nodes; use 0 to use
/// the actual detected NUMA nodes at runtime.
pub const mi_option_use_numa_nodes: mi_option_t = 16;

/// Option (experimental): If set to 1, do not use OS memory for allocation
/// (but only pre-reserved arenas).
pub const mi_option_limit_os_alloc: mi_option_t = 17;

/// Option (experimental): OS tag to assign to mimalloc'd memory.
pub const mi_option_os_tag: mi_option_t = 18;

/// Option (experimental).
pub const mi_option_max_errors: mi_option_t = 19;

/// Option (experimental).
pub const mi_option_max_warnings: mi_option_t = 20;

/// Option (experimental).
pub const mi_option_max_segment_reclaim: mi_option_t = 21;

/// Last option.
pub const _mi_option_last: mi_option_t = 36;

extern "C" {
    // Note: mi_option_{enable,disable} aren't exposed because they're redundant
    // and because of https://github.com/microsoft/mimalloc/issues/266.

    /// Returns true if the provided option is enabled.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_is_enabled(option: mi_option_t) -> bool;

    /// Enable or disable the given option.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled(option: mi_option_t, enable: bool);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], enables or disables the option. If it has,
    /// this function does nothing.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled_default(option: mi_option_t, enable: bool);

    /// Returns the value of the provided option.
    ///
    /// The value of boolean options is 1 or 0; however, some experimental
    /// options take a numeric value, which is the intended use of this
    /// function.
    ///
    /// Those options are not exposed as constants for stability reasons;
    /// however, you can still use them as arguments to this and other
    /// `mi_option_` functions if needed. See the mimalloc documentation for
    /// details: <https://microsoft.github.io/mimalloc/group__options.html>
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_get(option: mi_option_t) -> c_long;

    /// Set the option to the given value.
    ///
    /// The value of boolean options is 1 or 0; however, some experimental
    /// options take a numeric value, which is the intended use of this
    /// function.
    ///
    /// Those options are not exposed as constants for stability reasons;
    /// however, you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set(option: mi_option_t, value: c_long);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], sets the option to the given value. If it
    /// has, this function does nothing.
    ///
    /// The value of boolean options is 1 or 0; however, some experimental
    /// options take a numeric value, which is the intended use of this
    /// function.
    ///
    /// Those options are not exposed as constants for stability reasons;
    /// however, you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_default(option: mi_option_t, value: c_long);
}
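// Illustrative sketch (not part of the upstream docs): options should be set
// early, before heavy allocation, since the setters are not thread safe.
//
//     unsafe {
//         mi_option_set_enabled(mi_option_show_stats, true);
//         mi_option_set(mi_option_reserve_huge_os_pages, 2); // reserve 2 x 1GiB pages
//         assert!(mi_option_is_enabled(mi_option_show_stats));
//     }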

/// First-class heaps that can be destroyed in one go.
///
/// Note: The pointers allocated out of a heap can be freed using
/// [`mi_free`](crate::mi_free) -- there is no `mi_heap_free`.
///
/// # Example
///
/// ```
/// use libmimalloc_sys as mi;
/// unsafe {
///     let h = mi::mi_heap_new();
///     assert!(!h.is_null());
///     let p = mi::mi_heap_malloc(h, 50);
///     assert!(!p.is_null());
///
///     // use p...
///     mi::mi_free(p);
///
///     // Clean up the heap. Note that pointers allocated from `h`
///     // are *not* invalidated by `mi_heap_delete`. You would have
///     // to use (the very dangerous) `mi_heap_destroy` for that
///     // behavior.
///     mi::mi_heap_delete(h);
/// }
/// ```
pub enum mi_heap_t {}

/// An area of heap space that contains blocks of a single size.
///
/// The bytes in freed blocks are `committed - used`.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct mi_heap_area_t {
    /// Start of the area containing heap blocks.
    pub blocks: *mut c_void,
    /// Bytes reserved for this area.
    pub reserved: usize,
    /// Current committed bytes of this area.
    pub committed: usize,
    /// Bytes in use by allocated blocks.
    pub used: usize,
    /// Size in bytes of one block.
    pub block_size: usize,
    /// Size in bytes of a full block including padding and metadata.
    pub full_block_size: usize,
    /// Heap tag associated with this area (see `mi_heap_new_ex`).
    pub heap_tag: c_int,
}

/// Visitor function passed to [`mi_heap_visit_blocks`].
///
/// Should return `true` to continue, and `false` to stop visiting (i.e. break).
///
/// This function is always first called for every `area` with `block` as a null
/// pointer. If `visit_all_blocks` was `true`, the function is then called for
/// every allocated block in that area.
pub type mi_block_visit_fun = Option<
    unsafe extern "C" fn(
        heap: *const mi_heap_t,
        area: *const mi_heap_area_t,
        block: *mut c_void,
        block_size: usize,
        arg: *mut c_void,
    ) -> bool,
>;

extern "C" {
    /// Create a new heap that can be used for allocation.
    pub fn mi_heap_new() -> *mut mi_heap_t;

    /// Delete a previously allocated heap.
    ///
    /// This will release resources and migrate any still allocated blocks in
    /// this heap (efficiently) to the default heap.
    ///
    /// If `heap` is the default heap, the default heap is set to the backing
    /// heap.
    pub fn mi_heap_delete(heap: *mut mi_heap_t);

    /// Destroy a heap, freeing all its still allocated blocks.
    ///
    /// Use with care as this will free all blocks still allocated in the heap.
    /// However, this can be a very efficient way to free all heap memory in one
    /// go.
    ///
    /// If `heap` is the default heap, the default heap is set to the backing
    /// heap.
    pub fn mi_heap_destroy(heap: *mut mi_heap_t);

    /// Set the default heap to use for [`mi_malloc`](crate::mi_malloc) et al.
    ///
    /// Returns the previous default heap.
    pub fn mi_heap_set_default(heap: *mut mi_heap_t) -> *mut mi_heap_t;

    /// Get the default heap that is used for [`mi_malloc`](crate::mi_malloc) et al.
    pub fn mi_heap_get_default() -> *mut mi_heap_t;

    /// Get the backing heap.
    ///
    /// The _backing_ heap is the initial default heap for a thread and is
    /// always available for allocations. It cannot be destroyed or deleted
    /// except by exiting the thread.
    pub fn mi_heap_get_backing() -> *mut mi_heap_t;

    /// Release outstanding resources in a specific heap.
    ///
    /// See also [`mi_collect`].
    pub fn mi_heap_collect(heap: *mut mi_heap_t, force: bool);

    /// Equivalent to [`mi_malloc`](crate::mi_malloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_zalloc`](crate::mi_zalloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_calloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_calloc(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_mallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_mallocn(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_malloc_small`], but allocates out of the specific
    /// heap instead of the default.
    ///
    /// `size` must be smaller than or equal to [`MI_SMALL_SIZE_MAX`].
    pub fn mi_heap_malloc_small(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_realloc`](crate::mi_realloc), but allocates out of
    /// the specific heap instead of the default.
    pub fn mi_heap_realloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_reallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocn(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        count: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_reallocf`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocf(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_strdup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strdup(heap: *mut mi_heap_t, s: *const c_char) -> *mut c_char;

    /// Equivalent to [`mi_strndup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strndup(heap: *mut mi_heap_t, s: *const c_char, n: usize) -> *mut c_char;

    /// Equivalent to [`mi_realpath`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_realpath(
        heap: *mut mi_heap_t,
        fname: *const c_char,
        resolved_name: *mut c_char,
    ) -> *mut c_char;

    /// Equivalent to [`mi_malloc_aligned`](crate::mi_malloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_malloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_malloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_calloc_aligned(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_calloc_aligned_at(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned`](crate::mi_realloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_realloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_realloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_rezalloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_recalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_recalloc(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_rezalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_rezalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Does a heap contain a pointer to a previously allocated block?
    ///
    /// `p` must be a pointer to a previously allocated block (in any heap) --
    /// it cannot be some random pointer!
    ///
    /// Returns `true` if the block pointed to by `p` is in the `heap`.
    ///
    /// See [`mi_heap_check_owned`].
    pub fn mi_heap_contains_block(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of a heap.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// given heap or any other mimalloc heap. Returns `true` if `p` points to a
    /// block in the given heap, false otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`], and
    /// [`mi_is_in_heap_region`].
    pub fn mi_heap_check_owned(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of the default heap of this thread.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// default heap for this thread, or any other mimalloc heap. Returns `true`
    /// if `p` points to a block in the default heap, false otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`] and [`mi_heap_get_default`].
    pub fn mi_check_owned(p: *const c_void) -> bool;

    /// Visit all areas and blocks in `heap`.
    ///
    /// If `visit_all_blocks` is false, the `visitor` is only called once for
    /// every heap area. If it's true, the `visitor` is also called for every
    /// allocated block inside every area (with `!block.is_null()`). Return
    /// `false` from the `visitor` to return early.
    ///
    /// `arg` is an extra argument passed into the `visitor`.
    ///
    /// Returns `true` if all areas and blocks were visited.
    ///
    /// Passing a `None` visitor is allowed, and is a no-op.
    pub fn mi_heap_visit_blocks(
        heap: *const mi_heap_t,
        visit_all_blocks: bool,
        visitor: mi_block_visit_fun,
        arg: *mut c_void,
    ) -> bool;
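    // Illustrative sketch (the visitor name is made up for this example):
    // counting every live block in the default heap.
    //
    //     unsafe extern "C" fn count_blocks(
    //         _heap: *const mi_heap_t,
    //         _area: *const mi_heap_area_t,
    //         block: *mut c_void,
    //         _block_size: usize,
    //         arg: *mut c_void,
    //     ) -> bool {
    //         if !block.is_null() {
    //             *(arg as *mut usize) += 1;
    //         }
    //         true // keep visiting
    //     }
    //
    //     let mut count = 0usize;
    //     unsafe {
    //         let heap = mi_heap_get_default();
    //         mi_heap_visit_blocks(
    //             heap,
    //             true, // also visit individual blocks
    //             Some(count_blocks),
    //             &mut count as *mut usize as *mut c_void,
    //         );
    //     }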

    #[cfg(feature = "arena")]
    /// Create a heap that only allocates in the specified arena.
    pub fn mi_heap_new_in_arena(arena_id: mi_arena_id_t) -> *mut mi_heap_t;

    #[cfg(feature = "arena")]
    /// Reserve OS memory for use by mimalloc. Reserved areas are used
    /// before allocating from the OS again. By reserving a large area upfront,
    /// allocation can be more efficient, and can be better managed on systems
    /// without `mmap`/`VirtualAlloc` (like WASM for example).
    ///
    /// - `size`: The size to reserve.
    /// - `commit`: Commit the memory upfront.
    /// - `allow_large`: Allow large OS pages (2MiB) to be used.
    /// - `exclusive`: Only allow allocations if specifically for this arena.
    /// - `arena_id`: Pointer whose value will be set to the new arena id if
    ///   successful.
    ///
    /// Returns 0 if successful, and an error code otherwise (e.g. `ENOMEM`).
    pub fn mi_reserve_os_memory_ex(
        size: usize,
        commit: bool,
        allow_large: bool,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> c_int;

    #[cfg(feature = "arena")]
    /// Manage a particular memory area for use by mimalloc.
    /// This is just like `mi_reserve_os_memory_ex` except that the area should
    /// already be allocated in some manner and available for use by mimalloc.
    ///
    /// # Safety
    /// mimalloc will likely segfault when allocating from the arena if the
    /// arena `start` & `size` aren't aligned with mimalloc's
    /// `MI_SEGMENT_ALIGN` (e.g. 32MB on x86_64 machines).
    ///
    /// - `start`: Start of the memory area.
    /// - `size`: The size of the memory area. Must be larger than
    ///   `MI_ARENA_BLOCK_SIZE` (e.g. 64MB on x86_64 machines).
    /// - `is_committed`: Set true if the memory range is already committed.
    /// - `is_large`: Set true if the memory range consists of large OS pages,
    ///   or if the memory should not be decommitted or protected (like rdma
    ///   etc.).
    /// - `is_zero`: Set true if the memory range consists only of zeros.
    /// - `numa_node`: Possible associated NUMA node, or `-1`.
    /// - `exclusive`: Only allow allocations if specifically for this arena.
    /// - `arena_id`: Pointer whose value will be set to the new arena id if
    ///   successful.
    ///
    /// Returns `true` if the arena was successfully registered.
    pub fn mi_manage_os_memory_ex(
        start: *const c_void,
        size: usize,
        is_committed: bool,
        is_large: bool,
        is_zero: bool,
        numa_node: c_int,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> bool;
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mi_malloc;

    #[test]
    fn it_calculates_usable_size() {
        let ptr = unsafe { mi_malloc(32) } as *mut u8;
        let usable_size = unsafe { mi_usable_size(ptr as *mut c_void) };
        assert!(
            usable_size >= 32,
            "usable_size should be at least the allocated size"
        );
        // Release the allocation so the test doesn't leak.
        unsafe { crate::mi_free(ptr as *mut c_void) };
    }

    #[test]
    fn runtime_stable_option() {
        unsafe {
            assert_eq!(mi_option_get(mi_option_show_errors), 0);
            mi_option_set(mi_option_show_errors, 1);
            assert_eq!(mi_option_get(mi_option_show_errors), 1);

            assert_eq!(mi_option_get(mi_option_show_stats), 0);
            mi_option_set(mi_option_show_stats, 1);
            assert_eq!(mi_option_get(mi_option_show_stats), 1);

            assert_eq!(mi_option_get(mi_option_verbose), 0);
            mi_option_set(mi_option_verbose, 1);
            assert_eq!(mi_option_get(mi_option_verbose), 1);
        }
    }
}