// memapi_mimalloc_sys/extended.rs
1#![allow(nonstandard_style)]
2
3use libc::{c_char, c_int, c_long, c_ulonglong, c_void};
4use core::mem::size_of;
5
6// TODO: rewrite docs to be consistent with memapi's doc style
7
/// The maximum number of bytes which may be used as an argument to a function
/// in the `_small` family ([`mi_malloc_small`], [`mi_zalloc_small`], etc).
///
/// Equals 128 machine words (pointer-sized units), mirroring mimalloc's
/// `MI_SMALL_SIZE_MAX` C constant.
pub const MI_SMALL_SIZE_MAX: usize = 128 * size_of::<*mut c_void>();
11
extern "C" {
    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory.
    ///
    /// All items are initialized to zero.
    pub fn mi_calloc(count: usize, size: usize) -> *mut c_void;

    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory,
    /// otherwise returns the same as [`mi_malloc(count *
    /// size)`](crate::mi_malloc).
    /// Equivalent to [`mi_calloc`], but returns uninitialized (and not zeroed)
    /// bytes.
    pub fn mi_mallocn(count: usize, size: usize) -> *mut c_void;

    /// Re-allocate memory to `count` elements of `size` bytes.
    ///
    /// The realloc equivalent of the [`mi_mallocn`] interface. Returns `null`
    /// if `count * size` overflows or on out-of-memory, otherwise returns the
    /// same as [`mi_realloc(p, count * size)`](crate::mi_realloc).
    pub fn mi_reallocn(p: *mut c_void, count: usize, size: usize) -> *mut c_void;

    /// Try to re-allocate memory to `newsize` bytes _in place_.
    ///
    /// Returns null on out-of-memory or if the memory could not be expanded in
    /// place. On success, returns the same pointer as `p`.
    ///
    /// If `newsize` is larger than the original `size` allocated for `p`, the
    /// bytes after `size` are uninitialized.
    ///
    /// If null is returned, the original pointer is not freed.
    ///
    /// Note: Conceptually, this is a realloc-like which returns null if it
    /// would be forced to reallocate memory and copy. In practice it's
    /// equivalent to testing against [`mi_usable_size`](mi_usable_size).
    pub fn mi_expand(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Re-allocate memory to `newsize` bytes.
    ///
    /// This differs from [`mi_realloc`](crate::mi_realloc) in that on failure,
    /// `p` is freed.
    pub fn mi_reallocf(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Allocate and duplicate a nul-terminated C string.
    ///
    /// This can be useful for Rust code when interacting with the FFI.
    pub fn mi_strdup(s: *const c_char) -> *mut c_char;

    /// Allocate and duplicate a nul-terminated C string, up to `n` bytes.
    ///
    /// This can be useful for Rust code when interacting with the FFI.
    pub fn mi_strndup(s: *const c_char, n: usize) -> *mut c_char;

    /// Resolve a file path name, producing a `C` string which can be passed to
    /// [`mi_free`](crate::mi_free).
    ///
    /// `resolved_name` should be null, but can also point to a buffer of at
    /// least `PATH_MAX` bytes.
    ///
    /// If successful, returns a pointer to the resolved absolute file name, or
    /// `null` on failure (with `errno` set to the error code).
    ///
    /// If `resolved_name` was `null`, the returned result should be freed with
    /// [`mi_free`](crate::mi_free).
    ///
    /// This can rarely be useful in FFI code, but is mostly included for
    /// completeness.
    pub fn mi_realpath(fname: *const c_char, resolved_name: *mut c_char) -> *mut c_char;

    /// Allocate `size * count` bytes aligned by `alignment`.
    ///
    /// Return pointer to the allocated memory or null if out of memory or if
    /// `size * count` overflows.
    ///
    /// Returns a unique pointer if called with `size * count` 0.
    pub fn mi_calloc_aligned(count: usize, size: usize, alignment: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`.
    ///
    /// Note that the resulting pointer itself is not aligned by the alignment,
    /// but after `offset` bytes it will be. This can be useful for allocating
    /// data with an inline header, where the data has a specific alignment
    /// requirement.
    ///
    /// Specifically, if `p` is the returned pointer, `p.add(offset)` is aligned
    /// to `alignment`.
    pub fn mi_malloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`,
    /// zero-initialized.
    ///
    /// This is a [`mi_zalloc`](crate::mi_zalloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_zalloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;

    /// Allocate `size` of bytes aligned by `alignment` and place the address of the
    /// allocated memory to `ptr`.
    ///
    /// Returns zero on success, invalid argument for invalid alignment, or out-of-memory.
    pub fn mi_posix_memalign(ptr: *mut *mut c_void, alignment: usize, size: usize) -> c_int;

    /// Allocate `size` bytes aligned by `alignment` with alignment as the first
    /// parameter.
    ///
    /// Return pointer to the allocated memory or null if out of memory.
    pub fn mi_aligned_alloc(alignment: usize, size: usize) -> *mut c_void;

    /// Allocate `size * count` bytes aligned by `alignment` at a specified
    /// `offset`, zero-initialized.
    ///
    /// This is a [`calloc`](mi_calloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_calloc_aligned_at(
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Re-allocate memory to `newsize` bytes aligned by `alignment` at a
    /// specified `offset`.
    ///
    /// This is a [`realloc`](crate::mi_realloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_realloc_aligned_at(
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Zero initialized [re-allocation](crate::mi_realloc).
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`mi_calloc`](mi_calloc),
    /// [`mi_zalloc`](crate::mi_zalloc),
    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
    pub fn mi_rezalloc(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Zero initialized [re-allocation](crate::mi_realloc), following `calloc`
    /// parameter conventions.
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`mi_calloc`](mi_calloc),
    /// [`mi_zalloc`](crate::mi_zalloc),
    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
    pub fn mi_recalloc(p: *mut c_void, newcount: usize, size: usize) -> *mut c_void;

    /// Aligned version of [`mi_rezalloc`].
    pub fn mi_rezalloc_aligned(p: *mut c_void, newsize: usize, alignment: usize) -> *mut c_void;

    /// Offset-aligned version of [`mi_rezalloc`].
    pub fn mi_rezalloc_aligned_at(
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Aligned version of [`mi_recalloc`].
    pub fn mi_recalloc_aligned(
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Offset-aligned version of [`mi_recalloc`].
    pub fn mi_recalloc_aligned_at(
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Allocate an object of no more than [`MI_SMALL_SIZE_MAX`] bytes.
    ///
    /// Does not check that `size` is indeed small.
    ///
    /// Note: Currently [`mi_malloc`](crate::mi_malloc) checks if `size` is
    /// small and calls this if so at runtime, so it's only worth using if you
    /// know for certain that `size` qualifies.
    pub fn mi_malloc_small(size: usize) -> *mut c_void;
195
    /// Allocate a zero-initialized object of no more than
    /// [`MI_SMALL_SIZE_MAX`] bytes.
    ///
    /// Does not check that `size` is indeed small.
    ///
    /// Note: Currently [`mi_zalloc`](crate::mi_zalloc) checks if `size` is
    /// small and calls this if so at runtime, so it's only worth using if you
    /// know for certain that `size` qualifies.
    pub fn mi_zalloc_small(size: usize) -> *mut c_void;

    /// Return the available bytes in a memory block.
    ///
    /// The returned size can be used to call `mi_expand` successfully.
    pub fn mi_usable_size(p: *const c_void) -> usize;

    /// Return the used allocation size.
    ///
    /// Returns the size `n` that will be allocated, where `n >= size`.
    ///
    /// Generally, `mi_usable_size(mi_malloc(size)) == mi_good_size(size)`. This
    /// can be used to reduce internal wasted space when allocating buffers for
    /// example.
    ///
    /// See [`mi_usable_size`](mi_usable_size).
    pub fn mi_good_size(size: usize) -> usize;

    /// Eagerly free memory.
    ///
    /// If `force` is true, aggressively return memory to the OS (can be
    /// expensive!)
    ///
    /// Regular code should not have to call this function. It can be beneficial
    /// in very narrow circumstances; in particular, when a long running thread
    /// allocates a lot of blocks that are freed by other threads it may improve
    /// resource usage by calling this every once in a while.
    pub fn mi_collect(force: bool);

    /// Checked free: If `p` came from mimalloc's heap (as decided by
    /// [`mi_is_in_heap_region`]), this is [`mi_free(p)`](crate::mi_free), but
    /// otherwise it is a no-op.
    pub fn mi_cfree(p: *mut c_void);

    /// Returns true if this is a pointer into a memory region that has been
    /// reserved by the mimalloc heap.
    ///
    /// This function is described by the mimalloc documentation as "relatively
    /// fast".
    ///
    /// See also [`mi_heap_check_owned`], which is (much) slower and slightly
    /// more precise, but only concerns a single `mi_heap`.
    pub fn mi_is_in_heap_region(p: *const c_void) -> bool;

    /// Layout-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
    /// the size and alignment as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is actually aligned to `alignment` and is usable for
    /// at least `size` bytes, before delegating to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function, and you are not required
    /// to use this to deallocate memory from an aligned allocation function.
    pub fn mi_free_size_aligned(p: *mut c_void, size: usize, alignment: usize);

    /// Size-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
    /// the size as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is usable for at least `size` bytes, before delegating
    /// to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function.
    pub fn mi_free_size(p: *mut c_void, size: usize);

    /// Alignment-aware deallocation: Like [`mi_free`](crate::mi_free), but
    /// accepts the alignment as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is actually aligned to `alignment`, before delegating
    /// to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function.
    pub fn mi_free_aligned(p: *mut c_void, alignment: usize);

    /// Print the main statistics.
    ///
    /// Ignores the passed in argument, and outputs to the registered output
    /// function or stderr by default.
    ///
    /// Most detailed when using a debug build.
    pub fn mi_stats_print(_: *mut c_void);

    /// Print the main statistics.
    ///
    /// Pass `None` for `out` to use the default. If `out` is provided, `arg` is
    /// passed as its second parameter.
    ///
    /// Most detailed when using a debug build.
    pub fn mi_stats_print_out(out: mi_output_fun, arg: *mut c_void);

    /// Reset statistics.
    ///
    /// Note: This function is thread safe.
    pub fn mi_stats_reset();

    /// Merge thread local statistics with the main statistics and reset.
    ///
    /// Note: This function is thread safe.
    pub fn mi_stats_merge();

    /// Return the mimalloc version number.
    ///
    /// For example version 1.6.3 would return the number `163`.
    pub fn mi_version() -> c_int;
324
    /// Initialize mimalloc on a thread.
    ///
    /// Should not be used as on most systems (pthreads, windows) this is done
    /// automatically.
    pub fn mi_thread_init();

    /// Initialize the process.
    ///
    /// Should not be used on most systems, as it's called by `thread_init` or the
    /// process loader.
    pub fn mi_process_init();

    #[allow(clippy::doc_markdown)]
    /// Return process information (time and memory usage). All parameters are
    /// optional (nullable) out-params:
    ///
    /// | Parameter | Description |
    /// | :- | :- |
    /// | `elapsed_msecs` | Elapsed wall-clock time of the process in milli-seconds. |
    /// | `user_msecs` | User time in milli-seconds (as the sum over all threads). |
    /// | `system_msecs` | System time in milli-seconds. |
    /// | `current_rss` | Current working set size (touched pages). |
    /// | `peak_rss` | Peak working set size (touched pages). |
    /// | `current_commit` | Current committed memory (backed by the page file). |
    /// | `peak_commit` | Peak committed memory (backed by the page file). |
    /// | `page_faults` | Count of hard page faults. |
    ///
    /// The `current_rss` is precise on Windows and MacOSX; other systems
    /// estimate this using `current_commit`. The `commit` is precise on Windows
    /// but estimated on other systems as the amount of read/write accessible
    /// memory reserved by mimalloc.
    pub fn mi_process_info(
        elapsed_msecs: *mut usize,
        user_msecs: *mut usize,
        system_msecs: *mut usize,
        current_rss: *mut usize,
        peak_rss: *mut usize,
        current_commit: *mut usize,
        peak_commit: *mut usize,
        page_faults: *mut usize,
    );

    /// Uninitialize mimalloc on a thread.
    ///
    /// Should not be used as on most systems (pthreads, windows) this is done
    /// automatically. Ensures that any memory that is not freed yet (but will
    /// be freed by other threads in the future) is properly handled.
    ///
    /// Note: This function is thread safe.
    pub fn mi_thread_done();

    /// Print out heap statistics for this thread.
    ///
    /// Pass `None` for `out` to use the default. If `out` is provided, `arg` is
    /// passed as its second parameter.
    ///
    /// Most detailed when using a debug build.
    ///
    /// Note: This function is thread safe.
    pub fn mi_thread_stats_print_out(out: mi_output_fun, arg: *mut c_void);

    /// Register an output function.
    ///
    /// - `out` The output function, use `None` to output to stderr.
    /// - `arg` Argument that will be passed on to the output function.
    ///
    /// The `out` function is called to output any information from mimalloc,
    /// like verbose or warning messages.
    ///
    /// Note: This function is thread safe.
    pub fn mi_register_output(out: mi_output_fun, arg: *mut c_void);
396
397 /// Register a deferred free function.
398 ///
399 /// - `deferred_free` Address of a deferred free-ing function or `None` to
400 /// unregister.
401 /// - `arg` Argument that will be passed on to the deferred free function.
402 ///
403 /// Some runtime systems use deferred free-ing, for example when using
404 /// reference counting to limit the worst case free time.
405 ///
406 /// Such systems can register (re-entrant) deferred free function to free
407 /// more memory on demand.
408 ///
409 /// - When the `force` parameter is `true` all possible memory should be
410 /// freed.
411 ///
412 /// - The per-thread `heartbeat` parameter is monotonically increasing and
413 /// guaranteed to be deterministic if the program allocates
414 /// deterministically.
415 ///
416 /// - The `deferred_free` function is guaranteed to be called
417 /// deterministically after some number of allocations (regardless of
418 /// freeing or available free memory).
419 ///
420 /// At most one `deferred_free` function can be active.
421 ///
422 /// Note: This function is thread safe.
423 pub fn mi_register_deferred_free(out: mi_deferred_free_fun, arg: *mut c_void);
424
425 /// Register an error callback function.
426 ///
427 /// The `errfun` function is called on an error in mimalloc after emitting
428 /// an error message (through the output function).
429 ///
430 /// It as always legal to just return from the `errfun` function in which
431 /// case allocation functions generally return null or ignore the condition.
432 ///
433 /// The default function only calls `abort()` when compiled in secure mode
434 /// with an `EFAULT` error. The possible error codes are:
435 ///
436 /// - `EAGAIN` (11): Double free was detected (only in debug and secure
437 /// mode).
438 /// - `EFAULT` (14): Corrupted free list or meta-data was detected (only in
439 /// debug and secure mode).
440 /// - `ENOMEM` (12): Not enough memory available to satisfy the request.
441 /// - `EOVERFLOW` (75): Too large a request, for example in `mi_calloc`, the
442 /// `count` and `size` parameters are too large.
443 /// - `EINVAL` (22): Trying to free or re-allocate an invalid pointer.
444 ///
445 /// Note: This function is thread safe.
446 pub fn mi_register_error(out: mi_error_fun, arg: *mut c_void);
447}
448
/// An output callback. Must be thread-safe.
///
/// Called with a message (`msg`) and the `arg` value supplied at registration.
///
/// See [`mi_stats_print_out`], [`mi_thread_stats_print_out`], [`mi_register_output`]
pub type mi_output_fun = Option<unsafe extern "C" fn(msg: *const c_char, arg: *mut c_void)>;
453
/// Type of deferred free functions. Must be thread-safe.
///
/// - `force`: If true, all outstanding items should be freed.
/// - `heartbeat`: A monotonically increasing count.
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_deferred_free`]
pub type mi_deferred_free_fun =
    Option<unsafe extern "C" fn(force: bool, heartbeat: c_ulonglong, arg: *mut c_void)>;
463
/// Type of error callback functions. Must be thread-safe.
///
/// - `code`: Error code (see [`mi_register_error`] for a list).
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_error`]
pub type mi_error_fun = Option<unsafe extern "C" fn(code: c_int, arg: *mut c_void)>;
471
/// Runtime options. All options are false by default.
///
/// Values are the `mi_option_*` constants below, matching mimalloc's C enum.
pub type mi_option_t = c_int;

#[cfg(feature = "arena")]
/// Arena Id
pub type mi_arena_id_t = c_int;
478
// Note: the mimalloc doc website seems to have the order of show_stats and
// show_errors reversed as of 1.6.3, however what I have here is correct:
// https://github.com/microsoft/mimalloc/issues/266#issuecomment-653822341

/// Print error messages to `stderr`.
pub const mi_option_show_errors: mi_option_t = 0;

/// Print statistics to `stderr` when the program is done.
pub const mi_option_show_stats: mi_option_t = 1;

/// Print verbose messages to `stderr`.
pub const mi_option_verbose: mi_option_t = 2;

/// ### The following options are experimental
///
/// Option (experimental) Use large OS pages (2MiB in size) if possible.
///
/// Use large OS pages (2MiB) when available; for some workloads this can
/// significantly improve performance. Use `mi_option_verbose` to check if
/// the large OS pages are enabled -- usually one needs to explicitly allow
/// large OS pages (as on Windows and Linux). However, sometimes the OS is
/// very slow to reserve contiguous physical memory for large OS pages so
/// use with care on systems that can have fragmented memory (for that
/// reason, we generally recommend to use `mi_option_reserve_huge_os_pages`
/// instead whenever possible).
pub const mi_option_large_os_pages: mi_option_t = 6;

/// Option (experimental) The number of huge OS pages (1GiB in size) to reserve at the start of the
/// program.
///
/// This reserves the huge pages at startup and sometimes this can give a large (latency) performance
/// improvement on big workloads. Usually it is better to not use `MIMALLOC_LARGE_OS_PAGES` in
/// combination with this setting. Just like large OS pages, use with care as reserving contiguous
/// physical memory can take a long time when memory is fragmented (but reserving the huge pages is
/// done at startup only once). Note that we usually need to explicitly enable huge OS pages (as on
/// Windows and Linux)). With huge OS pages, it may be beneficial to set the setting
/// `mi_option_eager_commit_delay=N` (N is 1 by default) to delay the initial N segments (of 4MiB) of
/// a thread to not allocate in the huge OS pages; this prevents threads that are short lived and
/// allocate just a little to take up space in the huge OS page area (which cannot be reset).
pub const mi_option_reserve_huge_os_pages: mi_option_t = 7;

/// Option (experimental) Reserve huge OS pages at node N.
///
/// The huge pages are usually allocated evenly among NUMA nodes.
/// You can use `mi_option_reserve_huge_os_pages_at=N` where `N` is the numa node (starting at 0) to
/// allocate all
/// the huge pages at a specific numa node instead.
pub const mi_option_reserve_huge_os_pages_at: mi_option_t = 8;

/// Option (experimental) Reserve specified amount of OS memory at startup, e.g. "1g" or "512m".
pub const mi_option_reserve_os_memory: mi_option_t = 9;

/// Option (experimental) the first N segments per thread are not eagerly committed (=1).
pub const mi_option_eager_commit_delay: mi_option_t = 14;

/// Option (experimental) Pretend there are at most N NUMA nodes; Use 0 to use the actual detected
/// NUMA nodes at runtime.
pub const mi_option_use_numa_nodes: mi_option_t = 16;

/// Option (experimental) If set to 1, do not use OS memory for allocation (but only pre-reserved
/// arenas)
pub const mi_option_limit_os_alloc: mi_option_t = 17;

/// Option (experimental) OS tag to assign to mimalloc'd memory
pub const mi_option_os_tag: mi_option_t = 18;

/// Option (experimental) Maximum number of error messages emitted.
pub const mi_option_max_errors: mi_option_t = 19;

/// Option (experimental) Maximum number of warning messages emitted.
pub const mi_option_max_warnings: mi_option_t = 20;

/// Option (experimental) Maximum number of segments a thread reclaims.
// NOTE(review): the three "max" descriptions above are inferred from the
// option names -- confirm against the pinned mimalloc version's mimalloc.h.
pub const mi_option_max_segment_reclaim: mi_option_t = 21;

/// Last option.
pub const _mi_option_last: mi_option_t = 37;
556
extern "C" {
    // Note: mi_option_{enable,disable} aren't exposed because they're redundant
    // and because of https://github.com/microsoft/mimalloc/issues/266.

    /// Returns true if the provided option is enabled.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_is_enabled(option: mi_option_t) -> bool;

    /// Enable or disable the given option.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled(option: mi_option_t, enable: bool);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], enables or disables the option. If it has,
    /// this function does nothing.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled_default(option: mi_option_t, enable: bool);

    /// Returns the value of the provided option.
    ///
    /// The value of boolean options is 1 or 0, however experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons,
    /// however you can still use them as arguments to this and other
    /// `mi_option_` functions if needed, see the [mimalloc documentation](https://microsoft.github.io/mimalloc/group__options.html) for
    /// details.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_get(option: mi_option_t) -> c_long;

    /// Set the option to the given value.
    ///
    /// The value of boolean options is 1 or 0, however experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons,
    /// however you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set(option: mi_option_t, value: c_long);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], sets the option to the given value. If it
    /// has, this function does nothing.
    ///
    /// The value of boolean options is 1 or 0, however experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons,
    /// however you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_default(option: mi_option_t, value: c_long);
}
620
/// First-class heaps that can be destroyed in one go.
///
/// Opaque type: only ever used behind a raw pointer (`*mut mi_heap_t`).
///
/// Note: The pointers allocated out of a heap can be freed using
/// [`mi_free`](crate::mi_free) -- there is no `mi_heap_free`.
///
/// # Example
///
/// ```
/// use mimalloc_ffi as mi;
/// unsafe {
///     let h = mi::mi_heap_new();
///     assert!(!h.is_null());
///     let p = mi::mi_heap_malloc(h, 50);
///     assert!(!p.is_null());
///
///     // use p...
///     mi::mi_free(p);
///
///     // Clean up the heap. Note that pointers allocated from `h`
///     // are *not* invalidated by `mi_heap_delete`. You would have
///     // to use (the very dangerous) `mi_heap_destroy` for that
///     // behavior.
///     mi::mi_heap_delete(h);
/// }
/// ```
pub enum mi_heap_t {}
647
/// An area of heap space contains blocks of a single size.
///
/// The bytes in freed blocks are `committed - used`.
///
/// Layout must match mimalloc's C `mi_heap_area_t` exactly (hence `#[repr(C)]`).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct mi_heap_area_t {
    /// Start of the area containing heap blocks.
    pub blocks: *mut c_void,
    /// Bytes reserved for this area.
    pub reserved: usize,
    /// Current committed bytes of this area.
    pub committed: usize,
    /// Bytes in use by allocated blocks.
    pub used: usize,
    /// Size in bytes of one block.
    pub block_size: usize,
    /// Size in bytes of a full block including padding and metadata.
    pub full_block_size: usize,
    /// Heap tag associated with this area.
    pub heap_tag: i32,
}
669
/// Visitor function passed to [`mi_heap_visit_blocks`]
///
/// Should return `true` to continue, and `false` to stop visiting (i.e. break)
///
/// This function is always first called for every `area` with `block` as a null
/// pointer. If `visit_all_blocks` was `true`, the function is then called for
/// every allocated block in that area.
pub type mi_block_visit_fun = Option<
    unsafe extern "C" fn(
        heap: *const mi_heap_t,
        area: *const mi_heap_area_t,
        block: *mut c_void,
        block_size: usize,
        arg: *mut c_void,
    ) -> bool,
>;
686
687extern "C" {
688 /// Create a new heap that can be used for allocation.
689 pub fn mi_heap_new() -> *mut mi_heap_t;
690
691 /// Delete a previously allocated heap.
692 ///
693 /// This will release resources and migrate any still allocated blocks in
694 /// this heap (efficienty) to the default heap.
695 ///
696 /// If `heap` is the default heap, the default heap is set to the backing
697 /// heap.
698 pub fn mi_heap_delete(heap: *mut mi_heap_t);
699
700 /// Destroy a heap, freeing all its still allocated blocks.
701 ///
702 /// Use with care as this will free all blocks still allocated in the heap.
703 /// However, this can be a very efficient way to free all heap memory in one
704 /// go.
705 ///
706 /// If `heap` is the default heap, the default heap is set to the backing
707 /// heap.
708 pub fn mi_heap_destroy(heap: *mut mi_heap_t);
709
710 /// Set the default heap to use for [`mi_malloc`](crate::mi_malloc) et al.
711 ///
712 /// Returns the previous default heap.
713 pub fn mi_heap_set_default(heap: *mut mi_heap_t) -> *mut mi_heap_t;
714
715 /// Get the default heap that is used for [`mi_malloc`](crate::mi_malloc) et al.
716 pub fn mi_heap_get_default() -> *mut mi_heap_t;
717
718 /// Get the backing heap.
719 ///
720 /// The _backing_ heap is the initial default heap for a thread and always
721 /// available for allocations. It cannot be destroyed or deleted except by
722 /// exiting the thread.
723 pub fn mi_heap_get_backing() -> *mut mi_heap_t;
724
    /// Release outstanding resources in a specific heap.
    ///
    /// See also [`mi_collect`].
    pub fn mi_heap_collect(heap: *mut mi_heap_t, force: bool);

    /// Equivalent to [`mi_malloc`](crate::mi_malloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_zalloc`](crate::mi_zalloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_calloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_calloc(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_mallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_mallocn(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_malloc_small`], but allocates out of the specific
    /// heap instead of the default.
    ///
    /// `size` must be less than or equal to [`MI_SMALL_SIZE_MAX`].
    pub fn mi_heap_malloc_small(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_realloc`](crate::mi_realloc), but allocates out of
    /// the specific heap instead of the default.
    pub fn mi_heap_realloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_reallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocn(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        count: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_reallocf`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocf(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_strdup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strdup(heap: *mut mi_heap_t, s: *const c_char) -> *mut c_char;

    /// Equivalent to [`mi_strndup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strndup(heap: *mut mi_heap_t, s: *const c_char, n: usize) -> *mut c_char;

    /// Equivalent to [`mi_realpath`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_realpath(
        heap: *mut mi_heap_t,
        fname: *const c_char,
        resolved_name: *mut c_char,
    ) -> *mut c_char;
784
    /// Equivalent to [`mi_malloc_aligned`](crate::mi_malloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_malloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_malloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_calloc_aligned(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_calloc_aligned_at(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned`](crate::mi_realloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_realloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_realloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_rezalloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_recalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_recalloc(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_rezalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_rezalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;
909
    /// Does a heap contain a pointer to a previously allocated block?
    ///
    /// `p` must be a pointer to a previously allocated block (in any heap) -- it cannot be some
    /// random pointer!
    ///
    /// Returns `true` if the block pointed to by `p` is in the `heap`.
    ///
    /// See [`mi_heap_check_owned`].
    pub fn mi_heap_contains_block(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of a heap.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// given heap or any other mimalloc heap. Returns `true` if `p` points to a
    /// block in the given heap, `false` otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`], and
    /// [`mi_is_in_heap_region`].
    pub fn mi_heap_check_owned(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of the default heap of this thread.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// default heap for this thread, or any other mimalloc heap. Returns `true`
    /// if `p` points to a block in the default heap, `false` otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`] and [`mi_heap_get_default`].
    pub fn mi_check_owned(p: *const c_void) -> bool;

    /// Visit all areas and blocks in `heap`.
    ///
    /// If `visit_all_blocks` is `false`, the `visitor` is only called once for
    /// every heap area. If it's `true`, the `visitor` is also called for every
    /// allocated block inside every area (with `!block.is_null()`). Return
    /// `false` from the `visitor` to return early.
    ///
    /// `arg` is an extra argument passed into the `visitor`.
    ///
    /// Returns `true` if all areas and blocks were visited.
    ///
    /// Passing a `None` visitor is allowed, and is a no-op.
    pub fn mi_heap_visit_blocks(
        heap: *const mi_heap_t,
        visit_all_blocks: bool,
        visitor: mi_block_visit_fun,
        arg: *mut c_void,
    ) -> bool;
961
    #[cfg(feature = "arena")]
    /// Create a heap that only allocates in the specified arena.
    pub fn mi_heap_new_in_arena(arena_id: mi_arena_id_t) -> *mut mi_heap_t;
965
    #[cfg(feature = "arena")]
    /// Reserve OS memory for use by mimalloc. Reserved areas are used
    /// before allocating from the OS again. By reserving a large area upfront,
    /// allocation can be more efficient, and can be better managed on systems
    /// without `mmap`/`VirtualAlloc` (like WASM for example).
    ///
    /// - `size` The size to reserve.
    /// - `commit` Commit the memory upfront.
    /// - `allow_large` Allow large OS pages (2MiB) to be used?
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena_id if successful.
    ///
    /// Returns 0 if successful, and an error code otherwise (e.g. `ENOMEM`).
    pub fn mi_reserve_os_memory_ex(
        size: usize,
        commit: bool,
        allow_large: bool,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> c_int;
986
    #[cfg(feature = "arena")]
    /// Manage a particular memory area for use by mimalloc.
    /// This is just like [`mi_reserve_os_memory_ex`] except that the area should already be
    /// allocated in some manner and available for use by mimalloc.
    ///
    /// # Safety
    /// mimalloc will likely segfault when allocating from the arena if the arena `start` & `size`
    /// aren't aligned with mimalloc's `MI_SEGMENT_ALIGN` (e.g. 32MB on x86_64 machines).
    ///
    /// - `start` Start of the memory area.
    /// - `size` The size of the memory area. Must be larger than `MI_ARENA_BLOCK_SIZE` (e.g. 64MB
    ///   on x86_64 machines).
    /// - `commit` Set true if the memory range is already committed.
    /// - `is_large` Set true if the memory range consists of large OS pages, or if the memory
    ///   should not be decommitted or protected (like rdma etc.).
    /// - `is_zero` Set true if the memory range consists only of zeros.
    /// - `numa_node` Possible associated numa node or `-1`.
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena_id if successful.
    ///
    /// Returns `true` if the arena was successfully allocated.
    pub fn mi_manage_os_memory_ex(
        start: *const c_void,
        size: usize,
        is_committed: bool,
        is_large: bool,
        is_zero: bool,
        numa_node: c_int,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> bool;
1018}