libmimalloc_sys/extended.rs
#![allow(nonstandard_style)]

use core::ffi::c_void;

use cty::{c_char, c_int, c_long, c_ulonglong};

/// The maximum number of bytes which may be used as an argument to a function
/// in the `_small` family ([`mi_malloc_small`], [`mi_zalloc_small`], etc).
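///
/// # Example
///
/// A hedged sketch: use the constant as a guard so that only genuinely small
/// requests go through [`mi_malloc_small`], while larger ones fall back to
/// [`mi_malloc`](crate::mi_malloc).
///
/// ```
/// use libmimalloc_sys as mi;
/// let size = 64;
/// let p = unsafe {
///     if size <= mi::MI_SMALL_SIZE_MAX {
///         mi::mi_malloc_small(size)
///     } else {
///         mi::mi_malloc(size)
///     }
/// };
/// assert!(!p.is_null());
/// unsafe { mi::mi_free(p) };
/// ```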
pub const MI_SMALL_SIZE_MAX: usize = 128 * core::mem::size_of::<*mut c_void>();

extern "C" {
    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory.
    ///
    /// All items are initialized to zero.
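    ///
    /// # Example
    ///
    /// A minimal sketch: allocate a zeroed array of 16 `u32`s and free it.
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let p = mi::mi_calloc(16, core::mem::size_of::<u32>()) as *mut u32;
    ///     assert!(!p.is_null());
    ///     // Every item starts out zeroed.
    ///     assert_eq!(core::slice::from_raw_parts(p, 16), &[0u32; 16][..]);
    ///     mi::mi_free(p.cast());
    /// }
    /// ```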
    pub fn mi_calloc(count: usize, size: usize) -> *mut c_void;

    /// Allocate `count` items of `size` length each.
    ///
    /// Returns `null` if `count * size` overflows or on out-of-memory,
    /// otherwise returns the same as
    /// [`mi_malloc(count * size)`](crate::mi_malloc).
    ///
    /// Equivalent to [`mi_calloc`], but returns uninitialized (and not zeroed)
    /// bytes.
    pub fn mi_mallocn(count: usize, size: usize) -> *mut c_void;

    /// Re-allocate memory to `count` elements of `size` bytes.
    ///
    /// The realloc equivalent of the [`mi_mallocn`] interface. Returns `null`
    /// if `count * size` overflows or on out-of-memory, otherwise returns the
    /// same as [`mi_realloc(p, count * size)`](crate::mi_realloc).
    pub fn mi_reallocn(p: *mut c_void, count: usize, size: usize) -> *mut c_void;

    /// Try to re-allocate memory to `newsize` bytes _in place_.
    ///
    /// Returns null on out-of-memory or if the memory could not be expanded in
    /// place. On success, returns the same pointer as `p`.
    ///
    /// If `newsize` is larger than the original `size` allocated for `p`, the
    /// bytes after `size` are uninitialized.
    ///
    /// If null is returned, the original pointer is not freed.
    ///
    /// Note: Conceptually, this is a realloc-like function which returns null
    /// if it would be forced to reallocate memory and copy. In practice it's
    /// equivalent to testing against [`mi_usable_size`](crate::mi_usable_size).
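    ///
    /// # Example
    ///
    /// A hedged sketch: growing within the usable size succeeds in place,
    /// while growing past it returns null and leaves the allocation intact.
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let p = mi::mi_malloc(100);
    ///     assert!(!p.is_null());
    ///     let usable = mi::mi_usable_size(p);
    ///     // Growing up to the usable size works in place.
    ///     assert_eq!(mi::mi_expand(p, usable), p);
    ///     // Growing past it cannot be done in place, so null is returned.
    ///     assert!(mi::mi_expand(p, usable + 1).is_null());
    ///     mi::mi_free(p);
    /// }
    /// ```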
    pub fn mi_expand(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Re-allocate memory to `newsize` bytes.
    ///
    /// This differs from [`mi_realloc`](crate::mi_realloc) in that on failure,
    /// `p` is freed.
    pub fn mi_reallocf(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Allocate and duplicate a nul-terminated C string.
    ///
    /// This can be useful for Rust code when interacting with the FFI.
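    ///
    /// # Example
    ///
    /// A minimal sketch: duplicate a nul-terminated string provided from Rust
    /// and free the copy with [`mi_free`](crate::mi_free).
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let copy = mi::mi_strdup(b"hello\0".as_ptr().cast());
    ///     assert!(!copy.is_null());
    ///     // The copy is an independent, nul-terminated allocation.
    ///     assert_eq!(*copy.cast::<u8>(), b'h');
    ///     mi::mi_free(copy.cast());
    /// }
    /// ```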
    pub fn mi_strdup(s: *const c_char) -> *mut c_char;

    /// Allocate and duplicate a nul-terminated C string, up to `n` bytes.
    ///
    /// This can be useful for Rust code when interacting with the FFI.
    pub fn mi_strndup(s: *const c_char, n: usize) -> *mut c_char;

    /// Resolve a file path name, producing a `C` string which can be passed to
    /// [`mi_free`](crate::mi_free).
    ///
    /// `resolved_name` should be null, but can also point to a buffer of at
    /// least `PATH_MAX` bytes.
    ///
    /// If successful, returns a pointer to the resolved absolute file name, or
    /// `null` on failure (with `errno` set to the error code).
    ///
    /// If `resolved_name` was `null`, the returned result should be freed with
    /// [`mi_free`](crate::mi_free).
    ///
    /// This can rarely be useful in FFI code, but is mostly included for
    /// completeness.
    pub fn mi_realpath(fname: *const c_char, resolved_name: *mut c_char) -> *mut c_char;

    /// Allocate `size * count` bytes aligned by `alignment`.
    ///
    /// Returns a pointer to the allocated memory, or null if out of memory or
    /// if `size * count` overflows.
    ///
    /// Returns a unique pointer if called with a `size * count` of 0.
    pub fn mi_calloc_aligned(count: usize, size: usize, alignment: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`.
    ///
    /// Note that the resulting pointer itself is not aligned by the alignment,
    /// but after `offset` bytes it will be. This can be useful for allocating
    /// data with an inline header, where the data has a specific alignment
    /// requirement.
    ///
    /// Specifically, if `p` is the returned pointer, `p.add(offset)` is aligned
    /// to `alignment`.
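    ///
    /// # Example
    ///
    /// A hedged sketch: a 16-byte inline header followed by data that must be
    /// 64-byte aligned, so the alignment is requested at offset 16.
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let p = mi::mi_malloc_aligned_at(16 + 256, 64, 16) as *mut u8;
    ///     assert!(!p.is_null());
    ///     // The data area (just past the 16-byte header) is 64-byte aligned.
    ///     assert_eq!(p.add(16) as usize % 64, 0);
    ///     mi::mi_free(p.cast());
    /// }
    /// ```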
    pub fn mi_malloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment` at a specified `offset`,
    /// zero-initialized.
    ///
    /// This is a [`mi_zalloc`](crate::mi_zalloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_zalloc_aligned_at(size: usize, alignment: usize, offset: usize) -> *mut c_void;

    /// Allocate `size` bytes aligned by `alignment`, and store the address of
    /// the allocated memory in `ptr`.
    ///
    /// Returns zero on success, `EINVAL` (invalid argument) for an invalid
    /// alignment, or `ENOMEM` on out-of-memory.
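    ///
    /// # Example
    ///
    /// A minimal sketch following the usual `posix_memalign` calling
    /// convention.
    ///
    /// ```
    /// use core::ffi::c_void;
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let mut p: *mut c_void = core::ptr::null_mut();
    ///     let rc = mi::mi_posix_memalign(&mut p, 64, 256);
    ///     assert_eq!(rc, 0);
    ///     assert_eq!(p as usize % 64, 0);
    ///     mi::mi_free(p);
    /// }
    /// ```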
    pub fn mi_posix_memalign(ptr: *mut *mut c_void, alignment: usize, size: usize) -> c_int;

    /// Allocate `size` bytes aligned by `alignment`, with the alignment as the
    /// first parameter.
    ///
    /// Returns a pointer to the allocated memory, or null if out of memory.
    pub fn mi_aligned_alloc(alignment: usize, size: usize) -> *mut c_void;

    /// Allocate `size * count` bytes aligned by `alignment` at a specified
    /// `offset`, zero-initialized.
    ///
    /// This is a [`calloc`](crate::mi_calloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_calloc_aligned_at(
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Re-allocate memory to `newsize` bytes aligned by `alignment` at a
    /// specified `offset`.
    ///
    /// This is a [`realloc`](crate::mi_realloc) equivalent of [`mi_malloc_aligned_at`].
    pub fn mi_realloc_aligned_at(
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Zero-initialized [re-allocation](crate::mi_realloc).
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`mi_calloc`](crate::mi_calloc),
    /// [`mi_zalloc`](crate::mi_zalloc),
    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
    pub fn mi_rezalloc(p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Zero-initialized [re-allocation](crate::mi_realloc), following `calloc`
    /// parameter conventions.
    ///
    /// In general, only valid on memory originally allocated by zero
    /// initialization: [`mi_calloc`](crate::mi_calloc),
    /// [`mi_zalloc`](crate::mi_zalloc),
    /// [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), ...
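    ///
    /// # Example
    ///
    /// A hedged sketch: grow a zeroed array from 4 to 8 `u32` elements; the
    /// newly added elements are zeroed as well.
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let p = mi::mi_calloc(4, core::mem::size_of::<u32>());
    ///     let p = mi::mi_recalloc(p, 8, core::mem::size_of::<u32>()) as *mut u32;
    ///     assert!(!p.is_null());
    ///     assert_eq!(core::slice::from_raw_parts(p, 8), &[0u32; 8][..]);
    ///     mi::mi_free(p.cast());
    /// }
    /// ```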
    pub fn mi_recalloc(p: *mut c_void, newcount: usize, size: usize) -> *mut c_void;

    /// Aligned version of [`mi_rezalloc`].
    pub fn mi_rezalloc_aligned(p: *mut c_void, newsize: usize, alignment: usize) -> *mut c_void;

    /// Offset-aligned version of [`mi_rezalloc`].
    pub fn mi_rezalloc_aligned_at(
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Aligned version of [`mi_recalloc`].
    pub fn mi_recalloc_aligned(
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Offset-aligned version of [`mi_recalloc`].
    pub fn mi_recalloc_aligned_at(
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Allocate an object of no more than [`MI_SMALL_SIZE_MAX`] bytes.
    ///
    /// Does not check that `size` is indeed small.
    ///
    /// Note: Currently [`mi_malloc`](crate::mi_malloc) checks at runtime
    /// whether `size` is small and calls this if so, so it's only worth using
    /// if you know for certain that it is.
    pub fn mi_malloc_small(size: usize) -> *mut c_void;

    /// Allocate a zero-initialized object of no more than
    /// [`MI_SMALL_SIZE_MAX`] bytes.
    ///
    /// Does not check that `size` is indeed small.
    ///
    /// Note: Currently [`mi_zalloc`](crate::mi_zalloc) checks at runtime
    /// whether `size` is small and calls this if so, so it's only worth using
    /// if you know for certain that it is.
    pub fn mi_zalloc_small(size: usize) -> *mut c_void;

    /// Return the available bytes in a memory block.
    ///
    /// The returned size can be used to call [`mi_expand`] successfully.
    pub fn mi_usable_size(p: *const c_void) -> usize;

    /// Return the used allocation size.
    ///
    /// Returns the size `n` that will be allocated, where `n >= size`.
    ///
    /// Generally, `mi_usable_size(mi_malloc(size)) == mi_good_size(size)`.
    /// This can be used to reduce internal wasted space, for example when
    /// allocating buffers.
    ///
    /// See [`mi_usable_size`](crate::mi_usable_size).
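    ///
    /// # Example
    ///
    /// A hedged sketch: round a requested buffer size up to the allocator's
    /// bucket so that no usable space is wasted.
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let requested = 100;
    ///     let good = mi::mi_good_size(requested);
    ///     assert!(good >= requested);
    ///     let p = mi::mi_malloc(good);
    ///     assert!(mi::mi_usable_size(p) >= good);
    ///     mi::mi_free(p);
    /// }
    /// ```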
    pub fn mi_good_size(size: usize) -> usize;

    /// Eagerly free memory.
    ///
    /// If `force` is true, aggressively return memory to the OS (can be
    /// expensive!)
    ///
    /// Regular code should not have to call this function. It can be beneficial
    /// in very narrow circumstances; in particular, when a long running thread
    /// allocates a lot of blocks that are freed by other threads, it may improve
    /// resource usage by calling this every once in a while.
    pub fn mi_collect(force: bool);

    /// Checked free: If `p` came from mimalloc's heap (as decided by
    /// [`mi_is_in_heap_region`]), this is [`mi_free(p)`](crate::mi_free), but
    /// otherwise it is a no-op.
    pub fn mi_cfree(p: *mut c_void);

    /// Returns true if this is a pointer into a memory region that has been
    /// reserved by the mimalloc heap.
    ///
    /// This function is described by the mimalloc documentation as "relatively
    /// fast".
    ///
    /// See also [`mi_heap_check_owned`], which is (much) slower and slightly
    /// more precise, but only concerns a single `mi_heap`.
    pub fn mi_is_in_heap_region(p: *const c_void) -> bool;

    /// Layout-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
    /// the size and alignment as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is actually aligned to `alignment` and is usable for
    /// at least `size` bytes, before delegating to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function, and you are not required
    /// to use this to deallocate memory from an aligned allocation function.
    pub fn mi_free_size_aligned(p: *mut c_void, size: usize, alignment: usize);

    /// Size-aware deallocation: Like [`mi_free`](crate::mi_free), but accepts
    /// the size as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is usable for at least `size` bytes, before delegating
    /// to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function.
    pub fn mi_free_size(p: *mut c_void, size: usize);

    /// Alignment-aware deallocation: Like [`mi_free`](crate::mi_free), but
    /// accepts the alignment as well.
    ///
    /// Note: unlike some allocators that require this information for
    /// performance, mimalloc doesn't need it (as of the current version,
    /// v2.0.0), and so it currently implements this as a (debug) assertion that
    /// verifies that `p` is actually aligned to `alignment`, before delegating
    /// to `mi_free`.
    ///
    /// However, currently there's no way to have this crate enable mimalloc's
    /// debug assertions, so these checks aren't particularly useful.
    ///
    /// Note: It's legal to pass null to this function.
    pub fn mi_free_aligned(p: *mut c_void, alignment: usize);

    /// Print the main statistics.
    ///
    /// Ignores the passed in argument, and outputs to the registered output
    /// function or stderr by default.
    ///
    /// Most detailed when using a debug build.
    pub fn mi_stats_print(_: *mut c_void);

    /// Print the main statistics.
    ///
    /// Pass `None` for `out` to use the default. If `out` is provided, `arg` is
    /// passed as its second parameter.
    ///
    /// Most detailed when using a debug build.
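    ///
    /// # Example
    ///
    /// A minimal sketch: print to the default output (stderr, or whatever was
    /// registered via [`mi_register_output`]).
    ///
    /// ```no_run
    /// use libmimalloc_sys as mi;
    /// unsafe { mi::mi_stats_print_out(None, core::ptr::null_mut()) };
    /// ```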
    pub fn mi_stats_print_out(out: mi_output_fun, arg: *mut c_void);

    /// Reset statistics.
    ///
    /// Note: This function is thread safe.
    pub fn mi_stats_reset();

    /// Merge thread-local statistics with the main statistics and reset.
    ///
    /// Note: This function is thread safe.
    pub fn mi_stats_merge();

    /// Return the mimalloc version number.
    ///
    /// For example, version 1.6.3 would return the number `163`.
    pub fn mi_version() -> c_int;

    /// Initialize mimalloc on a thread.
    ///
    /// Should not be used as on most systems (pthreads, windows) this is done
    /// automatically.
    pub fn mi_thread_init();

    /// Initialize the process.
    ///
    /// Should not be used on most systems, as it's called by thread_init or the
    /// process loader.
    pub fn mi_process_init();

    /// Return process information (time and memory usage). All parameters are
    /// optional (nullable) out-params:
    ///
    /// | Parameter | Description |
    /// | :- | :- |
    /// | `elapsed_msecs` | Elapsed wall-clock time of the process in milliseconds. |
    /// | `user_msecs` | User time in milliseconds (as the sum over all threads). |
    /// | `system_msecs` | System time in milliseconds. |
    /// | `current_rss` | Current working set size (touched pages). |
    /// | `peak_rss` | Peak working set size (touched pages). |
    /// | `current_commit` | Current committed memory (backed by the page file). |
    /// | `peak_commit` | Peak committed memory (backed by the page file). |
    /// | `page_faults` | Count of hard page faults. |
    ///
    /// `current_rss` is precise on Windows and macOS; other systems estimate it
    /// using `current_commit`. The `commit` values are precise on Windows but
    /// estimated on other systems as the amount of read/write-accessible memory
    /// reserved by mimalloc.
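    ///
    /// # Example
    ///
    /// A minimal sketch: query only the elapsed time and peak RSS, passing
    /// null for the fields that are not of interest.
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// let (mut elapsed, mut peak_rss) = (0usize, 0usize);
    /// unsafe {
    ///     mi::mi_process_info(
    ///         &mut elapsed,
    ///         core::ptr::null_mut(),
    ///         core::ptr::null_mut(),
    ///         core::ptr::null_mut(),
    ///         &mut peak_rss,
    ///         core::ptr::null_mut(),
    ///         core::ptr::null_mut(),
    ///         core::ptr::null_mut(),
    ///     );
    /// }
    /// println!("elapsed: {} ms, peak RSS: {} bytes", elapsed, peak_rss);
    /// ```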
    pub fn mi_process_info(
        elapsed_msecs: *mut usize,
        user_msecs: *mut usize,
        system_msecs: *mut usize,
        current_rss: *mut usize,
        peak_rss: *mut usize,
        current_commit: *mut usize,
        peak_commit: *mut usize,
        page_faults: *mut usize,
    );

    /// Uninitialize mimalloc on a thread.
    ///
    /// Should not be used as on most systems (pthreads, windows) this is done
    /// automatically. Ensures that any memory that is not freed yet (but will
    /// be freed by other threads in the future) is properly handled.
    ///
    /// Note: This function is thread safe.
    pub fn mi_thread_done();

    /// Print out heap statistics for this thread.
    ///
    /// Pass `None` for `out` to use the default. If `out` is provided, `arg` is
    /// passed as its second parameter.
    ///
    /// Most detailed when using a debug build.
    ///
    /// Note: This function is thread safe.
    pub fn mi_thread_stats_print_out(out: mi_output_fun, arg: *mut c_void);

    /// Register an output function.
    ///
    /// - `out` The output function, use `None` to output to stderr.
    /// - `arg` Argument that will be passed on to the output function.
    ///
    /// The `out` function is called to output any information from mimalloc,
    /// like verbose or warning messages.
    ///
    /// Note: This function is thread safe.
    pub fn mi_register_output(out: mi_output_fun, arg: *mut c_void);

    /// Register a deferred free function.
    ///
    /// - `deferred_free` Address of a deferred free-ing function or `None` to
    ///   unregister.
    /// - `arg` Argument that will be passed on to the deferred free function.
    ///
    /// Some runtime systems use deferred free-ing, for example when using
    /// reference counting to limit the worst case free time.
    ///
    /// Such systems can register a (re-entrant) deferred free function to free
    /// more memory on demand.
    ///
    /// - When the `force` parameter is `true` all possible memory should be
    ///   freed.
    ///
    /// - The per-thread `heartbeat` parameter is monotonically increasing and
    ///   guaranteed to be deterministic if the program allocates
    ///   deterministically.
    ///
    /// - The `deferred_free` function is guaranteed to be called
    ///   deterministically after some number of allocations (regardless of
    ///   freeing or available free memory).
    ///
    /// At most one `deferred_free` function can be active.
    ///
    /// Note: This function is thread safe.
    pub fn mi_register_deferred_free(out: mi_deferred_free_fun, arg: *mut c_void);

    /// Register an error callback function.
    ///
    /// The `errfun` function is called on an error in mimalloc after emitting
    /// an error message (through the output function).
    ///
    /// It is always legal to just return from the `errfun` function, in which
    /// case the allocation functions generally return null or ignore the
    /// condition.
    ///
    /// The default function only calls abort() when compiled in secure mode
    /// with an `EFAULT` error. The possible error codes are:
    ///
    /// - `EAGAIN` (11): Double free was detected (only in debug and secure
    ///   mode).
    /// - `EFAULT` (14): Corrupted free list or meta-data was detected (only in
    ///   debug and secure mode).
    /// - `ENOMEM` (12): Not enough memory available to satisfy the request.
    /// - `EOVERFLOW` (75): Too large a request, for example in `mi_calloc`, the
    ///   `count` and `size` parameters are too large.
    /// - `EINVAL` (22): Trying to free or re-allocate an invalid pointer.
    ///
    /// Note: This function is thread safe.
    pub fn mi_register_error(out: mi_error_fun, arg: *mut c_void);
}

/// An output callback. Must be thread-safe.
///
/// See [`mi_stats_print_out`], [`mi_thread_stats_print_out`],
/// [`mi_register_output`].
pub type mi_output_fun = Option<unsafe extern "C" fn(msg: *const c_char, arg: *mut c_void)>;

/// Type of deferred free functions. Must be thread-safe.
///
/// - `force`: If true, all outstanding items should be freed.
/// - `heartbeat`: A monotonically increasing count.
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_deferred_free`].
pub type mi_deferred_free_fun =
    Option<unsafe extern "C" fn(force: bool, heartbeat: c_ulonglong, arg: *mut c_void)>;

/// Type of error callback functions. Must be thread-safe.
///
/// - `err`: Error code (see [`mi_register_error`] for a list).
/// - `arg`: Argument that was passed at registration to hold extra state.
///
/// See [`mi_register_error`].
pub type mi_error_fun = Option<unsafe extern "C" fn(code: c_int, arg: *mut c_void)>;

/// Runtime options. All options are false by default.
pub type mi_option_t = c_int;

#[cfg(feature = "arena")]
/// Arena id.
pub type mi_arena_id_t = c_int;

// Note: the mimalloc doc website seems to have the order of show_stats and
// show_errors reversed as of 1.6.3, however what I have here is correct:
// https://github.com/microsoft/mimalloc/issues/266#issuecomment-653822341

/// Print error messages to `stderr`.
pub const mi_option_show_errors: mi_option_t = 0;

/// Print statistics to `stderr` when the program is done.
pub const mi_option_show_stats: mi_option_t = 1;

/// Print verbose messages to `stderr`.
pub const mi_option_verbose: mi_option_t = 2;

/// ### The following options are experimental
///
/// Option (experimental): Use large OS pages (2MiB in size) if possible.
///
/// Use large OS pages (2MiB) when available; for some workloads this can
/// significantly improve performance. Use `mi_option_verbose` to check if
/// the large OS pages are enabled -- usually one needs to explicitly allow
/// large OS pages (as on Windows and Linux). However, sometimes the OS is
/// very slow to reserve contiguous physical memory for large OS pages so
/// use with care on systems that can have fragmented memory (for that
/// reason, we generally recommend to use [`mi_option_reserve_huge_os_pages`]
/// instead whenever possible).
pub const mi_option_large_os_pages: mi_option_t = 6;

/// Option (experimental): The number of huge OS pages (1GiB in size) to
/// reserve at the start of the program.
///
/// This reserves the huge pages at startup and sometimes this can give a large
/// (latency) performance improvement on big workloads. Usually it is better to
/// not use `MIMALLOC_LARGE_OS_PAGES` in combination with this setting. Just
/// like large OS pages, use with care as reserving contiguous physical memory
/// can take a long time when memory is fragmented (but reserving the huge
/// pages is done at startup only once). Note that we usually need to
/// explicitly enable huge OS pages (as on Windows and Linux). With huge OS
/// pages, it may be beneficial to set the setting
/// `mi_option_eager_commit_delay=N` (`N` is 1 by default) to delay the initial
/// `N` segments (of 4MiB) of a thread to not allocate in the huge OS pages;
/// this prevents threads that are short lived and allocate just a little to
/// take up space in the huge OS page area (which cannot be reset).
pub const mi_option_reserve_huge_os_pages: mi_option_t = 7;

/// Option (experimental): Reserve huge OS pages at node N.
///
/// The huge pages are usually allocated evenly among NUMA nodes.
/// You can use `mi_option_reserve_huge_os_pages_at=N` where `N` is the NUMA
/// node (starting at 0) to allocate all the huge pages at a specific NUMA node
/// instead.
pub const mi_option_reserve_huge_os_pages_at: mi_option_t = 8;

/// Option (experimental): Reserve the specified amount of OS memory at
/// startup, e.g. "1g" or "512m".
pub const mi_option_reserve_os_memory: mi_option_t = 9;

/// Option (experimental): The first N segments per thread are not eagerly
/// committed (=1).
pub const mi_option_eager_commit_delay: mi_option_t = 14;

/// Option (experimental): Pretend there are at most N NUMA nodes; use 0 to use
/// the actual number of NUMA nodes detected at runtime.
pub const mi_option_use_numa_nodes: mi_option_t = 16;

/// Option (experimental): If set to 1, do not use OS memory for allocation
/// (but only pre-reserved arenas).
pub const mi_option_limit_os_alloc: mi_option_t = 17;

/// Option (experimental): OS tag to assign to mimalloc'd memory.
pub const mi_option_os_tag: mi_option_t = 18;

/// Option (experimental)
pub const mi_option_max_errors: mi_option_t = 19;

/// Option (experimental)
pub const mi_option_max_warnings: mi_option_t = 20;

#[cfg(not(feature = "v3"))]
/// Option (experimental)
pub const mi_option_max_segment_reclaim: mi_option_t = 21;

/// Last option.
#[cfg(not(feature = "v3"))]
pub const _mi_option_last: mi_option_t = 37;
#[cfg(feature = "v3")]
pub const _mi_option_last: mi_option_t = 43;

extern "C" {
    // Note: mi_option_{enable,disable} aren't exposed because they're redundant
    // and because of https://github.com/microsoft/mimalloc/issues/266.

    /// Returns true if the provided option is enabled.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_is_enabled(option: mi_option_t) -> bool;

    /// Enable or disable the given option.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled(option: mi_option_t, enable: bool);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], enables or disables the option. If it has,
    /// this function does nothing.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_enabled_default(option: mi_option_t, enable: bool);

    /// Returns the value of the provided option.
    ///
    /// The value of boolean options is 1 or 0; however, experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons;
    /// however, you can still use them as arguments to this and other
    /// `mi_option_` functions if needed. See the mimalloc documentation for
    /// details: <https://microsoft.github.io/mimalloc/group__options.html>
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_get(option: mi_option_t) -> c_long;

    /// Set the option to the given value.
    ///
    /// The value of boolean options is 1 or 0; however, experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons;
    /// however, you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
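    ///
    /// # Example
    ///
    /// A minimal sketch: turn on verbose output at runtime.
    ///
    /// ```no_run
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     mi::mi_option_set(mi::mi_option_verbose, 1);
    ///     assert_eq!(mi::mi_option_get(mi::mi_option_verbose), 1);
    /// }
    /// ```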
    pub fn mi_option_set(option: mi_option_t, value: c_long);

    /// If the given option has not yet been initialized with [`mi_option_set`]
    /// or [`mi_option_set_enabled`], sets the option to the given value. If it
    /// has, this function does nothing.
    ///
    /// The value of boolean options is 1 or 0; however, experimental options
    /// exist which take a numeric value, which is the intended use of this
    /// function.
    ///
    /// These options are not exposed as constants for stability reasons;
    /// however, you can still use them as arguments to this and other
    /// `mi_option_` functions if needed.
    ///
    /// Note: this function is not thread safe.
    pub fn mi_option_set_default(option: mi_option_t, value: c_long);
}

/// First-class heaps that can be destroyed in one go.
///
/// Note: The pointers allocated out of a heap can be freed using
/// [`mi_free`](crate::mi_free) -- there is no `mi_heap_free`.
///
/// # Example
///
/// ```
/// use libmimalloc_sys as mi;
/// unsafe {
///     let h = mi::mi_heap_new();
///     assert!(!h.is_null());
///     let p = mi::mi_heap_malloc(h, 50);
///     assert!(!p.is_null());
///
///     // use p...
///     mi::mi_free(p);
///
///     // Clean up the heap. Note that pointers allocated from `h`
///     // are *not* invalidated by `mi_heap_delete`. You would have
///     // to use (the very dangerous) `mi_heap_destroy` for that
///     // behavior.
///     mi::mi_heap_delete(h);
/// }
/// ```
pub enum mi_heap_t {}

/// An area of heap space that contains blocks of a single size.
///
/// The bytes in freed blocks are `committed - used`.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct mi_heap_area_t {
    /// Start of the area containing heap blocks.
    pub blocks: *mut c_void,
    /// Bytes reserved for this area.
    pub reserved: usize,
    /// Current committed bytes of this area.
    pub committed: usize,
    /// Bytes in use by allocated blocks.
    pub used: usize,
    /// Size in bytes of one block.
    pub block_size: usize,
    /// Size in bytes of a full block including padding and metadata.
    pub full_block_size: usize,
    /// Heap tag associated with this area.
    pub heap_tag: i32,
}

/// Visitor function passed to [`mi_heap_visit_blocks`].
///
/// Should return `true` to continue, and `false` to stop visiting (i.e. break).
///
/// This function is always first called for every `area` with `block` as a null
/// pointer. If `visit_all_blocks` was `true`, the function is then called for
/// every allocated block in that area.
pub type mi_block_visit_fun = Option<
    unsafe extern "C" fn(
        heap: *const mi_heap_t,
        area: *const mi_heap_area_t,
        block: *mut c_void,
        block_size: usize,
        arg: *mut c_void,
    ) -> bool,
>;

extern "C" {
    /// Create a new heap that can be used for allocation.
    pub fn mi_heap_new() -> *mut mi_heap_t;

    /// Delete a previously allocated heap.
    ///
    /// This will release resources and migrate any still allocated blocks in
    /// this heap (efficiently) to the default heap.
    ///
    /// If `heap` is the default heap, the default heap is set to the backing
    /// heap.
    pub fn mi_heap_delete(heap: *mut mi_heap_t);

    /// Destroy a heap, freeing all its still allocated blocks.
    ///
    /// Use with care as this will free all blocks still allocated in the heap.
    /// However, this can be a very efficient way to free all heap memory in one
    /// go.
    ///
    /// If `heap` is the default heap, the default heap is set to the backing
    /// heap.
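    ///
    /// # Example
    ///
    /// A hedged sketch: a short-lived heap whose blocks are all freed in one
    /// go, with no individual `mi_free` calls.
    ///
    /// ```
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let heap = mi::mi_heap_new();
    ///     for _ in 0..10 {
    ///         let p = mi::mi_heap_malloc(heap, 128);
    ///         assert!(!p.is_null());
    ///         // use p, but do not free it individually...
    ///     }
    ///     // ...because destroying the heap frees every block at once.
    ///     mi::mi_heap_destroy(heap);
    /// }
    /// ```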
    pub fn mi_heap_destroy(heap: *mut mi_heap_t);

    /// Set the default heap to use for [`mi_malloc`](crate::mi_malloc) et al.
    ///
    /// Returns the previous default heap.
    pub fn mi_heap_set_default(heap: *mut mi_heap_t) -> *mut mi_heap_t;

    /// Get the default heap that is used for [`mi_malloc`](crate::mi_malloc) et al.
    pub fn mi_heap_get_default() -> *mut mi_heap_t;

    /// Get the backing heap.
    ///
    /// The _backing_ heap is the initial default heap for a thread and is
    /// always available for allocations. It cannot be destroyed or deleted
    /// except by exiting the thread.
    pub fn mi_heap_get_backing() -> *mut mi_heap_t;

    /// Release outstanding resources in a specific heap.
    ///
    /// See also [`mi_collect`].
    pub fn mi_heap_collect(heap: *mut mi_heap_t, force: bool);

    /// Equivalent to [`mi_malloc`](crate::mi_malloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_zalloc`](crate::mi_zalloc), but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_calloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_calloc(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_mallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_mallocn(heap: *mut mi_heap_t, count: usize, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_malloc_small`], but allocates out of the specific
    /// heap instead of the default.
    ///
    /// `size` must be smaller than or equal to [`MI_SMALL_SIZE_MAX`].
    pub fn mi_heap_malloc_small(heap: *mut mi_heap_t, size: usize) -> *mut c_void;

    /// Equivalent to [`mi_realloc`](crate::mi_realloc), but allocates out of
    /// the specific heap instead of the default.
    pub fn mi_heap_realloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_reallocn`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocn(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        count: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_reallocf`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_reallocf(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_strdup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strdup(heap: *mut mi_heap_t, s: *const c_char) -> *mut c_char;

    /// Equivalent to [`mi_strndup`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_strndup(heap: *mut mi_heap_t, s: *const c_char, n: usize) -> *mut c_char;

    /// Equivalent to [`mi_realpath`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_realpath(
        heap: *mut mi_heap_t,
        fname: *const c_char,
        resolved_name: *mut c_char,
    ) -> *mut c_char;

    /// Equivalent to [`mi_malloc_aligned`](crate::mi_malloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_malloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_malloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_malloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned`](crate::mi_zalloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_zalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_zalloc_aligned_at(
        heap: *mut mi_heap_t,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_calloc_aligned(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_calloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_calloc_aligned_at(
        heap: *mut mi_heap_t,
        count: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned`](crate::mi_realloc_aligned), but
    /// allocates out of the specific heap instead of the default.
    pub fn mi_heap_realloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_realloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_realloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_rezalloc(heap: *mut mi_heap_t, p: *mut c_void, newsize: usize) -> *mut c_void;

    /// Equivalent to [`mi_recalloc`], but allocates out of the specific heap
    /// instead of the default.
    pub fn mi_heap_recalloc(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned`], but allocates out of the specific
    /// heap instead of the default.
    pub fn mi_heap_rezalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_rezalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_rezalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newsize: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
    ) -> *mut c_void;

    /// Equivalent to [`mi_recalloc_aligned_at`], but allocates out of the
    /// specific heap instead of the default.
    pub fn mi_heap_recalloc_aligned_at(
        heap: *mut mi_heap_t,
        p: *mut c_void,
        newcount: usize,
        size: usize,
        alignment: usize,
        offset: usize,
    ) -> *mut c_void;

    /// Does a heap contain a pointer to a previously allocated block?
    ///
    /// `p` must be a pointer to a previously allocated block (in any heap) --
    /// it cannot be some random pointer!
    ///
    /// Returns `true` if the block pointed to by `p` is in the `heap`.
    ///
    /// See [`mi_heap_check_owned`].
    pub fn mi_heap_contains_block(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of a heap.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// given heap or any other mimalloc heap. Returns `true` if `p` points to a
    /// block in the given heap, false otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`], and
    /// [`mi_is_in_heap_region`].
    pub fn mi_heap_check_owned(heap: *mut mi_heap_t, p: *const c_void) -> bool;

    /// Check safely if any pointer is part of the default heap of this thread.
    ///
    /// `p` may be any pointer -- not required to be previously allocated by the
    /// default heap for this thread, or any other mimalloc heap. Returns `true`
    /// if `p` points to a block in the default heap, false otherwise.
    ///
    /// Note: expensive function, linear in the pages in the heap.
    ///
    /// See [`mi_heap_contains_block`], [`mi_heap_get_default`].
    pub fn mi_check_owned(p: *const c_void) -> bool;

    /// Visit all areas and blocks in `heap`.
    ///
    /// If `visit_all_blocks` is false, the `visitor` is only called once for
    /// every heap area. If it's true, the `visitor` is also called for every
    /// allocated block inside every area (with `!block.is_null()`). Return
    /// `false` from the `visitor` to return early.
    ///
    /// `arg` is an extra argument passed into the `visitor`.
    ///
    /// Returns `true` if all areas and blocks were visited.
    ///
    /// Passing a `None` visitor is allowed, and is a no-op.
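    ///
    /// # Example
    ///
    /// A hedged sketch: count the allocated blocks in a heap using a visitor
    /// callback; the counter is threaded through `arg` as this example's own
    /// state.
    ///
    /// ```
    /// use core::ffi::c_void;
    /// use libmimalloc_sys as mi;
    ///
    /// unsafe extern "C" fn count_blocks(
    ///     _heap: *const mi::mi_heap_t,
    ///     _area: *const mi::mi_heap_area_t,
    ///     block: *mut c_void,
    ///     _block_size: usize,
    ///     arg: *mut c_void,
    /// ) -> bool {
    ///     if !block.is_null() {
    ///         // Each non-null `block` is one allocated block.
    ///         *(arg as *mut usize) += 1;
    ///     }
    ///     true // keep visiting
    /// }
    ///
    /// unsafe {
    ///     let heap = mi::mi_heap_new();
    ///     let p = mi::mi_heap_malloc(heap, 64);
    ///     let mut count = 0usize;
    ///     mi::mi_heap_visit_blocks(
    ///         heap,
    ///         true,
    ///         Some(count_blocks),
    ///         &mut count as *mut usize as *mut c_void,
    ///     );
    ///     assert!(count >= 1);
    ///     mi::mi_free(p);
    ///     mi::mi_heap_delete(heap);
    /// }
    /// ```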
    pub fn mi_heap_visit_blocks(
        heap: *const mi_heap_t,
        visit_all_blocks: bool,
        visitor: mi_block_visit_fun,
        arg: *mut c_void,
    ) -> bool;

    #[cfg(feature = "arena")]
    /// Create a heap that only allocates in the specified arena.
    pub fn mi_heap_new_in_arena(arena_id: mi_arena_id_t) -> *mut mi_heap_t;

    #[cfg(feature = "arena")]
    /// Reserve OS memory for use by mimalloc. Reserved areas are used
    /// before allocating from the OS again. By reserving a large area upfront,
    /// allocation can be more efficient, and can be better managed on systems
    /// without `mmap`/`VirtualAlloc` (like WASM for example).
    ///
    /// - `size` The size to reserve.
    /// - `commit` Commit the memory upfront.
    /// - `allow_large` Allow large OS pages (2MiB) to be used?
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena id if
    ///   successful.
    ///
    /// Returns 0 if successful, and an error code otherwise (e.g. `ENOMEM`).
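    ///
    /// # Example
    ///
    /// A hedged sketch (requires the `arena` feature): reserve 64MiB upfront
    /// and create a heap that allocates only from that arena.
    ///
    /// ```no_run
    /// use libmimalloc_sys as mi;
    /// unsafe {
    ///     let mut arena_id: mi::mi_arena_id_t = 0;
    ///     let rc = mi::mi_reserve_os_memory_ex(
    ///         64 * 1024 * 1024, // size to reserve
    ///         false,            // do not commit upfront
    ///         false,            // do not require large OS pages
    ///         true,             // exclusive to this arena
    ///         &mut arena_id,
    ///     );
    ///     assert_eq!(rc, 0);
    ///     let heap = mi::mi_heap_new_in_arena(arena_id);
    ///     assert!(!heap.is_null());
    /// }
    /// ```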
    pub fn mi_reserve_os_memory_ex(
        size: usize,
        commit: bool,
        allow_large: bool,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> c_int;

    #[cfg(feature = "arena")]
    /// Manage a particular memory area for use by mimalloc.
    ///
    /// This is just like [`mi_reserve_os_memory_ex`] except that the area
    /// should already be allocated in some manner and available for use by
    /// mimalloc.
    ///
    /// # Safety
    ///
    /// mimalloc will likely segfault when allocating from the arena if the
    /// arena `start` & `size` aren't aligned with mimalloc's
    /// `MI_SEGMENT_ALIGN` (e.g. 32MB on x86_64 machines).
    ///
    /// - `start` Start of the memory area.
    /// - `size` The size of the memory area. Must be larger than
    ///   `MI_ARENA_BLOCK_SIZE` (e.g. 64MB on x86_64 machines).
    /// - `is_committed` Set true if the memory range is already committed.
    /// - `is_large` Set true if the memory range consists of large OS pages, or
    ///   if the memory should not be decommitted or protected (like rdma etc.).
    /// - `is_zero` Set true if the memory range consists only of zeros.
    /// - `numa_node` Possible associated NUMA node or `-1`.
    /// - `exclusive` Only allow allocations if specifically for this arena.
    /// - `arena_id` Pointer whose value will be set to the new arena id if
    ///   successful.
    ///
    /// Returns `true` if the arena was successfully created.
    pub fn mi_manage_os_memory_ex(
        start: *const c_void,
        size: usize,
        is_committed: bool,
        is_large: bool,
        is_zero: bool,
        numa_node: c_int,
        exclusive: bool,
        arena_id: *mut mi_arena_id_t,
    ) -> bool;
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_calculates_usable_size() {
        let ptr = unsafe { mi_malloc(32) } as *mut u8;
        let usable_size = unsafe { mi_usable_size(ptr as *mut c_void) };
        assert!(
            usable_size >= 32,
            "usable_size should be at least the allocated size"
        );
    }

    #[test]
    fn runtime_stable_option() {
        unsafe {
            assert_eq!(mi_option_get(mi_option_show_errors), 0);
            mi_option_set(mi_option_show_errors, 1);
            assert_eq!(mi_option_get(mi_option_show_errors), 1);

            assert_eq!(mi_option_get(mi_option_show_stats), 0);
            mi_option_set(mi_option_show_stats, 1);
            assert_eq!(mi_option_get(mi_option_show_stats), 1);

            assert_eq!(mi_option_get(mi_option_verbose), 0);
            mi_option_set(mi_option_verbose, 1);
            assert_eq!(mi_option_get(mi_option_verbose), 1);
        }
    }
}
1048}