jevmalloc_sys/
lib.rs

1//! Rust bindings to the `jemalloc` C library.
2//!
//! `jemalloc` is a general purpose memory allocator; its documentation
4//! can be found here:
5//!
6//! * [API documentation][jemalloc_docs]
7//! * [Wiki][jemalloc_wiki] (design documents, presentations, profiling,
8//!   debugging, tuning, ...)
9//!
10//! `jemalloc` exposes both a standard and a non-standard API.
11//!
12//! # Standard API
13//!
//! The standard API includes: [`malloc`], [`calloc`], [`realloc`], and
//! [`free`], which conform to ISO/IEC 9899:1990 (“ISO C90”),
//! [`posix_memalign`], which conforms to POSIX.1-2016, and
//! [`aligned_alloc`].
18//!
//! Note that these standards leave some details as _implementation defined_.
//! These docs document that behavior for `jemalloc`, but keep in mind that
//! other standard-conforming implementations of these functions in other
//! allocators might behave slightly differently.
23//!
24//! # Non-Standard API
25//!
26//! The non-standard API includes: [`mallocx`], [`rallocx`], [`xallocx`],
27//! [`sallocx`], [`dallocx`], [`sdallocx`], and [`nallocx`]. These functions all
28//! have a `flags` argument that can be used to specify options. Use bitwise or
29//! `|` to specify one or more of the following: [`MALLOCX_LG_ALIGN`],
30//! [`MALLOCX_ALIGN`], [`MALLOCX_ZERO`], [`MALLOCX_TCACHE`],
31//! [`MALLOCX_TCACHE_NONE`], and [`MALLOCX_ARENA`].
32//!
33//! # Environment variables
34//!
35//! The `MALLOC_CONF` environment variable affects the execution of the
36//! allocation functions.
37//!
//! For the documentation of the [`MALLCTL` namespace, visit the jemalloc
//! documentation][jemalloc_mallctl].
40//!
41//! [jemalloc_docs]: http://jemalloc.net/jemalloc.3.html
42//! [jemalloc_wiki]: https://github.com/jemalloc/jemalloc/wiki
43//! [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace
44#![no_std]
45#![allow(non_snake_case, non_camel_case_types, unnecessary_safety_doc)]
46#![deny(missing_docs, broken_intra_doc_links)]
47
48use libc::{c_char, c_int, c_uint, c_void, size_t};
49
// jemalloc uses `stdbool.h` to define `bool`, for which the Rust equivalent is
// `bool`. However, jemalloc also ships its own `stdbool.h` that it uses when
// compiling with MSVC, and that header defines `bool` as `BOOL`, which in turn
// is `int` — so the FFI-compatible Rust type differs per target environment.
#[cfg(target_env = "msvc")]
type c_bool = c_int;
#[cfg(not(target_env = "msvc"))]
type c_bool = bool;
58
/// Align the memory allocation to start at an address that is a
/// multiple of `1 << la`.
///
/// # Safety
///
/// It does not validate that `la` is within the valid range.
#[inline]
#[must_use]
pub const fn MALLOCX_LG_ALIGN(la: usize) -> c_int {
	// The flag encoding is the base-2 logarithm of the requested alignment,
	// stored directly in the low bits of the flags word.
	let flag = la as c_int;
	flag
}
68
/// Align the memory allocation to start at an address that is a multiple of
/// `align`, where `align` is a power of two.
///
/// # Safety
///
/// This function does not validate that `align` is a power of 2.
#[inline]
#[must_use]
pub const fn MALLOCX_ALIGN(align: usize) -> c_int { align.trailing_zeros() as c_int }
78
/// Initialize newly allocated memory to contain zero bytes.
///
/// In the growing reallocation case, the real size prior to reallocation
/// defines the boundary between untouched bytes and those that are initialized
/// to contain zero bytes.
///
/// If this option is not set, newly allocated memory is uninitialized.
// Bit 0x40 of the `flags` argument accepted by the `*allocx` functions; can be
// OR-ed with the other `MALLOCX_*` flags.
pub const MALLOCX_ZERO: c_int = 0x40;
87
/// Use the thread-specific cache (_tcache_) specified by the identifier `tc`.
///
/// # Safety
///
/// `tc` must have been acquired via the `tcache.create mallctl`. This function
/// does not validate that `tc` specifies a valid identifier.
#[inline]
#[must_use]
pub const fn MALLOCX_TCACHE(tc: usize) -> c_int {
	// Bias the identifier by 2, then move it above the low byte of the flags
	// word (wrapping arithmetic keeps this well-defined for any `tc`).
	let biased = tc.wrapping_add(2);
	biased.wrapping_shl(8) as c_int
}
97
98/// Do not use a thread-specific cache (_tcache_).
99///
100/// Unless `MALLOCX_TCACHE(tc)` or `MALLOCX_TCACHE_NONE` is specified, an
101/// automatically managed _tcache_ will be used under many circumstances.
102///
103/// # Safety
104///
105/// This option cannot be used in the same `flags` argument as
106/// `MALLOCX_TCACHE(tc)`.
107// FIXME: This should just be a const.
108pub const MALLOCX_TCACHE_NONE: c_int = MALLOCX_TCACHE((-1_isize) as usize);
109
/// Use the arena specified by the index `a`.
///
/// This option has no effect for regions that were allocated via an arena other
/// than the one specified.
///
/// # Safety
///
/// This function does not validate that `a` specifies an arena index in the
/// valid range.
#[inline]
#[must_use]
pub const fn MALLOCX_ARENA(a: usize) -> c_int {
	// Arena indices are stored one-biased in bits 20 and above of the flags
	// word (wrapping arithmetic keeps this well-defined for any `a`).
	let biased = (a as c_int).wrapping_add(1);
	biased.wrapping_shl(20)
}
122
unsafe extern "C" {
	/// Allocates `size` bytes of uninitialized memory.
	///
	/// It returns a pointer to the start (lowest byte address) of the allocated
	/// space. This pointer is suitably aligned so that it may be assigned to a
	/// pointer to any type of object and then used to access such an object in
	/// the space allocated until the space is explicitly deallocated. Each
	/// yielded pointer points to an object disjoint from any other object.
	///
	/// If the `size` of the space requested is zero, either a null pointer is
	/// returned, or the behavior is as if the `size` were some nonzero value,
	/// except that the returned pointer shall not be used to access an object.
	///
	/// # Errors
	///
	/// If the space cannot be allocated, a null pointer is returned and `errno`
	/// is set to `ENOMEM`.
	#[cfg_attr(prefixed, link_name = "_rjem_malloc")]
	pub fn malloc(size: size_t) -> *mut c_void;
	/// Allocates zero-initialized space for an array of `number` objects, each
	/// of whose size is `size`.
	///
	/// The result is identical to calling [`malloc`] with an argument of
	/// `number * size`, with the exception that the allocated memory is
	/// explicitly initialized to _zero_ bytes.
	///
	/// Note: zero-initialized memory need not be the same as the
	/// representation of floating-point zero or a null pointer constant.
	#[cfg_attr(prefixed, link_name = "_rjem_calloc")]
	pub fn calloc(number: size_t, size: size_t) -> *mut c_void;

	/// Allocates `size` bytes of memory at an address which is a multiple of
	/// `alignment` and is placed in `*ptr`.
	///
	/// If `size` is zero, then the value placed in `*ptr` is either null, or
	/// the behavior is as if the `size` were some nonzero value, except that
	/// the returned pointer shall not be used to access an object.
	///
	/// # Errors
	///
	/// On success, it returns zero. On error, the value of `errno` is _not_
	/// set, `*ptr` is not modified, and the return values can be:
	///
	/// - `EINVAL`: the `alignment` argument was not a power-of-two or was not a
	///   multiple of `mem::size_of::<*const c_void>()`.
	/// - `ENOMEM`: there was insufficient memory to fulfill the allocation
	///   request.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `ptr` is null.
	#[cfg_attr(prefixed, link_name = "_rjem_posix_memalign")]
	pub fn posix_memalign(ptr: *mut *mut c_void, alignment: size_t, size: size_t) -> c_int;

	/// Allocates `size` bytes of memory at an address which is a multiple of
	/// `alignment`.
	///
	/// If the `size` of the space requested is zero, either a null pointer is
	/// returned, or the behavior is as if the `size` were some nonzero value,
	/// except that the returned pointer shall not be used to access an object.
	///
	/// # Errors
	///
	/// Returns null if the request fails.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `alignment` is not a power-of-two
	/// * `size` is not an integral multiple of `alignment`
	#[cfg_attr(prefixed, link_name = "_rjem_aligned_alloc")]
	pub fn aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void;

	/// Resizes the previously-allocated memory region referenced by `ptr` to
	/// `size` bytes.
	///
	/// Deallocates the old object pointed to by `ptr` and returns a pointer to
	/// a new object that has the size specified by `size`. The contents of the
	/// new object are the same as that of the old object prior to deallocation,
	/// up to the lesser of the new and old sizes.
	///
	/// The memory in the new object beyond the size of the old object is
	/// uninitialized.
	///
	/// The returned pointer to a new object may have the same value as a
	/// pointer to the old object, but [`realloc`] may move the memory
	/// allocation, resulting in a different return value than `ptr`.
	///
	/// If `ptr` is null, [`realloc`] behaves identically to [`malloc`] for the
	/// specified size.
	///
	/// If the size of the space requested is zero, the behavior is
	/// implementation-defined: either a null pointer is returned, or the
	/// behavior is as if the size were some nonzero value, except that the
	/// returned pointer shall not be used to access an object.
	///
	/// # Errors
	///
	/// If memory for the new object cannot be allocated, the old object is not
	/// deallocated, its value is unchanged, [`realloc`] returns null, and
	/// `errno` is set to `ENOMEM`.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `ptr` does not match a pointer previously returned by the memory
	///   allocation functions of this crate, or
	/// * the memory region referenced by `ptr` has been deallocated.
	#[cfg_attr(prefixed, link_name = "_rjem_realloc")]
	pub fn realloc(ptr: *mut c_void, size: size_t) -> *mut c_void;

	/// Deallocates previously-allocated memory region referenced by `ptr`.
	///
	/// This makes the space available for future allocations.
	///
	/// If `ptr` is null, no action occurs.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `ptr` does not match a pointer earlier returned by the memory
	///   allocation functions of this crate, or
	/// * the memory region referenced by `ptr` has been deallocated.
	#[cfg_attr(prefixed, link_name = "_rjem_free")]
	pub fn free(ptr: *mut c_void);

	/// Allocates at least `size` bytes of memory according to `flags`.
	///
	/// It returns a pointer to the start (lowest byte address) of the allocated
	/// space. This pointer is suitably aligned so that it may be assigned to a
	/// pointer to any type of object and then used to access such an object in
	/// the space allocated until the space is explicitly deallocated. Each
	/// yielded pointer points to an object disjoint from any other object.
	///
	/// # Errors
	///
	/// On success it returns a non-null pointer. A null pointer return value
	/// indicates that insufficient contiguous memory was available to service
	/// the allocation request.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if `size == 0`.
	#[cfg_attr(prefixed, link_name = "_rjem_mallocx")]
	pub fn mallocx(size: size_t, flags: c_int) -> *mut c_void;

	/// Resizes the previously-allocated memory region referenced by `ptr` to be
	/// at least `size` bytes.
	///
	/// Deallocates the old object pointed to by `ptr` and returns a pointer to
	/// a new object that has the size specified by `size`. The contents of the
	/// new object are the same as that of the old object prior to deallocation,
	/// up to the lesser of the new and old sizes.
	///
	/// The memory in the new object beyond the size of the old object is
	/// obtained according to `flags` (it might be uninitialized).
	///
	/// The returned pointer to a new object may have the same value as a
	/// pointer to the old object, but [`rallocx`] may move the memory
	/// allocation, resulting in a different return value than `ptr`.
	///
	/// # Errors
	///
	/// On success it returns a non-null pointer. A null pointer return value
	/// indicates that insufficient contiguous memory was available to service
	/// the allocation request. In this case, the old object is not
	/// deallocated, and its value is unchanged.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `size == 0`, or
	/// * `ptr` does not match a pointer earlier returned by the memory
	///   allocation functions of this crate, or
	/// * the memory region referenced by `ptr` has been deallocated.
	#[cfg_attr(prefixed, link_name = "_rjem_rallocx")]
	pub fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;

	/// Resizes the previously-allocated memory region referenced by `ptr` _in
	/// place_ to be at least `size` bytes, returning the real size of the
	/// allocation.
	///
	/// Deallocates the old object pointed to by `ptr` and sets `ptr` to a new
	/// object that has the size returned; the old and new objects share the
	/// same base address. The contents of the new object are the same as that
	/// of the old object prior to deallocation, up to the lesser of the new and
	/// old sizes.
	///
	/// If `extra` is non-zero, an attempt is made to resize the allocation to
	/// be at least `size + extra` bytes. Inability to allocate the `extra`
	/// bytes will not by itself result in failure to resize.
	///
	/// The memory in the new object beyond the size of the old object is
	/// obtained according to `flags` (it might be uninitialized).
	///
	/// # Errors
	///
	/// If the allocation cannot be adequately grown in place up to `size`, the
	/// size returned is smaller than `size`.
	///
	/// Note:
	///
	/// * the size value returned can be larger than the size requested during
	///   allocation
	/// * when shrinking an allocation, use the size returned to determine
	///   whether the allocation was shrunk sufficiently or not.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `size == 0`, or
	/// * `size + extra > size_t::max_value()`, or
	/// * `ptr` does not match a pointer earlier returned by the memory
	///   allocation functions of this crate, or
	/// * the memory region referenced by `ptr` has been deallocated.
	#[cfg_attr(prefixed, link_name = "_rjem_xallocx")]
	pub fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;

	/// Returns the real size of the previously-allocated memory region
	/// referenced by `ptr`.
	///
	/// The value may be larger than the size requested on allocation.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `ptr` does not match a pointer earlier returned by the memory
	///   allocation functions of this crate, or
	/// * the memory region referenced by `ptr` has been deallocated.
	#[cfg_attr(prefixed, link_name = "_rjem_sallocx")]
	pub fn sallocx(ptr: *const c_void, flags: c_int) -> size_t;

	/// Deallocates previously-allocated memory region referenced by `ptr`.
	///
	/// This makes the space available for future allocations.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `ptr` does not match a pointer earlier returned by the memory
	///   allocation functions of this crate, or
	/// * `ptr` is null, or
	/// * the memory region referenced by `ptr` has been deallocated.
	#[cfg_attr(prefixed, link_name = "_rjem_dallocx")]
	pub fn dallocx(ptr: *mut c_void, flags: c_int);

	/// Deallocates previously-allocated memory region referenced by `ptr` with
	/// `size` hint.
	///
	/// This makes the space available for future allocations.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `size` is not in range `[req_size, alloc_size]`, where `req_size` is
	///   the size requested when performing the allocation, and `alloc_size` is
	///   the allocation size returned by [`nallocx`], [`sallocx`], or
	///   [`xallocx`],
	/// * `ptr` does not match a pointer earlier returned by the memory
	///   allocation functions of this crate, or
	/// * `ptr` is null, or
	/// * the memory region referenced by `ptr` has been deallocated.
	#[cfg_attr(prefixed, link_name = "_rjem_sdallocx")]
	pub fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);

	/// Returns the real size of the allocation that would result from a
	/// [`mallocx`] function call with the same arguments.
	///
	/// # Errors
	///
	/// If the inputs exceed the maximum supported size class and/or alignment
	/// it returns zero.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if `size == 0`.
	#[cfg_attr(prefixed, link_name = "_rjem_nallocx")]
	pub fn nallocx(size: size_t, flags: c_int) -> size_t;

	/// Returns the real size of the previously-allocated memory region
	/// referenced by `ptr`.
	///
	/// The value may be larger than the size requested on allocation.
	///
	/// Although the excess bytes can be overwritten by the application without
	/// ill effects, this is not good programming practice: the number of excess
	/// bytes in an allocation depends on the underlying implementation.
	///
	/// The main use of this function is for debugging and introspection.
	///
	/// # Errors
	///
	/// If `ptr` is null, 0 is returned.
	///
	/// # Safety
	///
	/// The behavior is _undefined_ if:
	///
	/// * `ptr` does not match a pointer earlier returned by the memory
	///   allocation functions of this crate, or
	/// * the memory region referenced by `ptr` has been deallocated.
	#[cfg_attr(prefixed, link_name = "_rjem_malloc_usable_size")]
	pub fn malloc_usable_size(ptr: *const c_void) -> size_t;

	/// General interface for introspecting the memory allocator, as well as
	/// setting modifiable parameters and triggering actions.
	///
	/// The period-separated name argument specifies a location in a
	/// tree-structured namespace ([see jemalloc's `MALLCTL`
	/// documentation][jemalloc_mallctl]).
	///
	/// To read a value, pass a pointer via `oldp` to adequate space to contain
	/// the value, and a pointer to its length via `oldlenp`; otherwise pass
	/// null and null. Similarly, to write a value, pass a pointer to the value
	/// via `newp`, and its length via `newlen`; otherwise pass null and 0.
	///
	/// # Errors
	///
	/// Returns `0` on success, otherwise returns:
	///
	/// * `EINVAL`: if `newp` is not null, and `newlen` is too large or too
	///   small. Alternatively, `*oldlenp` is too large or too small; in this
	///   case as much data as possible are read despite the error.
	///
	/// * `ENOENT`: `name` or `mib` specifies an unknown/invalid value.
	///
	/// * `EPERM`: Attempt to read or write void value, or attempt to write
	///   read-only value.
	///
	/// * `EAGAIN`: A memory allocation failure occurred.
	///
	/// * `EFAULT`: An interface with side effects failed in some way not
	///   directly related to `mallctl` read/write processing.
	///
	/// [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace
	#[cfg_attr(prefixed, link_name = "_rjem_mallctl")]
	pub fn mallctl(
		name: *const c_char,
		oldp: *mut c_void,
		oldlenp: *mut size_t,
		newp: *mut c_void,
		newlen: size_t,
	) -> c_int;
	/// Translates a name to a “Management Information Base” (MIB) that can be
	/// passed repeatedly to [`mallctlbymib`].
	///
	/// This avoids repeated name lookups for applications that repeatedly query
	/// the same portion of the namespace.
	///
	/// On success, `mibp` contains an array of `*miblenp` integers, where
	/// `*miblenp` is the lesser of the number of components in name and the
	/// input value of `*miblenp`. Thus it is possible to pass a `*miblenp` that
	/// is smaller than the number of period-separated name components, which
	/// results in a partial MIB that can be used as the basis for constructing
	/// a complete MIB. For name components that are integers (e.g. the 2 in
	/// arenas.bin.2.size), the corresponding MIB component will always be that
	/// integer.
	#[cfg_attr(prefixed, link_name = "_rjem_mallctlnametomib")]
	pub fn mallctlnametomib(
		name: *const c_char,
		mibp: *mut size_t,
		miblenp: *mut size_t,
	) -> c_int;

	/// Like [`mallctl`] but taking a `mib` as input instead of a name.
	// NOTE(review): the fourth parameter is misspelled `oldpenp`; it plays the
	// role of `oldlenp` in [`mallctl`]. Renaming it would be an ABI-compatible
	// cleanup, left untouched here.
	#[cfg_attr(prefixed, link_name = "_rjem_mallctlbymib")]
	pub fn mallctlbymib(
		mib: *const size_t,
		miblen: size_t,
		oldp: *mut c_void,
		oldpenp: *mut size_t,
		newp: *mut c_void,
		newlen: size_t,
	) -> c_int;

	/// Writes summary statistics via the `write_cb` callback function pointer
	/// and `cbopaque` data passed to `write_cb`, or [`malloc_message`] if
	/// `write_cb` is null.
	///
	/// The statistics are presented in human-readable form unless “J”
	/// is specified as a character within the opts string, in which case the
	/// statistics are presented in JSON format.
	///
	/// This function can be called repeatedly.
	///
	/// General information that never changes during execution can be omitted
	/// by specifying `g` as a character within the opts string.
	///
	/// Note that this function uses the `mallctl*` functions internally,
	/// so inconsistent statistics can be reported if multiple threads use these
	/// functions simultaneously.
	///
	/// If the Cargo feature `stats` is enabled, `m`, `d`, and `a` can be
	/// specified to omit merged arena, destroyed merged arena, and per arena
	/// statistics, respectively; `b` and `l` can be specified to omit per size
	/// class statistics for bins and large objects, respectively; `x` can be
	/// specified to omit all mutex statistics. Unrecognized characters are
	/// silently ignored.
	///
	/// Note that thread caching may prevent some statistics from being
	/// completely up to date, since extra locking would be required to merge
	/// counters that track thread cache operations.
	#[cfg_attr(prefixed, link_name = "_rjem_malloc_stats_print")]
	pub fn malloc_stats_print(
		write_cb: Option<unsafe extern "C" fn(*mut c_void, *const c_char)>,
		cbopaque: *mut c_void,
		opts: *const c_char,
	);

	/// Allows overriding the function which emits the text strings forming the
	/// errors and warnings if for some reason the `STDERR_FILENO` file
	/// descriptor is not suitable for this.
	///
	/// [`malloc_message`] takes a `cbopaque` pointer argument that is null
	/// unless overridden by the arguments in a call to [`malloc_stats_print`],
	/// followed by a string pointer.
	///
	/// Please note that doing anything which tries to allocate memory in this
	/// function is likely to result in a crash or deadlock.
	#[cfg_attr(prefixed, link_name = "_rjem_malloc_message")]
	pub static mut malloc_message:
		Option<unsafe extern "C" fn(cbopaque: *mut c_void, s: *const c_char)>;

	/// Compile-time string of configuration options.
	///
	/// Once, when the first call is made to one of the memory allocation
	/// routines, the allocator initializes its internals based in part on
	/// various options that can be specified at compile- or run-time.
	///
	/// The string specified via `--with-malloc-conf`, the string pointed to by
	/// the global variable `malloc_conf`, the “name” of the file referenced by
	/// the symbolic link named `/etc/malloc.conf`, and the value of the
	/// environment variable `MALLOC_CONF`, will be interpreted, in that order,
	/// from left to right as options. Note that `malloc_conf` may be read
	/// before `main()` is entered, so the declaration of `malloc_conf` should
	/// specify an initializer that contains the final value to be read by
	/// `jemalloc`.
	///
	/// `--with-malloc-conf` and `malloc_conf` are compile-time mechanisms,
	/// whereas `/etc/malloc.conf` and `MALLOC_CONF` can be safely set any time
	/// prior to program invocation.
	///
	/// An options string is a comma-separated list of `option:value` pairs.
	/// There is one key corresponding to each `opt.* mallctl` (see the `MALLCTL
	/// NAMESPACE` section for options documentation). For example,
	/// `abort:true,narenas:1` sets the `opt.abort` and `opt.narenas` options.
	/// Some options have boolean values (`true`/`false`), others have integer
	/// values (base `8`, `10`, or `16`, depending on prefix), and yet others
	/// have raw string values.
	#[cfg_attr(prefixed, link_name = "_rjem_malloc_conf")]
	pub static malloc_conf: Option<&'static c_char>;
}
585
/// Extent lifetime management functions.
///
/// Alias of [`extent_hooks_s`]. Once installed, the structure is accessed
/// directly by the associated arenas, so it must remain valid for their entire
/// lifetime.
pub type extent_hooks_t = extent_hooks_s;
588
589// note: there are two structs here, one is used when compiling the crate
590// normally, and the other one is behind the `--cfg jevmalloc_docs` flag and
591// used only when generating docs.
592//
// For the docs we want to use type aliases here, but `ctest` does not see
// through them when generating the code to verify the FFI bindings, and it
// needs to be able to tell that these are `fn` types so that `Option<fn>`
// gets lowered to C function pointers.
597
// `repr(C)`: field order and types must match jemalloc's C `extent_hooks_s`.
#[repr(C)]
#[cfg(not(jevmalloc_docs))]
#[derive(Copy, Clone, Default)]
#[doc(hidden)]
#[allow(missing_docs)]
pub struct extent_hooks_s {
	// Extent allocation hook; signature matches `extent_alloc_t`.
	pub alloc: Option<
		unsafe extern "C" fn(
			*mut Self,
			*mut c_void,
			size_t,
			size_t,
			*mut c_bool,
			*mut c_bool,
			c_uint,
		) -> *mut c_void,
	>,
	// Extent deallocation hook; signature matches `extent_dalloc_t`.
	pub dalloc:
		Option<unsafe extern "C" fn(*mut Self, *mut c_void, size_t, c_bool, c_uint) -> c_bool>,
	// Extent destruction hook; signature matches `extent_destroy_t`.
	pub destroy: Option<unsafe extern "C" fn(*mut Self, *mut c_void, size_t, c_bool, c_uint)>,
	// Extent commit hook; signature matches `extent_commit_t`.
	pub commit: Option<
		unsafe extern "C" fn(*mut Self, *mut c_void, size_t, size_t, size_t, c_uint) -> c_bool,
	>,
	// Extent decommit hook; signature matches `extent_decommit_t`.
	pub decommit: Option<
		unsafe extern "C" fn(*mut Self, *mut c_void, size_t, size_t, size_t, c_uint) -> c_bool,
	>,
	// Lazy purge hook; signature matches `extent_purge_t`.
	pub purge_lazy: Option<
		unsafe extern "C" fn(*mut Self, *mut c_void, size_t, size_t, size_t, c_uint) -> c_bool,
	>,
	// Forced purge hook; signature matches `extent_purge_t`.
	pub purge_forced: Option<
		unsafe extern "C" fn(*mut Self, *mut c_void, size_t, size_t, size_t, c_uint) -> c_bool,
	>,
	// Extent split hook; signature matches `extent_split_t`.
	pub split: Option<
		unsafe extern "C" fn(
			*mut Self,
			*mut c_void,
			size_t,
			size_t,
			size_t,
			c_bool,
			c_uint,
		) -> c_bool,
	>,
	// Extent merge hook; signature matches `extent_merge_t`.
	pub merge: Option<
		unsafe extern "C" fn(
			*mut Self,
			*mut c_void,
			size_t,
			*mut c_void,
			size_t,
			c_bool,
			c_uint,
		) -> c_bool,
	>,
}
653
/// Extent lifetime management functions.
///
/// The extent_hooks_t structure comprises function pointers which are described
/// individually below. `jemalloc` uses these functions to manage extent
/// lifetime, which starts off with allocation of mapped committed memory, in
/// the simplest case followed by deallocation. However, there are performance
/// and platform reasons to retain extents for later reuse. Cleanup attempts
/// cascade from deallocation to decommit to forced purging to lazy purging,
/// which gives the extent management functions opportunities to reject the most
/// permanent cleanup operations in favor of less permanent (and often less
/// costly) operations. All operations except allocation can be universally
/// opted out of by setting the hook pointers to `NULL`, or selectively opted
/// out of by returning failure. Note that once the extent hook is set, the
/// structure is accessed directly by the associated arenas, so it must remain
/// valid for the entire lifetime of the arenas.
#[repr(C)]
#[cfg(jevmalloc_docs)]
#[derive(Copy, Clone, Default)]
pub struct extent_hooks_s {
	/// Extent allocation hook (see [`extent_alloc_t`]).
	#[allow(missing_docs)]
	pub alloc: Option<extent_alloc_t>,
	/// Extent deallocation hook (see [`extent_dalloc_t`]).
	#[allow(missing_docs)]
	pub dalloc: Option<extent_dalloc_t>,
	/// Extent destruction hook (see [`extent_destroy_t`]).
	#[allow(missing_docs)]
	pub destroy: Option<extent_destroy_t>,
	/// Extent commit hook (see [`extent_commit_t`]).
	#[allow(missing_docs)]
	pub commit: Option<extent_commit_t>,
	/// Extent decommit hook (see [`extent_decommit_t`]).
	#[allow(missing_docs)]
	pub decommit: Option<extent_decommit_t>,
	/// Lazy purge hook (see [`extent_purge_t`]).
	#[allow(missing_docs)]
	pub purge_lazy: Option<extent_purge_t>,
	/// Forced purge hook (see [`extent_purge_t`]).
	#[allow(missing_docs)]
	pub purge_forced: Option<extent_purge_t>,
	/// Extent split hook (see [`extent_split_t`]).
	#[allow(missing_docs)]
	pub split: Option<extent_split_t>,
	/// Extent merge hook (see [`extent_merge_t`]).
	#[allow(missing_docs)]
	pub merge: Option<extent_merge_t>,
}
692
/// Extent allocation function.
///
/// On success returns a pointer to `size` bytes of mapped memory on behalf of
/// arena `arena_ind` such that the extent's base address is a multiple of
/// `alignment`, as well as setting `*zero` to indicate whether the extent is
/// zeroed and `*commit` to indicate whether the extent is committed.
///
/// Zeroing is mandatory if `*zero` is `true` upon function entry. Committing is
/// mandatory if `*commit` is `true` upon function entry. If `new_addr` is not
/// null, the returned pointer must be `new_addr` on success or null on error.
///
/// Committed memory may be committed in absolute terms as on a system that does
/// not overcommit, or in implicit terms as on a system that overcommits and
/// satisfies physical memory needs on demand via soft page faults. Note that
/// replacing the default extent allocation function makes the arena's
/// `arena.<i>.dss` setting irrelevant.
///
/// # Errors
///
/// On error the function returns null and leaves `*zero` and `*commit`
/// unmodified.
///
/// # Safety
///
/// The behavior is _undefined_ if:
///
/// * the `size` parameter is not a multiple of the page size
/// * the `alignment` parameter is not a power of two at least as large as the
///   page size
pub type extent_alloc_t = unsafe extern "C" fn(
	extent_hooks: *mut extent_hooks_t,
	new_addr: *mut c_void,
	size: size_t,
	alignment: size_t,
	zero: *mut c_bool,
	commit: *mut c_bool,
	arena_ind: c_uint,
) -> *mut c_void;
731
/// Extent deallocation function.
///
/// Deallocates an extent at given `addr` and `size` with
/// `committed`/decommitted memory as indicated, on behalf of arena
/// `arena_ind`, returning `false` upon success.
///
/// If the function returns `true`, this indicates opt-out from deallocation;
/// the virtual memory mapping associated with the extent remains mapped, in the
/// same commit state, and available for future use, in which case it will be
/// automatically retained for later reuse.
pub type extent_dalloc_t = unsafe extern "C" fn(
	extent_hooks: *mut extent_hooks_t,
	addr: *mut c_void,
	size: size_t,
	committed: c_bool,
	arena_ind: c_uint,
) -> c_bool;
749
/// Extent destruction function.
///
/// Unconditionally destroys an extent at given `addr` and `size` with
/// `committed`/decommitted memory as indicated, on behalf of arena
/// `arena_ind`.
///
/// This function may be called to destroy retained extents during arena
/// destruction (see `arena.<i>.destroy`).
pub type extent_destroy_t = unsafe extern "C" fn(
	extent_hooks: *mut extent_hooks_t,
	addr: *mut c_void,
	size: size_t,
	committed: c_bool,
	arena_ind: c_uint,
);
764
/// Extent commit function.
///
/// Commits zeroed physical memory to back pages within an extent at given
/// `addr` and `size` at `offset` bytes, extending for `length` on behalf of
/// arena `arena_ind`, returning `false` upon success.
///
/// Committed memory may be committed in absolute terms as on a system that does
/// not overcommit, or in implicit terms as on a system that overcommits and
/// satisfies physical memory needs on demand via soft page faults. If the
/// function returns `true`, this indicates insufficient physical memory to
/// satisfy the request.
pub type extent_commit_t = unsafe extern "C" fn(
	extent_hooks: *mut extent_hooks_t,
	addr: *mut c_void,
	size: size_t,
	offset: size_t,
	length: size_t,
	arena_ind: c_uint,
) -> c_bool;
784
/// Extent decommit function.
///
/// Decommits any physical memory that is backing pages within an extent at
/// given `addr` and `size` at `offset` bytes, extending for `length` on behalf
/// of arena `arena_ind`, returning `false` upon success, in which case the
/// pages will be committed via the extent commit function before being reused.
///
/// If the function returns `true`, this indicates opt-out from decommit; the
/// memory remains committed and available for future use, in which case it will
/// be automatically retained for later reuse.
pub type extent_decommit_t = unsafe extern "C" fn(
	extent_hooks: *mut extent_hooks_t,
	addr: *mut c_void,
	size: size_t,
	offset: size_t,
	length: size_t,
	arena_ind: c_uint,
) -> c_bool;
803
/// Extent purge function.
///
/// Discards physical pages within the virtual memory mapping associated with an
/// extent at given `addr` and `size` at `offset` bytes, extending for `length`
/// on behalf of arena `arena_ind`.
///
/// A lazy extent purge function (e.g. implemented via `madvise(...MADV_FREE)`)
/// can delay purging indefinitely and leave the pages within the purged virtual
/// memory range in an indeterminate state, whereas a forced extent purge
/// function immediately purges, and the pages within the virtual memory range
/// will be zero-filled the next time they are accessed. If the function returns
/// `true`, this indicates failure to purge.
pub type extent_purge_t = unsafe extern "C" fn(
	extent_hooks: *mut extent_hooks_t,
	addr: *mut c_void,
	size: size_t,
	offset: size_t,
	length: size_t,
	arena_ind: c_uint,
) -> c_bool;
824
/// Extent split function.
///
/// Optionally splits an extent at given `addr` and `size` into two adjacent
/// extents, the first of `size_a` bytes, and the second of `size_b` bytes,
/// operating on `committed`/decommitted memory as indicated, on behalf of arena
/// `arena_ind`, returning `false` upon success.
///
/// If the function returns `true`, this indicates that the extent remains
/// unsplit and therefore should continue to be operated on as a whole.
pub type extent_split_t = unsafe extern "C" fn(
	extent_hooks: *mut extent_hooks_t,
	addr: *mut c_void,
	size: size_t,
	size_a: size_t,
	size_b: size_t,
	committed: c_bool,
	arena_ind: c_uint,
) -> c_bool;
843
/// Extent merge function.
///
/// Optionally merges adjacent extents, at given `addr_a` and `size_a` with
/// given `addr_b` and `size_b` into one contiguous extent, operating on
/// `committed`/decommitted memory as indicated, on behalf of arena
/// `arena_ind`, returning `false` upon success.
///
/// If the function returns `true`, this indicates that the extents remain
/// distinct mappings and therefore should continue to be operated on
/// independently.
pub type extent_merge_t = unsafe extern "C" fn(
	extent_hooks: *mut extent_hooks_t,
	addr_a: *mut c_void,
	size_a: size_t,
	addr_b: *mut c_void,
	size_b: size_t,
	committed: c_bool,
	arena_ind: c_uint,
) -> c_bool;
863
// Build/target-specific items live in `env` and are re-exported at the crate
// root. NOTE(review): the module's contents are not visible in this chunk —
// see `env.rs` for what it defines.
#[allow(missing_docs)]
mod env;

pub use env::*;