// rpmalloc_sys/lib.rs

1#![allow(non_snake_case, non_camel_case_types)]
2#![allow(unsafe_code)] // FFI bindings need it
3#![deny(missing_docs)]
4
5//! # 🐏 rpmalloc-sys
6//!
7//! [![Build Status](https://github.com/EmbarkStudios/rpmalloc-rs/workflows/CI/badge.svg)](https://github.com/EmbarkStudios/rpmalloc-rs/actions?workflow=CI)
8//! [![Crates.io](https://img.shields.io/crates/v/rpmalloc-sys.svg)](https://crates.io/crates/rpmalloc-sys)
9//! [![Docs](https://docs.rs/rpmalloc-sys/badge.svg)](https://docs.rs/rpmalloc-sys)
10//! [![Contributor Covenant](https://img.shields.io/badge/contributor%20covenant-v1.4%20adopted-ff69b4.svg)](../CODE_OF_CONDUCT.md)
11//! [![Embark](https://img.shields.io/badge/embark-open%20source-blueviolet.svg)](http://embark.dev)
12//!
13//! Unsafe FFI bindings to [rpmalloc](https://github.com/rampantpixels/rpmalloc) C library
14//!
15//! ## Contributing
16//!
17//! We welcome community contributions to this project.
18//!
19//! Please read our [Contributor Guide](CONTRIBUTING.md) for more information on how to get started.
20//!
21//! ## License
22//!
23//! Licensed under either of
24//!
25//! * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or <http://www.apache.org/licenses/LICENSE-2.0>)
26//! * MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)
27//!
28//! at your option.
29//!
30//! Note that the [rpmalloc](https://github.com/rampantpixels/rpmalloc) library this crate uses is under public domain, and can also be licensed under MIT.
31//!
32//! ### Contribution
33//!
34//! Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
35
36#[cfg(test)]
37mod tests;
38
39pub use libc::{c_int, c_uint, c_void, size_t};
40
/// Global memory statistics.
///
/// C-layout mirror of `rpmalloc_global_statistics_t`; populated by
/// [`rpmalloc_global_statistics`]. Fields annotated with
/// `ENABLE_STATISTICS=1` are only meaningful when the C library was
/// built with that compile-time flag.
#[repr(C)]
#[derive(Clone, Copy, Default, Debug)]
pub struct rpmalloc_global_statistics_t {
    /// Current amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
    pub mapped: size_t,
    /// Peak amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
    pub mapped_peak: size_t,
    /// Current amount of memory in global caches for small and medium sizes (<32KiB)
    pub cached: size_t,
    /// Current amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
    pub huge_alloc: size_t,
    /// Peak amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
    pub huge_alloc_peak: size_t,
    /// Total amount of memory mapped since initialization (only if ENABLE_STATISTICS=1)
    pub mapped_total: size_t,
    /// Total amount of memory unmapped since initialization (only if ENABLE_STATISTICS=1)
    pub unmapped_total: size_t,
}
60
/// Memory span statistics for a thread.
///
/// C-layout mirror of `rpmalloc_thread_span_statistics_t`; appears as the
/// element type of `rpmalloc_thread_statistics_t::span_use`.
#[repr(C)]
#[derive(Clone, Copy, Default, Debug)]
pub struct rpmalloc_thread_span_statistics_t {
    /// Currently used number of spans
    pub current: size_t,
    /// High water mark of spans used
    pub peak: size_t,
    /// Number of spans transitioned to global cache
    pub to_global: size_t,
    /// Number of spans transitioned from global cache
    pub from_global: size_t,
    /// Number of spans transitioned to thread cache
    pub to_cache: size_t,
    /// Number of spans transitioned from thread cache
    pub from_cache: size_t,
    /// Number of spans transitioned to reserved state
    pub to_reserved: size_t,
    /// Number of spans transitioned from reserved state
    pub from_reserved: size_t,
    /// Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
    pub map_calls: size_t,
}
84
85/// Memory size statistics for a thread
86#[repr(C)]
87#[derive(Clone, Copy)]
88pub struct rpmalloc_thread_size_statistics_t {
89    /// Current number of allocations
90    pub alloc_current: size_t,
91    /// Peak number of allocations
92    pub alloc_peak: size_t,
93    /// Total number of allocations
94    pub alloc_total: size_t,
95    /// Total number of frees
96    pub free_total: size_t,
97    /// Number of spans transitioned to cache
98    pub spans_to_cache: size_t,
99    /// Number of spans transitioned from cache
100    pub spans_from_cache: size_t,
101    /// Number of spans transitioned from reserved state
102    pub spans_from_reserved: size_t,
103    /// Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
104    pub map_calls: size_t,
105}
106
/// Memory statistics for a thread.
///
/// C-layout mirror of `rpmalloc_thread_statistics_t`; populated by
/// [`rpmalloc_thread_statistics`].
//
// NOTE(review): unlike the other statistics structs this one does not
// derive `Default`/`Debug` — the `[_; 128]` field rules out derived
// `Default` (std only provides the array impl for lengths <= 32);
// confirm MSRV/trait-impl availability before adding derives here.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct rpmalloc_thread_statistics_t {
    /// Current number of bytes available in thread size class caches for small and medium sizes (<32KiB)
    pub sizecache: size_t,
    /// Current number of bytes available in thread span caches for small and medium sizes (<32KiB)
    pub spancache: size_t,
    /// Total number of bytes transitioned from thread cache to global cache (only if ENABLE_STATISTICS=1)
    pub thread_to_global: size_t,
    /// Total number of bytes transitioned from global cache to thread cache (only if ENABLE_STATISTICS=1)
    pub global_to_thread: size_t,
    /// Per span count statistics (only if ENABLE_STATISTICS=1)
    pub span_use: [rpmalloc_thread_span_statistics_t; 32],
    /// Per size class statistics (only if ENABLE_STATISTICS=1)
    pub size_use: [rpmalloc_thread_size_statistics_t; 128],
}
124
125/*
126typedef struct rpmalloc_config_t {
127    //! Map memory pages for the given number of bytes. The returned address MUST be
128    //  aligned to the rpmalloc span size, which will always be a power of two.
129    //  Optionally the function can store an alignment offset in the offset variable
130    //  in case it performs alignment and the returned pointer is offset from the
131    //  actual start of the memory region due to this alignment. The alignment offset
132    //  will be passed to the memory unmap function. The alignment offset MUST NOT be
133    //  larger than 65535 (storable in an uint16_t), if it is you must use natural
134    //  alignment to shift it into 16 bits. If you set a memory_map function, you
135    //  must also set a memory_unmap function or else the default implementation will
136    //  be used for both.
    void* (*memory_map)(size_t size, size_t* offset);
138    //! Unmap the memory pages starting at address and spanning the given number of bytes.
139    //  If release is set to non-zero, the unmap is for an entire span range as returned by
140    //  a previous call to memory_map and that the entire range should be released. The
141    //  release argument holds the size of the entire span range. If release is set to 0,
142    //  the unmap is a partial decommit of a subset of the mapped memory range.
143    //  If you set a memory_unmap function, you must also set a memory_map function or
144    //  else the default implementation will be used for both.
    void (*memory_unmap)(void* address, size_t size, size_t offset, size_t release);
146    //! Size of memory pages. The page size MUST be a power of two. All memory mapping
147    //  requests to memory_map will be made with size set to a multiple of the page size.
    size_t page_size;
149    //! Size of a span of memory blocks. MUST be a power of two, and in [4096,262144]
150    //  range (unless 0 - set to 0 to use the default span size).
    size_t span_size;
152    //! Number of spans to map at each request to map new virtual memory blocks. This can
153    //  be used to minimize the system call overhead at the cost of virtual memory address
154    //  space. The extra mapped pages will not be written until actually used, so physical
155    //  committed memory should not be affected in the default implementation. Will be
156    //  aligned to a multiple of spans that match memory page size in case of huge pages.
    size_t span_map_count;
158    //! Enable use of large/huge pages
159    int enable_huge_pages;
160    //! Debug callback if memory guards are enabled. Called if a memory overwrite is detected
161    void (*memory_overwrite)(void* address);
162} rpmalloc_config_t;
163*/
164
extern "C" {
    /// Initialize allocator with default configuration.
    ///
    /// Returns 0 on success (C convention; non-zero indicates failure).
    pub fn rpmalloc_initialize() -> c_int;

    //extern int rpmalloc_initialize_config(const rpmalloc_config_t* config);
    //extern const rpmalloc_config_t* rpmalloc_config(void);

    /// Finalize allocator
    pub fn rpmalloc_finalize();

    /// Initialize allocator for calling thread
    pub fn rpmalloc_thread_initialize();

    /// Finalize allocator for calling thread
    pub fn rpmalloc_thread_finalize();

    /// Perform deferred deallocations pending for the calling thread heap
    pub fn rpmalloc_thread_collect();

    /// Query if allocator is initialized for calling thread
    /// (non-zero means initialized, C boolean convention)
    pub fn rpmalloc_is_thread_initialized() -> c_int;

    /// Get per-thread statistics (writes into `stats`)
    pub fn rpmalloc_thread_statistics(stats: *mut rpmalloc_thread_statistics_t);

    /// Get global statistics (writes into `stats`)
    pub fn rpmalloc_global_statistics(stats: *mut rpmalloc_global_statistics_t);

    /// Dump all statistics in human readable format to file (should be a FILE*)
    pub fn rpmalloc_dump_statistics(file: *mut c_void);

    /// Allocate a memory block of at least the given size
    pub fn rpmalloc(size: size_t) -> *mut c_void;

    /// Free the given memory block
    pub fn rpfree(ptr: *mut c_void);

    /// Allocate a memory block of at least the given size and zero initialize it
    pub fn rpcalloc(num: size_t, size: size_t) -> *mut c_void;

    /// Reallocate the given block to at least the given size
    pub fn rprealloc(ptr: *mut c_void, size: size_t) -> *mut c_void;

    /// Reallocate the given block to at least the given size and alignment,
    /// with optional control flags (see RPMALLOC_NO_PRESERVE).
    /// Alignment must be a power of two and a multiple of sizeof(void*),
    /// and should ideally be less than memory page size. A caveat of rpmalloc
    /// internals is that this must also be strictly less than the span size (default 64KiB)
    pub fn rpaligned_realloc(
        ptr: *mut c_void,
        alignment: size_t,
        size: size_t,
        oldsize: size_t,
        flags: c_uint,
    ) -> *mut c_void;

    /// Allocate a memory block of at least the given size and alignment.
    /// Alignment must be a power of two and a multiple of sizeof(void*),
    /// and should ideally be less than memory page size. A caveat of rpmalloc
    /// internals is that this must also be strictly less than the span size (default 64KiB)
    pub fn rpaligned_alloc(alignment: size_t, size: size_t) -> *mut c_void;

    /// Allocate a memory block of at least the given size and alignment, and zero initialize it.
    /// Alignment must be a power of two and a multiple of sizeof(void*),
    /// and should ideally be less than memory page size. A caveat of rpmalloc
    /// internals is that this must also be strictly less than the span size (default 64KiB)
    pub fn rpaligned_calloc(alignment: size_t, num: size_t, size: size_t) -> *mut c_void;

    /// Allocate a memory block of at least the given size and alignment.
    /// Alignment must be a power of two and a multiple of sizeof(void*),
    /// and should ideally be less than memory page size. A caveat of rpmalloc
    /// internals is that this must also be strictly less than the span size (default 64KiB)
    pub fn rpmemalign(alignment: size_t, size: size_t) -> *mut c_void;

    /// Allocate a memory block of at least the given size and alignment,
    /// storing the result in `memptr` and returning 0 on success
    /// (posix_memalign convention).
    /// Alignment must be a power of two and a multiple of sizeof(void*),
    /// and should ideally be less than memory page size. A caveat of rpmalloc
    /// internals is that this must also be strictly less than the span size (default 64KiB)
    pub fn rpposix_memalign(memptr: *mut *mut c_void, alignment: size_t, size: size_t) -> c_int;

    /// Query the usable size of the given memory block (from given pointer to the end of block)
    pub fn rpmalloc_usable_size(ptr: *mut c_void) -> size_t;
}
247}