portable_dlmalloc/raw.rs
// This module defines the raw C FFI bindings of the dlmalloc library.
// Use these bindings only if you understand the safety requirements.

use core::ffi::c_void;

pub type DLInspectHandler=unsafe extern "C" fn (start:*mut c_void,end:*mut c_void,used_bytes:usize,callback_arg:*mut c_void);

/// Used by the `dlmallopt` routine. \
/// Defines the amount of trailing free space above which dlmalloc trims unused memory back to the system.
pub const M_TRIM_THRESHOLD:i32=-1;
/// Used by the `dlmallopt` routine. \
/// Defines the granularity: the unit in which memory is obtained from the system. (e.g.: 4K pages on x86)
pub const M_GRANULARITY:i32=-2;
/// Used by the `dlmallopt` routine. \
/// Defines the request size above which allocations are serviced by direct `mmap` calls.
pub const M_MMAP_THRESHOLD:i32=-3;

/// This structure is returned by `dlmallinfo` and `mspace_mallinfo` routines. \
/// It reports the current status of the heap.
#[repr(C)] pub struct MallInfo
{
	/// non-mmapped space allocated from system
	pub arena:usize,
	/// number of free chunks
	pub ordblks:usize,
	/// always 0
	pub smblks:usize,
	/// always 0
	pub hblks:usize,
	/// space in `mmap`ed regions
	pub hblkhd:usize,
	/// maximum total allocated space
	pub usmblks:usize,
	/// always 0
	pub fsmblks:usize,
	/// total allocated space
	pub uordblks:usize,
	/// total free space
	pub fordblks:usize,
	/// releasable (via `malloc_trim`) space
	pub keepcost:usize
}

unsafe extern "C"
{
	/// `malloc(size_t n)`
	///
	/// Returns a pointer to a newly allocated chunk of at least n bytes, or
	/// null if no space is available, in which case errno is set to ENOMEM
	/// on ANSI C systems.
	///
	/// If n is zero, malloc returns a minimum-sized chunk. (The minimum
	/// size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
	/// systems.) Note that size_t is an unsigned type, so calls with
	/// arguments that would be negative if signed are interpreted as
	/// requests for huge amounts of space, which will often fail. The
	/// maximum supported value of n differs across systems, but is in all
	/// cases less than the maximum representable value of a size_t.
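	///
	/// A minimal usage sketch (hedged: it assumes this crate exposes these
	/// bindings at `portable_dlmalloc::raw` and that the dlmalloc C library is
	/// linked in):
	/// ```no_run
	/// use portable_dlmalloc::raw::{dlmalloc,dlfree};
	/// unsafe
	/// {
	///     // Request 256 bytes; a null return means the allocation failed.
	///     let p=dlmalloc(256);
	///     if !p.is_null()
	///     {
	///         // ...use the chunk...
	///         dlfree(p);
	///     }
	/// }
	/// ```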
	pub fn dlmalloc(n:usize)->*mut c_void;
	/// `free(void* p)`
	///
	/// Releases the chunk of memory pointed to by p, that had been previously
	/// allocated using malloc or a related routine such as realloc.
	/// It has no effect if p is null. If p was not malloced or already
	/// freed, free(p) will by default cause the current program to abort.
	pub fn dlfree(p:*mut c_void);
	/// `calloc(size_t n_elements, size_t element_size);`
	///
	/// Returns a pointer to n_elements * element_size bytes, with all locations
	/// set to zero.
	pub fn dlcalloc(n_elements:usize,element_size:usize)->*mut c_void;
	/// `realloc(void* p, size_t n)`
	///
	/// Returns a pointer to a chunk of size n that contains the same data
	/// as does chunk p up to the minimum of (n, p's size) bytes, or null
	/// if no space is available.
	///
	/// The returned pointer may or may not be the same as p. The algorithm
	/// prefers extending p in most cases when possible, otherwise it
	/// employs the equivalent of a malloc-copy-free sequence.
	///
	/// If p is null, realloc is equivalent to malloc.
	///
	/// If space is not available, realloc returns null, errno is set (if on
	/// ANSI) and p is NOT freed.
	///
	/// If n is for fewer bytes than already held by p, the newly unused
	/// space is lopped off and freed if possible. realloc with a size
	/// argument of zero (re)allocates a minimum-sized chunk.
	///
	/// The old unix realloc convention of allowing the last-free'd chunk
	/// to be used as an argument to realloc is not supported.
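	///
	/// A resizing sketch (same assumptions as the sketch under `dlmalloc`):
	/// ```no_run
	/// use portable_dlmalloc::raw::{dlmalloc,dlrealloc,dlfree};
	/// unsafe
	/// {
	///     let old=dlmalloc(64);
	///     // On failure dlrealloc returns null and leaves `old` valid,
	///     // so check the result before discarding the old pointer.
	///     let new=dlrealloc(old,128);
	///     if new.is_null() { dlfree(old); } else { dlfree(new); }
	/// }
	/// ```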
	pub fn dlrealloc(p:*mut c_void,n:usize)->*mut c_void;
	/// `realloc_in_place(void* p, size_t n)`
	///
	/// Resizes the space allocated for p to size n, only if this can be
	/// done without moving p (i.e., only if there is adjacent space
	/// available if n is greater than p's current allocated size, or n is
	/// less than or equal to p's size). This may be used instead of plain
	/// realloc if an alternative allocation strategy is needed upon failure
	/// to expand space; for example, reallocation of a buffer that must be
	/// memory-aligned or cleared. You can use realloc_in_place to trigger
	/// these alternatives only when needed.
	///
	/// Returns p if successful; otherwise null.
	pub fn dlrealloc_in_place(p:*mut c_void,n:usize)->*mut c_void;
	/// `memalign(size_t alignment, size_t n);`
	///
	/// Returns a pointer to a newly allocated chunk of n bytes, aligned
	/// in accord with the alignment argument.
	///
	/// The alignment argument should be a power of two. If the argument is
	/// not a power of two, the nearest greater power is used.
	/// 8-byte alignment is guaranteed by normal malloc calls, so don't
	/// bother calling memalign with an argument of 8 or less.
	///
	/// Overreliance on memalign is a sure way to fragment space.
	pub fn dlmemalign(alignment:usize,n:usize)->*mut c_void;
	/// `int posix_memalign(void** pp, size_t alignment, size_t n);`
	///
	/// Allocates a chunk of n bytes, aligned in accord with the alignment
	/// argument. Differs from memalign only in that it
	/// 1. assigns the allocated memory to *pp rather than returning it,
	/// 2. fails and returns EINVAL if the alignment is not a power of two
	/// 3. fails and returns ENOMEM if memory cannot be allocated.
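	///
	/// An out-pointer sketch (hedged illustration; alignment and size are
	/// arbitrary examples):
	/// ```no_run
	/// use core::ffi::c_void;
	/// use core::ptr::null_mut;
	/// use portable_dlmalloc::raw::{dlposix_memalign,dlfree};
	/// unsafe
	/// {
	///     let mut p:*mut c_void=null_mut();
	///     // Request 1024 bytes on a 64-byte boundary; 0 means success.
	///     if dlposix_memalign(&mut p,64,1024)==0
	///     {
	///         debug_assert_eq!(p as usize%64,0);
	///         dlfree(p);
	///     }
	/// }
	/// ```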
	pub fn dlposix_memalign(pp:*mut *mut c_void,alignment:usize,n:usize)->i32;
	/// `valloc(size_t n);`
	///
	/// Equivalent to memalign(pagesize, n), where pagesize is the page
	/// size of the system. If the pagesize is unknown, 4096 is used.
	pub fn dlvalloc(n:usize)->*mut c_void;
	/// `mallopt(int parameter_number, int parameter_value)`
	///
	/// Sets tunable parameters. The format is to provide a
	/// (parameter-number, parameter-value) pair. mallopt then sets the
	/// corresponding parameter to the argument value if it can (i.e., so
	/// long as the value is meaningful), and returns 1 if successful else
	/// 0. To work around the fact that mallopt is specified to use int,
	/// not size_t parameters, the value -1 is specially treated as the
	/// maximum unsigned size_t value.
	///
	/// SVID/XPG/ANSI defines four standard param numbers for mallopt,
	/// normally defined in malloc.h. None of these are used in this malloc,
	/// so setting them has no effect. But this malloc also supports other
	/// options in mallopt. See below for details. Briefly, supported
	/// parameters are as follows (listed defaults are for "typical"
	/// configurations).
	///
	/// | Symbol | param # | default | allowed param values |
	/// |---|---|---|---|
	/// | M_TRIM_THRESHOLD | -1 | 2x1024x1024 | any (-1 disables) |
	/// | M_GRANULARITY | -2 | page size | any power of 2 >= page size |
	/// | M_MMAP_THRESHOLD | -3 | 2x1024x1024 | any (or 0 if no MMAP support) |
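	///
	/// A tuning sketch (the values are arbitrary examples):
	/// ```no_run
	/// use portable_dlmalloc::raw::{dlmallopt,M_TRIM_THRESHOLD,M_MMAP_THRESHOLD};
	/// unsafe
	/// {
	///     // Trim once 1MiB of trailing space is free; returns 1 on success.
	///     let _=dlmallopt(M_TRIM_THRESHOLD,1<<20);
	///     // Service requests of 256KiB or more with direct mmap.
	///     let _=dlmallopt(M_MMAP_THRESHOLD,1<<18);
	/// }
	/// ```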
	pub fn dlmallopt(parameter_number:i32,parameter_value:i32)->i32;
	/// `malloc_footprint();`
	///
	/// Returns the number of bytes obtained from the system. The total
	/// number of bytes allocated by malloc, realloc etc., is less than this
	/// value. Unlike mallinfo, this function returns only a precomputed
	/// result, so can be called frequently to monitor memory consumption.
	/// Even if locks are otherwise defined, this function does not use them,
	/// so results might not be up to date.
	pub fn dlmalloc_footprint()->usize;
	/// `malloc_max_footprint();`
	///
	/// Returns the maximum number of bytes obtained from the system. This
	/// value will be greater than current footprint if deallocated space
	/// has been reclaimed by the system. The peak number of bytes allocated
	/// by malloc, realloc etc., is less than this value. Unlike mallinfo,
	/// this function returns only a precomputed result, so can be called
	/// frequently to monitor memory consumption. Even if locks are
	/// otherwise defined, this function does not use them, so results might
	/// not be up to date.
	pub fn dlmalloc_max_footprint()->usize;
	/// `malloc_footprint_limit();`
	///
	/// Returns the number of bytes that the heap is allowed to obtain from
	/// the system, returning the last value returned by
	/// malloc_set_footprint_limit, or the maximum size_t value if
	/// never set. The returned value reflects a permission. There is no
	/// guarantee that this number of bytes can actually be obtained from
	/// the system.
	pub fn dlmalloc_footprint_limit()->usize;
	/// `malloc_set_footprint_limit();`
	///
	/// Sets the maximum number of bytes to obtain from the system, causing
	/// failure returns from malloc and related functions upon attempts to
	/// exceed this value. The argument value may be subject to page
	/// rounding to an enforceable limit; this actual value is returned.
	/// Using an argument of the maximum possible size_t effectively
	/// disables checks. If the argument is less than or equal to the
	/// current malloc_footprint, then all future allocations that require
	/// additional system memory will fail. However, invocation cannot
	/// retroactively deallocate existing used memory.
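	///
	/// A capping sketch (the 64MiB figure is an arbitrary example):
	/// ```no_run
	/// use portable_dlmalloc::raw::{dlmalloc_set_footprint_limit,dlmalloc_footprint_limit};
	/// unsafe
	/// {
	///     // Cap the heap at 64MiB; the enforced (page-rounded) limit is returned.
	///     let actual=dlmalloc_set_footprint_limit(64<<20);
	///     assert_eq!(dlmalloc_footprint_limit(),actual);
	/// }
	/// ```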
	pub fn dlmalloc_set_footprint_limit(bytes:usize)->usize;
	/// `malloc_inspect_all(void(*handler)(void *start,void *end,size_t used_bytes,void* callback_arg),void* arg);`
	///
	/// Traverses the heap and calls the given handler for each managed
	/// region, skipping all bytes that are (or may be) used for bookkeeping
	/// purposes. Traversal does not include chunks that have been
	/// directly memory mapped. Each reported region begins at the start
	/// address, and continues up to but not including the end address. The
	/// first used_bytes of the region contain allocated data. If
	/// used_bytes is zero, the region is unallocated. The handler is
	/// invoked with the given callback argument. If locks are defined, they
	/// are held during the entire traversal. It is a bad idea to invoke
	/// other malloc functions from within the handler.
	///
	/// For example, to count the number of in-use chunks with size greater
	/// than 1000, you could write:
	/// ```C
	/// static int count = 0;
	/// void count_chunks(void* start, void* end, size_t used, void* arg)
	/// {
	///   if (used >= 1000) ++count;
	/// }
	/// ```
	/// then:
	/// ```C
	/// malloc_inspect_all(count_chunks, NULL);
	/// ```
	///
	/// malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
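	///
	/// The same idea in Rust, threading the counter through callback_arg instead
	/// of a static (a hedged sketch of one possible handler):
	/// ```no_run
	/// use core::ffi::c_void;
	/// use portable_dlmalloc::raw::dlmalloc_inspect_all;
	/// unsafe extern "C" fn count_chunks(_start:*mut c_void,_end:*mut c_void,used:usize,arg:*mut c_void)
	/// {
	///     // Count in-use regions holding more than 1000 bytes of data.
	///     if used>=1000 { unsafe { *(arg as *mut usize)+=1; } }
	/// }
	/// unsafe
	/// {
	///     let mut count:usize=0;
	///     dlmalloc_inspect_all(count_chunks,&mut count as *mut usize as *mut c_void);
	/// }
	/// ```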
	pub fn dlmalloc_inspect_all(handler:DLInspectHandler,arg:*mut c_void);
	/// `mallinfo()`
	///
	/// Returns (by copy) a struct containing various summary statistics:
	///
	/// - arena: current total non-mmapped bytes allocated from system
	/// - ordblks: the number of free chunks
	/// - smblks: always zero.
	/// - hblks: current number of mmapped regions
	/// - hblkhd: total bytes held in mmapped regions
	/// - usmblks: the maximum total allocated space. This will be greater than current total if trimming has occurred.
	/// - fsmblks: always zero
	/// - uordblks: current total allocated space (normal or mmapped)
	/// - fordblks: total free space
	/// - keepcost: the maximum number of bytes that could ideally be released back to system via malloc_trim. ("ideally" means that it ignores page restrictions etc.)
	///
	/// Because these fields are ints, but internal bookkeeping may
	/// be kept as longs, the reported values may wrap around zero and
	/// thus be inaccurate.
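	///
	/// A reporting sketch (field meanings as listed above):
	/// ```no_run
	/// use portable_dlmalloc::raw::dlmallinfo;
	/// let info=unsafe { dlmallinfo() };
	/// // Compare space currently in use against free space held by the heap.
	/// let _in_use=info.uordblks;
	/// let _free=info.fordblks;
	/// ```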
	pub fn dlmallinfo()->MallInfo;
	/// `independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);`
	///
	/// independent_calloc is similar to calloc, but instead of returning a
	/// single cleared space, it returns an array of pointers to n_elements
	/// independent elements that can hold contents of size elem_size, each
	/// of which starts out cleared, and can be independently freed,
	/// realloc'ed etc. The elements are guaranteed to be adjacently
	/// allocated (this is not guaranteed to occur with multiple callocs or
	/// mallocs), which may also improve cache locality in some
	/// applications.
	///
	/// The "chunks" argument is optional (i.e., may be null, which is
	/// probably the most typical usage). If it is null, the returned array
	/// is itself dynamically allocated and should also be freed when it is
	/// no longer needed. Otherwise, the chunks array must be of at least
	/// n_elements in length. It is filled in with the pointers to the
	/// chunks.
	///
	/// In either case, independent_calloc returns this pointer array, or
	/// null if the allocation failed. If n_elements is zero and "chunks"
	/// is null, it returns a chunk representing an array with zero elements
	/// (which should be freed if not wanted).
	///
	/// Each element must be freed when it is no longer needed. This can be
	/// done all at once using bulk_free.
	///
	/// independent_calloc simplifies and speeds up implementations of many
	/// kinds of pools. It may also be useful when constructing large data
	/// structures that initially have a fixed number of fixed-sized nodes,
	/// but the number is not known at compile time, and some of the nodes
	/// may later need to be freed. For example:
	/// ```C
	/// struct Node { int item; struct Node* next; };
	///
	/// struct Node* build_list() {
	///   struct Node** pool;
	///   int n = read_number_of_nodes_needed();
	///   if (n <= 0) return 0;
	///   pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
	///   if (pool == 0) die();
	///   // organize into a linked list...
	///   struct Node* first = pool[0];
	///   for (int i = 0; i < n-1; ++i)
	///     pool[i]->next = pool[i+1];
	///   free(pool); // Can now free the array (or not, if it is needed later)
	///   return first;
	/// }
	/// ```
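	///
	/// A Rust sketch of the null-"chunks" form (hedged; note the returned
	/// pointer array is itself heap-allocated and must be freed too):
	/// ```no_run
	/// use core::ptr::null_mut;
	/// use portable_dlmalloc::raw::{dlindependent_calloc,dlfree};
	/// unsafe
	/// {
	///     // Ten zeroed 64-byte elements, plus an allocated array of ten pointers.
	///     let pool=dlindependent_calloc(10,64,null_mut());
	///     if !pool.is_null()
	///     {
	///         for i in 0..10 { dlfree(*pool.add(i)); }
	///         dlfree(pool as *mut _);
	///     }
	/// }
	/// ```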
	pub fn dlindependent_calloc(n_elements:usize,element_size:usize,chunks:*mut *mut c_void)->*mut *mut c_void;
	/// `independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);`
	///
	/// independent_comalloc allocates, all at once, a set of n_elements
	/// chunks with sizes indicated in the "sizes" array. It returns
	/// an array of pointers to these elements, each of which can be
	/// independently freed, realloc'ed etc. The elements are guaranteed to
	/// be adjacently allocated (this is not guaranteed to occur with
	/// multiple callocs or mallocs), which may also improve cache locality
	/// in some applications.
	///
	/// The "chunks" argument is optional (i.e., may be null). If it is null
	/// the returned array is itself dynamically allocated and should also
	/// be freed when it is no longer needed. Otherwise, the chunks array
	/// must be of at least n_elements in length. It is filled in with the
	/// pointers to the chunks.
	///
	/// In either case, independent_comalloc returns this pointer array, or
	/// null if the allocation failed. If n_elements is zero and chunks is
	/// null, it returns a chunk representing an array with zero elements
	/// (which should be freed if not wanted).
	///
	/// Each element must be freed when it is no longer needed. This can be
	/// done all at once using bulk_free.
	///
	/// independent_comalloc differs from independent_calloc in that each
	/// element may have a different size, and also that it does not
	/// automatically clear elements.
	///
	/// independent_comalloc can be used to speed up allocation in cases
	/// where several structs or objects must always be allocated at the
	/// same time. For example:
	///
	/// ```C
	/// struct Head { ... };
	/// struct Foot { ... };
	///
	/// void send_message(char* msg) {
	///   int msglen = strlen(msg);
	///   size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
	///   void* chunks[3];
	///   if (independent_comalloc(3, sizes, chunks) == 0)
	///     die();
	///   struct Head* head = (struct Head*)(chunks[0]);
	///   char* body = (char*)(chunks[1]);
	///   struct Foot* foot = (struct Foot*)(chunks[2]);
	///   // ...
	/// }
	/// ```
	/// In general though, independent_comalloc is worth using only for
	/// larger values of n_elements. For small values, you probably won't
	/// detect enough difference from series of malloc calls to bother.
	///
	/// Overuse of independent_comalloc can increase overall memory usage,
	/// since it cannot reuse existing noncontiguous small chunks that
	/// might be available for some of the elements.
	pub fn dlindependent_comalloc(n_elements:usize,sizes:*const usize,chunks:*mut *mut c_void)->*mut *mut c_void;
	/// `bulk_free(void* array[], size_t n_elements)`
	///
	/// Frees and clears (sets to null) each non-null pointer in the given
	/// array. This is likely to be faster than freeing them one-by-one.
	/// If footers are used, pointers that have been allocated in different
	/// mspaces are not freed or cleared, and the count of all such pointers
	/// is returned. For large arrays of pointers with poor locality, it
	/// may be worthwhile to sort this array before calling bulk_free.
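	///
	/// A batch-free sketch (a hedged illustration):
	/// ```no_run
	/// use core::ffi::c_void;
	/// use portable_dlmalloc::raw::{dlmalloc,dlbulk_free};
	/// unsafe
	/// {
	///     let mut ptrs:[*mut c_void;3]=[dlmalloc(16),dlmalloc(32),dlmalloc(48)];
	///     // Frees every non-null entry and nulls it out; returns how many
	///     // pointers it could not free (0 in the common case).
	///     let unfreed=dlbulk_free(ptrs.as_mut_ptr(),ptrs.len());
	///     assert_eq!(unfreed,0);
	/// }
	/// ```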
	pub fn dlbulk_free(array:*mut *mut c_void,n_elements:usize)->usize;
	/// `pvalloc(size_t n);`
	///
	/// Equivalent to valloc(minimum-page-that-holds(n)), that is,
	/// round up n to nearest pagesize.
	pub fn dlpvalloc(n:usize)->*mut c_void;
	/// `malloc_trim(size_t pad);`
	///
	/// If possible, gives memory back to the system (via negative arguments
	/// to sbrk) if there is unused memory at the `high` end of the malloc
	/// pool or in unused MMAP segments. You can call this after freeing
	/// large blocks of memory to potentially reduce the system-level memory
	/// requirements of a program. However, it cannot guarantee to reduce
	/// memory. Under some allocation patterns, some large free blocks of
	/// memory will be locked between two used chunks, so they cannot be
	/// given back to the system.
	///
	/// The `pad` argument to malloc_trim represents the amount of free
	/// trailing space to leave untrimmed. If this argument is zero, only
	/// the minimum amount of memory to maintain internal data structures
	/// will be left. Non-zero arguments can be supplied to maintain enough
	/// trailing space to service future expected allocations without having
	/// to re-obtain memory from the system.
	///
	/// Malloc_trim returns 1 if it actually released any memory, else 0.
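	///
	/// A trimming sketch (the sizes are arbitrary examples):
	/// ```no_run
	/// use portable_dlmalloc::raw::{dlmalloc,dlfree,dlmalloc_trim};
	/// unsafe
	/// {
	///     let big=dlmalloc(8<<20);
	///     dlfree(big);
	///     // Return trailing free memory to the system, keeping 64KiB spare;
	///     // yields 1 if any memory was actually released, else 0.
	///     let _released=dlmalloc_trim(64<<10);
	/// }
	/// ```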
	pub fn dlmalloc_trim(pad:usize)->i32;
	/// `malloc_usable_size(void* p);`
	///
	/// Returns the number of bytes you can actually use in
	/// an allocated chunk, which may be more than you requested (although
	/// often not) due to alignment and minimum size constraints.
	/// You can use this many bytes without worrying about
	/// overwriting other allocated objects. This is not a particularly great
	/// programming practice. malloc_usable_size can be more useful in
	/// debugging and assertions, for example:
	/// ```C
	/// p = malloc(n);
	/// assert(malloc_usable_size(p) >= 256);
	/// ```
	pub fn dlmalloc_usable_size(p:*mut c_void)->usize;
	/// `create_mspace` creates and returns a new independent space with the
	/// given initial capacity, or, if 0, the default granularity size. It
	/// returns null if there is no system memory available to create the
	/// space. If argument locked is non-zero, the space uses a separate
	/// lock to control access. The capacity of the space will grow
	/// dynamically as needed to service mspace_malloc requests. You can
	/// control the sizes of incremental increases of this space by
	/// compiling with a different DEFAULT_GRANULARITY or dynamically
	/// setting with mallopt(M_GRANULARITY, value).
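	///
	/// A lifecycle sketch covering create/allocate/destroy (hedged; it uses the
	/// mspace routines declared later in this module):
	/// ```no_run
	/// use core::ptr::null_mut;
	/// use portable_dlmalloc::raw::{create_mspace,mspace_malloc,mspace_free,destroy_mspace};
	/// unsafe
	/// {
	///     // Default initial capacity, with the mspace's own lock enabled.
	///     let msp=create_mspace(0,1);
	///     assert_ne!(msp,null_mut());
	///     let p=mspace_malloc(msp,256);
	///     mspace_free(msp,p);
	///     // Returns the number of bytes released back to the system.
	///     let _freed=destroy_mspace(msp);
	/// }
	/// ```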
	pub fn create_mspace(capacity:usize,locked:i32)->*mut c_void;
	/// `destroy_mspace` destroys the given space, and attempts to return all
	/// of its memory back to the system, returning the total number of
	/// bytes freed. After destruction, the results of access to all memory
	/// used by the space become undefined.
	pub fn destroy_mspace(msp:*mut c_void)->usize;
	/// `create_mspace_with_base` uses the memory supplied as the initial base
	/// of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
	/// space is used for bookkeeping, so the capacity must be at least this
	/// large. (Otherwise 0 is returned.) When this initial space is
	/// exhausted, additional memory will be obtained from the system.
	/// Destroying this space will deallocate all additionally allocated
	/// space (if possible) but not the initial base.
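	///
	/// A fixed-buffer sketch (hedged; the 64KiB backing size is arbitrary but
	/// comfortably exceeds the bookkeeping overhead):
	/// ```no_run
	/// use core::ffi::c_void;
	/// use portable_dlmalloc::raw::{create_mspace_with_base,mspace_malloc,destroy_mspace};
	/// unsafe
	/// {
	///     let mut backing=[0u8;64*1024];
	///     let msp=create_mspace_with_base(backing.as_mut_ptr() as *mut c_void,backing.len(),0);
	///     if !msp.is_null()
	///     {
	///         let _p=mspace_malloc(msp,128);
	///         // Destroying releases extra system memory, but never `backing` itself.
	///         destroy_mspace(msp);
	///     }
	/// }
	/// ```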
	pub fn create_mspace_with_base(base:*mut c_void,capacity:usize,locked:i32)->*mut c_void;
	/// `mspace_track_large_chunks` controls whether requests for large chunks
	/// are allocated in their own untracked mmapped regions, separate from
	/// others in this mspace. By default large chunks are not tracked,
	/// which reduces fragmentation. However, such chunks are not
	/// necessarily released to the system upon destroy_mspace. Enabling
	/// tracking by setting to true may increase fragmentation, but avoids
	/// leakage when relying on destroy_mspace to release all memory
	/// allocated using this space. The function returns the previous
	/// setting.
	pub fn mspace_track_large_chunks(msp:*mut c_void,enable:i32)->i32;
	/// `mspace_malloc` behaves as malloc, but operates within the given space.
	pub fn mspace_malloc(msp:*mut c_void,bytes:usize)->*mut c_void;
	/// `mspace_free` behaves as free, but operates within
	/// the given space.
	///
	/// If compiled with FOOTERS==1, mspace_free is not actually needed.
	/// free may be called instead of mspace_free because freed chunks from
	/// any space are handled by their originating spaces.
	pub fn mspace_free(msp:*mut c_void,mem:*mut c_void);
	/// `mspace_realloc` behaves as realloc, but operates within
	/// the given space.
	///
	/// If compiled with FOOTERS==1, mspace_realloc is not actually
	/// needed. realloc may be called instead of mspace_realloc because
	/// realloced chunks from any space are handled by their originating
	/// spaces.
	pub fn mspace_realloc(msp:*mut c_void,mem:*mut c_void,newsize:usize)->*mut c_void;
	/// `mspace_calloc` behaves as calloc, but operates within the given space.
	pub fn mspace_calloc(msp:*mut c_void,n_elements:usize,elem_size:usize)->*mut c_void;
	/// `mspace_realloc_in_place` behaves as realloc_in_place,
	/// but operates within the given space.
	pub fn mspace_realloc_in_place(msp:*mut c_void,mem:*mut c_void,newsize:usize)->*mut c_void;
	/// `mspace_memalign` behaves as memalign, but operates within the given space.
	pub fn mspace_memalign(msp:*mut c_void,alignment:usize,bytes:usize)->*mut c_void;
	/// `mspace_independent_calloc` behaves as independent_calloc, but operates within the given space.
	pub fn mspace_independent_calloc(msp:*mut c_void,n_elements:usize,elem_size:usize,chunks:*mut *mut c_void)->*mut *mut c_void;
	/// `mspace_independent_comalloc` behaves as independent_comalloc, but operates within the given space.
	pub fn mspace_independent_comalloc(msp:*mut c_void,n_elements:usize,sizes:*const usize,chunks:*mut *mut c_void)->*mut *mut c_void;
	/// `mspace_footprint()` returns the number of bytes obtained from the system for this space.
	pub fn mspace_footprint(msp:*mut c_void)->usize;
	/// `mspace_max_footprint()` returns the peak number of bytes obtained from the system for this space.
	pub fn mspace_max_footprint(msp:*mut c_void)->usize;
	/// `mspace_mallinfo` behaves as mallinfo, but reports properties of the given space.
	pub fn mspace_mallinfo(msp:*mut c_void)->MallInfo;
	/// `mspace_trim` behaves as malloc_trim, but operates within the given space.
	pub fn mspace_trim(msp:*mut c_void,pad:usize)->i32;
}