portable_dlmalloc/
raw.rs

// This module defines the raw C FFI bindings of the dlmalloc library.
// Use these bindings only if you understand their safety requirements.

use core::ffi::c_void;

pub type DLInspectHandler=unsafe extern "C" fn (start:*mut c_void,end:*mut c_void,used_bytes:usize,callback_arg:*mut c_void);

pub const M_TRIM_THRESHOLD:i32=-1;
pub const M_GRANULARITY:i32=-2;
pub const M_MMAP_THRESHOLD:i32=-3;

#[repr(C)] pub struct MallInfo
{
	/// non-mmapped space allocated from system
	pub arena:usize,
	/// number of free chunks
	pub ordblks:usize,
	/// always 0
	pub smblks:usize,
	/// always 0
	pub hblks:usize,
	/// space in `mmap`ed regions
	pub hblkhd:usize,
	/// maximum total allocated space
	pub usmblks:usize,
	/// always 0
	pub fsmblks:usize,
	/// total allocated space
	pub uordblks:usize,
	/// total free space
	pub fordblks:usize,
	/// releasable space (via `malloc_trim`)
	pub keepcost:usize
}

unsafe extern "C"
{
	/// `malloc(size_t n)`
	///
	/// Returns a pointer to a newly allocated chunk of at least n bytes, or
	/// null if no space is available, in which case errno is set to ENOMEM
	/// on ANSI C systems.
	///
	/// If n is zero, malloc returns a minimum-sized chunk. (The minimum
	/// size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
	/// systems.)  Note that size_t is an unsigned type, so calls with
	/// arguments that would be negative if signed are interpreted as
	/// requests for huge amounts of space, which will often fail. The
	/// maximum supported value of n differs across systems, but is in all
	/// cases less than the maximum representable value of a size_t.
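	///
	/// A minimal usage sketch of a `dlmalloc`/`dlfree` round trip (the
	/// import path assumes this crate's `raw` module):
	/// ```ignore
	/// use core::ffi::c_void;
	/// use portable_dlmalloc::raw::{dlmalloc,dlfree};
	///
	/// unsafe
	/// {
	///     // Request 64 bytes; a null return means allocation failed.
	///     let p:*mut c_void=dlmalloc(64);
	///     if !p.is_null()
	///     {
	///         // ... use the memory ...
	///         dlfree(p);
	///     }
	/// }
	/// ```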
	pub fn dlmalloc(n:usize)->*mut c_void;
	/// `dlfree(void* p)`
	///
	/// Releases the chunk of memory pointed to by p, that had been previously
	/// allocated using malloc or a related routine such as realloc.
	/// It has no effect if p is null. If p was not malloced or already
	/// freed, free(p) will by default cause the current program to abort.
	pub fn dlfree(p:*mut c_void);
	/// `calloc(size_t n_elements, size_t element_size);`
	///
	/// Returns a pointer to n_elements * element_size bytes, with all locations
	/// set to zero.
	pub fn dlcalloc(n_elements:usize,element_size:usize)->*mut c_void;
	/// `realloc(void* p, size_t n)`
	///
	/// Returns a pointer to a chunk of size n that contains the same data
	/// as does chunk p up to the minimum of (n, p's size) bytes, or null
	/// if no space is available.
	///
	/// The returned pointer may or may not be the same as p. The algorithm
	/// prefers extending p in most cases when possible, otherwise it
	/// employs the equivalent of a malloc-copy-free sequence.
	///
	/// If p is null, realloc is equivalent to malloc.
	///
	/// If space is not available, realloc returns null, errno is set (if on
	/// ANSI) and p is NOT freed.
	///
	/// If n is for fewer bytes than already held by p, the newly unused
	/// space is lopped off and freed if possible.  realloc with a size
	/// argument of zero (re)allocates a minimum-sized chunk.
	///
	/// Note that the returned pointer is not guaranteed to have the same
	/// alignment as the old pointer!
	///
	/// The old unix realloc convention of allowing the last-free'd chunk
	/// to be used as an argument to realloc is not supported.
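	///
	/// A sketch of the null-check pattern this implies (the old pointer
	/// stays valid until the new one is confirmed):
	/// ```ignore
	/// unsafe
	/// {
	///     let p=dlmalloc(32);
	///     // On failure dlrealloc returns null and does NOT free `p`.
	///     let q=dlrealloc(p,128);
	///     if q.is_null() { dlfree(p); } else { dlfree(q); }
	/// }
	/// ```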
	pub fn dlrealloc(p:*mut c_void,n:usize)->*mut c_void;
	/// `realloc_in_place(void*  p, size_t n)`
	///
	/// Resizes the space allocated for p to size n, only if this can be
	/// done without moving p (i.e., only if there is adjacent space
	/// available if n is greater than p's current allocated size, or n is
	/// less than or equal to p's size). This may be used instead of plain
	/// realloc if an alternative allocation strategy is needed upon failure
	/// to expand space; for example, reallocation of a buffer that must be
	/// memory-aligned or cleared. You can use realloc_in_place to trigger
	/// these alternatives only when needed.
	///
	/// Returns p if successful; otherwise null.
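	///
	/// A sketch of the fallback pattern for an aligned buffer (sizes and
	/// the 64-byte alignment are illustrative assumptions):
	/// ```ignore
	/// unsafe
	/// {
	///     let p=dlmemalign(64,256);
	///     if !p.is_null()
	///     {
	///         // Try to grow without moving; fall back to an aligned
	///         // malloc-copy-free sequence if the chunk cannot be extended.
	///         if dlrealloc_in_place(p,512).is_null()
	///         {
	///             let q=dlmemalign(64,512);
	///             if !q.is_null()
	///             {
	///                 core::ptr::copy_nonoverlapping(p as *const u8,q as *mut u8,256);
	///                 dlfree(p);
	///             }
	///         }
	///     }
	/// }
	/// ```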
	pub fn dlrealloc_in_place(p:*mut c_void,n:usize)->*mut c_void;
	/// `memalign(size_t alignment, size_t n);`
	///
	/// Returns a pointer to a newly allocated chunk of n bytes, aligned
	/// in accord with the alignment argument.
	///
	/// The alignment argument should be a power of two. If the argument is
	/// not a power of two, the nearest greater power is used.
	/// 8-byte alignment is guaranteed by normal malloc calls, so don't
	/// bother calling memalign with an argument of 8 or less.
	///
	/// Overreliance on memalign is a sure way to fragment space.
	pub fn dlmemalign(alignment:usize,n:usize)->*mut c_void;
	/// `int posix_memalign(void** pp, size_t alignment, size_t n);`
	///
	/// Allocates a chunk of n bytes, aligned in accord with the alignment
	/// argument. Differs from memalign only in that it
	/// 1. assigns the allocated memory to *pp rather than returning it,
	/// 2. fails and returns EINVAL if the alignment is not a power of two,
	/// 3. fails and returns ENOMEM if memory cannot be allocated.
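	///
	/// For example (a sketch; per POSIX convention, 0 is the success return):
	/// ```ignore
	/// use core::ffi::c_void;
	///
	/// unsafe
	/// {
	///     let mut p:*mut c_void=core::ptr::null_mut();
	///     // Request 1024 bytes aligned to a 4096-byte boundary.
	///     if dlposix_memalign(&mut p,4096,1024)==0
	///     {
	///         // ... use the memory ...
	///         dlfree(p);
	///     }
	/// }
	/// ```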
	pub fn dlposix_memalign(pp:*mut *mut c_void,alignment:usize,n:usize)->i32;
	/// `valloc(size_t n);`
	///
	/// Equivalent to memalign(pagesize, n), where pagesize is the page
	/// size of the system. If the pagesize is unknown, 4096 is used.
	pub fn dlvalloc(n:usize)->*mut c_void;
	/// `mallopt(int parameter_number, int parameter_value)`
	///
	/// Sets tunable parameters. The format is to provide a
	/// (parameter-number, parameter-value) pair.  mallopt then sets the
	/// corresponding parameter to the argument value if it can (i.e., so
	/// long as the value is meaningful), and returns 1 if successful else
	/// 0.  To workaround the fact that mallopt is specified to use int,
	/// not size_t parameters, the value -1 is specially treated as the
	/// maximum unsigned size_t value.
	///
	/// SVID/XPG/ANSI defines four standard param numbers for mallopt,
	/// normally defined in malloc.h.  None of these are used in this malloc,
	/// so setting them has no effect. But this malloc also supports other
	/// options in mallopt. See below for details.  Briefly, supported
	/// parameters are as follows (listed defaults are for "typical"
	/// configurations).
	///
	/// | Symbol           | param # | default     | allowed param values          |
	/// |------------------|---------|-------------|-------------------------------|
	/// | M_TRIM_THRESHOLD | -1      | 2x1024x1024 | any (-1 disables)             |
	/// | M_GRANULARITY    | -2      | page size   | any power of 2 >= page size   |
	/// | M_MMAP_THRESHOLD | -3      | 2x1024x1024 | any (or 0 if no MMAP support) |
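	///
	/// For example, trimming could be disabled entirely (a sketch using the
	/// constants defined at the top of this module):
	/// ```ignore
	/// unsafe
	/// {
	///     // -1 disables trimming; dlmallopt returns 1 on success, 0 otherwise.
	///     if dlmallopt(M_TRIM_THRESHOLD,-1)==1
	///     {
	///         // Trimming is now disabled.
	///     }
	/// }
	/// ```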
	pub fn dlmallopt(parameter_number:i32,parameter_value:i32)->i32;
	/// `malloc_footprint();`
	///
	/// Returns the number of bytes obtained from the system.  The total
	/// number of bytes allocated by malloc, realloc etc., is less than this
	/// value. Unlike mallinfo, this function returns only a precomputed
	/// result, so can be called frequently to monitor memory consumption.
	/// Even if locks are otherwise defined, this function does not use them,
	/// so results might not be up to date.
	pub fn dlmalloc_footprint()->usize;
	/// `malloc_max_footprint();`
	///
	/// Returns the maximum number of bytes obtained from the system. This
	/// value will be greater than current footprint if deallocated space
	/// has been reclaimed by the system. The peak number of bytes allocated
	/// by malloc, realloc etc., is less than this value. Unlike mallinfo,
	/// this function returns only a precomputed result, so can be called
	/// frequently to monitor memory consumption.  Even if locks are
	/// otherwise defined, this function does not use them, so results might
	/// not be up to date.
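	///
	/// A monitoring sketch combining the two footprint queries:
	/// ```ignore
	/// unsafe
	/// {
	///     // Current vs. peak bytes obtained from the system.
	///     let now=dlmalloc_footprint();
	///     let peak=dlmalloc_max_footprint();
	///     assert!(peak>=now);
	/// }
	/// ```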
	pub fn dlmalloc_max_footprint()->usize;
	/// `malloc_footprint_limit();`
	///
	/// Returns the number of bytes that the heap is allowed to obtain from
	/// the system, returning the last value returned by
	/// malloc_set_footprint_limit, or the maximum size_t value if
	/// never set. The returned value reflects a permission. There is no
	/// guarantee that this number of bytes can actually be obtained from
	/// the system.
	pub fn dlmalloc_footprint_limit()->usize;
	/// `malloc_set_footprint_limit();`
	///
	/// Sets the maximum number of bytes to obtain from the system, causing
	/// failure returns from malloc and related functions upon attempts to
	/// exceed this value. The argument value may be subject to page
	/// rounding to an enforceable limit; this actual value is returned.
	/// Using an argument of the maximum possible size_t effectively
	/// disables checks. If the argument is less than or equal to the
	/// current malloc_footprint, then all future allocations that require
	/// additional system memory will fail. However, invocation cannot
	/// retroactively deallocate existing used memory.
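	///
	/// For example, capping the heap at roughly 16 MiB might look like:
	/// ```ignore
	/// unsafe
	/// {
	///     // The requested limit may be page-rounded; the enforced
	///     // value is returned and can be read back later.
	///     let enforced=dlmalloc_set_footprint_limit(16*1024*1024);
	///     let _current_limit=dlmalloc_footprint_limit();
	///     let _=enforced;
	/// }
	/// ```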
	pub fn dlmalloc_set_footprint_limit(bytes:usize)->usize;
	/// `malloc_inspect_all(void(*handler)(void *start,void *end,size_t used_bytes,void*  callback_arg),void*  arg);`
	///
	/// Traverses the heap and calls the given handler for each managed
	/// region, skipping all bytes that are (or may be) used for bookkeeping
	/// purposes.  Traversal does not include chunks that have been
	/// directly memory mapped. Each reported region begins at the start
	/// address, and continues up to but not including the end address.  The
	/// first used_bytes of the region contain allocated data. If
	/// used_bytes is zero, the region is unallocated. The handler is
	/// invoked with the given callback argument. If locks are defined, they
	/// are held during the entire traversal. It is a bad idea to invoke
	/// other malloc functions from within the handler.
	///
	/// For example, to count the number of in-use chunks with size greater
	/// than 1000, you could write:
	/// ```C
	/// static int count = 0;
	/// void count_chunks(void*  start, void* end, size_t used, void* arg)
	/// {
	///     if (used >= 1000) ++count;
	/// }
	/// ```
	/// then:
	/// ```C
	/// malloc_inspect_all(count_chunks, NULL);
	/// ```
	///
	/// malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
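	///
	/// A Rust counterpart of the handler above might look like (a sketch,
	/// assuming the library was built with MALLOC_INSPECT_ALL; the static
	/// counter makes it non-reentrant):
	/// ```ignore
	/// use core::ffi::c_void;
	///
	/// static mut COUNT:usize=0;
	///
	/// unsafe extern "C" fn count_chunks(_start:*mut c_void,_end:*mut c_void,used_bytes:usize,_arg:*mut c_void)
	/// {
	///     // Count in-use regions holding at least 1000 bytes.
	///     if used_bytes>=1000 { COUNT+=1; }
	/// }
	///
	/// unsafe { dlmalloc_inspect_all(count_chunks,core::ptr::null_mut()); }
	/// ```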
	pub fn dlmalloc_inspect_all(handler:DLInspectHandler,arg:*mut c_void);
	/// `mallinfo()`
	///
	/// Returns (by copy) a struct containing various summary statistics:
	///
	/// - arena:     current total non-mmapped bytes allocated from system
	/// - ordblks:   the number of free chunks
	/// - smblks:    always zero.
	/// - hblks:     current number of mmapped regions
	/// - hblkhd:    total bytes held in mmapped regions
	/// - usmblks:   the maximum total allocated space. This will be greater than current total if trimming has occurred.
	/// - fsmblks:   always zero
	/// - uordblks:  current total allocated space (normal or mmapped)
	/// - fordblks:  total free space
	/// - keepcost:  the maximum number of bytes that could ideally be released back to system via malloc_trim. ("ideally" means that it ignores page restrictions etc.)
	///
	/// In the C definition these fields may be ints while internal
	/// bookkeeping is kept as longs, so the reported values may wrap
	/// around zero and thus be inaccurate.
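	///
	/// Reading a few of these statistics in Rust (a sketch):
	/// ```ignore
	/// unsafe
	/// {
	///     let info=dlmallinfo();
	///     // Allocated space plus free space, and the free-chunk count.
	///     let _total=info.uordblks+info.fordblks;
	///     let _free_chunks=info.ordblks;
	/// }
	/// ```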
	pub fn dlmallinfo()->MallInfo;
	/// `independent_calloc(size_t n_elements, size_t element_size, void*  chunks[]);`
	///
	/// independent_calloc is similar to calloc, but instead of returning a
	/// single cleared space, it returns an array of pointers to n_elements
	/// independent elements that can hold contents of size elem_size, each
	/// of which starts out cleared, and can be independently freed,
	/// realloc'ed etc. The elements are guaranteed to be adjacently
	/// allocated (this is not guaranteed to occur with multiple callocs or
	/// mallocs), which may also improve cache locality in some
	/// applications.
	///
	/// The "chunks" argument is optional (i.e., may be null, which is
	/// probably the most typical usage). If it is null, the returned array
	/// is itself dynamically allocated and should also be freed when it is
	/// no longer needed. Otherwise, the chunks array must be of at least
	/// n_elements in length. It is filled in with the pointers to the
	/// chunks.
	///
	/// In either case, independent_calloc returns this pointer array, or
	/// null if the allocation failed.  If n_elements is zero and "chunks"
	/// is null, it returns a chunk representing an array with zero elements
	/// (which should be freed if not wanted).
	///
	/// Each element must be freed when it is no longer needed. This can be
	/// done all at once using bulk_free.
	///
	/// independent_calloc simplifies and speeds up implementations of many
	/// kinds of pools.  It may also be useful when constructing large data
	/// structures that initially have a fixed number of fixed-sized nodes,
	/// but the number is not known at compile time, and some of the nodes
	/// may later need to be freed. For example:
	/// ```C
	/// struct Node { int item; struct Node*  next; };
	///
	/// struct Node*  build_list() {
	///     struct Node**  pool;
	///     int n = read_number_of_nodes_needed();
	///     if (n <= 0) return 0;
	///     pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
	///     if (pool == 0) die();
	///     // organize into a linked list...
	///     struct Node*  first = pool[0];
	///     for (int i = 0; i < n-1; ++i)
	///       pool[i]->next = pool[i+1];
	///     free(pool);     // Can now free the array (or not, if it is needed later)
	///     return first;
	/// }
	/// ```
	pub fn dlindependent_calloc(n_elements:usize,element_size:usize,chunks:*mut *mut c_void)->*mut *mut c_void;
	/// `independent_comalloc(size_t n_elements, size_t sizes[], void*  chunks[]);`
	///
	/// independent_comalloc allocates, all at once, a set of n_elements
	/// chunks with sizes indicated in the "sizes" array.    It returns
	/// an array of pointers to these elements, each of which can be
	/// independently freed, realloc'ed etc. The elements are guaranteed to
	/// be adjacently allocated (this is not guaranteed to occur with
	/// multiple callocs or mallocs), which may also improve cache locality
	/// in some applications.
	///
	/// The "chunks" argument is optional (i.e., may be null). If it is null
	/// the returned array is itself dynamically allocated and should also
	/// be freed when it is no longer needed. Otherwise, the chunks array
	/// must be of at least n_elements in length. It is filled in with the
	/// pointers to the chunks.
	///
	/// In either case, independent_comalloc returns this pointer array, or
	/// null if the allocation failed.  If n_elements is zero and chunks is
	/// null, it returns a chunk representing an array with zero elements
	/// (which should be freed if not wanted).
	///
	/// Each element must be freed when it is no longer needed. This can be
	/// done all at once using bulk_free.
	///
	/// independent_comalloc differs from independent_calloc in that each
	/// element may have a different size, and also that it does not
	/// automatically clear elements.
	///
	/// independent_comalloc can be used to speed up allocation in cases
	/// where several structs or objects must always be allocated at the
	/// same time.  For example:
	///
	/// ```C
	/// struct Head { ... };
	/// struct Foot { ... };
	///
	/// void send_message(char*  msg) {
	///     int msglen = strlen(msg);
	///     size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
	///     void*  chunks[3];
	///     if (independent_comalloc(3, sizes, chunks) == 0)
	///       die();
	///     struct Head*  head = (struct Head*)(chunks[0]);
	///     char*         body = (char*)(chunks[1]);
	///     struct Foot*  foot = (struct Foot*)(chunks[2]);
	///     // ...
	/// }
	/// ```
	/// In general though, independent_comalloc is worth using only for
	/// larger values of n_elements. For small values, you probably won't
	/// detect enough difference from series of malloc calls to bother.
	///
	/// Overuse of independent_comalloc can increase overall memory usage,
	/// since it cannot reuse existing noncontiguous small chunks that
	/// might be available for some of the elements.
	pub fn dlindependent_comalloc(n_elements:usize,sizes:*const usize,chunks:*mut *mut c_void)->*mut *mut c_void;
	/// `bulk_free(void*  array[], size_t n_elements)`
	///
	/// Frees and clears (sets to null) each non-null pointer in the given
	/// array.  This is likely to be faster than freeing them one-by-one.
	/// If footers are used, pointers that have been allocated in different
	/// mspaces are not freed or cleared, and the count of all such pointers
	/// is returned.  For large arrays of pointers with poor locality, it
	/// may be worthwhile to sort this array before calling bulk_free.
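	///
	/// A sketch pairing `dlindependent_calloc` with `dlbulk_free` (the
	/// element count and size are illustrative):
	/// ```ignore
	/// use core::ffi::c_void;
	///
	/// unsafe
	/// {
	///     // Allocate 8 cleared elements of 32 bytes each.
	///     let chunks:*mut *mut c_void=dlindependent_calloc(8,32,core::ptr::null_mut());
	///     if !chunks.is_null()
	///     {
	///         // ... use the elements ...
	///         // Free all 8 elements at once, then the pointer array itself.
	///         let _leftover=dlbulk_free(chunks,8);
	///         dlfree(chunks as *mut c_void);
	///     }
	/// }
	/// ```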
	pub fn dlbulk_free(array:*mut *mut c_void,n_elements:usize)->usize;
	/// `pvalloc(size_t n);`
	///
	/// Equivalent to valloc(minimum-page-that-holds(n)), that is,
	/// round up n to nearest pagesize.
	pub fn dlpvalloc(n:usize)->*mut c_void;
	/// `malloc_trim(size_t pad);`
	///
	/// If possible, gives memory back to the system (via negative arguments
	/// to sbrk) if there is unused memory at the "high" end of the malloc
	/// pool or in unused MMAP segments. You can call this after freeing
	/// large blocks of memory to potentially reduce the system-level memory
	/// requirements of a program. However, it cannot guarantee to reduce
	/// memory. Under some allocation patterns, some large free blocks of
	/// memory will be locked between two used chunks, so they cannot be
	/// given back to the system.
	///
	/// The `pad` argument to malloc_trim represents the amount of free
	/// trailing space to leave untrimmed. If this argument is zero, only
	/// the minimum amount of memory to maintain internal data structures
	/// will be left. Non-zero arguments can be supplied to maintain enough
	/// trailing space to service future expected allocations without having
	/// to re-obtain memory from the system.
	///
	/// malloc_trim returns 1 if it actually released any memory, else 0.
	pub fn dlmalloc_trim(pad:usize)->i32;
	/// `malloc_usable_size(void*  p);`
	///
	/// Returns the number of bytes you can actually use in
	/// an allocated chunk, which may be more than you requested (although
	/// often not) due to alignment and minimum size constraints.
	/// You can use this many bytes without worrying about
	/// overwriting other allocated objects. This is not a particularly great
	/// programming practice. malloc_usable_size can be more useful in
	/// debugging and assertions, for example:
	/// ```C
	/// p = malloc(n);
	/// assert(malloc_usable_size(p) >= 256);
	/// ```
	pub fn dlmalloc_usable_size(p:*mut c_void)->usize;
	/// `create_mspace` creates and returns a new independent space with the
	/// given initial capacity, or, if 0, the default granularity size.  It
	/// returns null if there is no system memory available to create the
	/// space.  If argument locked is non-zero, the space uses a separate
	/// lock to control access. The capacity of the space will grow
	/// dynamically as needed to service mspace_malloc requests.  You can
	/// control the sizes of incremental increases of this space by
	/// compiling with a different DEFAULT_GRANULARITY or dynamically
	/// setting with mallopt(M_GRANULARITY, value).
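	///
	/// A lifecycle sketch of an independent space (locking enabled):
	/// ```ignore
	/// use core::ffi::c_void;
	///
	/// unsafe
	/// {
	///     // 0 capacity selects the default granularity; 1 enables locking.
	///     let msp:*mut c_void=create_mspace(0,1);
	///     if !msp.is_null()
	///     {
	///         let p=mspace_malloc(msp,256);
	///         if !p.is_null() { mspace_free(msp,p); }
	///         // Tear down the space and return its memory to the system.
	///         let _freed=destroy_mspace(msp);
	///     }
	/// }
	/// ```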
	pub fn create_mspace(capacity:usize,locked:i32)->*mut c_void;
	/// `destroy_mspace` destroys the given space, and attempts to return all
	/// of its memory back to the system, returning the total number of
	/// bytes freed. After destruction, the results of access to all memory
	/// used by the space become undefined.
	pub fn destroy_mspace(msp:*mut c_void)->usize;
	/// `create_mspace_with_base` uses the memory supplied as the initial base
	/// of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
	/// space is used for bookkeeping, so the capacity must be at least this
	/// large. (Otherwise 0 is returned.) When this initial space is
	/// exhausted, additional memory will be obtained from the system.
	/// Destroying this space will deallocate all additionally allocated
	/// space (if possible) but not the initial base.
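	///
	/// A sketch seeding an mspace from a static buffer (the 64 KiB size is
	/// an arbitrary assumption; it must exceed the bookkeeping overhead
	/// noted above):
	/// ```ignore
	/// use core::ffi::c_void;
	///
	/// static mut ARENA:[u8;65536]=[0;65536];
	///
	/// unsafe
	/// {
	///     let base=core::ptr::addr_of_mut!(ARENA) as *mut c_void;
	///     let msp=create_mspace_with_base(base,65536,0);
	///     if !msp.is_null()
	///     {
	///         let p=mspace_malloc(msp,128);
	///         if !p.is_null() { mspace_free(msp,p); }
	///         // The initial base itself is not freed by destroy_mspace.
	///         destroy_mspace(msp);
	///     }
	/// }
	/// ```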
	pub fn create_mspace_with_base(base:*mut c_void,capacity:usize,locked:i32)->*mut c_void;
	/// `mspace_track_large_chunks` controls whether requests for large chunks
	/// are allocated in their own untracked mmapped regions, separate from
	/// others in this mspace. By default large chunks are not tracked,
	/// which reduces fragmentation. However, such chunks are not
	/// necessarily released to the system upon destroy_mspace.  Enabling
	/// tracking by setting to true may increase fragmentation, but avoids
	/// leakage when relying on destroy_mspace to release all memory
	/// allocated using this space.  The function returns the previous
	/// setting.
	pub fn mspace_track_large_chunks(msp:*mut c_void,enable:i32)->i32;
	/// `mspace_malloc` behaves as malloc, but operates within the given space.
	pub fn mspace_malloc(msp:*mut c_void,bytes:usize)->*mut c_void;
	/// `mspace_free` behaves as free, but operates within
	/// the given space.
	///
	/// If compiled with FOOTERS==1, mspace_free is not actually needed.
	/// free may be called instead of mspace_free because freed chunks from
	/// any space are handled by their originating spaces.
	pub fn mspace_free(msp:*mut c_void,mem:*mut c_void);
	/// `mspace_realloc` behaves as realloc, but operates within
	/// the given space.
	///
	/// If compiled with FOOTERS==1, mspace_realloc is not actually
	/// needed.  realloc may be called instead of mspace_realloc because
	/// realloced chunks from any space are handled by their originating
	/// spaces.
	pub fn mspace_realloc(msp:*mut c_void,mem:*mut c_void,newsize:usize)->*mut c_void;
	/// `mspace_calloc` behaves as calloc, but operates within the given space.
	pub fn mspace_calloc(msp:*mut c_void,n_elements:usize,elem_size:usize)->*mut c_void;
	/// `mspace_realloc_in_place` behaves as realloc_in_place,
	/// but operates within the given space.
	pub fn mspace_realloc_in_place(msp:*mut c_void,mem:*mut c_void,newsize:usize)->*mut c_void;
	/// `mspace_memalign` behaves as memalign, but operates within the given space.
	pub fn mspace_memalign(msp:*mut c_void,alignment:usize,bytes:usize)->*mut c_void;
	/// `mspace_independent_calloc` behaves as independent_calloc, but operates within the given space.
	pub fn mspace_independent_calloc(msp:*mut c_void,n_elements:usize,elem_size:usize,chunks:*mut *mut c_void)->*mut *mut c_void;
	/// `mspace_independent_comalloc` behaves as independent_comalloc, but operates within the given space.
	pub fn mspace_independent_comalloc(msp:*mut c_void,n_elements:usize,sizes:*const usize,chunks:*mut *mut c_void)->*mut *mut c_void;
	/// `mspace_footprint()` returns the number of bytes obtained from the system for this space.
	pub fn mspace_footprint(msp:*mut c_void)->usize;
	/// `mspace_max_footprint()` returns the peak number of bytes obtained from the system for this space.
	pub fn mspace_max_footprint(msp:*mut c_void)->usize;
	/// `mspace_mallinfo` behaves as mallinfo, but reports properties of the given space.
	pub fn mspace_mallinfo(msp:*mut c_void)->MallInfo;
	/// `mspace_trim` behaves as malloc_trim, but operates within the given space.
	pub fn mspace_trim(msp:*mut c_void,pad:usize)->i32;
}