#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/prim.h"
#include "bitmap.h"
mi_arena_id_t _mi_arena_id_none(void) {
return NULL;
}
/* Convert a public arena id back to its arena pointer (an arena id is the
   address of the arena itself). Only top-level arenas (no parent) are valid. */
mi_arena_t* _mi_arena_from_id(mi_arena_id_t id) {
  mi_arena_t* const result = (mi_arena_t*)id;
  mi_assert_internal(result==NULL || result->parent==NULL);
  return result;
}
/* Convert an arena pointer to its public id (NULL maps to the "none" id). */
mi_arena_id_t mi_arena_id_from_arena(mi_arena_t* arena) {
  mi_assert_internal(arena==NULL || arena->parent==NULL);
  if (arena == NULL) { return _mi_arena_id_none(); }
  return (mi_arena_id_t)arena;
}
/* Can memory from `arena` satisfy a request for `req_arena`?
   - an exact match (including NULL==NULL) is always suitable;
   - with no specific request, any non-exclusive arena qualifies;
   - a sub-arena also qualifies for a request of its parent. */
static bool mi_arena_is_suitable(mi_arena_t* arena, mi_arena_t* req_arena) {
  if (arena == req_arena) return true;
  if (arena == NULL) return false;
  if (req_arena == NULL) {
    return !arena->is_exclusive;
  }
  return (arena->parent == req_arena);
}
/* Is memory described by `memid` suitable for `request_arena`?
   Non-arena memory (OS, static, ...) is treated like a non-exclusive NULL arena. */
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_t* request_arena) {
  mi_arena_t* const arena = (memid.memkind == MI_MEM_ARENA ? memid.mem.arena.arena : NULL);
  return mi_arena_is_suitable(arena, request_arena);
}
/* Current number of arenas in this sub-process (relaxed read; may race with
   concurrent arena reservation). */
size_t mi_arenas_get_count(mi_subproc_t* subproc) {
  const size_t count = mi_atomic_load_relaxed(&subproc->arena_count);
  return count;
}
/* Load the arena at `idx` (acquire load so the arena contents published by the
   reserving thread are visible). */
mi_arena_t* mi_arena_from_index(mi_subproc_t* subproc, size_t idx) {
  mi_assert_internal(idx < mi_arenas_get_count(subproc));
  mi_arena_t* const arena = mi_atomic_load_ptr_acquire(mi_arena_t, &subproc->arenas[idx]);
  return arena;
}
/* Number of initial slices reserved for the arena's own meta-data. */
static size_t mi_arena_info_slices(mi_arena_t* arena) {
  const size_t slices = arena->info_slices;
  return slices;
}
#if MI_DEBUG > 1
// Debug check: does `heap` currently own `page` inside `arena`?
// True when the page memory comes from `arena` and the heap's per-arena page
// bitmap has the page's slice marked as in-use.
static bool mi_heap_has_page(mi_heap_t* heap, mi_arena_t* arena, mi_page_t* page) {
  mi_assert(arena->arena_idx < MI_MAX_ARENAS);
  mi_arena_pages_t* arena_pages = heap->arena_pages[arena->arena_idx];
  return (page->memid.memkind == MI_MEM_ARENA &&
          page->memid.mem.arena.arena == arena &&
          arena_pages != NULL &&
          mi_bitmap_is_setN(arena_pages->pages, page->memid.mem.arena.slice_index, 1));
}
#endif
/* Minimum alignment of an arena allocation (one slice). */
size_t mi_arena_min_alignment(void) {
  const size_t align = MI_ARENA_SLICE_ALIGN;
  return align;
}
/* Smallest size ever reserved for a fresh arena. */
size_t mi_arena_min_size(void) {
  const size_t minsize = MI_ARENA_MIN_SIZE;
  return minsize;
}
/* Largest object size served from an arena (larger requests go to the OS).
   Reads the `arena_max_object_size` option, rounds it up to whole slices, and
   clamps it to [MI_ARENA_MIN_OBJ_SIZE, MI_ARENA_MAX_SIZE - MI_BCHUNK_SIZE]. */
static size_t mi_arena_max_object_size(void) {
  const size_t upper = MI_ARENA_MAX_SIZE - MI_BCHUNK_SIZE;
  size_t max_size = _mi_align_up(mi_option_get_size(mi_option_arena_max_object_size), MI_ARENA_SLICE_SIZE);
  if (max_size <= MI_ARENA_MIN_OBJ_SIZE) { max_size = MI_ARENA_MIN_OBJ_SIZE; }
  if (max_size > upper) { max_size = upper; }
  return max_size;
}
// Commit `size` bytes at `start` inside `arena` (or plain OS memory when
// `arena==NULL`). Uses the arena's custom commit function when one is
// installed; otherwise falls back to the OS primitives. `already_committed`
// is the number of bytes of the range that were committed before, forwarded
// to `_mi_os_commit_ex` for commit-statistic accounting.
// NOTE(review): the stat argument here is `already_committed`, while the
// direct `_mi_os_commit_ex` call in `mi_arena_try_alloc_at` passes the
// *newly* committed size instead -- one of the two interpretations looks
// inconsistent; verify against `_mi_os_commit_ex`'s stat-size semantics.
mi_decl_nodiscard static bool mi_arena_commit(mi_arena_t* arena, void* start, size_t size, bool* is_zero, size_t already_committed) {
  if (arena != NULL && arena->commit_fun != NULL) {
    return (*arena->commit_fun)(true, start, size, is_zero, arena->commit_fun_arg);
  }
  else if (already_committed > 0) {
    return _mi_os_commit_ex(start, size, is_zero, already_committed);
  }
  else {
    return _mi_os_commit(start, size, is_zero);
  }
}
/* Total arena size in bytes (slice count times slice size). */
static size_t mi_arena_size(mi_arena_t* arena) {
  const size_t size_in_bytes = mi_size_of_slices(arena->slice_count);
  return size_in_bytes;
}
/* Start address of the arena memory; the arena descriptor itself sits at the
   very start of that memory. */
static uint8_t* mi_arena_start(mi_arena_t* arena) {
  uint8_t* const base = (uint8_t*)arena;
  return base;
}
/* Address of slice `slice_index` within `arena`. */
uint8_t* mi_arena_slice_start(mi_arena_t* arena, size_t slice_index) {
  mi_assert_internal(slice_index < arena->slice_count);
  uint8_t* const start = mi_arena_start(arena) + mi_size_of_slices(slice_index);
  return start;
}
// The page meta-data belonging to `slice_index` in `arena`.
// When the arena keeps separated page meta-data (`pages_meta != NULL`) return
// that entry; otherwise -- and, with MI_PAGE_META_ALIGNED_FREE_SMALL, also when
// the separate entry is unused (`block_size==0`) -- the page structure lives at
// the start of the slice memory itself.
mi_page_t* mi_arena_page_at_slice(mi_arena_t* arena, size_t slice_index) {
  mi_assert_internal(slice_index < arena->slice_count);
  if (arena->pages_meta != NULL) {
    mi_page_t* const page = &arena->pages_meta[slice_index];
#if MI_PAGE_META_ALIGNED_FREE_SMALL
    if (page->block_size>0) return page;
#else
    return page;
#endif
  }
  return (mi_page_t*)mi_arena_slice_start(arena,slice_index);
}
/* Return the start of the arena identified by `arena_id`, and optionally its
   total size in `*size` (set to 0 when the id is invalid). */
void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
  mi_arena_t* const arena = _mi_arena_from_id(arena_id);
  if (arena == NULL) {
    if (size != NULL) { *size = 0; }
    return NULL;
  }
  if (size != NULL) {
    mi_assert_internal(mi_size_of_slices(arena->slice_count) <= arena->total_size);
    *size = arena->total_size;
  }
  return mi_arena_start(arena);
}
/* Build a memid describing `slice_count` slices starting at `slice_index`
   inside `arena`. Index and count are stored as 32-bit values. */
static mi_memid_t mi_memid_create_arena(mi_arena_t* arena, size_t slice_index, size_t slice_count) {
  mi_assert_internal(slice_index < UINT32_MAX);
  mi_assert_internal(slice_count < UINT32_MAX);
  mi_assert_internal(slice_count > 0);
  mi_assert_internal(slice_index < arena->slice_count);
  mi_memid_t result = _mi_memid_create(MI_MEM_ARENA);
  result.mem.arena.arena       = arena;
  result.mem.arena.slice_index = (uint32_t)slice_index;
  result.mem.arena.slice_count = (uint32_t)slice_count;
  return result;
}
/* Decode an arena memid into its arena, slice index, and slice count
   (the index/count output parameters are optional). */
static mi_arena_t* mi_arena_from_memid(mi_memid_t memid, size_t* slice_index, size_t* slice_count) {
  mi_assert_internal(memid.memkind == MI_MEM_ARENA);
  if (slice_index != NULL) { *slice_index = memid.mem.arena.slice_index; }
  if (slice_count != NULL) { *slice_count = memid.mem.arena.slice_count; }
  return memid.mem.arena.arena;
}
// Full size in bytes of the memory backing `page`:
// - arena pages: the whole slice range of the page;
// - OS or external pages: the OS allocation size minus the alignment prefix
//   that precedes the page struct (0 if the sizes look inconsistent);
// - otherwise (e.g. none/static): 0.
static size_t mi_page_full_size(mi_page_t* page) {
  if (page->memid.memkind == MI_MEM_ARENA) {
    return page->memid.mem.arena.slice_count * MI_ARENA_SLICE_SIZE;
  }
  else if (mi_memid_is_os(page->memid) || page->memid.memkind == MI_MEM_EXTERNAL) {
    mi_assert_internal((uint8_t*)page->memid.mem.os.base <= (uint8_t*)page);
    // bytes between the OS base and the page struct (alignment padding)
    const ptrdiff_t presize = (uint8_t*)page - (uint8_t*)page->memid.mem.os.base;
    mi_assert_internal((ptrdiff_t)page->memid.mem.os.size >= presize);
    // defensive in release builds: never return a wrapped-around size
    return (presize > (ptrdiff_t)page->memid.mem.os.size ? 0 : page->memid.mem.os.size - presize);
  }
  else {
    return 0;
  }
}
// Try to atomically claim `slice_count` contiguous free slices in `arena`.
// On success returns the start address and fills `memid`; returns NULL when no
// contiguous range is free (or when commit fails, in which case the claimed
// slices are returned to the free bitmap).
static mi_decl_noinline void* mi_arena_try_alloc_at(
  mi_arena_t* arena, size_t slice_count, bool commit, size_t tseq, mi_memid_t* memid)
{
  size_t slice_index;
  // claim a free range; this atomically marks it as in-use
  if (!mi_bbitmap_try_find_and_clearN(arena->slices_free, slice_count, tseq, &slice_index)) return NULL;
  void* p = mi_arena_slice_start(arena, slice_index);
  *memid = mi_memid_create_arena(arena, slice_index, slice_count);
  memid->is_pinned = arena->memid.is_pinned;
  // mark the range dirty; slices that were still clean are zero if the arena
  // memory itself started out zeroed
  size_t touched_slices = slice_count;
  if (arena->memid.initially_zero) {
    size_t already_dirty = 0;
    memid->initially_zero = mi_bitmap_setN(arena->slices_dirty, slice_index, slice_count, &already_dirty);
    mi_assert_internal(already_dirty <= touched_slices);
    touched_slices -= already_dirty;
  }
  if (commit) {
    const size_t already_committed = mi_bitmap_popcountN(arena->slices_committed, slice_index, slice_count);
    if (already_committed < slice_count) {
      // not fully committed: commit the whole range (the stat argument only
      // counts the newly committed part)
      // NOTE(review): this calls `_mi_os_commit_ex` directly and thus bypasses
      // a custom `arena->commit_fun` (compare `mi_arena_commit`) -- verify this
      // is intended for arenas with a commit callback.
      bool commit_zero = false;
      if (!_mi_os_commit_ex(p, mi_size_of_slices(slice_count), &commit_zero, mi_size_of_slices(slice_count - already_committed))) {
        // commit failed: release the claimed slices again
        mi_bbitmap_setN(arena->slices_free, slice_index, slice_count);
        return NULL;
      }
      if (commit_zero) {
        memid->initially_zero = true;
      }
      mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, NULL);
#if MI_DEBUG > 1
      // paranoia: double-check the zero-initialization claim
      if (memid->initially_zero) {
        if (!mi_mem_is_zero(p, mi_size_of_slices(slice_count))) {
          _mi_error_message(EFAULT, "internal error: arena allocation was not zero-initialized!\n");
          memid->initially_zero = false;
        }
      }
#endif
    }
    else {
      // the whole range was already committed; signal reuse to the OS layer
      _mi_os_reuse(p, mi_size_of_slices(slice_count));
      // with OS overcommit the previously-clean slices were not yet counted as
      // committed; account for them now
      if (_mi_os_has_overcommit() && touched_slices > 0 && !arena->memid.is_pinned ) {
        mi_subproc_stat_increase( arena->subproc, committed, mi_size_of_slices(touched_slices));
      }
    }
    mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
    memid->initially_committed = true;
    // inform memory trackers (valgrind/ASAN) of the memory state
    if (memid->initially_zero) {
      mi_track_mem_defined(p, slice_count * MI_ARENA_SLICE_SIZE);
    }
    else {
      mi_track_mem_undefined(p, slice_count * MI_ARENA_SLICE_SIZE);
    }
  }
  else {
    // no commit requested: report whether the range happens to be committed
    memid->initially_committed = mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count);
    if (!memid->initially_committed) {
      // normalize a partially-committed range to fully uncommitted; the setN
      // call is used only to count how many slices were committed so the
      // committed statistic can be adjusted
      size_t already_committed_count = 0;
      mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed_count);
      mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count);
      mi_subproc_stat_decrease(arena->subproc, committed, mi_size_of_slices(already_committed_count));
    }
  }
  mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count));
  if (commit) { mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)); }
  if (commit) { mi_assert_internal(memid->initially_committed); }
  mi_assert_internal(mi_bitmap_is_setN(arena->slices_dirty, slice_index, slice_count));
  return p;
}
// forward declaration: reserve a fresh OS memory region as an arena
static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id);

// Reserve a fresh (non-exclusive) arena from the OS that can hold at least
// `req_size` bytes. The reserve size starts from the `arena_reserve` option,
// grows exponentially with the number of existing arenas, and is clamped to
// [MI_ARENA_MIN_SIZE, MI_ARENA_MAX_SIZE]. Returns true on success and sets
// `*arena_id`.
static bool mi_arena_reserve(mi_subproc_t* subproc, size_t req_size, bool allow_large, mi_arena_id_t* arena_id)
{
  // don't grow close to the maximum number of arenas
  const size_t arena_count = mi_arenas_get_count(subproc);
  if (arena_count > (MI_MAX_ARENAS - 4)) return false;
  // base reserve size from the options (0 disables automatic arena reserve)
  size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
  if (arena_reserve == 0) return false;
  if (!_mi_os_has_virtual_reserve()) {
    // be conservative when the OS cannot reserve address space lazily
    arena_reserve = arena_reserve/4;
  }
  arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_SLICE_SIZE);
  // scale the reserve size exponentially with the number of existing arenas
  if (arena_count >= 1 && arena_count <= 128) {
    const size_t multiplier = (size_t)1 << _mi_clamp(arena_count/8, 0, 16);
    size_t reserve = 0;
    if (!mi_mul_overflow(multiplier, arena_reserve, &reserve)) {
      arena_reserve = reserve;
    }
  }
  // make sure the request (with headroom for a maximal chunk object) fits
  req_size = _mi_align_up(req_size + MI_ARENA_MAX_CHUNK_OBJ_SIZE, MI_ARENA_MAX_CHUNK_OBJ_SIZE);
  if (arena_reserve < req_size) {
    arena_reserve = req_size;
  }
  // clamp to the valid arena size range
  const size_t min_reserve = MI_ARENA_MIN_SIZE;
  const size_t max_reserve = MI_ARENA_MAX_SIZE;
  if (arena_reserve < min_reserve) {
    arena_reserve = min_reserve;
  }
  else if (arena_reserve > max_reserve) {
    arena_reserve = max_reserve;
  }
  if (arena_reserve < req_size) return false;
  // eager commit? (option value 2 = only with OS overcommit, 1 = always)
  bool arena_commit = false;
  const bool overcommit = _mi_os_has_overcommit();
  if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = overcommit; }
  else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
  // with overcommit the eager commit is "virtual": pre-adjust the committed
  // statistic so the reserve is not over-counted
  const bool adjust = (overcommit && arena_commit);
  if (adjust) { mi_subproc_stat_adjust_decrease( subproc, committed, arena_reserve); }
  int err = mi_reserve_os_memory_ex2(subproc, arena_reserve, arena_commit, allow_large, false , arena_id);
  if (err != 0) {
    // failed: undo the stat adjustment and retry once with a small arena
    if (adjust) { mi_subproc_stat_adjust_increase( subproc, committed, arena_reserve); }
    const size_t small_arena_reserve = 4 * MI_ARENA_MIN_SIZE;
    if (arena_reserve > small_arena_reserve && small_arena_reserve > req_size) {
      if (adjust) { mi_subproc_stat_adjust_decrease(subproc, committed, small_arena_reserve); }
      err = mi_reserve_os_memory_ex2(subproc, small_arena_reserve, arena_commit, allow_large, false , arena_id);
      if (err != 0 && adjust) { mi_subproc_stat_adjust_increase( subproc, committed, small_arena_reserve); }
    }
  }
  return (err==0);
}
// Extended suitability check used while iterating over arenas:
// - `allow_pinned`: whether pinned arenas may be used;
// - `match_numa`: when true, only arenas on a matching (or unknown) NUMA node
//   qualify; when false the test is inverted so a second pass can visit
//   exactly the arenas that the first (NUMA-matching) pass skipped.
// A specific `req_arena` request ignores the NUMA filter entirely.
static inline bool mi_arena_is_suitable_ex(mi_arena_t* arena, mi_arena_t* req_arena, bool match_numa, int numa_node, bool allow_pinned) {
  if (!allow_pinned && arena->memid.is_pinned) return false;
  if (!mi_arena_is_suitable(arena, req_arena)) return false;
  if (req_arena == NULL) {
    // a negative node (either requested or of the arena) always matches
    const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
    if (match_numa) { if (!numa_suitable) return false; }
    else { if (numa_suitable) return false; }
  }
  return true;
}
// Pick a starting arena index for this heap/thread inside the "cycle" of the
// first `arena_cycle` arenas, spreading different heaps (and threads within a
// heap) over distinct arenas to reduce contention.
static size_t mi_arena_start_idx(mi_heap_t* heap, size_t tseq, size_t arena_cycle) {
  const size_t hseq = heap->heap_seq;
  const size_t hcount = mi_atomic_load_relaxed(&heap->subproc->heap_count);
  if (arena_cycle <= 1) return 0;
  // the main heap (seq 0), or a single heap: spread by thread only
  if (hseq==0 || hcount<=1) return (tseq % arena_cycle);
  size_t start;
  mi_assert_internal(arena_cycle <= 0x8FF);
  // fixed-point (x256) fraction of the cycle available per heap
  const size_t frac = (arena_cycle * 256) / hcount;
  if (frac==0) {
    // many more heaps than arenas: fall back to spreading by heap sequence
    start = (hseq % arena_cycle);
  }
  else {
    // give each heap its own region of the cycle...
    const size_t hspot = (hseq % hcount);
    start = (frac * hspot) / 256;
    // ...and when a heap's region spans 2+ arenas, additionally spread its
    // threads within that region
    if (frac >= 512) {
      start = start + (tseq % (frac/256));
    }
  }
  mi_assert_internal(start < arena_cycle);
  return start;
}
// Iterate over the arenas of `heap`'s sub-process, binding each to
// `name_arena`. With a specific `req_arena` only that arena is visited.
// Otherwise the first `_arena_cycle` (= count-1) arenas are visited in an
// order rotated by a per-heap/thread start index (see `mi_arena_start_idx`),
// followed by the remaining arena(s) in plain index order.
// The loop body must be closed with `mi_forall_arenas_end()`.
#define mi_forall_arenas(heap, req_arena, tseq, name_arena) { \
const size_t _arena_count = mi_arenas_get_count(heap->subproc); \
const size_t _arena_cycle = (_arena_count == 0 ? 0 : _arena_count - 1); \
\
const size_t _start = mi_arena_start_idx(heap,tseq,_arena_cycle); \
for (size_t _i = 0; _i < _arena_count; _i++) { \
mi_arena_t* name_arena; \
if (req_arena != NULL) { \
name_arena = req_arena; \
if (_i > 0) break; \
} \
else { \
size_t _idx; \
if (_i < _arena_cycle) { \
_idx = _i + _start; \
if (_idx >= _arena_cycle) { _idx -= _arena_cycle; } \
} \
else { \
_idx = _i; \
} \
name_arena = mi_arena_from_index(heap->subproc,_idx); \
} \
if (name_arena != NULL) \
{

// Closes a `mi_forall_arenas` loop.
#define mi_forall_arenas_end() \
} \
} \
}

// Like `mi_forall_arenas`, but additionally filters each arena with
// `mi_arena_is_suitable_ex` (exclusivity, NUMA node, pinned-ness).
// The loop body must be closed with `mi_forall_suitable_arenas_end()`.
#define mi_forall_suitable_arenas(heap, req_arena, tseq, match_numa, numa_node, allow_large, name_arena) \
mi_forall_arenas(heap, req_arena,tseq,name_arena) { \
if (mi_arena_is_suitable_ex(name_arena, req_arena, match_numa, numa_node, allow_large)) { \

// Closes a `mi_forall_suitable_arenas` loop.
#define mi_forall_suitable_arenas_end() \
}} \
mi_forall_arenas_end()
// Search all suitable arenas for `slice_count` contiguous free slices.
// Two passes: first only arenas on a matching NUMA node, then (when a specific
// node was requested) the arenas on non-matching nodes.
static mi_decl_noinline void* mi_arenas_try_find_free(
  mi_heap_t* heap, size_t slice_count, size_t alignment,
  bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid)
{
  // arenas only provide natural slice alignment
  mi_assert(alignment <= MI_ARENA_SLICE_ALIGN);
  if (alignment > MI_ARENA_SLICE_ALIGN) return NULL;
  // pass 1: NUMA-matching arenas
  mi_forall_suitable_arenas(heap, req_arena, tseq, true , numa_node, allow_large, arena)
  {
    void* p = mi_arena_try_alloc_at(arena, slice_count, commit, tseq, memid);
    if (p != NULL) return p;
  }
  mi_forall_suitable_arenas_end();
  // with no specific node requested, pass 1 already covered every arena
  if (numa_node < 0) return NULL;
  // pass 2: the remaining (non-matching) arenas
  mi_forall_suitable_arenas(heap, req_arena, tseq, false , numa_node, allow_large, arena)
  {
    void* p = mi_arena_try_alloc_at(arena, slice_count, commit, tseq, memid);
    if (p != NULL) return p;
  }
  mi_forall_suitable_arenas_end();
  return NULL;
}
// Allocate `slice_count` slices from any suitable arena; when no arena has
// room, try to reserve a fresh arena from the OS and search again.
// Returns NULL when the request cannot be satisfied from arena memory (the
// caller then falls back to a direct OS allocation).
// (Cleanup: removed a dead empty `else {}` branch inside the lock and the
// redundant final `if (p != NULL) return p; return NULL;` sequence.)
static mi_decl_noinline void* mi_arenas_try_alloc(
  mi_heap_t* heap,
  size_t slice_count, size_t alignment,
  bool commit, bool allow_large,
  mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid)
{
  mi_assert(alignment <= MI_ARENA_SLICE_ALIGN);
  void* p;
  // objects larger than a whole arena can never be satisfied here
  if (slice_count * MI_ARENA_SLICE_SIZE > MI_ARENA_MAX_SIZE) return NULL;
  // try to find free slices in the existing arenas
  p = mi_arenas_try_find_free(heap, slice_count, alignment, commit, allow_large, req_arena, tseq, numa_node, memid);
  if (p != NULL) return p;
  // don't create new arenas for an exclusive-arena request, while preloading,
  // or when OS allocation is disallowed
  if (req_arena != NULL) return NULL;
  if (_mi_preloading()) return NULL;
  if (mi_option_is_enabled(mi_option_disallow_os_alloc)) return NULL;
  // grow the arena pool under a lock; if another thread added an arena in the
  // meantime (the count changed) skip reserving and just search again below
  mi_subproc_t* const subproc = heap->subproc;
  const size_t arena_count = mi_arenas_get_count(subproc);
  mi_lock(&subproc->arena_reserve_lock) {
    if (arena_count == mi_arenas_get_count(subproc)) {
      mi_arena_id_t arena_id = _mi_arena_id_none();
      mi_arena_reserve(subproc, mi_size_of_slices(slice_count), allow_large, &arena_id);
    }
  }
  // search again (this also covers an arena reserved by another thread)
  mi_assert_internal(req_arena == NULL);
  p = mi_arenas_try_find_free(heap, slice_count, alignment, commit, allow_large, req_arena, tseq, numa_node, memid);
  return p;
}
/* Fall back to a direct OS allocation. Never valid for an exclusive-arena
   request, and refused when OS allocation is disallowed; sets ENOMEM in
   those cases. */
static void* mi_arena_os_alloc_aligned(
  size_t size, size_t alignment, size_t align_offset,
  bool commit, bool allow_large,
  mi_arena_id_t req_arena_id, mi_memid_t* memid)
{
  if (mi_option_is_enabled(mi_option_disallow_os_alloc) || req_arena_id != _mi_arena_id_none()) {
    errno = ENOMEM;
    return NULL;
  }
  if (align_offset == 0) {
    return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid);
  }
  return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid);
}
/* Allocate `size` bytes with the given alignment (and optional align_offset),
   preferring arena memory and falling back to a direct OS allocation. */
void* _mi_arenas_alloc_aligned( mi_heap_t* heap,
  size_t size, size_t alignment, size_t align_offset,
  bool commit, bool allow_large,
  mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid)
{
  mi_assert_internal(memid != NULL);
  mi_assert_internal(size > 0);
  // arena allocation is only possible for regular sizes/alignments without an offset
  const bool try_arena = (!mi_option_is_enabled(mi_option_disallow_arena_alloc)
                          && size >= MI_ARENA_MIN_OBJ_SIZE
                          && size <= mi_arena_max_object_size()
                          && alignment <= MI_ARENA_SLICE_ALIGN
                          && align_offset == 0);
  if (try_arena) {
    void* const p = mi_arenas_try_alloc(heap, mi_slice_count_of_size(size), alignment, commit, allow_large, req_arena, tseq, numa_node, memid);
    if (p != NULL) return p;
  }
  // otherwise fall back to the OS
  return mi_arena_os_alloc_aligned(size, alignment, align_offset, commit, allow_large, req_arena, memid);
}
/* Allocate `size` bytes at the default (slice) alignment. */
void* _mi_arenas_alloc(mi_heap_t* heap, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid)
{
  return _mi_arenas_alloc_aligned(heap, size, MI_ARENA_SLICE_SIZE, /*align_offset*/ 0, commit, allow_large, req_arena, tseq, numa_node, memid);
}
// Release ownership of an abandoned page. Pending thread-frees are collected
// first; if that frees every block the page itself is freed here and true is
// returned. Otherwise ownership is released with a CAS (retrying when new
// thread-frees arrive concurrently) and false is returned.
static bool mi_abandoned_page_unown(mi_page_t* page, mi_theap_t* current_theap) {
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(_mi_thread_id()==current_theap->tld->thread_id);
  mi_thread_free_t tf_new;
  mi_thread_free_t tf_old = mi_atomic_load_relaxed(&page->xthread_free);
  do {
    mi_assert_internal(mi_tf_is_owned(tf_old));
    // drain the concurrent thread-free list before giving up ownership
    while mi_unlikely(mi_tf_block(tf_old) != NULL) {
      _mi_page_free_collect(page, false);
      if (mi_page_all_free(page)) {
        // everything was freed: take the page out of the abandoned set and free it
        _mi_arenas_page_unabandon(page, current_theap);
        _mi_arenas_page_free(page, current_theap);
        return true;
      }
      tf_old = mi_atomic_load_relaxed(&page->xthread_free);
    }
    mi_assert_internal(mi_tf_block(tf_old)==NULL);
    // release ownership: owned bit cleared, thread-free list stays empty
    tf_new = mi_tf_create(NULL, false);
  } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tf_old, tf_new));
  return false;
}
/* Claim callback used while scanning abandoned pages: try to take ownership
   of the page at `slice_index`. On success the page leaves the abandoned
   bitmap (`*keep_abandoned = false`); on failure it stays marked abandoned. */
static bool mi_arena_try_claim_abandoned(size_t slice_index, mi_arena_t* arena, bool* keep_abandoned) {
  mi_page_t* const page = mi_arena_page_at_slice(arena, slice_index);
  const bool claimed = mi_page_claim_ownership(page);
  *keep_abandoned = !claimed;
  return claimed;
}
static mi_arena_pages_t* mi_arena_pages_alloc(mi_arena_t* arena);
/* The heap-local page administration for `arena`, or NULL when this heap has
   not used the arena yet (relaxed load; published with a release store in
   `mi_heap_ensure_arena_pages`). */
static mi_arena_pages_t* mi_heap_arena_pages(mi_heap_t* heap, mi_arena_t* arena) {
  mi_assert_internal(arena!=NULL);
  mi_assert_internal(heap!=NULL);
  mi_assert(arena->arena_idx < MI_MAX_ARENAS);
  mi_arena_pages_t* const pages = mi_atomic_load_ptr_relaxed(mi_arena_pages_t, &heap->arena_pages[arena->arena_idx]);
  return pages;
}
// Decode the arena, slice range, and (optionally) the owning heap's per-arena
// page administration for an owned arena page.
static mi_arena_t* mi_page_arena_pages(mi_page_t* page, size_t* slice_index, size_t* slice_count, mi_arena_pages_t** parena_pages) {
  mi_assert_internal(mi_page_is_owned(page));
  mi_arena_t* const arena = mi_arena_from_memid(page->memid, slice_index, slice_count);
  mi_assert_internal(arena != NULL);
  if (parena_pages != NULL) {
    // the owning heap must already have page administration for this arena
    mi_arena_pages_t* const arena_pages = mi_heap_arena_pages(mi_page_heap(page), arena);
    mi_assert_internal(arena_pages != NULL);
    mi_assert_internal(slice_index==NULL || mi_bitmap_is_set(arena_pages->pages, *slice_index));
    *parena_pages = arena_pages;
  }
  return arena;
}
// Get the heap's page administration for `arena`, creating it on first use.
// Uses double-checked locking: a lock-free fast path, then a re-check under
// `arena_pages_lock` before allocating and publishing (release store, paired
// with the acquire/relaxed loads of the readers). Returns NULL only when the
// allocation failed; the main heap always uses the arena's statically
// embedded `pages_main` and can therefore never fail.
static mi_arena_pages_t* mi_heap_ensure_arena_pages(mi_heap_t* heap, mi_arena_t* arena) {
  mi_assert_internal(arena!=NULL);
  mi_assert_internal(heap!=NULL);
  mi_assert(arena->arena_idx < MI_MAX_ARENAS);
  mi_arena_pages_t* arena_pages = mi_heap_arena_pages(heap, arena);
  if (arena_pages==NULL) {
    mi_lock(&heap->arena_pages_lock) {
      // re-check under the lock
      arena_pages = mi_atomic_load_ptr_acquire(mi_arena_pages_t, &heap->arena_pages[arena->arena_idx]);
      if (arena_pages == NULL) {
        if (_mi_is_heap_main(heap)) {
          // the main heap uses the pre-allocated administration in the arena
          arena_pages = &arena->pages_main;
        }
        else {
          arena_pages = mi_arena_pages_alloc(arena);
        }
        mi_atomic_store_ptr_release(mi_arena_pages_t, &heap->arena_pages[arena->arena_idx], arena_pages);
      }
    }
  }
  if (_mi_is_heap_main(heap)) { mi_assert(arena_pages != NULL); }
  return arena_pages;
}
// Try to reclaim an abandoned page of the right size class (bin) from any
// suitable arena of this heap. On success the page is owned, removed from the
// abandoned bitmap, and its pending thread-frees have been collected.
static mi_page_t* mi_arenas_page_try_find_abandoned(mi_theap_t* theap, size_t slice_count, size_t block_size)
{
  mi_heap_t* const heap = _mi_theap_heap(theap);
  const size_t tseq = theap->tld->thread_seq;
  mi_arena_t* const req_arena = heap->exclusive_arena;
  MI_UNUSED(slice_count);
  const size_t bin = _mi_bin(block_size);
  mi_assert_internal(bin < MI_BIN_COUNT);
  mi_assert_internal(heap != NULL);
  // fast path: nothing abandoned in this size class
  if (mi_atomic_load_relaxed(&heap->abandoned_count[bin]) == 0) {
    return NULL;
  }
  // single pass over all arenas (the NUMA node is not restricted here)
  const bool allow_large = true;
  const int any_numa = -1;
  const bool match_numa = true;
  mi_forall_suitable_arenas(heap, req_arena, tseq, match_numa, any_numa, allow_large, arena)
  {
    mi_arena_pages_t* const arena_pages = mi_heap_arena_pages(heap, arena);
    if (arena_pages != NULL) {
      size_t slice_index;
      mi_bitmap_t* const bitmap = arena_pages->pages_abandoned[bin];
      // atomically find an abandoned page in this bin and claim ownership
      if (mi_bitmap_try_find_and_claim(bitmap, tseq, &slice_index, &mi_arena_try_claim_abandoned, arena)) {
        mi_page_t* page = mi_arena_page_at_slice(arena, slice_index);
        mi_assert_internal(mi_page_is_owned(page));
        mi_assert_internal(mi_page_is_abandoned(page));
        mi_assert_internal(mi_heap_has_page(heap, arena, page));
        mi_atomic_decrement_relaxed(&heap->abandoned_count[bin]);
        mi_theap_stat_decrease(theap, pages_abandoned, 1);
        mi_theap_stat_counter_increase(theap, pages_reclaim_on_alloc, 1);
        // collect blocks freed by other threads in the meantime
        _mi_page_free_collect(page, false);
        mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count));
        mi_assert_internal(page->slice_committed > 0 || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
        mi_assert_internal(mi_bitmap_is_setN(arena->slices_dirty, slice_index, slice_count));
        mi_assert_internal(_mi_is_aligned(mi_page_slice_start(page), MI_PAGE_ALIGN));
        mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
        mi_assert_internal(mi_page_block_size(page) == block_size);
        mi_assert_internal(!mi_page_is_full(page));
        return page;
      }
    }
  }
  mi_forall_suitable_arenas_end();
  return NULL;
}
// Allocate the backing memory area for a fresh page of `slice_count` slices:
// prefer arena memory (also registering the page in the heap's per-arena page
// bitmap), fall back to the OS otherwise. `os_align` requests an OS
// allocation aligned to `block_alignment` at offset `page_alignment`, used for
// very large alignment requests. Returns the page-aligned start of the area
// and fills `memid`, or returns NULL.
static uint8_t* mi_arenas_page_alloc_fresh_area(mi_theap_t* theap, size_t slice_count, size_t block_size, size_t block_alignment, bool os_align, bool commit, mi_memid_t* memid) {
  MI_UNUSED_RELEASE(block_size);
  const bool allow_large = (MI_SECURE < 2);  // no large OS pages in high-security mode
  const size_t page_alignment = MI_ARENA_SLICE_ALIGN;
  mi_heap_t* const heap = _mi_theap_heap(theap);
  mi_tld_t* const tld = theap->tld;
  mi_arena_t* const req_arena = heap->exclusive_arena;
  // prefer the heap's NUMA node; fall back to the thread's node
  const int numa_node = (heap->numa_node >= 0 ? heap->numa_node : tld->numa_node);
  uint8_t* start = NULL;
  *memid = _mi_memid_none();
  const size_t alloc_size = mi_size_of_slices(slice_count);
  // 1. try an arena allocation (regular alignment and size only)
  if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) && !os_align && slice_count <= mi_arena_max_object_size()/MI_ARENA_SLICE_SIZE) {
    start = (uint8_t*)mi_arenas_try_alloc(heap, slice_count, page_alignment, commit, allow_large, req_arena, tld->thread_seq, numa_node, memid);
    if (start != NULL) {
      // register the new page in the heap's per-arena page bitmap
      mi_arena_pages_t* const arena_pages = mi_heap_ensure_arena_pages(heap, memid->mem.arena.arena);
      if (arena_pages==NULL) {
        // out of memory for the administration: undo the arena allocation
        _mi_arenas_free(start, mi_size_of_slices(slice_count), *memid);
        start = NULL;
      }
      else {
        mi_assert_internal(mi_bitmap_is_clearN(arena_pages->pages, memid->mem.arena.slice_index, memid->mem.arena.slice_count));
        mi_bitmap_set(arena_pages->pages, memid->mem.arena.slice_index);
      }
    }
  }
  // 2. fall back to the OS
  if (start == NULL) {
    if (os_align) {
      // over-allocate so the first block (at offset `page_alignment`) is block-aligned
      mi_assert_internal(slice_count >= mi_slice_count_of_size(block_size) + mi_slice_count_of_size(page_alignment));
      start = (uint8_t*)mi_arena_os_alloc_aligned(alloc_size, block_alignment, page_alignment , commit, allow_large, req_arena, memid);
    }
    else {
      start = (uint8_t*)mi_arena_os_alloc_aligned(alloc_size, page_alignment, 0 , commit, allow_large, req_arena, memid);
    }
  }
  if (start == NULL) return NULL;
  mi_assert_internal(_mi_is_aligned(start, MI_PAGE_ALIGN));
  mi_assert_internal(!os_align || _mi_is_aligned(start + page_alignment, block_alignment));
  return start;
}
// Offset of the first block inside a page allocation, chosen so blocks get
// favorable alignment:
// - MI_GUARDED: blocks that are a multiple of the OS page size start at an OS
//   page boundary;
// - `os_align`: blocks start at MI_PAGE_ALIGN (past the page info);
// - power-of-two sizes up to MI_PAGE_MAX_START_BLOCK_ALIGN2 start block-aligned;
// - multiples of MI_PAGE_OSPAGE_BLOCK_ALIGN2 start at that alignment;
// - otherwise blocks start directly after the page info.
static size_t mi_page_block_start(size_t block_size, bool os_align)
{
#if MI_GUARDED
  const size_t os_page_size = _mi_os_page_size();
  mi_assert_internal(MI_PAGE_ALIGN >= os_page_size);
  if (!os_align && block_size % os_page_size == 0 && block_size > os_page_size ) {
    return _mi_align_up(mi_page_info_size(), os_page_size);
  }
  else
#endif
  if (os_align) {
    return MI_PAGE_ALIGN;
  }
  else if (_mi_is_power_of_two(block_size) && block_size <= MI_PAGE_MAX_START_BLOCK_ALIGN2) {
    return _mi_align_up(mi_page_info_size(), block_size);
  }
  else if (block_size != 0 && (block_size % MI_PAGE_OSPAGE_BLOCK_ALIGN2) == 0) {
    return _mi_align_up(mi_page_info_size(), MI_PAGE_OSPAGE_BLOCK_ALIGN2);
  }
  else {
    return mi_page_info_size();
  }
}
// Allocate and initialize a fresh page of `slice_count` slices for blocks of
// `block_size`. Handles separated page meta-data, secure guard pages,
// on-demand commit of just the first block(s), zero-initialization tracking,
// and registration in the page map. The returned page is owned and abandoned
// (not yet attached to a heap page queue).
static mi_page_t* mi_arenas_page_alloc_fresh(mi_theap_t* theap, size_t slice_count, size_t block_size, size_t block_alignment, bool commit)
{
  const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
  const size_t alloc_size = mi_size_of_slices(slice_count);
  mi_memid_t memid = _mi_memid_none();
  // get the backing memory area (arena or OS)
  uint8_t* const slice_start = mi_arenas_page_alloc_fresh_area(theap,slice_count,block_size,block_alignment,os_align,commit,&memid);
  if (!slice_start) return NULL;
#if (MI_SECURE >= 2 && (!MI_PAGE_META_IS_SEPARATED || MI_PAGE_META_ALIGNED_FREE_SMALL)) || MI_SECURE >= 4
  // in secure mode, place a guard page at the end of the page memory
  mi_assert(alloc_size > _mi_os_secure_guard_page_size());
  const size_t page_noguard_size = alloc_size - _mi_os_secure_guard_page_size();
  if (memid.initially_committed) {
    _mi_os_secure_guard_page_set_at(slice_start + page_noguard_size, memid);
  }
#else
  const size_t page_noguard_size = alloc_size;
#endif
  // determine where the page meta-data lives: either in the arena's separate
  // meta-data array, or at the start of the slice memory itself
  mi_page_t* page = NULL;
  bool page_meta_is_separate = false;
  size_t block_start = 0;
  if (memid.memkind == MI_MEM_ARENA) {
    mi_arena_t* const arena = memid.mem.arena.arena;
    if (arena->pages_meta != NULL) {
      mi_assert_internal(MI_PAGE_META_IS_SEPARATED!=0);
      mi_page_t* const page_meta = &arena->pages_meta[memid.mem.arena.slice_index];
      mi_assert_internal(page_meta->block_size == 0);
#if MI_PAGE_META_ALIGNED_FREE_SMALL
      // small blocks keep in-slice meta-data in this configuration
      if (block_size > MI_SMALL_SIZE_MAX)
#endif
      {
        page = page_meta;
        page_meta_is_separate = true;
        block_start = 0;
#if !defined(MI_PAGE_BLOCK_START_MAX_OFFSET)
#define MI_PAGE_BLOCK_START_MAX_OFFSET (8*MI_INTPTR_BITS)
#endif
        // offset small power-of-two block sizes by one block, presumably so
        // the first block does not coincide with the slice start -- TODO confirm
        if (block_size >= MI_INTPTR_SIZE && block_size <= MI_PAGE_BLOCK_START_MAX_OFFSET && _mi_is_power_of_two(block_size)) {
          block_start += block_size;
        }
        mi_assert_internal(page->block_size == 0);
        _mi_memzero_aligned(page, sizeof(*page));
      }
    }
  }
  if (page == NULL) {
    // page meta-data lives at the start of the slice memory itself
    page = (mi_page_t*)slice_start;
    block_start = mi_page_block_start(block_size, os_align);
  }
  // commit on demand: commit just enough memory for the first block(s)
  size_t commit_size = 0;
  if (!memid.initially_committed) {
    commit_size = _mi_align_up(block_start + block_size, MI_PAGE_MIN_COMMIT_SIZE);
    if (commit_size > page_noguard_size) { commit_size = page_noguard_size; }
    bool is_zero = false;
    if mi_unlikely(!mi_arena_commit( mi_memid_arena(memid), slice_start, commit_size, &is_zero, 0)) {
      _mi_arenas_free(slice_start, alloc_size, memid);
      return NULL;
    }
  }
  // clear the page struct if the memory was not already zero (a separate
  // meta-data entry was already zeroed above)
  if (!memid.initially_zero && !page_meta_is_separate) {
    _mi_memzero_aligned(page, sizeof(*page));
  }
  // inform memory trackers (valgrind/ASAN) of the memory state
  if (!memid.initially_zero && memid.initially_committed) {
    mi_track_mem_undefined(slice_start, slice_count * MI_ARENA_SLICE_SIZE);
  }
  else if (memid.initially_committed) {
    mi_track_mem_defined(slice_start, slice_count * MI_ARENA_SLICE_SIZE);
  }
#if MI_DEBUG > 1
  // paranoia: double-check the claimed zero-initialization
  if (memid.initially_zero && memid.initially_committed) {
    if (!mi_mem_is_zero(slice_start, page_noguard_size)) {
      _mi_error_message(EFAULT, "internal error: page memory was not zero initialized.\n");
      memid.initially_zero = false;
      if (block_start > 0) { _mi_memzero_aligned(page, sizeof(*page)); }
    }
  }
#endif
  // initialize the page fields (an os_align page holds exactly one block)
  const size_t reserved = (os_align ? 1 : (page_noguard_size - block_start) / block_size);
  mi_assert_internal(reserved > 0 && reserved <= UINT16_MAX);
  page->reserved = (uint16_t)reserved;
  page->page_start = slice_start + block_start;
  page->block_size = block_size;
  page->slice_committed = commit_size;
  page->memid = memid;
  page->free_is_zero = memid.initially_zero;
  mi_assert_internal(page->free==NULL);
  mi_assert_internal(page_meta_is_separate == mi_page_meta_is_separated(page));
  mi_assert_internal(mi_page_slice_start(page) == slice_start);
  // take ownership and make the page discoverable through the page map
  mi_page_claim_ownership(page);
  if mi_unlikely(!_mi_page_map_register(page)) {
    _mi_arenas_free( slice_start, alloc_size, memid );
    return NULL;
  }
  mi_theap_stat_increase(theap, pages, 1);
  mi_theap_stat_increase(theap, page_bins[_mi_page_stats_bin(page)], 1);
  mi_assert_internal(_mi_is_aligned(mi_page_slice_start(page),MI_PAGE_ALIGN));
  mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
  mi_assert_internal(mi_page_block_size(page) == block_size);
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(mi_page_is_owned(page));
  return page;
}
/* Allocate a regular (small/medium/large) page of `slice_count` slices for
   blocks of `block_size`: first try to reclaim an abandoned page, otherwise
   allocate a fresh one and initialize it. */
static mi_page_t* mi_arenas_page_regular_alloc(mi_theap_t* theap, size_t slice_count, size_t block_size)
{
  // 1. reclaim an abandoned page if one is available
  mi_page_t* const reclaimed = mi_arenas_page_try_find_abandoned(theap, slice_count, block_size);
  if (reclaimed != NULL) return reclaimed;

  // 2. otherwise allocate a fresh page; commit eagerly for pages at or below
  //    the minimal commit size, or as directed by `page_commit_on_demand`
  //    (0 = always commit; 2 = commit eagerly only when the OS overcommits)
  const long commit_on_demand = mi_option_get(mi_option_page_commit_on_demand);
  bool commit = false;
  if (slice_count <= mi_slice_count_of_size(MI_PAGE_MIN_COMMIT_SIZE)) { commit = true; }
  else if (commit_on_demand == 2 && _mi_os_has_overcommit()) { commit = true; }
  else if (commit_on_demand == 0) { commit = true; }

  mi_page_t* const page = mi_arenas_page_alloc_fresh(theap, slice_count, block_size, 1, commit);
  if (page == NULL) return NULL;
  mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count);
  if (!_mi_page_init(theap, page)) {
    _mi_arenas_free( page, mi_page_full_size(page), page->memid);
    return NULL;
  }
  return page;
}
// Allocate a page that holds exactly one block of `block_size` (used for huge
// blocks and for very large alignment requests via `os_align`).
static mi_page_t* mi_arenas_page_singleton_alloc(mi_theap_t* theap, size_t block_size, size_t block_alignment)
{
  const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN);
  const size_t info_size = (os_align ? MI_PAGE_ALIGN : mi_page_info_size());
#if MI_SECURE < 2
  const size_t slice_count = mi_slice_count_of_size(info_size + block_size);
#else
  // add room for a trailing guard page in secure mode
  const size_t slice_count = mi_slice_count_of_size(_mi_align_up(info_size + block_size, _mi_os_secure_guard_page_size()) + _mi_os_secure_guard_page_size());
#endif
  // singleton pages are always fully committed
  mi_page_t* page = mi_arenas_page_alloc_fresh(theap, slice_count, block_size, block_alignment, true );
  if (page == NULL) return NULL;
  mi_assert(page->reserved == 1);
  if (!_mi_page_init(theap, page)) {
    _mi_arenas_free( page, mi_page_full_size(page), page->memid);
    return NULL;
  }
  return page;
}
// Allocate a page suitable for blocks of `block_size`:
// small/medium/(large) block sizes get a regular page of the matching page
// size; huge blocks and large alignment requests get a singleton page.
mi_page_t* _mi_arenas_page_alloc(mi_theap_t* theap, size_t block_size, size_t block_alignment) {
  mi_page_t* page;
  if mi_unlikely(block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN) {
    mi_assert_internal(_mi_is_power_of_two(block_alignment));
    page = mi_arenas_page_singleton_alloc(theap, block_size, block_alignment);
  }
  else if (block_size <= MI_SMALL_MAX_OBJ_SIZE) {
    page = mi_arenas_page_regular_alloc(theap, mi_slice_count_of_size(MI_SMALL_PAGE_SIZE), block_size);
  }
  else if (block_size <= MI_MEDIUM_MAX_OBJ_SIZE) {
    page = mi_arenas_page_regular_alloc(theap, mi_slice_count_of_size(MI_MEDIUM_PAGE_SIZE), block_size);
  }
#if MI_ENABLE_LARGE_PAGES
  else if (block_size <= MI_LARGE_MAX_OBJ_SIZE) {
    page = mi_arenas_page_regular_alloc(theap, mi_slice_count_of_size(MI_LARGE_PAGE_SIZE), block_size);
  }
#endif
  else {
    page = mi_arenas_page_singleton_alloc(theap, block_size, block_alignment);
  }
  if mi_unlikely(page == NULL) {
    return NULL;
  }
  mi_assert_internal(_mi_is_aligned(mi_page_slice_start(page), MI_PAGE_ALIGN));
  mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
  mi_assert_internal(block_alignment <= MI_PAGE_MAX_OVERALLOC_ALIGN || _mi_is_aligned(mi_page_start(page), block_alignment));
  return page;
}
// Free a fully-free, owned, abandoned page: update statistics, reset the
// secure guard page, unregister from the page map, clear its arena
// administration (restoring the committed bitmap for on-demand committed
// pages), and release the backing memory.
// `current_theapx` may be NULL when the caller has no thread-local heap for
// this page; statistics then go to the owning heap directly.
void _mi_arenas_page_free(mi_page_t* page, mi_theap_t* current_theapx) {
  mi_assert_internal(_mi_is_aligned(mi_page_slice_start(page), MI_PAGE_ALIGN));
  mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_all_free(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(page->next==NULL && page->prev==NULL);
  mi_assert_internal(current_theapx == NULL || _mi_thread_id()==current_theapx->tld->thread_id);
  // account the freed page
  if (current_theapx != NULL) {
    mi_theap_stat_decrease(current_theapx, page_bins[_mi_page_stats_bin(page)], 1);
    mi_theap_stat_decrease(current_theapx, pages, 1);
  }
  else {
    mi_heap_t* const heap = mi_page_heap(page);
    mi_heap_stat_decrease(heap, page_bins[_mi_page_stats_bin(page)], 1);
    mi_heap_stat_decrease(heap, pages, 1);
  }
#if MI_DEBUG>1
  // validate the arena bitmaps for this page
  if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) {
    size_t bin = _mi_bin(mi_page_block_size(page));
    size_t slice_index;
    size_t slice_count;
    mi_arena_pages_t* arena_pages = NULL;
    mi_arena_t* const arena = mi_page_arena_pages(page, &slice_index, &slice_count, &arena_pages);
    mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count));
    mi_assert_internal(page->slice_committed > 0 || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
    mi_assert_internal(mi_bitmap_is_clearN(arena_pages->pages_abandoned[bin], slice_index, 1));
    mi_assert_internal(mi_bitmap_is_setN(arena_pages->pages, slice_index, 1));
  }
#endif
#if (MI_SECURE >= 2 && (!MI_PAGE_META_IS_SEPARATED || MI_PAGE_META_ALIGNED_FREE_SMALL)) || MI_SECURE >= 4
  // reset the trailing guard page
  if (!page->memid.is_pinned) {
    _mi_os_secure_guard_page_reset_before(mi_page_slice_start(page) + mi_page_full_size(page), page->memid);
  }
#endif
  _mi_page_map_unregister(page);
  if (page->memid.memkind == MI_MEM_ARENA) {
    mi_arena_pages_t* arena_pages;
    size_t slice_index;
    size_t slice_count; MI_UNUSED(slice_count);
    mi_arena_t* const arena = mi_page_arena_pages(page, &slice_index, &slice_count, &arena_pages);
    mi_assert_internal(arena_pages!=NULL);
    // the page is no longer in use by the owning heap
    mi_bitmap_clear(arena_pages->pages, slice_index);
    if (page->slice_committed > 0) {
      // the page was committed on-demand: record its fully committed slices
      // back in the arena's committed bitmap, and give back a committed
      // partial slice in the statistics
      mi_assert_internal(mi_page_full_size(page) >= page->slice_committed);
      const size_t total_slices = page->slice_committed / MI_ARENA_SLICE_SIZE;
      mi_assert_internal(slice_count >= total_slices);
      if (total_slices > 0) {
        mi_bitmap_setN(arena->slices_committed, slice_index, total_slices, NULL);
      }
      const size_t extra = page->slice_committed % MI_ARENA_SLICE_SIZE;
      if (extra > 0) {
        mi_subproc_stat_decrease(arena->subproc, committed, extra);
      }
    }
    else {
      mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
    }
  }
  // mark separated meta-data as unused again, then free the page memory
  if (mi_page_meta_is_separated(page)) { page->block_size = 0; }
  _mi_arenas_free( mi_page_slice_start(page), mi_page_full_size(page), page->memid);
}
// Abandon a page that still contains live blocks. For a non-full arena page the
// page is recorded as "abandoned mapped" in the arena's per-bin abandoned bitmap
// so other threads can find and reclaim it; for non-arena memory (when visiting
// abandoned pages is enabled) it is pushed onto the heap's os_abandoned_pages list.
// Finally ownership of the page is released.
void _mi_arenas_page_abandon(mi_page_t* page, mi_theap_t* current_theap) {
  mi_assert_internal(_mi_is_aligned(mi_page_slice_start(page), MI_PAGE_ALIGN));
  mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(!mi_page_all_free(page));
  mi_assert_internal(page->next==NULL && page->prev == NULL);
  mi_assert_internal(_mi_thread_id()==current_theap->tld->thread_id);
  mi_heap_t* heap = mi_page_heap(page); mi_assert_internal(heap==_mi_theap_heap(current_theap));
  if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) {
    size_t bin = _mi_bin(mi_page_block_size(page));
    size_t slice_index;
    size_t slice_count;
    mi_arena_pages_t* arena_pages = NULL;
    mi_arena_t* const arena = mi_page_arena_pages(page, &slice_index, &slice_count, &arena_pages); MI_UNUSED(arena);
    mi_assert_internal(!mi_page_is_singleton(page));
    mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count));
    mi_assert_internal(page->slice_committed > 0 || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
    mi_assert_internal(mi_bitmap_is_setN(arena->slices_dirty, slice_index, slice_count));
    // mark mapped first, then publish in the per-bin abandoned bitmap
    mi_page_set_abandoned_mapped(page);
    const bool was_clear = mi_bitmap_set(arena_pages->pages_abandoned[bin], slice_index);
    MI_UNUSED(was_clear); mi_assert_internal(was_clear);
    mi_atomic_increment_relaxed(&heap->abandoned_count[bin]);
    mi_theap_stat_increase(current_theap, pages_abandoned, 1);
  }
  else {
    // full or non-arena page: optionally track it on the heap's OS abandoned list
    if (page->memid.memkind != MI_MEM_ARENA && mi_option_is_enabled(mi_option_visit_abandoned)) {
      mi_lock(&heap->os_abandoned_pages_lock) {
        // push at the head of the doubly-linked list
        page->prev = NULL;
        page->next = heap->os_abandoned_pages;
        if (page->next != NULL) { page->next->prev = page; }
        heap->os_abandoned_pages = page;
      }
    }
    mi_theap_stat_increase(current_theap, pages_abandoned, 1);
  }
  // release ownership so other threads can reclaim the page
  mi_abandoned_page_unown(page, current_theap);
}
// Try to re-abandon a (previously unmapped-abandoned) page as "abandoned mapped".
// Only applies to non-full, non-singleton arena pages with an associated theap;
// returns true when the page was re-abandoned via `_mi_arenas_page_abandon`.
bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page) {
  mi_assert_internal(_mi_is_aligned(mi_page_slice_start(page), MI_PAGE_ALIGN));
  mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(!mi_page_is_abandoned_mapped(page));
  mi_assert_internal(!mi_page_is_full(page));
  mi_assert_internal(!mi_page_all_free(page));
  mi_assert_internal(!mi_page_is_singleton(page));
  // guard: only non-full, not-yet-mapped arena pages qualify
  if (mi_page_is_full(page) || mi_page_is_abandoned_mapped(page) || page->memid.memkind != MI_MEM_ARENA) {
    return false;
  }
  // we need an associated theap to attribute the statistics to
  mi_theap_t* const theap = _mi_page_associated_theap_peek(page);
  if (theap == NULL) return false;
  mi_theap_stat_counter_increase(theap, pages_reabandon_full, 1);
  // undo the earlier abandon count; `_mi_arenas_page_abandon` will increase it again
  mi_theap_stat_adjust_decrease(theap, pages_abandoned, 1);
  _mi_arenas_page_abandon(page, theap);
  return true;
}
// Undo abandonment after a page was reclaimed: remove it from the arena's per-bin
// abandoned bitmap (for "abandoned mapped" pages) or from the heap's
// os_abandoned_pages list (for non-arena pages). `current_theapx` may be NULL;
// it is only used to choose where to attribute the statistics.
void _mi_arenas_page_unabandon(mi_page_t* page, mi_theap_t* current_theapx) {
  mi_assert_internal(_mi_is_aligned(mi_page_slice_start(page), MI_PAGE_ALIGN));
  mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page);
  mi_assert_internal(mi_page_is_owned(page));
  mi_assert_internal(mi_page_is_abandoned(page));
  mi_assert_internal(current_theapx==NULL || _mi_thread_id()==current_theapx->tld->thread_id);
  mi_heap_t* const heap = mi_page_heap(page);
  if (mi_page_is_abandoned_mapped(page)) {
    mi_assert_internal(page->memid.memkind==MI_MEM_ARENA);
    size_t bin = _mi_bin(mi_page_block_size(page));
    size_t slice_index;
    size_t slice_count;
    mi_arena_pages_t* arena_pages;
    mi_arena_t* arena = mi_page_arena_pages(page, &slice_index, &slice_count, &arena_pages); MI_UNUSED(arena);
    mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count));
    mi_assert_internal(page->slice_committed > 0 || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count));
    // waits until the bit is set (if a concurrent abandon is still in flight) and clears it
    mi_bitmap_clear_once_set(arena_pages->pages_abandoned[bin], slice_index);
    mi_page_clear_abandoned_mapped(page);
    mi_atomic_decrement_relaxed(&heap->abandoned_count[bin]);
  }
  else {
    // non-arena page: unlink from the heap's OS abandoned list (if tracked there)
    if (page->memid.memkind != MI_MEM_ARENA && mi_option_is_enabled(mi_option_visit_abandoned)) {
      mi_lock(&heap->os_abandoned_pages_lock) {
        if (page->prev != NULL) { page->prev->next = page->next; }
        if (page->next != NULL) { page->next->prev = page->prev; }
        if (heap->os_abandoned_pages == page) { heap->os_abandoned_pages = page->next; }
        page->next = NULL;
        page->prev = NULL;
      }
    }
  }
  // statistics: prefer the thread-local theap; otherwise the owning heap
  if (current_theapx!=NULL) {
    mi_theap_stat_decrease(current_theapx, pages_abandoned, 1);
  }
  else {
    mi_heap_stat_decrease(heap, pages_abandoned, 1);
  }
}
static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slices);
static void mi_arenas_try_purge(bool force, bool visit_all, mi_subproc_t* subproc, size_t tseq);
// Free memory previously allocated from an arena (or from the OS / meta allocator,
// depending on `memid`). For arena memory the freed range may be scheduled for a
// (delayed) purge and is then marked free again in the arena bitmap.
// Fixes:
//  - the NULL check on `arena` now happens before any assert that dereferences it
//    (previously a bad memid crashed in debug builds instead of reporting EINVAL)
//  - the release-build bounds check now rejects `slice_index == arena->slice_count`
//    (matching the `slice_index < arena->slice_count` debug assert)
void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) {
  if (p==NULL) return;
  if (size==0) return;
  mi_track_mem_undefined(p, size);
  if (mi_memkind_is_os(memid.memkind)) {
    // directly OS allocated
    _mi_os_free(p, size, memid);
  }
  else if (memid.memkind == MI_MEM_ARENA) {
    size_t slice_count;
    size_t slice_index;
    mi_arena_t* arena = mi_arena_from_memid(memid, &slice_index, &slice_count);
    // validate the arena before touching it (the asserts below dereference it)
    if (arena == NULL) {
      _mi_error_message(EINVAL, "trying to free from an invalid arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
      return;
    }
    mi_assert_internal((size%MI_ARENA_SLICE_SIZE)==0);
    mi_assert_internal((slice_count*MI_ARENA_SLICE_SIZE)==size);
    mi_assert_internal(mi_arena_slice_start(arena,slice_index) <= (uint8_t*)p);
    mi_assert_internal(mi_arena_slice_start(arena,slice_index) + mi_size_of_slices(slice_count) > (uint8_t*)p);
    mi_assert_internal(slice_index < arena->slice_count);
    mi_assert_internal(slice_index >= mi_arena_info_slices(arena));
    // never free the info slices, and stay within the arena
    if (slice_index < mi_arena_info_slices(arena) || slice_index >= arena->slice_count) {
      _mi_error_message(EINVAL, "trying to free from an invalid arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
      return;
    }
    // potentially decommit the range later (no-op for pinned memory)
    if (!arena->memid.is_pinned) {
      mi_arena_schedule_purge(arena, slice_index, slice_count);
    }
    // and make the slices available to others again
    bool all_inuse = mi_bbitmap_setN(arena->slices_free, slice_index, slice_count);
    if (!all_inuse) {
      _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", mi_arena_slice_start(arena,slice_index), mi_size_of_slices(slice_count));
      return;
    }
  }
  else if (memid.memkind == MI_MEM_META) {
    _mi_meta_free(p, size, memid);
  }
  else {
    // nothing to free for external/static memory
    mi_assert_internal(mi_memid_needs_no_free(memid));
  }
}
// Collect the arenas of this thread's sub-process by running the purge scheduler.
void _mi_arenas_collect(bool force_purge, bool visit_all, mi_tld_t* tld) {
  mi_subproc_t* const subproc = tld->subproc;
  mi_arenas_try_purge(force_purge, visit_all, subproc, tld->thread_seq);
}
// Does the half-open range [arena start, arena start + size) contain `p`?
// Returns false for a NULL arena.
static bool mi_arena_strictly_contains(mi_arena_t* arena, const void* p) {
  if (arena == NULL) return false;
  const uint8_t* const base = mi_arena_start(arena);
  const uint8_t* const q = (const uint8_t*)p;
  return (q >= base && q < base + mi_size_of_slices(arena->slice_count));
}
// Is `p` contained in any arena of the current sub-process?
// When `parent` is non-NULL, only that arena and its sub-arenas are considered.
static bool mi_arenas_contain_ex(const void* p, mi_arena_t* parent) {
  mi_subproc_t* const subproc = _mi_subproc();
  const size_t arena_count = mi_arenas_get_count(subproc);
  for (size_t i = 0; i < arena_count; i++) {
    mi_arena_t* const arena = mi_atomic_load_ptr_acquire(mi_arena_t, &subproc->arenas[i]);
    if (arena == NULL) continue;
    // restrict the search when a parent arena is given
    if (parent != NULL && arena != parent && arena->parent != parent) continue;
    if (mi_arena_strictly_contains(arena, p)) return true;
  }
  return false;
}
// Is `p` contained in any arena of the current sub-process?
bool _mi_arenas_contain(const void* p) {
  return mi_arenas_contain_ex(p, /*parent*/ NULL);
}
// Is `p` contained in the given arena (or one of its sub-arenas)?
bool mi_arena_contains(mi_arena_id_t arena_id, const void* p) {
  mi_arena_t* const arena = _mi_arena_from_id(arena_id);
  if (arena == NULL) return false;
  if (mi_arena_strictly_contains(arena, p)) return true;
  // also search the sub-arenas that have this arena as their parent
  return mi_arenas_contain_ex(p, arena);
}
// Destroy all arenas of a sub-process and free their OS-backed memory.
// "Unsafe" since any outstanding allocations inside the arenas become invalid.
static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) {
  mi_assert_internal(subproc != NULL);
  const size_t arena_count = mi_arenas_get_count(subproc);
  for (size_t i = 0; i < arena_count; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &subproc->arenas[i]);
    if (arena != NULL) {
      // detach the arena from the array before freeing its backing memory
      mi_atomic_store_ptr_release(mi_arena_t, &subproc->arenas[i], NULL);
      if (mi_memkind_is_os(arena->memid.memkind)) {
        _mi_os_free_ex(mi_arena_start(arena), mi_arena_size(arena), true, arena->memid, subproc); }
    }
  }
  // reset the count, but only if no arena was added concurrently in the meantime
  size_t expected = arena_count;
  mi_atomic_cas_strong_acq_rel(&subproc->arena_count, &expected, (size_t)0);
}
// Destroy all arenas of `subproc`; unsafe as any allocations inside them become invalid.
void _mi_arenas_unsafe_destroy_all(mi_subproc_t* subproc) {
  mi_arenas_unsafe_destroy(subproc);
}
// Register `arena` in the sub-process arena array. First tries to reuse an empty
// slot, then extends the array by bumping `arena_count` (up to MI_MAX_ARENAS).
// On success sets `arena->arena_idx` and, if `arena_id` is non-NULL, returns the
// new id through it. Returns false when the array is full.
static bool mi_arenas_add(mi_subproc_t* subproc, mi_arena_t* arena, mi_arena_id_t* arena_id)
{
  mi_assert_internal(arena != NULL);
  mi_assert_internal(arena->slice_count > 0);
  if (arena_id != NULL) { *arena_id = _mi_arena_id_none(); }
  mi_arena_t* expected;
  size_t count = mi_arenas_get_count(subproc);
  // pass 1: try to claim a previously vacated slot
  for( size_t i = 0; i < count; i++) {
    if (mi_arena_from_index(subproc,i) == NULL) {
      // tentatively set the index; only published if the CAS below succeeds
      arena->arena_idx = i;
      expected = NULL;
      if (mi_atomic_cas_ptr_strong_release(mi_arena_t, &subproc->arenas[i], &expected, arena)) {
        if (arena_id != NULL) { *arena_id = mi_arena_id_from_arena(arena); }
        return true;
      }
    }
  }
  // pass 2: extend the arena count and claim the newly exposed slot
  // (on CAS failure `count` is reloaded with the current value and we retry)
  while(count<MI_MAX_ARENAS) {
    if (mi_atomic_cas_strong_release(&subproc->arena_count, &count, count+1)) {
      arena->arena_idx = count;
      expected = NULL;
      if (mi_atomic_cas_ptr_strong_release(mi_arena_t, &subproc->arenas[count], &expected, arena)) {
        mi_subproc_stat_counter_increase(arena->subproc, arena_count, 1);
        if (arena_id != NULL) { *arena_id = mi_arena_id_from_arena(arena); }
        return true;
      }
    }
  }
  // full: undo the tentative fields and fail
  arena->arena_idx = 0;
  arena->subproc = NULL;
  return false;
}
// Size in bytes needed for a `mi_arena_pages_t` with its trailing bitmaps
// (the `pages` bitmap plus one abandoned bitmap per bin). A zero `slice_count`
// defaults to one chunk worth of slices. The offset of the bitmap area is
// returned through `bitmap_base` when non-NULL.
static size_t mi_arena_pages_size(size_t slice_count, size_t* bitmap_base) {
  if (slice_count == 0) { slice_count = MI_BCHUNK_BITS; }
  mi_assert_internal((slice_count % MI_BCHUNK_BITS) == 0);
  const size_t header_size = _mi_align_up(sizeof(mi_arena_pages_t), MI_BCHUNK_SIZE);
  const size_t bitmap_count = 1 + MI_ARENA_BIN_COUNT;
  const size_t total = header_size + (bitmap_count * mi_bitmap_size(slice_count, NULL));
  if (bitmap_base != NULL) { *bitmap_base = header_size; }
  return total;
}
// Compute how many initial "info" slices an arena needs for its meta-data: the
// arena struct, its bitmaps, the (optionally separated) page meta-data array and
// a trailing guard page. The byte offset of the bitmap area is returned through
// `*bitmap_base` when non-NULL.
// Fix: the `#if MI_PAGE_META_IS_SEPARATED` directive was fused onto the end of
// the preceding statement line, which is not a valid preprocessing directive
// (ISO C 6.10 requires a directive to start on its own line) -- split it out.
static size_t mi_arena_info_slices_needed(size_t slice_count, size_t* bitmap_base) {
  if (slice_count == 0) slice_count = MI_BCHUNK_BITS;
  mi_assert_internal((slice_count % MI_BCHUNK_BITS) == 0);
  const size_t base_size = _mi_align_up(sizeof(mi_arena_t), MI_BCHUNK_SIZE);
  // 4 regular bitmaps (committed, dirty, purge, pages) + one abandoned bitmap per
  // bin, plus the binned `slices_free` bbitmap (see layout in `mi_arena_initialize`)
  const size_t bitmaps_count = 4 + MI_ARENA_BIN_COUNT;
  const size_t bitmaps_size = bitmaps_count * mi_bitmap_size(slice_count, NULL) + mi_bbitmap_size(slice_count, NULL);
  #if MI_PAGE_META_IS_SEPARATED
  const size_t pages_size = slice_count * sizeof(mi_page_t);
  #else
  const size_t pages_size = 0;
  #endif
  const size_t size = base_size + bitmaps_size + pages_size;
  const size_t os_page_size = _mi_os_page_size();
  // round up to whole OS pages and leave room for a trailing guard page
  const size_t info_size = _mi_align_up(size, os_page_size) + _mi_os_secure_guard_page_size();
  const size_t info_slices = mi_slice_count_of_size(info_size);
  if (bitmap_base != NULL) *bitmap_base = base_size;
  return info_slices;
}
// Initialize a bitmap in-place at `*base` and advance `*base` past it.
static mi_bitmap_t* mi_arena_bitmap_init(size_t slice_count, uint8_t** base) {
  uint8_t* const p = *base;
  mi_bitmap_t* const bitmap = (mi_bitmap_t*)p;
  const size_t bsize = mi_bitmap_init(bitmap, slice_count, true );
  *base = p + bsize;
  return bitmap;
}
// Initialize a binned bitmap in-place at `*base` and advance `*base` past it.
static mi_bbitmap_t* mi_arena_bbitmap_init(size_t slice_count, uint8_t** base) {
  uint8_t* const p = *base;
  mi_bbitmap_t* const bbitmap = (mi_bbitmap_t*)p;
  const size_t bsize = mi_bbitmap_init(bbitmap, slice_count, true );
  *base = p + bsize;
  return bbitmap;
}
// Allocate and initialize a `mi_arena_pages_t` structure (with its bitmaps) for
// an arena; returns NULL on allocation failure.
static mi_arena_pages_t* mi_arena_pages_alloc(mi_arena_t* arena) {
  const size_t slice_count = arena->slice_count;
  size_t bitmap_offset = 0;
  const size_t total_size = mi_arena_pages_size(slice_count, &bitmap_offset);
  // zero'd allocation from the main heap, chunk-aligned so the bitmaps align too
  mi_arena_pages_t* const pages = (mi_arena_pages_t*)mi_heap_zalloc_aligned(mi_heap_main(), total_size, MI_BCHUNK_SIZE);
  if (pages == NULL) return NULL;
  uint8_t* cursor = (uint8_t*)pages + bitmap_offset;
  mi_assert_internal(_mi_is_aligned(cursor, MI_BCHUNK_SIZE));
  // lay out the page bitmap followed by one abandoned-pages bitmap per bin
  pages->pages = mi_arena_bitmap_init(slice_count, &cursor);
  for (size_t bin = 0; bin < MI_ARENA_BIN_COUNT; bin++) {
    pages->pages_abandoned[bin] = mi_arena_bitmap_init(slice_count, &cursor);
  }
  return pages;
}
// Initialize an arena in-place at `start`: the arena struct and its bitmaps live
// in the first `info_slices` of the managed memory itself. Returns NULL (with a
// warning) when the memory is too small or too large, or when the meta-data
// cannot be committed; otherwise registers the arena in the sub-process and
// returns it.
static mi_arena_t* mi_arena_initialize(mi_subproc_t* subproc, void* start,
  size_t slice_count, mi_arena_t* parent, size_t total_size,
  int numa_node, bool exclusive,
  mi_memid_t memid, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id)
{
  mi_assert_internal(_mi_is_aligned(start,MI_ARENA_SLICE_ALIGN));
  mi_assert_internal(mi_size_of_slices(slice_count)>=MI_ARENA_MIN_SIZE);
  // a single arena is limited by the maximum bitmap bit count
  if (slice_count > MI_BITMAP_MAX_BIT_COUNT) { _mi_warning_message("cannot use OS memory since it is too large (size %zu MiB, maximum is %zu MiB)", mi_size_of_slices(slice_count)/MI_MiB, mi_size_of_slices(MI_BITMAP_MAX_BIT_COUNT)/MI_MiB);
    return NULL;
  }
  // how many initial slices do we need for the meta-data?
  size_t bitmap_base;
  const size_t info_slices = mi_arena_info_slices_needed(slice_count, &bitmap_base);
  if (slice_count < info_slices+1) {
    _mi_warning_message("cannot use OS memory since it is not large enough (size %zu KiB, minimum required is %zu KiB)", mi_size_of_slices(slice_count)/MI_KiB, mi_size_of_slices(info_slices+1)/MI_KiB);
    return NULL;
  }
  else if (info_slices >= MI_ARENA_MAX_CHUNK_OBJ_SLICES) {
    _mi_warning_message("cannot use OS memory since it is too large with respect to the maximum object size (size %zu MiB, meta-info slices %zu, maximum object slices are %zu)", mi_size_of_slices(slice_count)/MI_MiB, info_slices, MI_ARENA_MAX_CHUNK_OBJ_SLICES);
    return NULL;
  }
  mi_arena_t* arena = (mi_arena_t*)start;
  // commit the meta-data slices if the memory is not committed yet
  if (!memid.initially_committed) {
    size_t commit_size = mi_size_of_slices(info_slices);
    // leave the trailing guard page uncommitted (unless pinned)
    if (!memid.is_pinned) { commit_size -= _mi_os_secure_guard_page_size(); }
    bool ok = false;
    if (commit_fun != NULL) {
      // custom commit function for externally managed memory
      ok = (*commit_fun)(true , arena, commit_size, NULL, commit_fun_arg);
    }
    else {
      ok = _mi_os_commit(arena, commit_size, NULL);
    }
    if (!ok) {
      _mi_warning_message("unable to commit meta-data for OS memory");
      return NULL;
    }
  }
  else if (!memid.is_pinned) {
    // already committed: just install the guard page at the end of the info area
    _mi_os_secure_guard_page_set_before((uint8_t*)arena + mi_size_of_slices(info_slices), memid);
  }
  if (!memid.initially_zero) {
    _mi_memzero(arena, mi_size_of_slices(info_slices) - _mi_os_secure_guard_page_size());
  }
  // initialize the arena fields
  arena->subproc = subproc;
  arena->memid = memid;
  arena->is_exclusive = exclusive;
  arena->slice_count = slice_count;
  arena->info_slices = info_slices;
  arena->numa_node = numa_node; arena->purge_expire = 0;
  arena->commit_fun = commit_fun;
  arena->commit_fun_arg = commit_fun_arg;
  arena->parent = parent;
  arena->total_size = total_size;
  // carve the bitmaps out of the info area, right after the arena struct
  uint8_t* base = mi_arena_start(arena) + bitmap_base;
  arena->slices_free = mi_arena_bbitmap_init(slice_count, &base);
  arena->slices_committed = mi_arena_bitmap_init(slice_count, &base);
  arena->slices_dirty = mi_arena_bitmap_init(slice_count, &base);
  arena->slices_purge = mi_arena_bitmap_init(slice_count, &base);
  arena->pages_main.pages = mi_arena_bitmap_init(slice_count, &base);
  for (size_t i = 0; i < MI_ARENA_BIN_COUNT; i++) {
    arena->pages_main.pages_abandoned[i] = mi_arena_bitmap_init(slice_count, &base);
  }
  #if MI_PAGE_META_IS_SEPARATED
  // separated page meta-data: one mi_page_t per slice, right after the bitmaps
  arena->pages_meta = (mi_page_t*)base;
  base += (slice_count * sizeof(mi_page_t));
  #else
  arena->pages_meta = NULL;
  #endif
  mi_assert_internal(mi_size_of_slices(info_slices) >= (size_t)(base - mi_arena_start(arena)));
  // mark all slices (except the info slices) as free
  mi_bbitmap_unsafe_setN(arena->slices_free, info_slices , arena->slice_count - info_slices);
  if (memid.initially_committed) {
    mi_bitmap_unsafe_setN(arena->slices_committed, 0, arena->slice_count);
  }
  if (!memid.initially_zero) {
    mi_bitmap_unsafe_setN(arena->slices_dirty, 0, arena->slice_count);
  }
  // and register with the sub-process
  if (!mi_arenas_add(subproc, arena, arena_id)) { return NULL; }
  return arena;
}
// Manage a region of OS/external memory as one or more arenas. The region is
// aligned to slice boundaries first; when it exceeds the maximum size of a
// single arena it is split into a chain of arenas where the first acts as the
// parent of the rest. Returns true if at least the first arena was created.
static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t size, int numa_node, bool exclusive,
  mi_memid_t memid, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) mi_attr_noexcept
{
  mi_assert(_mi_is_aligned(start, MI_ARENA_SLICE_SIZE));
  mi_assert(start!=NULL);
  if (arena_id != NULL) { *arena_id = _mi_arena_id_none(); }
  if (start==NULL) return false;
  // align the start to a slice boundary (dropping the unaligned head)
  if (!_mi_is_aligned(start, MI_ARENA_SLICE_SIZE)) {
    void* const aligned_start = _mi_align_up_ptr(start, MI_ARENA_SLICE_SIZE);
    const size_t diff = (uint8_t*)aligned_start - (uint8_t*)start;
    if (diff >= size || (size - diff) < MI_ARENA_SLICE_SIZE) {
      _mi_warning_message("after alignment, the size of the arena becomes too small (memory at %p with size %zu)\n", start, size);
      return false;
    }
    start = aligned_start;
    size = size - diff;
  }
  // round the slice count down to whole chunks
  size_t total_slice_count = _mi_align_down(size / MI_ARENA_SLICE_SIZE, MI_BCHUNK_BITS);
  size_t total_size = mi_size_of_slices(total_slice_count);
  if (total_size < MI_ARENA_MIN_SIZE) {
    _mi_warning_message("cannot use OS memory since it is not large enough (size %zu KiB, minimum required is %zu KiB)", size/MI_KiB, MI_ARENA_MIN_SIZE/MI_KiB);
    return false;
  }
  // create arenas until the whole region is covered; the first one becomes the
  // parent and carries the full total_size (and the returned arena id)
  mi_arena_t* parent = NULL;
  do {
    size_t slice_count = total_slice_count;
    // cap each arena at the maximum bitmap bit count
    if (slice_count > MI_BITMAP_MAX_BIT_COUNT) { slice_count = MI_BITMAP_MAX_BIT_COUNT;
    }
    mi_arena_t* arena = mi_arena_initialize( subproc, start, slice_count, parent,
      (parent==NULL ? total_size : 0), numa_node, exclusive,
      memid, commit_fun, commit_fun_arg,
      (parent==NULL ? arena_id : NULL));
    if (arena==NULL) {
      if (parent==NULL) {
        return false;
      }
      else {
        // a sub-arena failed: shrink the parent's recorded total and still succeed
        mi_assert(mi_size_of_slices(total_slice_count) <= parent->total_size);
        parent->total_size -= mi_size_of_slices(total_slice_count);
        return true;
      }
    }
    if (parent==NULL) {
      parent = arena;
      // sub-arenas do not own the memory; the parent does
      memid.memkind = MI_MEM_NONE;
    }
    mi_assert(slice_count <= total_slice_count);
    total_slice_count -= slice_count;
    start = (uint8_t*)start + mi_size_of_slices(slice_count);
  }
  while (total_slice_count > 0);
  return true;
}
// Manage externally provided OS memory as arena memory (no custom commit function).
bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_pinned, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  // wrap the external memory in a memid describing its state
  mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
  memid.mem.os.base = start;
  memid.mem.os.size = size;
  memid.is_pinned = is_pinned;
  memid.initially_zero = is_zero;
  memid.initially_committed = is_committed;
  return mi_manage_os_memory_ex2(_mi_subproc(), start, size, numa_node, exclusive, memid, NULL, NULL, arena_id);
}
// Manage externally provided memory as arena memory, with an optional custom
// commit/decommit function (`commit_fun`) for that memory.
bool mi_manage_memory(void* start, size_t size, bool is_committed, bool is_zero, bool is_pinned, int numa_node, bool exclusive, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) mi_attr_noexcept
{
  // wrap the external memory in a memid describing its state
  mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
  memid.mem.os.base = start;
  memid.mem.os.size = size;
  memid.is_pinned = is_pinned;
  memid.initially_zero = is_zero;
  memid.initially_committed = is_committed;
  return mi_manage_os_memory_ex2(_mi_subproc(), start, size, numa_node, exclusive, memid, commit_fun, commit_fun_arg, arena_id);
}
// Reserve fresh OS memory (rounded up to whole slices) and manage it as an arena
// in `subproc`. Returns 0 on success and ENOMEM on failure.
static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) {
  if (arena_id != NULL) { *arena_id = _mi_arena_id_none(); }
  size = _mi_align_up(size, MI_ARENA_SLICE_SIZE);
  mi_memid_t memid;
  void* const start = _mi_os_alloc_aligned(size, MI_ARENA_SLICE_ALIGN, commit, allow_large, &memid);
  if (start == NULL) { return ENOMEM; }
  const bool managed = mi_manage_os_memory_ex2(subproc, start, size, -1 , exclusive, memid, NULL, NULL, arena_id);
  if (!managed) {
    // could not create the arena: release the freshly reserved memory again
    _mi_os_free_ex(start, size, commit, memid, NULL);
    _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024));
    return ENOMEM;
  }
  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), memid.is_pinned ? " (in large os pages)" : "");
  return 0;
}
// Reserve OS memory as an arena in the current sub-process.
int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  mi_subproc_t* const subproc = _mi_subproc();
  return mi_reserve_os_memory_ex2(subproc, size, commit, allow_large, exclusive, arena_id);
}
// Manage externally provided OS memory (non-exclusive, no arena id returned).
// Note: `is_large` is forwarded in the `is_pinned` position of the extended API.
bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
  return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
}
// Reserve OS memory as a non-exclusive arena (no arena id returned).
int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
  return mi_reserve_os_memory_ex(size, commit, allow_large, false /* exclusive? */, NULL);
}
// Index just past the highest slice that is (or was) in use: one past the highest
// non-free slice, or just the info slices when everything else is free.
static size_t mi_arena_used_slices(mi_arena_t* arena) {
  size_t idx;
  const bool found = mi_bbitmap_bsr_inv(arena->slices_free, &idx);
  return (found ? idx + 1 : mi_arena_info_slices(arena));
}
// Render one bitfield into `buf` at offset `*k` ('x' for set bits, '.' for clear),
// advancing `*k`; returns the number of set bits.
static size_t mi_debug_show_bfield(mi_bfield_t field, char* buf, size_t* k) {
  size_t set_count = 0;
  size_t pos = *k;
  for (int bit = 0; bit < MI_BFIELD_BITS; bit++) {
    const bool on = (((field >> bit) & 1) != 0);
    if (on) { set_count++; }
    buf[pos++] = (on ? 'x' : '.');
  }
  *k = pos;
  return set_count;
}
// ANSI terminal foreground color codes (SGR 30-37 for the normal palette,
// 90-97 for the bright palette), used by the debug arena printing below.
typedef enum mi_ansi_color_e {
  MI_BLACK = 30,
  MI_MAROON,
  MI_DARKGREEN,
  MI_ORANGE,
  MI_NAVY,
  MI_PURPLE,
  MI_TEAL,
  MI_GRAY,
  MI_DARKGRAY = 90,   // start of the bright palette
  MI_RED,
  MI_GREEN,
  MI_YELLOW,
  MI_BLUE,
  MI_MAGENTA,
  MI_CYAN,
  MI_WHITE
} mi_ansi_color_t;
// Append an ANSI color escape sequence to `buf` at offset `*k`, advancing `*k`.
// NOTE(review): the `32` is a fixed bound on the bytes written, not the remaining
// buffer space -- callers must leave enough slack in `buf`; verify at call sites.
static void mi_debug_color(char* buf, size_t* k, mi_ansi_color_t color) {
  *k += _mi_snprintf(buf + *k, 32, "\x1B[%dm", (int)color);
}
// Percentage of a page's committed memory that is currently used by live blocks
// (may exceed 100 with block-size rounding). Used only for debug display coloring.
// Fix: guard against a zero committed size, which made the division below
// undefined behavior (division by zero).
static int mi_page_commit_usage(mi_page_t* page) {
  const size_t committed_size = mi_page_committed(page);
  if (committed_size == 0) return 0;   // avoid division by zero
  const size_t used_size = page->used * mi_page_block_size(page);
  return (int)(used_size * 100 / committed_size);
}
// Render one bitfield worth of arena slices as characters (with ANSI colors) for
// the debug page map. Legend (see also the header printed by the caller):
//   p/s/f = page / singleton / full page start (uppercase when not abandoned),
//   '-' = continuation slice of a page, '>' = page continues into the next field,
//   'i' = arena info, 'm' = meta-data, '~' = free but scheduled for purge,
//   '_' = free and committed, '.' = free and reserved, '?' = unknown.
// `pbit_of_page`/`pcolor_of_page` carry the remaining slice count and color of a
// page that spans multiple bitfields across calls. Returns the number of page
// starts seen in this bitfield.
static size_t mi_debug_show_page_bfield(char* buf, size_t* k, mi_arena_t* arena, size_t slice_index, long* pbit_of_page, mi_ansi_color_t* pcolor_of_page ) {
  size_t bit_set_count = 0;
  long bit_of_page = *pbit_of_page;
  mi_ansi_color_t color = *pcolor_of_page;
  mi_ansi_color_t prev_color = MI_GRAY;
  for (int bit = 0; bit < MI_BFIELD_BITS; bit++, bit_of_page--) {
    void* start = mi_arena_slice_start(arena, slice_index + bit);
    mi_page_t* page = _mi_safe_ptr_page(start);
    char c = ' ';
    if (page!=NULL && start==mi_page_slice_start(page)) {
      // a page starts at this slice
      mi_assert_internal(bit_of_page <= 0);
      bit_set_count++;
      c = 'p';
      color = MI_GRAY;
      if (mi_page_is_singleton(page)) { c = 's'; }
      else if (mi_page_is_full(page)) { c = 'f'; }
      if (!mi_page_is_abandoned(page)) { c = _mi_toupper(c); }
      // color by how much of the committed memory is actually used
      int commit_usage = mi_page_commit_usage(page);
      if (commit_usage < 25) { color = MI_MAROON; }
      else if (commit_usage < 50) { color = MI_ORANGE; }
      else if (commit_usage < 75) { color = MI_TEAL; }
      else color = MI_DARKGREEN;
      // remember how many slices this page covers
      bit_of_page = (long)page->memid.mem.arena.slice_count;
    }
    else {
      c = '?';
      if (bit_of_page > 0) { c = '-'; }              // continuation of the current page
      else if (_mi_meta_is_meta_page(start)) { c = 'm'; color = MI_GRAY; }
      else if (slice_index + bit < arena->info_slices) { c = 'i'; color = MI_GRAY; }
      else if (mi_bbitmap_is_setN(arena->slices_free, slice_index+bit,1)) {
        // free slice: distinguish purge-scheduled / committed / reserved
        if (mi_bitmap_is_set(arena->slices_purge, slice_index + bit)) { c = '~'; color = MI_ORANGE; }
        else if (mi_bitmap_is_set(arena->slices_committed, slice_index + bit)) { c = '_'; color = MI_GRAY; }
        else { c = '.'; color = MI_GRAY; }
      }
      // mark a page that continues past this bitfield
      if (bit==MI_BFIELD_BITS-1 && bit_of_page > 1) { c = '>'; }
    }
    // emit a color escape only when the color changes
    if (color != prev_color) {
      mi_debug_color(buf, k, color);
      prev_color = color;
    }
    buf[*k] = c; *k += 1;
  }
  // reset to gray and persist the cross-field page state
  mi_debug_color(buf, k, MI_GRAY);
  *pbit_of_page = bit_of_page;
  *pcolor_of_page = color;
  return bit_set_count;
}
// Print the slice map of one arena as rows of bitmap chunks. When `chunks` is
// NULL the per-slice page map is shown (via `mi_debug_show_page_bfield`),
// otherwise the raw bitfields of `chunks` ('x'/'.', optionally inverted).
// `chunk_bins` (if non-NULL) annotates each row with its size-bin letter.
// Returns the total number of set bits (pages) counted.
static size_t mi_debug_show_chunks(const char* header1, const char* header2, const char* header3,
  size_t slice_count, size_t chunk_count,
  mi_bchunk_t* chunks, mi_bchunkmap_t* chunk_bins, bool invert, mi_arena_t* arena, bool narrow)
{
  _mi_raw_message("\x1B[37m%s%s%s (use/commit: \x1B[31m0 - 25%%\x1B[33m - 50%%\x1B[36m - 75%%\x1B[32m - 100%%\x1B[0m)\n", header1, header2, header3);
  const size_t fields_per_line = (narrow ? 2 : 4);
  const size_t used_slice_count = mi_arena_used_slices(arena);
  size_t bit_count = 0;
  size_t bit_set_count = 0;
  // cross-chunk state for pages spanning multiple bitfields
  long bit_of_page = 0;
  mi_ansi_color_t color_of_page = MI_GRAY;
  for (size_t i = 0; i < chunk_count && bit_count < slice_count; i++) {
    char buf[5*MI_BCHUNK_BITS + 64]; _mi_memzero(buf, sizeof(buf));
    // once past the used slices, skip ahead to the final chunk (printed as " |")
    if (bit_count > used_slice_count && i+2 < chunk_count) {
      const size_t diff = chunk_count - 1 - i;
      bit_count += diff*MI_BCHUNK_BITS;
      _mi_raw_message(" |\n");
      i = chunk_count-1;
    }
    // row prefix: chunk index (up to 3 digits)
    size_t k = 0;
    if (i<10) { buf[k++] = ('0' + (char)i); buf[k++] = ' '; buf[k++] = ' '; }
    else if (i<100) { buf[k++] = ('0' + (char)(i/10)); buf[k++] = ('0' + (char)(i%10)); buf[k++] = ' '; }
    else if (i<1000) { buf[k++] = ('0' + (char)(i/100)); buf[k++] = ('0' + (char)((i%100)/10)); buf[k++] = ('0' + (char)(i%10)); }
    // row prefix: the chunk's size-bin letter (if bins are provided)
    char chunk_kind = ' ';
    if (chunk_bins != NULL) {
      switch (mi_bbitmap_debug_get_bin(chunk_bins,i)) {
        case MI_CBIN_SMALL: chunk_kind = 'S'; break;
        case MI_CBIN_MEDIUM: chunk_kind = 'M'; break;
        case MI_CBIN_LARGE: chunk_kind = 'L'; break;
        case MI_CBIN_HUGE: chunk_kind = 'H'; break;
        case MI_CBIN_OTHER: chunk_kind = 'X'; break;
        default: chunk_kind = ' '; break; }
    }
    buf[k++] = chunk_kind;
    buf[k++] = ' ';
    for (size_t j = 0; j < MI_BCHUNK_FIELDS; j++) {
      // flush the buffer every `fields_per_line` bitfields
      if (j > 0 && (j % fields_per_line) == 0) {
        _mi_raw_message(" %s\n\x1B[37m", buf);
        _mi_memzero(buf, sizeof(buf));
        _mi_memset(buf, ' ', 5); k = 5;   // indent continuation lines past the prefix
      }
      if (bit_count < slice_count) {
        mi_bfield_t bfield = 0;
        if (chunks!=NULL) {
          bfield = chunks[i].bfields[j];
        }
        if (invert) bfield = ~bfield;
        // either render the page map, or the raw bitfield
        size_t xcount = (chunks==NULL ? mi_debug_show_page_bfield(buf, &k, arena, bit_count, &bit_of_page, &color_of_page)
          : mi_debug_show_bfield(bfield, buf, &k));
        if (invert) xcount = MI_BFIELD_BITS - xcount;
        bit_set_count += xcount;
        buf[k++] = ' ';
      }
      else {
        // past the end of the arena slices
        _mi_memset(buf + k, 'o', MI_BFIELD_BITS);
        k += MI_BFIELD_BITS;
      }
      bit_count += MI_BFIELD_BITS;
    }
    _mi_raw_message(" %s\n\x1B[37m", buf);
  }
  _mi_raw_message("\x1B[0m total pages: %zu\n", bit_set_count);
  return bit_set_count;
}
// Print a summary line for each arena of the heap's sub-process and, when
// `show_pages` is set, a per-slice page map (see `mi_debug_show_chunks`).
// `narrow` reduces the number of bitfields printed per row.
static void mi_debug_show_arenas_ex(mi_heap_t* heap, bool show_pages, bool narrow) mi_attr_noexcept {
  mi_subproc_t* subproc = heap->subproc;
  size_t max_arenas = mi_arenas_get_count(subproc);
  size_t page_total = 0;
  for (size_t i = 0; i < max_arenas; i++) {
    mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &subproc->arenas[i]);
    if (arena == NULL) break;   // stop at the first empty slot
    mi_assert(arena->subproc == subproc);
    _mi_raw_message("%sarena %zu at %p: %zu slices (%zu MiB)%s%s, subproc: %p, numa: %i\n",
      (arena->parent==NULL ? "" : "(sub)"), i, arena, arena->slice_count, (size_t)(mi_size_of_slices(arena->slice_count)/MI_MiB),
      (arena->memid.is_pinned ? ", pinned" : ""), (arena->is_exclusive ? ", exclusive" : ""),
      arena->subproc, arena->numa_node);
    if (show_pages) {
      {
        const char* header1 = "chunks (p:page, f:full, s:singleton, P,F,S:not abandoned, i:arena-info, m:meta-data, ~:free-purgable, _:free-committed, .:free-reserved)";
        const char* header2 = (narrow ? "\n " : " ");
        const char* header3 = "(chunk bin: S:small, M : medium, L : large, X : other)";
        // chunks==NULL selects the page map rendering in mi_debug_show_chunks
        page_total += mi_debug_show_chunks(header1, header2, header3, arena->slice_count,
          mi_bbitmap_chunk_count(arena->slices_free), NULL,
          arena->slices_free->chunkmap_bins, false, arena, narrow);
      }
    }
  }
  if (show_pages) _mi_raw_message("total pages in arenas: %zu\n", page_total);
}
// Show the arenas of the main heap, with pages, in narrow format.
void mi_debug_show_arenas(void) mi_attr_noexcept {
  mi_heap_t* const heap = mi_heap_main();
  mi_debug_show_arenas_ex(heap, true /* show pages */, true /* narrow */);
}
// Public alias for `mi_debug_show_arenas`.
void mi_arenas_print(void) mi_attr_noexcept {
  mi_debug_show_arenas();
}
// Reserve `pages` huge OS pages on the given numa node (-1 for any node) and
// manage them as an arena. Fewer pages than requested may be reserved when the
// timeout expires. Returns 0 on success and ENOMEM on failure.
int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
  if (arena_id != NULL) *arena_id = NULL;
  if (pages==0) return 0;
  // normalize the numa node (-1 means "any node")
  if (numa_node < -1) numa_node = -1;
  if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
  size_t hsize = 0;
  size_t pages_reserved = 0;
  mi_memid_t memid;
  void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid);
  if (p==NULL || pages_reserved==0) {
    _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
    return ENOMEM;
  }
  _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
  // hand the reserved range to the arena manager
  if (!mi_manage_os_memory_ex2(_mi_subproc(), p, hsize, numa_node, exclusive, memid, NULL, NULL, arena_id)) {
    _mi_os_free(p, hsize, memid);
    return ENOMEM;
  }
  return 0;
}
// Reserve huge OS pages on a numa node as a non-exclusive arena (no arena id returned).
int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
  return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false /* exclusive? */, NULL);
}
// Reserve `pages` huge OS pages spread evenly over `numa_nodes` nodes
// (0 means: use all detected numa nodes). Stops at the first error.
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
  if (pages == 0) return 0;
  int numa_count = (numa_nodes > 0 && numa_nodes <= INT_MAX ? (int)numa_nodes : _mi_os_numa_node_count());
  if (numa_count <= 0) { numa_count = 1; }
  const size_t pages_per = pages / numa_count;
  const size_t pages_mod = pages % numa_count;
  // split the timeout per node, with some slack for per-node rounding
  const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50);
  for (int numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
    // the first `pages_mod` nodes get one extra page to distribute the remainder
    size_t node_pages = pages_per; if ((size_t)numa_node < pages_mod) { node_pages++; }
    int err = mi_reserve_huge_os_pages_at(node_pages, numa_node, timeout_per);
    if (err) return err;
    // guard against underflow on the last iteration
    if (pages < node_pages) {
      pages = 0;
    }
    else {
      pages -= node_pages;
    }
  }
  return 0;
}
// Deprecated: reserve huge OS pages interleaved over all numa nodes.
// Use mi_reserve_huge_os_pages_interleave/at instead.
int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
  MI_UNUSED(max_secs);
  _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
  if (pages_reserved != NULL) { *pages_reserved = 0; }
  const int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
  if (err == 0 && pages_reserved != NULL) { *pages_reserved = pages; }
  return err;
}
// Effective purge delay for arenas: the base purge delay scaled by the arena
// purge multiplier option.
static long mi_arena_purge_delay(void) {
  const long delay = mi_option_get(mi_option_purge_delay);
  const long mult  = mi_option_get(mi_option_arena_purge_mult);
  return (delay * mult);
}
// Purge (decommit or reset) a range of slices that is currently taken out of the
// free bitmap. Returns true when the memory was decommitted and must be
// re-committed before reuse.
static bool mi_arena_purge(mi_arena_t* arena, size_t slice_index, size_t slice_count) {
  mi_assert_internal(!arena->memid.is_pinned);
  mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count));
  const size_t size = mi_size_of_slices(slice_count);
  void* const p = mi_arena_slice_start(arena, slice_index);
  // speculatively mark the whole range committed to learn how much already was
  size_t already_committed;
  mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed); const bool all_committed = (already_committed == slice_count);
  const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed , mi_size_of_slices(already_committed), arena->commit_fun, arena->commit_fun_arg);
  if (needs_recommit) {
    // decommitted: the range is no longer committed
    mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count);
  }
  else if (!all_committed) {
    // NOTE(review): this branch also clears the whole range -- including the
    // slices that were committed before the speculative set above; verify this
    // matches the intended commit accounting
    mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count);
  }
  return needs_recommit;
}
// Schedule a (delayed) purge for a range of free slices. With a delay of 0 the
// range is purged immediately; with a negative delay, pinned memory, or during
// preloading nothing happens.
static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slice_count) {
  const long delay = mi_arena_purge_delay();
  if (arena->memid.is_pinned || delay < 0 || _mi_preloading()) return;
  mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count));
  if (delay == 0) {
    // purge right away
    mi_arena_purge(arena, slice_index, slice_count);
  }
  else {
    // set the expiration only on the first scheduled purge (CAS from 0);
    // later requests within the window keep the original expiration
    const mi_msecs_t expire = _mi_clock_now() + delay;
    mi_msecs_t expire0 = 0;
    if (mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire0, expire)) {
      mi_assert_internal(expire0==0);
      // also propagate to the sub-process wide expiration (if still unset)
      mi_atomic_casi64_strong_acq_rel(&arena->subproc->purge_expire, &expire0, expire);
    }
    else {
      // a purge is already pending; keep its earlier expiration
    }
    // record the slices to purge; picked up later by `mi_arena_try_purge`
    mi_bitmap_setN(arena->slices_purge, slice_index, slice_count, NULL);
  }
}
// State threaded through the purge range visitor (`mi_arena_try_purge_visitor`).
typedef struct mi_purge_visit_info_s {
  mi_msecs_t now;     // current time, supplied by the caller
  mi_msecs_t delay;   // configured purge delay (from `mi_arena_purge_delay`)
  bool all_purged;    // starts true; cleared when some range could not be purged
  bool any_purged;    // set when at least one range was purged
} mi_purge_visit_info_t;
// Try to purge one slice range: atomically claim it from the free map,
// purge it, and release it back. Returns `false` if the range could not
// be claimed (part of it was allocated concurrently).
static bool mi_arena_try_purge_range(mi_arena_t* arena, size_t slice_index, size_t slice_count) {
  mi_assert(slice_count < MI_BCHUNK_BITS);
  // claim the range so no thread can allocate from it while we purge
  if (!mi_bbitmap_try_clearNC(arena->slices_free, slice_index, slice_count)) {
    return false;
  }
  const bool decommitted = mi_arena_purge(arena, slice_index, slice_count);
  MI_UNUSED(decommitted);
  mi_assert_internal(!decommitted || mi_bitmap_is_clearN(arena->slices_committed, slice_index, slice_count));
  // release the range back to the free map
  mi_bbitmap_setN(arena->slices_free, slice_index, slice_count);
  return true;
}
// Bitmap range visitor used by `mi_arena_try_purge`: try to purge each set
// range in `slices_purge`. First attempts the whole range at once; if that
// fails (some slices were re-allocated), falls back to slice-by-slice.
// Always returns true to continue the scan.
static bool mi_arena_try_purge_visitor(size_t slice_index, size_t slice_count, mi_arena_t* arena, void* arg) {
mi_purge_visit_info_t* vinfo = (mi_purge_visit_info_t*)arg;
if (mi_arena_try_purge_range(arena, slice_index, slice_count)) {
vinfo->any_purged = true;
// NOTE(review): this unconditionally overwrites a possibly-false
// `all_purged` from an earlier range, so `all_purged` is unreliable
// across ranges -- confirm callers only depend on `any_purged`.
vinfo->all_purged = true;
}
else if (slice_count > 1)
{
// could not claim the full range: retry one slice at a time.
// (For slice_count == 1 a retry would repeat the identical call above,
//  so it is skipped; `all_purged` is then left unchanged for that slice.)
for (size_t i = 0; i < slice_count; i++) {
const bool purged = mi_arena_try_purge_range(arena, slice_index + i, 1);
vinfo->any_purged = vinfo->any_purged || purged;
vinfo->all_purged = vinfo->all_purged && purged;
}
}
return true; }  // continue visiting
// Try to purge the pending (scheduled) slices of one arena.
// Returns 1 if anything was purged, 0 if the expiration has not passed yet,
// and -1 if there was nothing to purge (or the arena is pinned).
static int mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
{
if (arena->memid.is_pinned) return -1;  // pinned memory is never purged
// check the expiration (unless forced)
mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
if (!force) {
if (expire==0) return -1;     // no purge scheduled for this arena
if (expire > now) return 0;   // scheduled, but not yet expired
}
// reset the expiration up-front so concurrent schedulers can set a new one
mi_atomic_storei64_release(&arena->purge_expire, (mi_msecs_t)0);
mi_subproc_stat_counter_increase(arena->subproc, arena_purges, 1);
// scan all pending ranges; `vinfo` is { now, delay, all_purged, any_purged }
mi_purge_visit_info_t vinfo = { now, mi_arena_purge_delay(), true , false };
// visit ranges in multiples of the minimal OS purge granularity
const size_t minslices = mi_slice_count_of_size(_mi_os_minimal_purge_size());
_mi_bitmap_forall_setc_rangesn(arena->slices_purge, minslices, &mi_arena_try_purge_visitor, arena, &vinfo);
return (vinfo.any_purged ? 1 : -1);
}
// Try to purge expired slices across all arenas of a subprocess.
// `force` purges regardless of expiration; `visit_all` visits every arena
// (otherwise the scan is budgeted to about a quarter of the arenas).
// Only one thread purges at a time (guarded); the scan start rotates with
// `tseq` so different threads begin at different arenas.
static void mi_arenas_try_purge(bool force, bool visit_all, mi_subproc_t* subproc, size_t tseq)
{
const long delay = mi_arena_purge_delay();
if (_mi_preloading() || delay <= 0) return;  // purging disabled or too early
// quick exit: nothing scheduled and nothing forced
const mi_msecs_t now = _mi_clock_now();
const mi_msecs_t arenas_expire = mi_atomic_loadi64_acquire(&subproc->purge_expire);
if (!visit_all && !force && (arenas_expire == 0 || arenas_expire > now)) return;
const size_t max_arena = mi_arenas_get_count(subproc);
if (max_arena == 0) return;
// allow only one thread to purge at a time (non-blocking guard)
static mi_atomic_guard_t purge_guard;
mi_atomic_guard(&purge_guard)
{
// push the global expiration slightly forward so other threads back off while we scan
if (arenas_expire > now) { mi_atomic_storei64_release(&subproc->purge_expire, now + (delay/10)); }
const size_t arena_start = tseq % max_arena;
// budget: how many arenas we are willing to purge in this pass
size_t max_purge_count = (visit_all ? max_arena : (max_arena/4)+1);
bool all_visited = true;
bool any_purged = false;
for (size_t _i = 0; _i < max_arena; _i++) {
// wrap-around iteration starting at `arena_start`
size_t i = _i + arena_start;
if (i >= max_arena) { i -= max_arena; }
mi_arena_t* arena = mi_arena_from_index(subproc,i);
if (arena != NULL) {
const int purged = mi_arena_try_purge(arena, now, force);
// NOTE(review): `purged >= 0` (i.e. purged, or scheduled-but-not-expired)
// keeps `any_purged` true so the global expire below is not reset early --
// confirm this is the intended tri-state handling.
if (purged >= 0) { any_purged = true;
if (purged >= 1) { if (max_purge_count <= 1) {
// budget exhausted: stop early (not all arenas visited)
all_visited = false;
break;
}
max_purge_count--;
}
}
}
}
// only clear the global expiration when we saw every arena and none had pending work
if (all_visited && !any_purged) {
mi_atomic_storei64_release(&subproc->purge_expire, (mi_msecs_t)0);
}
}
}
// State threaded through the page/block visitors of `_mi_heap_visit_blocks`.
typedef struct mi_heap_visit_info_s {
mi_heap_t* heap;              // heap being visited
mi_block_visit_fun* visitor;  // user callback (called per area and per block)
void* arg;                    // user argument forwarded to `visitor`
bool visit_blocks;            // also visit individual blocks inside each page?
} mi_heap_visit_info_t;
// Visit one page: report its area to the visitor (with block == NULL), and
// optionally each block inside it. Returns false to stop the overall visit.
static bool mi_heap_visit_page(mi_page_t* page, mi_heap_visit_info_t* vinfo) {
  mi_heap_area_t area;
  _mi_heap_area_init(&area, page);
  mi_assert_internal(vinfo->heap == mi_page_heap(page));
  // first report the page area itself
  const bool keep_going = vinfo->visitor(vinfo->heap, &area, NULL, area.block_size, vinfo->arg);
  if (!keep_going) return false;
  // then walk the individual blocks if requested
  if (!vinfo->visit_blocks) return true;
  return _mi_theap_area_visit_blocks(&area, page, vinfo->visitor, vinfo->arg);
}
// Bitmap visitor adapter: resolve the page at a slice index and visit it.
static bool mi_heap_visit_page_at(size_t slice_index, size_t slice_count, mi_arena_t* arena, void* arg) {
  MI_UNUSED(slice_count);
  mi_heap_visit_info_t* const info = (mi_heap_visit_info_t*)arg;
  mi_page_t* const page = mi_arena_page_at_slice(arena, slice_index);
  return mi_heap_visit_page(page, info);
}
// Visit all pages (and optionally all blocks) of a heap.
// When `abandoned_only` is set, only abandoned pages are visited.
// A NULL `heap` means the main heap. Returns false as soon as the
// visitor returns false (early abort), true otherwise.
bool _mi_heap_visit_blocks(mi_heap_t* heap, bool abandoned_only, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
mi_assert(visitor!=NULL);
if (visitor==NULL) return false;
if (heap==NULL) { heap = mi_heap_main(); }
mi_heap_visit_info_t visit_info = { heap, visitor, arg, visit_blocks };
bool ok = true;
// visit the heap's pages in every arena it uses
mi_forall_arenas(heap, NULL, 0, arena) {
mi_arena_pages_t* arena_pages = mi_heap_arena_pages(heap, arena);
if (ok && arena_pages != NULL) {
if (abandoned_only) {
// only scan bins that actually have abandoned pages
for (size_t bin = 0; ok && bin < MI_BIN_COUNT; bin++) {
if (mi_atomic_load_relaxed(&heap->abandoned_count[bin]) > 0) {
ok = _mi_bitmap_forall_set(arena_pages->pages_abandoned[bin], &mi_heap_visit_page_at, arena, &visit_info);
}
}
}
else {
ok = _mi_bitmap_forall_set(arena_pages->pages, &mi_heap_visit_page_at, arena, &visit_info);
}
}
}
mi_forall_arenas_end();
if (!ok) return false;
// also visit OS-allocated abandoned pages (kept in a locked linked list)
mi_page_t* page = NULL;
mi_lock(&heap->os_abandoned_pages_lock) {
page = heap->os_abandoned_pages;
}
// walk the list outside the lock; capture `next` before visiting
while (ok && page != NULL) {
mi_page_t* next = page->next; ok = mi_heap_visit_page(page, &visit_info);
page = next;
}
return ok;
}
// Public API: visit all pages (live and abandoned) of `heap`.
bool mi_heap_visit_blocks(mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  const bool abandoned_only = false;
  return _mi_heap_visit_blocks(heap, abandoned_only, visit_blocks, visitor, arg);
}
// Public API: visit only the abandoned pages of `heap`.
bool mi_heap_visit_abandoned_blocks(mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) {
  const bool abandoned_only = true;
  return _mi_heap_visit_blocks(heap, abandoned_only, visit_blocks, visitor, arg);
}
// State for `mi_heap_delete_page`: the destination of moved pages
// (NULL `heap_target` means: free the pages instead of moving them).
typedef struct mi_heap_delete_visit_info_s {
mi_heap_t* heap_target;    // heap to move pages to, or NULL to destroy them
mi_theap_t* theap_target;  // thread-heap of the target (may be NULL)
mi_theap_t* theap;         // currently unused by the visitor (always NULL here)
} mi_heap_delete_visit_info_t;
// Page visitor used when deleting or moving a heap: take ownership of each
// page and either free it (no target, or empty) or transfer it -- including
// its bitmap registration and stats -- to `info->heap_target`, then abandon
// it there. Always returns true to continue visiting.
static bool mi_heap_delete_page(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
MI_UNUSED(block); MI_UNUSED(block_size); MI_UNUSED(heap);
mi_heap_delete_visit_info_t* info = (mi_heap_delete_visit_info_t*)arg;
mi_heap_t* heap_target = info->heap_target;
// NOTE(review): `theap` is a const NULL here (info->theap is ignored), which
// makes the `theap != NULL` stat branch below dead -- confirm this is intended.
mi_theap_t* const theap = NULL; mi_page_t* const page = (mi_page_t*)area->reserved1;
// take ownership; then make sure the page is in the abandoned state
mi_page_claim_ownership(page); if (mi_page_is_abandoned(page)) {
_mi_arenas_page_unabandon(page,theap);
}
else {
// detach from any page queue and from its thread-heap
page->next = page->prev = NULL; mi_page_set_theap(page,NULL); }
mi_assert_internal(mi_page_is_abandoned(page));
mi_assert_internal(mi_page_is_owned(page));
if (page->used==0) {
// empty page: free it outright
_mi_arenas_page_free(page, theap);
}
else if (heap_target==NULL) {
// destroy: discard live blocks by forcing the used count to zero
page->used=0; _mi_arenas_page_free(page, theap);
}
else {
// move the page to the target heap
const size_t sbin = _mi_page_stats_bin(page);
size_t slice_index;
size_t slice_count;
mi_arena_pages_t* arena_pages = NULL;
mi_arena_t* const arena = mi_page_arena_pages(page, &slice_index, &slice_count, &arena_pages);
// unregister from the source heap's page bitmap
mi_assert_internal(mi_bitmap_is_set(arena_pages->pages, slice_index));
mi_bitmap_clear(arena_pages->pages, slice_index);
// move the page stats from the source to the target
if (theap != NULL) {
mi_theap_stat_decrease(theap, page_bins[sbin], 1);
mi_theap_stat_decrease(theap, pages, 1);
}
else {
mi_heap_stat_decrease((mi_heap_t*)heap, page_bins[_mi_page_stats_bin(page)], 1);
mi_heap_stat_decrease((mi_heap_t*)heap, pages, 1);
}
mi_theap_t* theap_target = info->theap_target;
// register in the target heap's page bitmap (fall back to the main heap
// if the target cannot allocate its arena-pages structure)
mi_arena_pages_t* arena_pages_target = mi_heap_ensure_arena_pages(heap_target, arena);
if mi_unlikely(arena_pages_target==NULL) {
heap_target = mi_heap_main();
theap_target = mi_heap_theap(heap_target);
arena_pages_target = mi_heap_ensure_arena_pages(heap_target, arena);
mi_assert_internal(arena_pages_target!=NULL);
}
mi_assert_internal(mi_bitmap_is_clear(arena_pages_target->pages, slice_index));
mi_bitmap_set(arena_pages_target->pages, slice_index);
page->heap = heap_target;
mi_theap_stat_increase(theap_target, page_bins[sbin], 1);
mi_theap_stat_increase(theap_target, pages, 1);
// re-abandon the page in its new heap
_mi_arenas_page_abandon(page,theap_target);
}
return true;
}
// Remove all pages from `heap`: move them to `heap_target`, or free them
// when `heap_target` is NULL. In debug mode, verify the heap is fully
// empty afterwards (no registered, OS-abandoned, or counted pages left).
static void mi_heap_delete_pages(mi_heap_t* heap, mi_heap_t* heap_target) {
mi_theap_t* const theap_target = (heap_target != NULL ? _mi_heap_theap(heap_target) : NULL);
mi_heap_delete_visit_info_t info = { heap_target, theap_target, NULL };
// visit every page (not blocks) and delete/move each one
_mi_heap_visit_blocks(heap, false, false, &mi_heap_delete_page, &info);
#if MI_DEBUG>1
// all per-arena page bitmaps must now be empty
for (size_t i = 0; i < MI_ARENA_BIN_COUNT; i++) {
mi_arena_pages_t* const arena_pages = mi_atomic_load_ptr_relaxed(mi_arena_pages_t, &heap->arena_pages[i]);
if (arena_pages!=NULL) {
mi_assert_internal(mi_bitmap_is_all_clear(arena_pages->pages));
}
}
// no OS-allocated abandoned pages may remain
mi_lock(&heap->os_abandoned_pages_lock) {
mi_assert_internal(heap->os_abandoned_pages == NULL);
}
// and all abandoned counters must be zero
for (size_t i = 0; i < MI_BIN_COUNT; i++) {
mi_assert_internal(mi_atomic_load_relaxed(&heap->abandoned_count[i])==0);
}
#endif
}
// Move every page of `heap_from` into `heap_to` (the main heap when NULL).
// Moving out of the main heap itself is a no-op.
void _mi_heap_move_pages(mi_heap_t* heap_from, mi_heap_t* heap_to) {
  if (_mi_is_heap_main(heap_from)) { return; }
  mi_heap_t* const target = (heap_to == NULL ? mi_heap_main() : heap_to);
  mi_heap_delete_pages(heap_from, target);
}
// Destroy every page of `heap_from`, freeing them even if blocks are
// still in use. Destroying the main heap is a no-op.
void _mi_heap_destroy_pages(mi_heap_t* heap_from) {
  if (_mi_is_heap_main(heap_from)) { return; }
  mi_heap_delete_pages(heap_from, /*heap_target=*/NULL);
}