#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"
#define MI_IN_PAGE_C
#include "page-queue.c"
#undef MI_IN_PAGE_C
/* -----------------------------------------------------------
  Page helpers
----------------------------------------------------------- */

// Index a block in a page
static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t i) {
  mi_assert_internal(page != NULL);
  mi_assert_internal(i <= page->reserved);
  return (mi_block_t*)((uint8_t*)page_start + (i * page->block_size));
}
static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_stats_t* stats);
#if (MI_DEBUG>1)
// Count the blocks in a free list, checking that each belongs to this page
static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) {
  size_t count = 0;
  while (head != NULL) {
    mi_assert_internal(page == _mi_ptr_page(head));
    count++;
    head = mi_block_next(page, head);
  }
  return count;
}

// Check that every block in a free list lies inside the page area
static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
  size_t psize;
  uint8_t* page_area = _mi_page_start(_mi_page_segment(page), page, &psize);
  mi_block_t* start = (mi_block_t*)page_area;
  mi_block_t* end   = (mi_block_t*)(page_area + psize);
  while(p != NULL) {
    if (p < start || p >= end) return false;
    p = mi_block_next(page, p);
  }
  return true;
}
static bool mi_page_is_valid_init(mi_page_t* page) {
  mi_assert_internal(page->block_size > 0);
  mi_assert_internal(page->used <= page->capacity);
  mi_assert_internal(page->capacity <= page->reserved);

  mi_segment_t* segment = _mi_page_segment(page);
  uint8_t* start = _mi_page_start(segment,page,NULL);
  mi_assert_internal(start == _mi_segment_page_start(segment,page,page->block_size,NULL));

  mi_assert_internal(mi_page_list_is_valid(page,page->free));
  mi_assert_internal(mi_page_list_is_valid(page,page->local_free));

  #if MI_DEBUG>3 // generally too expensive to check this
  if (page->flags.is_zero) {
    for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
      mi_assert_expensive(mi_mem_is_zero(block + 1, page->block_size - sizeof(mi_block_t)));
    }
  }
  #endif

  mi_block_t* tfree = mi_tf_block(page->thread_free);
  mi_assert_internal(mi_page_list_is_valid(page, tfree));
  size_t tfree_count = mi_page_list_count(page, tfree);
  mi_assert_internal(tfree_count <= page->thread_freed + 1);

  size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
  mi_assert_internal(page->used + free_count == page->capacity);

  return true;
}
bool _mi_page_is_valid(mi_page_t* page) {
  mi_assert_internal(mi_page_is_valid_init(page));
  #if MI_SECURE
  mi_assert_internal(page->cookie != 0);
  #endif
  if (page->heap!=NULL) {
    mi_segment_t* segment = _mi_page_segment(page);
    mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == page->heap->thread_id || segment->thread_id==0);
    if (segment->page_kind != MI_PAGE_HUGE) {
      mi_page_queue_t* pq = mi_page_queue_of(page);
      mi_assert_internal(mi_page_queue_contains(pq, page));
      mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
      mi_assert_internal(mi_heap_contains_queue(page->heap,pq));
    }
  }
  return true;
}
#endif
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay) {
  mi_thread_free_t tfree;
  mi_thread_free_t tfreex;

  do {
    tfreex = tfree = page->thread_free;
    if (mi_unlikely(mi_tf_delayed(tfree) < MI_DELAYED_FREEING)) {
      tfreex = mi_tf_set_delayed(tfree,delay);
    }
    else if (mi_unlikely(mi_tf_delayed(tfree) == MI_DELAYED_FREEING)) {
      mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done
      continue;          // and try again
    }
  }
  while((mi_tf_delayed(tfreex) != mi_tf_delayed(tfree)) && // avoid an atomic operation if already equal
        !mi_atomic_cas_weak(mi_atomic_cast(uintptr_t,&page->thread_free), tfreex, tfree));
}
/* -----------------------------------------------------------
  Page collect the `local_free` and `thread_free` lists
----------------------------------------------------------- */

// Collect the local `thread_free` list using an atomic exchange.
static void _mi_page_thread_free_collect(mi_page_t* page)
{
  mi_block_t* head;
  mi_thread_free_t tfree;
  mi_thread_free_t tfreex;
  do {
    tfree = page->thread_free;
    head = mi_tf_block(tfree);
    tfreex = mi_tf_set_block(tfree,NULL);
  } while (!mi_atomic_cas_weak(mi_atomic_cast(uintptr_t,&page->thread_free), tfreex, tfree));

  if (head == NULL) return;  // return if the list is empty

  // find the tail -- also to get a proper count (without data races)
  uintptr_t count = 1;
  mi_block_t* tail = head;
  mi_block_t* next;
  while ((next = mi_block_next(page,tail)) != NULL) {
    count++;
    tail = next;
  }

  // and prepend to the local free list
  mi_block_set_next(page,tail, page->local_free);
  page->local_free = head;

  // update counts now
  mi_atomic_subu(&page->thread_freed, count);
  page->used -= count;
}
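/* Illustrative sketch (not part of the allocator): `mi_thread_free_t` packs the
   list head pointer together with a 2-bit `mi_delayed_t` state in a single word,
   which is why both can be swapped with one CAS above. The exact encoding lives
   in mimalloc-internal.h; assuming the low two bits hold the state (blocks are
   word-aligned, so those bits are free), the accessors behave roughly like:

     mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) { return (mi_delayed_t)(tf & 0x03); }
     mi_block_t*  mi_tf_block(mi_thread_free_t tf)   { return (mi_block_t*)(tf & ~((uintptr_t)0x03)); }
*/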
void _mi_page_free_collect(mi_page_t* page, bool force) {
  mi_assert_internal(page!=NULL);

  // collect the thread free list
  if (force || mi_tf_block(page->thread_free) != NULL) {  // quick test to avoid an atomic operation
    _mi_page_thread_free_collect(page);
  }

  // and the local free list
  if (page->local_free != NULL) {
    if (mi_likely(page->free == NULL)) {
      // usual case
      page->free = page->local_free;
      page->local_free = NULL;
      page->flags.is_zero = false;
    }
    else if (force) {
      // append -- only on shutdown (force) as this is a linear operation
      mi_block_t* tail = page->local_free;
      mi_block_t* next;
      while ((next = mi_block_next(page, tail)) != NULL) {
        tail = next;
      }
      mi_block_set_next(page, tail, page->free);
      page->free = page->local_free;
      page->local_free = NULL;
      page->flags.is_zero = false;
    }
  }

  mi_assert_internal(!force || page->local_free == NULL);
}
/* -----------------------------------------------------------
  Page fresh and retire
----------------------------------------------------------- */

// called from segments when reclaiming abandoned pages
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
  mi_assert_expensive(mi_page_is_valid_init(page));
  mi_assert_internal(page->heap == NULL);
  mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
  _mi_page_free_collect(page,false);
  mi_page_queue_t* pq = mi_page_queue(heap, page->block_size);
  mi_page_queue_push(heap, pq, page);
  mi_assert_expensive(_mi_page_is_valid(page));
}
// allocate a fresh page from a segment
static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) {
  mi_assert_internal(pq==NULL || mi_heap_contains_queue(heap, pq));
  mi_page_t* page = _mi_segment_page_alloc(block_size, &heap->tld->segments, &heap->tld->os);
  if (page == NULL) return NULL;
  mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
  mi_page_init(heap, page, block_size, &heap->tld->stats);
  _mi_stat_increase(&heap->tld->stats.pages, 1);
  if (pq != NULL) mi_page_queue_push(heap, pq, page); // huge pages pass pq==NULL
  mi_assert_expensive(_mi_page_is_valid(page));
  return page;
}
// Get a fresh page to use
static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
  mi_assert_internal(mi_heap_contains_queue(heap, pq));

  // try to reclaim an abandoned page first
  mi_page_t* page = pq->first;
  if (!heap->no_reclaim &&
      _mi_segment_try_reclaim_abandoned(heap, false, &heap->tld->segments) &&
      page != pq->first)
  {
    // we reclaimed, and we got lucky with a reclaimed page in our queue
    page = pq->first;
    if (page->free != NULL) return page;
  }
  // otherwise allocate the page
  page = mi_page_fresh_alloc(heap, pq, pq->block_size);
  if (page==NULL) return NULL;
  mi_assert_internal(pq->block_size==page->block_size);
  mi_assert_internal(pq==mi_page_queue(heap,page->block_size));
  return page;
}
/* -----------------------------------------------------------
  Do any delayed frees
  (put there by other threads if they deallocated in a full page)
----------------------------------------------------------- */
void _mi_heap_delayed_free(mi_heap_t* heap) {
  // take over the list atomically
  mi_block_t* block;
  do {
    block = (mi_block_t*)heap->thread_delayed_free;
  } while (block != NULL && !mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&heap->thread_delayed_free), NULL, block));

  // and free them all
  while(block != NULL) {
    mi_block_t* next = mi_block_nextx(heap->cookie,block);
    // use internal free to keep stats etc. correct
    if (!_mi_free_delayed_block(block)) {
      // the block could not be freed yet (its page is still in the
      // MI_DELAYED_FREEING state); push it back on the list to retry later
      mi_block_t* dfree;
      do {
        dfree = (mi_block_t*)heap->thread_delayed_free;
        mi_block_set_nextx(heap->cookie, block, dfree);
      } while (!mi_atomic_cas_ptr_weak(mi_atomic_cast(void*,&heap->thread_delayed_free), block, dfree));
    }
    block = next;
  }
}
/* -----------------------------------------------------------
  Unfull, abandon, free and retire
----------------------------------------------------------- */

// Move a page from the full list back to a regular list
void _mi_page_unfull(mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(mi_page_is_in_full(page));

  _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE);
  if (!mi_page_is_in_full(page)) return;

  mi_heap_t* heap = page->heap;
  mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL];
  mi_page_set_in_full(page, false); // to get the right queue
  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
  mi_page_set_in_full(page, true);
  mi_page_queue_enqueue_from(pq, pqfull, page);
}
static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(!mi_page_immediate_available(page));
  mi_assert_internal(!mi_page_is_in_full(page));

  _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE);
  if (mi_page_is_in_full(page)) return;

  mi_page_queue_enqueue_from(&page->heap->pages[MI_BIN_FULL], pq, page);
  _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
}
// Abandon a page with used blocks at the end of a thread.
// Note: only call if it is ensured that no references exist from
// the `page->heap->thread_delayed_free` into this page.
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(page->heap != NULL);
  _mi_page_use_delayed_free(page,MI_NEVER_DELAYED_FREE);

  #if MI_DEBUG>1
  // check that no references are left into this page
  for (mi_block_t* block = (mi_block_t*)page->heap->thread_delayed_free; block != NULL; block = mi_block_nextx(page->heap->cookie,block)) {
    mi_assert_internal(_mi_ptr_page(block) != page);
  }
  #endif

  // and then remove from the page queue and abandon the segment page
  mi_segments_tld_t* segments_tld = &page->heap->tld->segments;
  mi_page_queue_remove(pq, page);
  mi_assert_internal(page->heap == NULL);
  _mi_segment_page_abandon(page,segments_tld);
}
// Free a page with no more used blocks
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(pq == mi_page_queue_of(page));
  mi_assert_internal(mi_page_all_free(page));
  #if MI_DEBUG>1
  // check if we can safely free (no other thread is in the middle of a delayed free)
  mi_thread_free_t free = mi_tf_set_delayed(page->thread_free,MI_NEVER_DELAYED_FREE);
  free = mi_atomic_exchange(&page->thread_free, free);
  mi_assert_internal(mi_tf_delayed(free) != MI_DELAYED_FREEING);
  #endif

  mi_page_set_has_aligned(page, false);

  // account for huge pages here
  if (page->block_size > MI_LARGE_OBJ_SIZE_MAX) {
    if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) {
      _mi_stat_decrease(&page->heap->tld->stats.giant, page->block_size);
    }
    else {
      _mi_stat_decrease(&page->heap->tld->stats.huge, page->block_size);
    }
  }

  // remove from the page queue
  mi_segments_tld_t* segments_tld = &page->heap->tld->segments;
  mi_page_queue_remove(pq, page);

  // and free it
  mi_assert_internal(page->heap == NULL);
  _mi_segment_page_free(page, force, segments_tld);
}
// Retire a page with no more used blocks.
// Important to not retire too quickly though as new allocations might be coming in.
void _mi_page_retire(mi_page_t* page) {
  mi_assert_internal(page != NULL);
  mi_assert_expensive(_mi_page_is_valid(page));
  mi_assert_internal(mi_page_all_free(page));

  mi_page_set_has_aligned(page, false);

  // don't retire too often (or we end up retiring and re-allocating most of the time);
  // as a heuristic, keep a small page if its neighbours are mostly used
  if (mi_likely(page->block_size <= (MI_SMALL_SIZE_MAX/4))) {
    if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
      mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
      return; // don't retire after all
    }
  }

  _mi_page_free(page, mi_page_queue_of(page), false);
}
/* -----------------------------------------------------------
  Initialize the initial free list in a page.
  In secure mode we initialize a randomized free list by
  alternating between slices.
----------------------------------------------------------- */

#define MI_MAX_SLICE_SHIFT  (6)  // at most 64 slices
#define MI_MAX_SLICES       (1UL << MI_MAX_SLICE_SHIFT)
#define MI_MIN_SLICES       (2)
static void mi_page_free_list_extend_secure(mi_heap_t* heap, mi_page_t* page, size_t extend, mi_stats_t* stats) {
  UNUSED(stats);
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->local_free == NULL);
  mi_assert_internal(page->capacity + extend <= page->reserved);
  void* page_area = _mi_page_start(_mi_page_segment(page), page, NULL);
  size_t bsize = page->block_size;

  // initialize a randomized free list: set up `slice_count` slices to alternate between
  size_t shift = MI_MAX_SLICE_SHIFT;
  while ((extend >> shift) == 0) {
    shift--;
  }
  size_t slice_count = (size_t)1U << shift;
  size_t slice_extend = extend / slice_count;
  mi_assert_internal(slice_extend >= 1);
  mi_block_t* blocks[MI_MAX_SLICES];  // current start of the slice
  size_t      counts[MI_MAX_SLICES];  // available objects in the slice
  for (size_t i = 0; i < slice_count; i++) {
    blocks[i] = mi_page_block_at(page, page_area, page->capacity + i*slice_extend);
    counts[i] = slice_extend;
  }
  counts[slice_count-1] += (extend % slice_count);  // final slice holds the modulus too

  // and initialize the free list by randomly threading through the slices:
  // set up the first element
  size_t current = _mi_heap_random(heap) % slice_count;
  counts[current]--;
  page->free = blocks[current];
  // and iterate through the rest
  uintptr_t rnd = heap->random;
  for (size_t i = 1; i < extend; i++) {
    // call random_shuffle only every MI_INTPTR_SIZE rounds
    size_t round = i%MI_INTPTR_SIZE;
    if (round == 0) rnd = _mi_random_shuffle(rnd);
    // select a random next slice index
    size_t next = ((rnd >> 8*round) & (slice_count-1));
    while (counts[next]==0) {  // ensure it still has space
      next++;
      if (next==slice_count) next = 0;
    }
    // and link the current block to it
    counts[next]--;
    mi_block_t* block = blocks[current];
    blocks[current] = (mi_block_t*)((uint8_t*)block + bsize);  // bump to the following block
    mi_block_set_next(page, block, blocks[next]);  // and set next; note: we may have `current == next`
    current = next;
  }
  mi_block_set_next(page, blocks[current], NULL);  // end of the list
  heap->random = _mi_random_shuffle(rnd);
}
static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* page, size_t extend, mi_stats_t* stats)
{
  UNUSED(stats);
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->local_free == NULL);
  mi_assert_internal(page->capacity + extend <= page->reserved);
  void* page_area = _mi_page_start(_mi_page_segment(page), page, NULL);
  size_t bsize = page->block_size;

  // initialize a sequential free list
  mi_block_t* start = mi_page_block_at(page, page_area, page->capacity);
  mi_block_t* last  = mi_page_block_at(page, page_area, page->capacity + extend - 1);
  mi_block_t* block = start;
  while(block <= last) {
    mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize);
    mi_block_set_next(page,block,next);
    block = next;
  }
  mi_block_set_next(page, last, NULL);
  page->free = start;
}
/* -----------------------------------------------------------
  Page initialize and extend the capacity
----------------------------------------------------------- */

#define MI_MAX_EXTEND_SIZE  (4*1024)       // heuristic, one OS page seems to work well
#if MI_SECURE
#define MI_MIN_EXTEND       (8*MI_SECURE)  // extend at least by this many
#else
#define MI_MIN_EXTEND       (1)
#endif
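/* Worked example of the sizing below (illustrative only): with 64-byte blocks,
   max_extend = MI_MAX_EXTEND_SIZE/64 = 64, so each extension links at most
   64 blocks (64*64 = 4KiB of memory touched); with blocks of MI_MAX_EXTEND_SIZE
   or larger, only MI_MIN_EXTEND blocks are linked per extension. */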
// Extend the capacity (up to reserved) by initializing a free list.
// We do at most `max_extend` blocks at a time to avoid touching too much memory.
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* stats) {
  UNUSED(stats);
  mi_assert(page->free == NULL);
  mi_assert(page->local_free == NULL);
  mi_assert_expensive(mi_page_is_valid_init(page));
  if (page->free != NULL) return;
  if (page->capacity >= page->reserved) return;

  size_t page_size;
  _mi_page_start(_mi_page_segment(page), page, &page_size);
  mi_stat_increase(stats->pages_extended, 1);

  // calculate the extend count
  size_t extend = page->reserved - page->capacity;
  size_t max_extend = (page->block_size >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)page->block_size);
  if (max_extend < MI_MIN_EXTEND) max_extend = MI_MIN_EXTEND;

  if (extend > max_extend) {
    // ensure we don't touch memory beyond the page to reduce page commit
    extend = (max_extend==0 ? 1 : max_extend);
  }
  mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
  mi_assert_internal(extend < (1UL<<16));

  // and extend the free list
  if (extend < MI_MIN_SLICES || MI_SECURE==0) {
    mi_page_free_list_extend(page, extend, stats);
  }
  else {
    mi_page_free_list_extend_secure(heap, page, extend, stats);
  }
  // enable the new free list
  page->capacity += (uint16_t)extend;
  mi_stat_increase(stats->page_committed, extend * page->block_size);

  // extension into zero initialized memory preserves the zero'd free list
  if (!page->is_zero_init) {
    page->flags.is_zero = false;
  }
  mi_assert_expensive(mi_page_is_valid_init(page));
}
// Initialize a fresh page
static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_stats_t* stats) {
  mi_assert(page != NULL);
  mi_segment_t* segment = _mi_page_segment(page);
  mi_assert(segment != NULL);
  mi_assert_internal(block_size > 0);
  // set fields
  size_t page_size;
  _mi_segment_page_start(segment, page, block_size, &page_size);
  page->block_size = block_size;
  mi_assert_internal(page_size / block_size < (1L<<16));
  page->reserved = (uint16_t)(page_size / block_size);
  #if MI_SECURE
  page->cookie = _mi_heap_random(heap) | 1;
  #endif
  page->flags.is_zero = page->is_zero_init;

  mi_assert_internal(page->capacity == 0);
  mi_assert_internal(page->free == NULL);
  mi_assert_internal(page->used == 0);
  mi_assert_internal(page->thread_free == 0);
  mi_assert_internal(page->thread_freed == 0);
  mi_assert_internal(page->next == NULL);
  mi_assert_internal(page->prev == NULL);
  mi_assert_internal(!mi_page_has_aligned(page));
  #if MI_SECURE
  mi_assert_internal(page->cookie != 0);
  #endif
  mi_assert_expensive(mi_page_is_valid_init(page));

  // initialize an initial free list
  mi_page_extend_free(heap,page,stats);
  mi_assert(mi_page_immediate_available(page));
}
/* -----------------------------------------------------------
  Find pages with free blocks
----------------------------------------------------------- */

// Find a page with free blocks of `pq->block_size`.
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq)
{
  // search through the pages in "next fit" order
  mi_page_t* rpage = NULL;
  size_t count = 0;
  size_t page_free_count = 0;
  mi_page_t* page = pq->first;
  while( page != NULL)
  {
    mi_page_t* next = page->next; // remember next
    count++;

    // 0. collect freed blocks by us and other threads
    _mi_page_free_collect(page,false);

    // 1. if the page contains free blocks, we are done
    if (mi_page_immediate_available(page)) {
      // If all blocks are free, we might retire this page instead;
      // do this at most 8 times to bound allocation time
      if (page_free_count < 8 && mi_page_all_free(page)) {
        page_free_count++;
        if (rpage != NULL) _mi_page_free(rpage,pq,false);
        rpage = page;
        page = next;
        continue; // and keep looking
      }
      else {
        break;    // pick this one
      }
    }

    // 2. try to extend
    if (page->capacity < page->reserved) {
      mi_page_extend_free(heap, page, &heap->tld->stats);
      mi_assert_internal(mi_page_immediate_available(page));
      break;
    }

    // 3. if the page is completely full, move it to the full queue
    // so we don't visit long-lived pages too often
    mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
    mi_page_to_full(page,pq);

    page = next;
  } // for each page

  mi_stat_counter_increase(heap->tld->stats.searches,count);

  if (page == NULL) {
    page = rpage;
    rpage = NULL;
  }
  if (rpage != NULL) {
    _mi_page_free(rpage,pq,false);
  }

  if (page == NULL) {
    page = mi_page_fresh(heap, pq);
  }
  else {
    mi_assert(pq->first == page);
  }
  mi_assert_internal(page == NULL || mi_page_immediate_available(page));
  return page;
}
// Find a page with free blocks of `size`.
static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
  mi_page_queue_t* pq = mi_page_queue(heap,size);
  mi_page_t* page = pq->first;
  if (page != NULL) {
    if (mi_option_get(mi_option_secure) >= 3 && page->capacity < page->reserved && ((_mi_heap_random(heap) & 1) == 1)) {
      // in secure mode, we extend half the time to increase randomness
      mi_page_extend_free(heap, page, &heap->tld->stats);
      mi_assert_internal(mi_page_immediate_available(page));
    }
    else {
      _mi_page_free_collect(page,false);
    }
    if (mi_page_immediate_available(page)) {
      return page; // fast path
    }
  }
  return mi_page_queue_find_free_ex(heap, pq);
}
/* -----------------------------------------------------------
  Users can register a deferred free function called
  when the `free` list is empty.
----------------------------------------------------------- */

static mi_deferred_free_fun* volatile deferred_free = NULL;

void _mi_deferred_free(mi_heap_t* heap, bool force) {
  heap->tld->heartbeat++;
  if (deferred_free != NULL && !heap->tld->recurse) {
    heap->tld->recurse = true;  // guard against recursion from within the callback
    deferred_free(force, heap->tld->heartbeat);
    heap->tld->recurse = false;
  }
}

void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
  deferred_free = fn;
}
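/* Example (illustrative sketch, not part of the allocator): an application can
   register a callback that drains its own deferred-free queue. `my_deferred_free`
   is a hypothetical user function; its signature follows the call site above
   (`deferred_free(force, heartbeat)`):

     static void my_deferred_free(bool force, unsigned long long heartbeat) {
       // called periodically based on the allocation heartbeat;
       // drain application-side deferred frees here, eagerly if `force`
     }

     // at startup:
     //   mi_register_deferred_free(&my_deferred_free);
*/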
/* -----------------------------------------------------------
  General allocation
----------------------------------------------------------- */

// A huge page is allocated directly without being in a queue
static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
  size_t block_size = _mi_os_good_alloc_size(size);
  mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
  mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size);
  if (page != NULL) {
    mi_assert_internal(mi_page_immediate_available(page));
    mi_assert_internal(page->block_size == block_size);
    mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
    mi_assert_internal(_mi_page_segment(page)->used==1);
    mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned
    page->heap = NULL;
    if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) {
      _mi_stat_increase(&heap->tld->stats.giant, block_size);
      _mi_stat_counter_increase(&heap->tld->stats.giant_count, 1);
    }
    else {
      _mi_stat_increase(&heap->tld->stats.huge, block_size);
      _mi_stat_counter_increase(&heap->tld->stats.huge_count, 1);
    }
  }
  return page;
}
// Generic allocation routine if the fast path in `alloc.c` does not succeed.
void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
{
  mi_assert_internal(heap != NULL);

  // initialize if necessary
  if (mi_unlikely(!mi_heap_is_initialized(heap))) {
    mi_thread_init();
    heap = mi_get_default_heap();
  }
  mi_assert_internal(mi_heap_is_initialized(heap));

  // call potential deferred free routines
  _mi_deferred_free(heap, false);

  // free delayed frees from other threads
  _mi_heap_delayed_free(heap);

  // huge allocation?
  mi_page_t* page;
  if (mi_unlikely(size > MI_LARGE_OBJ_SIZE_MAX)) {
    if (mi_unlikely(size > PTRDIFF_MAX)) {  // we don't allocate more than PTRDIFF_MAX
      page = NULL;
    }
    else {
      page = mi_huge_page_alloc(heap,size);
    }
  }
  else {
    // otherwise find a page with free blocks in our size-segregated queues
    page = mi_find_free_page(heap,size);
  }
  if (page == NULL) return NULL; // out of memory

  mi_assert_internal(mi_page_immediate_available(page));
  mi_assert_internal(page->block_size >= size);

  // and try the allocation again; this time it should succeed
  return _mi_page_malloc(heap, page, size);
}