#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"
#include <string.h>
/* Forward declarations of the raw OS-layer primitives this region layer
   builds on (presumably defined in the OS translation unit — TODO confirm). */
size_t _mi_os_large_page_size();
bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
bool _mi_os_commit(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_unreset(void* p, size_t size, mi_stats_t* stats);
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, mi_os_tld_t* tld);
// Maximum total address space that regions may ever cover on this platform.
#if (MI_INTPTR_SIZE==8)
#define MI_HEAP_REGION_MAX_SIZE (256 * (1ULL << 30))  // 256 GiB on 64-bit
#elif (MI_INTPTR_SIZE==4)
#define MI_HEAP_REGION_MAX_SIZE (3 * (1UL << 30))     // 3 GiB on 32-bit
#else
#error "define the maximum heap space allowed for regions on this platform"
#endif
// Segments are allocated at segment-size alignment.
#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
// One region tracks one block per bit of a uintptr_t map (one segment per block).
#define MI_REGION_MAP_BITS (MI_INTPTR_SIZE * 8)
#define MI_REGION_SIZE (MI_SEGMENT_SIZE * MI_REGION_MAP_BITS)
// A single region-backed allocation may use at most a quarter of a region.
#define MI_REGION_MAX_ALLOC_SIZE ((MI_REGION_MAP_BITS/4)*MI_SEGMENT_SIZE)
#define MI_REGION_MAX (MI_HEAP_REGION_MAX_SIZE / MI_REGION_SIZE)
// Map value when every block in the region is in use.
#define MI_REGION_MAP_FULL UINTPTR_MAX
// A region: a MI_REGION_SIZE chunk of OS memory carved into MI_REGION_MAP_BITS
// segment-sized blocks, with one map bit per block (1 = in use).
typedef struct mem_region_s {
volatile uintptr_t map;   // bitmap of in-use blocks
volatile void* start;     // start of the OS memory; NULL until first use
} mem_region_t;
// The fixed array of region slots, the count of slots made visible so far,
// and a hint index where the next allocation search should start.
static mem_region_t regions[MI_REGION_MAX];
static volatile size_t regions_count = 0; static volatile uintptr_t region_next_idx = 0;
// Number of segment-sized blocks needed to hold `size` bytes (rounded up).
static size_t mi_region_block_count(size_t size) {
  mi_assert_internal(size <= MI_REGION_MAX_ALLOC_SIZE);
  size_t count = size / MI_SEGMENT_SIZE;
  if ((size % MI_SEGMENT_SIZE) != 0) count++;
  return count;
}
static uintptr_t mi_region_block_mask(size_t blocks, size_t bitidx) {
mi_assert_internal(blocks + bitidx <= MI_REGION_MAP_BITS);
return ((((uintptr_t)1 << blocks) - 1) << bitidx);
}
// Round `size` up to a multiple of the OS large-page size; if rounding up
// would overflow SIZE_MAX, return `size` unchanged.
static size_t mi_good_commit_size(size_t size) {
  const size_t lpsize = _mi_os_large_page_size();
  if (size > (SIZE_MAX - lpsize)) return size;
  return _mi_align_up(size, lpsize);
}
// Return true when `p` lies inside any region's address range.
// Fix: `&regions_count` and `&regions[i].start` had been mangled into
// `®ions...` by an HTML-entity mis-encoding of `&reg`, breaking compilation.
bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
  size_t count = mi_atomic_read(&regions_count);
  for (size_t i = 0; i < count; i++) {
    uint8_t* start = (uint8_t*)mi_atomic_read_ptr(&regions[i].start);
    if (start != NULL && (uint8_t*)p >= start && (uint8_t*)p < start + MI_REGION_SIZE) return true;
  }
  return false;
}
// Sentinel stored in `region->start` while some thread is busy reserving the
// region's OS memory.
#define ALLOCATING ((void*)1)

// Commit the `blocks` blocks at `bitidx` in `region` whose map bits the
// caller already claimed. Reserves the region's OS memory on first use.
// On success sets `*p` to the blocks' start address and `*id` to the encoded
// (region, bit) id and returns true; returns false (after releasing the
// claimed map bits) if the OS allocation failed.
// Fix: restored `&region->...` / `&regions_count` from `®ion...` mojibake.
static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks, size_t size, bool commit, void** p, size_t* id, mi_os_tld_t* tld)
{
  size_t mask = mi_region_block_mask(blocks,bitidx);
  mi_assert_internal(mask != 0);
  mi_assert_internal((mask & mi_atomic_read(&region->map)) == mask);  // caller owns these bits

  // Ensure the region is reserved: either observe a valid start pointer, or
  // win the race to install the ALLOCATING sentinel and reserve it ourselves.
  void* start;
  do {
    start = mi_atomic_read_ptr(&region->start);
    if (start == NULL) {
      start = ALLOCATING;  // try to claim the right to allocate
    }
    else if (start == ALLOCATING) {
      mi_atomic_yield();   // another thread is reserving it; wait it out
      continue;
    }
  } while( start == ALLOCATING && !mi_atomic_compare_exchange_ptr(&region->start, ALLOCATING, NULL) );
  mi_assert_internal(start != NULL);

  // We won the race: reserve the region's memory from the OS.
  if (start == ALLOCATING) {
    start = _mi_os_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, mi_option_is_enabled(mi_option_eager_region_commit), tld);
    mi_atomic_write_ptr(&region->start, start);  // publish (NULL on failure so others can retry)
    if (start == NULL) {
      // Allocation failed: release the claimed map bits so others can proceed.
      size_t map;
      do {
        map = mi_atomic_read(&region->map);
      } while (!mi_atomic_compare_exchange(&region->map, map & ~mask, map));
      return false;
    }
    // Make this region slot visible to scans (only bumps count from idx to idx+1).
    mi_atomic_compare_exchange(&regions_count, idx+1, idx);
  }

  mi_assert_internal(start != NULL && start != ALLOCATING);
  mi_assert_internal(start == mi_atomic_read_ptr(&region->start));

  // Compute the blocks' start and commit the memory if not committed eagerly.
  void* blocks_start = (uint8_t*)start + (bitidx * MI_SEGMENT_SIZE);
  if (commit && !mi_option_is_enabled(mi_option_eager_region_commit)) {
    _mi_os_commit(blocks_start, mi_good_commit_size(size), tld->stats);
  }
  mi_atomic_write(&region_next_idx,idx);  // remember a good region to search from next time
  *p = blocks_start;
  *id = (idx*MI_REGION_MAP_BITS) + bitidx;
  return true;
}
// Scan `region` for a run of `blocks` free bits, claim it atomically, and
// commit the corresponding memory. Returns false only on OS allocation
// failure; returns true with `*p` left NULL when this region has no suitable
// free run (the caller should try another region).
// Fix: restored `&region->map` from `®ion->map` mojibake.
static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size, bool commit, void** p, size_t* id, mi_os_tld_t* tld)
{
  mi_assert_internal(p != NULL && id != NULL);
  mi_assert_internal(blocks < MI_REGION_MAP_BITS);
  const uintptr_t mask = mi_region_block_mask(blocks,0);
  const size_t bitidx_max = MI_REGION_MAP_BITS - blocks;
  uintptr_t map = mi_atomic_read(&region->map);
  uintptr_t m = mask;  // `mask` shifted to the current bit position
  for(size_t bitidx = 0; bitidx <= bitidx_max; bitidx++, m <<= 1) {
    if ((map & m) == 0) {  // every bit in this run is free?
      mi_assert_internal((m >> bitidx) == mask);
      uintptr_t newmap = map | m;
      mi_assert_internal((newmap^map) >> bitidx == mask);
      if (!mi_atomic_compare_exchange(&region->map, newmap, map)) {
        // Raced with another thread: reload the map and keep scanning.
        map = mi_atomic_read(&region->map);
      }
      else {
        // Claimed the bits; now reserve/commit the actual memory.
        return mi_region_commit_blocks(region, idx, bitidx, blocks, size, commit, p, id, tld);
      }
    }
  }
  return true;  // no error, but no free run found either (*p stays NULL)
}
// Try to allocate `blocks` from region `idx`, skipping it cheaply when its
// map is already full. Same return contract as `mi_region_alloc_blocks`.
// Fix: restored `&regions[idx]` / `&region->map` from `®ion...` mojibake.
static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, bool commit, void** p, size_t* id, mi_os_tld_t* tld)
{
  mi_assert_internal(idx < MI_REGION_MAX);
  mem_region_t* region = &regions[idx];
  uintptr_t m = mi_atomic_read(&region->map);
  if (m != MI_REGION_MAP_FULL) {
    return mi_region_alloc_blocks(region, idx, blocks, size, commit, p, id, tld);
  }
  else {
    return true;  // region is full: no error, just no space (*p stays NULL)
  }
}
// Allocate `size` bytes aligned to `alignment`, preferably from a region.
// Oversized or over-aligned requests fall back to the OS directly. On return
// `*id` identifies the allocation for `_mi_mem_free` (SIZE_MAX means "not
// from a region").
// Fixes: restored `&regions_count` / `&region_next_idx` from `®ion...`
// mojibake, and guarded the final assertion against `alignment == 0`
// (passed by `_mi_mem_alloc`), where `% alignment` is undefined behavior.
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t* id, mi_os_tld_t* tld)
{
  mi_assert_internal(id != NULL && tld != NULL);
  mi_assert_internal(size > 0);
  *id = SIZE_MAX;

  // Too large or too aligned for a region? Go straight to the OS.
  if (size > MI_REGION_MAX_ALLOC_SIZE || alignment > MI_SEGMENT_ALIGN) {
    return _mi_os_alloc_aligned(mi_good_commit_size(size), alignment, true, tld);
  }

  size = _mi_align_up(size, _mi_os_page_size());
  size_t blocks = mi_region_block_count(size);
  mi_assert_internal(blocks > 0 && blocks <= 8*MI_INTPTR_SIZE);

  // First pass: scan the existing regions, starting at the hint index.
  void* p = NULL;
  size_t count = mi_atomic_read(&regions_count);
  size_t idx = mi_atomic_read(&region_next_idx);
  for (size_t visited = 0; visited < count; visited++, idx++) {
    if (idx >= count) idx = 0;  // wrap around
    if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, &p, id, tld)) return NULL;  // error
    if (p != NULL) break;
  }

  // Second pass: try to open up to 4 fresh regions past the current count.
  if (p == NULL) {
    for (idx = count; idx < count + 4 && idx < MI_REGION_MAX; idx++) {
      if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, &p, id, tld)) return NULL;  // error
      if (p != NULL) break;
    }
  }

  // Last resort: allocate directly from the OS (`*id` stays SIZE_MAX).
  if (p == NULL) {
    p = _mi_os_alloc_aligned(size, alignment, commit, tld);
  }
  mi_assert_internal( p == NULL || alignment == 0 || (uintptr_t)p % alignment == 0);
  return p;
}
// Allocate `size` bytes from a region with no extra alignment requirement
// (alignment 0); see `_mi_mem_alloc_aligned` for the `id` contract.
void* _mi_mem_alloc(size_t size, bool commit, size_t* id, mi_os_tld_t* tld) {
  void* p = _mi_mem_alloc_aligned(size, 0, commit, id, tld);
  return p;
}
// Free memory previously returned by `_mi_mem_alloc(_aligned)`. `id` is the
// value stored at allocation time: SIZE_MAX for a direct OS allocation,
// otherwise the encoded (region index, bit index) of the blocks.
// Fix: restored `&regions[idx]` / `&region->map` from `®ion...` mojibake.
void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) {
  mi_assert_internal(size > 0 && stats != NULL);
  if (p==NULL) return;
  if (size==0) return;
  if (id == SIZE_MAX) {
    // Not from a region: give it back to the OS directly.
    _mi_os_free(p, size, stats);
  }
  else {
    // Decode the region slot and block range from the id.
    mi_assert_internal(size <= MI_REGION_MAX_ALLOC_SIZE);
    if (size > MI_REGION_MAX_ALLOC_SIZE) return;
    size = _mi_align_up(size, _mi_os_page_size());
    size_t idx = (id / MI_REGION_MAP_BITS);
    size_t bitidx = (id % MI_REGION_MAP_BITS);
    size_t blocks = mi_region_block_count(size);
    size_t mask = mi_region_block_mask(blocks, bitidx);
    mi_assert_internal(idx < MI_REGION_MAX);
    if (idx >= MI_REGION_MAX) return;  // defensive: corrupt id
    mem_region_t* region = &regions[idx];
    mi_assert_internal((mi_atomic_read(&region->map) & mask) == mask );  // bits must be claimed
    void* start = mi_atomic_read_ptr(&region->start);
    mi_assert_internal(start != NULL);
    void* blocks_start = (uint8_t*)start + (bitidx * MI_SEGMENT_SIZE);
    mi_assert_internal(blocks_start == p);
    mi_assert_internal(bitidx + blocks <= MI_REGION_MAP_BITS);
    if (blocks_start != p || bitidx + blocks > MI_REGION_MAP_BITS) return;  // defensive: mismatched free

    // NOTE(review): decommit/reset of the freed blocks is intentionally left
    // out here — both branches below are empty placeholders; confirm against
    // the OS layer before enabling anything in them.
    if (!mi_option_is_enabled(mi_option_large_os_pages)) {
      if (mi_option_is_enabled(mi_option_eager_region_commit)) {
        // (placeholder: e.g. reset)
      }
      else {
        // (placeholder: e.g. decommit)
      }
    }

    // Atomically clear our bits so the blocks become available again.
    uintptr_t map;
    uintptr_t newmap;
    do {
      map = mi_atomic_read(&region->map);
      newmap = map & ~mask;
    } while (!mi_atomic_compare_exchange(&region->map, newmap, map));
  }
}
// Release the OS memory of every region that currently has no blocks in use.
// Fix: restored `&regions[i]` / `&region->map` from `®ion...` mojibake.
void _mi_mem_collect(mi_stats_t* stats) {
  for (size_t i = 0; i < regions_count; i++) {
    mem_region_t* region = &regions[i];
    if (mi_atomic_read(&region->map) == 0 && region->start != NULL) {
      // The region looks empty: claim ALL bits at once so no other thread
      // can allocate from it while we free the memory.
      uintptr_t m;
      do {
        m = mi_atomic_read(&region->map);
      } while(m == 0 && !mi_atomic_compare_exchange(&region->map, ~((uintptr_t)0), 0 ));
      if (m == 0) {
        // We own the whole region: free its memory and reset the slot.
        if (region->start != NULL) _mi_os_free((void*)region->start, MI_REGION_SIZE, stats);
        region->start = 0;
        mi_atomic_write(&region->map,0);
      }
    }
  }
}
/* --------------------------------------------------------------------------
   The remaining operations are thin pass-throughs to the OS primitives; they
   exist so the rest of the allocator only talks to the `_mi_mem_*` layer.
-------------------------------------------------------------------------- */

// Commit the pages covering [p, p+size).
bool _mi_mem_commit(void* p, size_t size, mi_stats_t* stats) {
  bool ok = _mi_os_commit(p, size, stats);
  return ok;
}

// Decommit the pages covering [p, p+size).
bool _mi_mem_decommit(void* p, size_t size, mi_stats_t* stats) {
  bool ok = _mi_os_decommit(p, size, stats);
  return ok;
}

// Reset (hint that the contents are no longer needed) [p, p+size).
bool _mi_mem_reset(void* p, size_t size, mi_stats_t* stats) {
  bool ok = _mi_os_reset(p, size, stats);
  return ok;
}

// Undo a previous reset of [p, p+size).
bool _mi_mem_unreset(void* p, size_t size, mi_stats_t* stats) {
  bool ok = _mi_os_unreset(p, size, stats);
  return ok;
}

// Remove access protection from [p, p+size).
bool _mi_mem_protect(void* p, size_t size) {
  bool ok = _mi_os_protect(p, size);
  return ok;
}

// Restore access protection on [p, p+size).
bool _mi_mem_unprotect(void* p, size_t size) {
  bool ok = _mi_os_unprotect(p, size);
  return ok;
}