#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdarg.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#define CAML_NAME_SPACE
#define CAML_INTERNALS
#include "boxroot.h"
#include <caml/minor_gc.h>
#include <caml/major_gc.h>
#if defined(_POSIX_TIMERS) && defined(_POSIX_MONOTONIC_CLOCK)
#define POSIX_CLOCK
#include <time.h>
#endif
#include "ocaml_hooks.h"
#include "platform.h"
/* Forcing the remote-deallocation path only makes sense when
   multithread support is compiled in. */
static_assert(!BXR_FORCE_REMOTE || BXR_MULTITHREAD,
"invalid configuration");
/* Pool classes: a pool's class bounds the generation of the values its
   slots may contain. */
enum {
YOUNG = BXR_CLASS_YOUNG,
OLD,
UNTRACKED
};
/* A boxroot is a pointer to a single slot inside a pool. */
struct bxr_private {
bxr_slot contents;
};
/* Free list filled by remote (non-owning) threads; merged back into the
   pool's main free list by gc_pool(). */
typedef struct {
_Atomic(bxr_slot_ref) a_next;
bxr_slot_ref end;
atomic_int a_alloc_count;
} atomic_free_list;
/* A pool: header followed by a flexible array of slots. Free slots are
   threaded into a singly-linked list ending in a sentinel equal to the
   pool address itself. Pools are linked into doubly-linked rings. */
typedef struct pool {
bxr_free_list free_list;
struct pool *prev;
struct pool *next;
alignas(Cache_line_size) atomic_free_list delayed_fl;
mutex_t mutex;
bxr_slot roots[];
} pool;
#define POOL_CAPACITY ((int)((BXR_POOL_SIZE - sizeof(pool)) / sizeof(bxr_slot)))
static_assert(BXR_POOL_SIZE / sizeof(bxr_slot) <= INT_MAX, "pool size too large");
static_assert(POOL_CAPACITY >= 1, "pool size too small");
/* The fast path casts pool* to bxr_free_list*, so free_list must be first. */
static_assert(offsetof(pool, free_list) == 0, "incorrect free_list offset");
/* Per-domain rings: old, young, the current allocation pool, and a cache
   of empty (untracked) pools. */
typedef struct {
pool *old;
pool *young;
pool *current;
pool *free;
} pool_rings;
static pool_rings *pools[Num_domains] = { NULL };
/* Rings of pools whose owning domain has terminated; protected by
   orphan_mutex until adopted by another domain. */
static pool_rings orphan = { NULL, NULL, NULL, NULL };
static mutex_t orphan_mutex = BXR_MUTEX_INITIALIZER;
/* Sentinel free list that always fails fast-path allocation. */
static bxr_free_list empty_fl = { (bxr_slot_ref)&empty_fl, NULL, -1, -1, UNTRACKED };
_Thread_local ptrdiff_t bxr_cached_dom_id = -1;
/* Indexed by dom_id + 1: entry 0 serves threads not yet bound to a
   domain (bxr_cached_dom_id == -1); remaining entries default to NULL
   until init_pool_rings runs for that domain. */
bxr_free_list *bxr_current_free_list[Num_domains + 1] =
{ &empty_fl,
&empty_fl,
};
/* Publish `fl` as the free list used by fast-path allocation on `dom_id`. */
static void set_current_fl(int dom_id, bxr_free_list *fl)
{
  DEBUGassert(dom_id >= 0 && dom_id < Num_domains);
  /* Slot 0 of the array is reserved for unbound threads (dom_id == -1). */
  bxr_free_list **slot = &bxr_current_free_list[dom_id + 1];
  *slot = fl;
}
static void init_pool_rings(int dom_id)
{
pool_rings *local = pools[dom_id];
if (local == NULL) local = malloc(sizeof(pool_rings));
if (local == NULL) return;
local->old = NULL;
local->young = NULL;
local->current = NULL;
local->free = NULL;
set_current_fl(dom_id, &empty_fl);
pools[dom_id] = local;
}
/* Global statistics counters; updated with relaxed atomics (and plain
   read-modify-writes for DEBUG-only counters) — values are approximate
   under contention. */
static struct {
atomic_llong minor_collections;
atomic_llong major_collections;
atomic_llong total_create_young;
atomic_llong total_create_old;
atomic_llong total_create_slow;
atomic_llong total_delete_young;
atomic_llong total_delete_old;
atomic_llong total_delete_slow;
atomic_llong total_modify;
atomic_llong total_modify_slow;
atomic_llong total_gc_pool_rings;
atomic_llong total_scanning_work_minor;
atomic_llong total_scanning_work_major;
/* Times in nanoseconds (see time_counter). */
atomic_llong total_minor_time;
atomic_llong total_major_time;
atomic_llong total_gc_pool_time;
atomic_llong peak_minor_time;
atomic_llong peak_major_time;
atomic_llong total_alloced_pools;
atomic_llong total_emptied_pools;
atomic_llong total_freed_pools;
atomic_llong live_pools;
atomic_llong peak_pools;
atomic_llong ring_operations;
/* DEBUG-only counters. */
atomic_llong young_hit_gen;
atomic_llong young_hit_young;
atomic_llong get_pool_header;
atomic_llong is_pool_member;
} stats;
/* Map a slot pointer back to its enclosing pool header (pools are
   allocated aligned on BXR_POOL_SIZE). */
static inline pool * get_pool_header(bxr_slot_ref s)
{
if (DEBUG) incr(&stats.get_pool_header);
return (pool *)Bxr_get_pool_header(s);
}
/* True iff slot content v is a free-list link of pool p (an allocated
   slot holds an OCaml value instead). The mask uses BXR_POOL_SIZE - 2
   rather than - 1 so the low tag bit is tolerated; assumes BXR_POOL_SIZE
   is a power of two. */
static inline bool is_pool_member(bxr_slot v, pool *p)
{
if (DEBUG) incr(&stats.is_pool_member);
return (uintptr_t)p == ((uintptr_t)v.as_slot_ref & ~((uintptr_t)BXR_POOL_SIZE - 2));
}
/* The free-list terminator of pool p is the pool address itself. */
static inline bool is_empty_free_list(bxr_slot_ref v, pool *p)
{
return (v == (bxr_slot_ref)p);
}
/* Link p -> q inside a doubly-linked pool ring. */
static inline void ring_link(pool *p, pool *q)
{
p->next = q;
q->prev = p;
incr(&stats.ring_operations);
}
/* Append the ring `source` at the back of the ring headed by *target.
   `source` may be NULL (no-op); *target is left unchanged as head. */
static inline void ring_push_back(pool *source, pool **target)
{
  if (source == NULL) return;
  DEBUGassert(source->prev == source && source->next == source);
  DEBUGassert(source != *target);
  pool *head = *target;
  if (head == NULL) {
    *target = source;
    return;
  }
  DEBUGassert(head->free_list.class == source->free_list.class);
  /* Splice source between the last element of each ring and the head. */
  pool *head_last = head->prev;
  pool *source_last = source->prev;
  ring_link(head_last, source);
  ring_link(source_last, head);
}
/* Detach and return the head of a non-empty ring, leaving the popped
   pool as a singleton ring. *target becomes the next pool or NULL. */
static pool * ring_pop(pool **target)
{
  pool *head = *target;
  DEBUGassert(head != NULL);
  if (head->next == head) {
    *target = NULL;                     /* head was the only element */
  } else {
    *target = head->next;
    ring_link(head->prev, head->next);  /* unlink head */
  }
  ring_link(head, head);                /* head is now a singleton */
  return head;
}
/* Sentinel terminating a pool's free list: the pool address itself. */
static inline bxr_slot_ref empty_free_list(pool *p) { return (bxr_slot_ref)p; }
/* A pool is full when its main free list is empty (remote frees in the
   delayed list are not counted here). */
static inline bool is_full_pool(pool *p)
{
return is_empty_free_list(p->free_list.next, p);
}
/* Allocate and initialise a fresh, empty, untracked pool as a singleton
   ring; returns NULL on allocation failure. */
static pool * get_empty_pool()
{
  long long n_live = 1 + incr(&stats.live_pools);
  if (n_live > stats.peak_pools) stats.peak_pools = n_live;
  pool *p = bxr_alloc_uninitialised_pool(BXR_POOL_SIZE);
  if (p == NULL) return NULL;
  incr(&stats.total_alloced_pools);
  ring_link(p, p);
  /* Main free list covers every slot; no owner, no class yet. */
  p->free_list.next = p->roots;
  p->free_list.alloc_count = 0;
  p->free_list.end = &p->roots[POOL_CAPACITY - 1];
  p->free_list.domain_id = -1;
  p->free_list.class = UNTRACKED;
  /* Delayed (remote) free list starts empty. */
  store_relaxed(&p->delayed_fl.a_next, empty_free_list(p));
  store_relaxed(&p->delayed_fl.a_alloc_count, 0);
  p->delayed_fl.end = NULL;
  bxr_initialize_mutex(&p->mutex);
  /* Thread each slot to its successor; the last slot holds the sentinel. */
  bxr_slot_ref last = &p->roots[POOL_CAPACITY - 1];
  last->as_slot_ref = empty_free_list(p);
  for (bxr_slot_ref s = last - 1; s >= p->roots; --s)
    s->as_slot_ref = s + 1;
  return p;
}
/* Allocation count of p once pending remote frees are merged in
   (a_alloc_count carries the — negative — pending delta). */
static int anticipated_alloc_count(pool *p)
{
  int delayed = load_relaxed(&p->delayed_fl.a_alloc_count);
  return p->free_list.alloc_count + delayed;
}
/* Merge the delayed (remotely-filled) free list of p into its main free
   list, under the pool mutex. Returns the pending remote-free count that
   was reclaimed (0 if nothing to do); callers only compare it to 0. */
static int gc_pool(pool *p)
{
int old_alloc_count = load_relaxed(&p->delayed_fl.a_alloc_count);
if (0 == old_alloc_count) return 0;
bxr_mutex_lock(&p->mutex);
/* If the main list was empty its `end` is stale: the delayed list's
   tail becomes the new end of the merged list. */
if (is_full_pool(p)) p->free_list.end = p->delayed_fl.end;
p->free_list.alloc_count = anticipated_alloc_count(p);
store_relaxed(&p->delayed_fl.a_alloc_count, 0);
/* Concatenate: delayed list first, then the previous main list. */
bxr_slot_ref list = p->free_list.next;
p->free_list.next = load_relaxed(&p->delayed_fl.a_next);
store_relaxed(&p->delayed_fl.a_next, empty_free_list(p));
p->delayed_fl.end->as_slot_ref = list;
bxr_mutex_unlock(&p->mutex);
return old_alloc_count;
}
/* Release every pool of a ring back to the OS allocator; leaves *ring NULL. */
static void free_pool_ring(pool **ring)
{
  while (*ring != NULL) {
    bxr_free_pool(ring_pop(ring));
    incr(&stats.total_freed_pools);
  }
}
/* Release all four rings of a domain's pool_rings record. */
static void free_pool_rings(pool_rings *ps)
{
  pool **rings[] = { &ps->old, &ps->young, &ps->current, &ps->free };
  for (size_t i = 0; i < sizeof(rings) / sizeof(rings[0]); i++)
    free_pool_ring(rings[i]);
}
/* A pool below the deallocation threshold is a good allocation target. */
static inline bool is_not_too_full(pool *p)
{
  int threshold = (int)(BXR_DEALLOC_THRESHOLD / sizeof(bxr_slot));
  return p->free_list.alloc_count <= threshold;
}
/* Install p (possibly NULL) as the current allocation pool of dom_id and
   point the fast path at its free list (or at the empty sentinel). */
static void set_current_pool(int dom_id, pool *p)
{
  DEBUGassert(pools[dom_id]->current == NULL);
  if (p == NULL) {
    set_current_fl(dom_id, &empty_fl);
    return;
  }
  p->free_list.domain_id = dom_id;
  pools[dom_id]->current = p;
  p->free_list.class = YOUNG;
  set_current_fl(dom_id, &p->free_list);
}
static void reclassify_pool(pool **source, int dom_id, int cl);
/* Demote p to the free ring when it became empty, or move it to the head
   of its ring when it has room again; no-op for the current pool or a
   still-too-full pool. */
static void try_demote_pool(int dom_id, pool *p)
{
DEBUGassert(p->free_list.class != UNTRACKED);
pool_rings *remote = pools[dom_id];
if (p == remote->current || !is_not_too_full(p)) return;
int cl = (p->free_list.alloc_count == 0) ? UNTRACKED : p->free_list.class;
/* When p is not a ring head, pass `&p` as a fake one-element source:
   ring_pop unlinks p in place without touching the real head pointer. */
pool **source = (p == remote->old) ? &remote->old :
(p == remote->young) ? &remote->young : &p;
reclassify_pool(source, dom_id, cl);
}
/* Pop the head of *target if it exists and still has free slots. */
static inline pool * pop_available(pool **target)
{
  pool *head = *target;
  if (head == NULL || is_full_pool(head)) return NULL;
  return ring_pop(target);
}
/* Pick a pool with free slots and make it current. Preference order:
   young ring, a not-too-full old ring head, the free ring, then a fresh
   allocation. May install NULL on allocation failure. */
static pool * find_available_pool(int dom_id)
{
  pool_rings *local = pools[dom_id];
  pool *p = pop_available(&local->young);
  if (p == NULL) {
    if (local->old != NULL && is_not_too_full(local->old))
      p = pop_available(&local->old);
    if (p == NULL) p = pop_available(&local->free);
    if (p == NULL) p = get_empty_pool();
  }
  DEBUGassert(local->current == NULL);
  set_current_pool(dom_id, p);
  return p;
}
static void validate_all_pools(int dom_id);
/* Move the head pool of *source into the ring of class cl on dom_id.
   Called with the domain lock held. */
static void reclassify_pool(pool **source, int dom_id, int cl)
{
DEBUGassert(*source != NULL);
pool_rings *local = pools[dom_id];
pool *p = ring_pop(source);
p->free_list.domain_id = dom_id;
pool **target = NULL;
switch (cl) {
case OLD: target = &local->old; break;
case YOUNG: target = &local->young; break;
case UNTRACKED:
target = &local->free;
incr(&stats.total_emptied_pools);
decr(&stats.live_pools);
break;
}
p->free_list.class = cl;
ring_push_back(p, target);
/* Make p the ring head when it has room, so that pop_available finds an
   available pool first. NOTE(review): presumably intentional for all
   classes, including UNTRACKED — confirm against upstream boxroot. */
if (is_not_too_full(p)) *target = p;
}
/* After a minor collection every young root has been promoted, so all
   young pools can be reclassified as old. */
static void promote_young_pools(int dom_id)
{
  pool_rings *local = pools[dom_id];
  while (local->young != NULL)
    reclassify_pool(&local->young, dom_id, OLD);
  DEBUGassert(local->current == NULL);
}
/* Library lifecycle state: BOXROOT_NOT_SETUP until first root creation,
   then RUNNING, TORE_DOWN or INVALID. Written under init_mutex, read
   with a relaxed load. */
static atomic_int status = BOXROOT_NOT_SETUP;
int boxroot_status()
{
return load_relaxed(&status);
}
static bool setup();
static void try_gc_and_reclassify_one_pool_no_stw(pool **source, int dom_id);
/* Slow path of boxroot_create: runs on first use (setup, domain-id
   caching) and when the current pool is full. Returns NULL with errno
   set to EPERM when called without the domain lock / runtime. */
boxroot bxr_create_slow(value init)
{
incr(&stats.total_create_slow);
if (Caml_state_opt == NULL) { errno = EPERM; return NULL; }
if (0 == setup()) return NULL;
#if !OCAML_MULTICORE
if (!bxr_domain_lock_held()) { errno = EPERM; return NULL; }
if (!bxr_check_thread_hooks()) {
status = BOXROOT_INVALID;
return NULL;
}
#endif
int dom_id = Domain_id;
if (pools[dom_id] == NULL) init_pool_rings(dom_id);
pool_rings *local = pools[dom_id];
if (local == NULL) return NULL;
if (bxr_cached_dom_id == -1) {
/* First call on this thread: cache the domain id and retry the fast
   path, which may now succeed. */
bxr_cached_dom_id = dom_id;
return boxroot_create(init);
} else {
DEBUGassert(bxr_cached_dom_id == dom_id);
}
if (local->current != NULL) {
/* The current pool is exhausted: retire it to the young ring and try
   to reclaim remotely-freed slots in one young pool. */
DEBUGassert(is_full_pool(local->current));
reclassify_pool(&local->current, dom_id, YOUNG);
try_gc_and_reclassify_one_pool_no_stw(&local->young, dom_id);
}
pool *p = find_available_pool(dom_id);
if (p == NULL) return NULL;
DEBUGassert(!is_full_pool(p));
/* Retry the fast path against the freshly installed pool. */
return boxroot_create(init);
}
/* Out-of-line definitions for the inline accessors declared in boxroot.h. */
extern inline value boxroot_get(boxroot root);
extern inline value const * boxroot_get_ref(boxroot root);
/* DEBUG-only hook: count creations by generation of the initial value. */
void bxr_create_debug(value init)
{
DEBUGassert(Caml_state_opt != NULL);
if (Is_block(init) && Is_young(init)) incr(&stats.total_create_young);
else incr(&stats.total_create_old);
}
extern inline boxroot boxroot_create(value init);
extern inline bool bxr_free_slot(bxr_free_list *fl, boxroot root);
/* DEBUG-only hook: count deletions by generation of the stored value. */
void bxr_delete_debug(boxroot root)
{
  DEBUGassert(root != NULL);
  value v = boxroot_get(root);
  bool young = Is_block(v) && Is_young(v);
  incr(young ? &stats.total_delete_young : &stats.total_delete_old);
}
/* Push root onto p's delayed free list without the domain lock: a
   lock-free stack push via atomic exchange. The first pusher records the
   list tail in `end` (read later under the pool mutex by gc_pool). The
   release decrement of a_alloc_count publishes the push. */
static void free_slot_atomic(pool *p, boxroot root)
{
bxr_slot_ref new_next = &root->contents;
bxr_slot_ref old_next = atomic_exchange_explicit(&p->delayed_fl.a_next, new_next,
memory_order_relaxed);
new_next->as_slot_ref = old_next;
if (BXR_UNLIKELY(is_empty_free_list(old_next, p)))
p->delayed_fl.end = new_next;
decr_release(&p->delayed_fl.a_alloc_count);
}
/* Slow path of boxroot_delete. `remote` is false when the caller already
   freed the slot locally and the pool may just need demoting; otherwise
   the slot is pushed onto the delayed list — atomically when this thread
   holds a domain lock, else under the pool mutex. */
void bxr_delete_slow(bxr_free_list *fl, boxroot root, bool remote)
{
incr(&stats.total_delete_slow);
pool *p = (pool *)fl;
if (!remote) {
try_demote_pool(p->free_list.domain_id, p);
} else if (OCAML_MULTICORE && bxr_domain_lock_held()) {
free_slot_atomic(p, root);
} else {
bxr_mutex_lock(&p->mutex);
free_slot_atomic(p, root);
bxr_mutex_unlock(&p->mutex);
}
}
extern inline void boxroot_delete(boxroot root);
/* Slow path of boxroot_modify: a young value may require moving the root
   into a young pool, implemented as create-new + delete-old. Returns
   false (keeping the old root valid) on allocation failure. */
bool bxr_modify_slow(boxroot *root_ref, value new_value)
{
  incr(&stats.total_modify_slow);
  boxroot old_root = *root_ref;
  if (!Is_block(new_value) || !Is_young(new_value)) {
    /* Old or immediate value: update in place. */
    old_root->contents.as_value = new_value;
    return true;
  }
  boxroot fresh = boxroot_create(new_value);
  if (BXR_UNLIKELY(fresh == NULL)) return false;
  *root_ref = fresh;
  boxroot_delete(old_root);
  return true;
}
/* DEBUG-only hook: count modifications. */
void bxr_modify_debug(boxroot *rootp)
{
  DEBUGassert(*rootp != NULL);
  incr(&stats.total_modify);
}
extern inline bool boxroot_modify(boxroot *rootp, value new_value);
/* DEBUG invariant checks for one pool: free-list shape, alloc_count
   consistency, and generation of stored values. */
static void validate_pool(pool *pl)
{
if (pl->free_list.next == NULL) {
/* Not yet initialised by get_empty_pool. */
assert(pl->free_list.class == UNTRACKED);
return;
}
/* Walk the main free list; it must stay inside the pool and terminate. */
bxr_slot_ref curr = pl->free_list.next;
int pos = 0;
for (; !is_empty_free_list(curr, pl); curr = curr->as_slot_ref, pos++)
{
assert(pos < POOL_CAPACITY);
assert(curr >= pl->roots && curr < pl->roots + POOL_CAPACITY);
}
assert(pos == POOL_CAPACITY - pl->free_list.alloc_count);
/* Count allocated slots and check their values' generation. */
int count = 0;
for(int i = 0; i < POOL_CAPACITY; i++) {
bxr_slot s = pl->roots[i];
/* Compensate the stats counter bumped inside is_pool_member so that
   validation does not skew the DEBUG statistics. */
--stats.is_pool_member;
if (!is_pool_member(s, pl)) {
value v = s.as_value;
/* Only YOUNG pools may hold minor-heap values. */
if (pl->free_list.class != YOUNG && Is_block(v)) assert(!Is_young(v));
++count;
}
}
assert(count == anticipated_alloc_count(pl));
}
/* DEBUG invariant checks for a whole ring: ownership, class, pool
   contents and doubly-linked structure. */
static void validate_ring(pool **ring, int dom_id, int cl)
{
  pool *first = *ring;
  if (first == NULL) return;
  pool *p = first;
  do {
    assert(p->free_list.domain_id == dom_id);
    assert(p->free_list.class == cl);
    validate_pool(p);
    /* Ring links must be mutually consistent. */
    assert(p->next != NULL);
    assert(p->next->prev == p);
    assert(p->prev != NULL);
    assert(p->prev->next == p);
    p = p->next;
  } while (p != first);
}
static void validate_all_pools(int dom_id)
{
pool_rings *local = pools[dom_id];
validate_ring(&local->old, dom_id, OLD);
validate_ring(&local->young, dom_id, YOUNG);
validate_ring(&local->current, dom_id, YOUNG);
validate_ring(&local->free, dom_id, UNTRACKED);
}
static void gc_pool_rings(int dom_id);
/* Hand over all tracked pools of a terminating domain to the global
   orphan rings, free its empty pools, and reset its rings. */
static void orphan_pools(int dom_id)
{
  pool_rings *local = pools[dom_id];
  if (local == NULL) return;
  /* Reclaim remotely-freed slots before handing the pools over. */
  gc_pool_rings(dom_id);
  bxr_mutex_lock(&orphan_mutex);
  ring_push_back(local->old, &orphan.old);
  ring_push_back(local->young, &orphan.young);
  ring_push_back(local->current, &orphan.young);
  bxr_mutex_unlock(&orphan_mutex);
  /* Empty pools need no adoption; release them now. */
  free_pool_ring(&local->free);
  init_pool_rings(dom_id);
}
/* Take ownership of every orphaned pool for domain dom_id. */
static void adopt_orphaned_pools(int dom_id)
{
  bxr_mutex_lock(&orphan_mutex);
  while (orphan.old != NULL) reclassify_pool(&orphan.old, dom_id, OLD);
  while (orphan.young != NULL) reclassify_pool(&orphan.young, dom_id, YOUNG);
  bxr_mutex_unlock(&orphan_mutex);
}
/* Merge the head pool's delayed frees; if anything was reclaimed, move
   the pool to the free ring (now empty) or refresh its position in its
   class ring (now not too full). */
static void try_gc_and_reclassify_pool(pool **source, int dom_id)
{
  pool *p = *source;
  if (gc_pool(p) == 0) return;
  if (p->free_list.alloc_count == 0)
    reclassify_pool(source, dom_id, UNTRACKED);
  else if (is_not_too_full(p))
    reclassify_pool(source, dom_id, p->free_list.class);
}
/* Outside a stop-the-world section, reclaim at most one pool of the ring
   whose slots have all been freed remotely. The acquire fence pairs with
   the release decrements in free_slot_atomic, making the remote writes to
   the slots visible before the pool is reused. */
static void try_gc_and_reclassify_one_pool_no_stw(pool **source, int dom_id)
{
pool *start = *source;
pool *p = start;
do {
if (anticipated_alloc_count(p) == 0) {
atomic_thread_fence(memory_order_acquire);
/* Use `&p` as a fake ring head when p is not the real head (same
   trick as in try_demote_pool). */
pool **new_source = (p == start) ? source : &p;
try_gc_and_reclassify_pool(new_source, dom_id);
return;
}
p = p->next;
} while (p != start);
}
/* Reclaim delayed frees in every pool of a ring. The traversal is
   careful because try_gc_and_reclassify_pool can unlink the visited pool
   and change the ring head: first advance while the head keeps changing,
   then walk the remainder with `&p` as an in-place source. */
static void gc_ring(pool **ring, int dom_id)
{
if (!BXR_MULTITHREAD) return;
pool *p = *ring;
if (p == NULL) return;
/* Phase 1: process the head while it remains the head. */
while (p == *ring) {
pool *next = p->next;
try_gc_and_reclassify_pool(ring, dom_id);
if (p == next)
return;
p = next;
}
/* Phase 2: process the rest of the ring until we wrap to the head. */
do {
pool *next = p->next;
try_gc_and_reclassify_pool(&p, dom_id);
p = next;
} while (p != *ring);
}
static long long time_counter(void);
/* Reclaim delayed frees across all tracked rings of a domain; retires
   the current pool so it is covered too. Timed for statistics. */
static void gc_pool_rings(int dom_id)
{
  incr(&stats.total_gc_pool_rings);
  long long t0 = time_counter();
  pool_rings *local = pools[dom_id];
  if (local->current != NULL) {
    reclassify_pool(&local->current, dom_id, YOUNG);
    set_current_pool(dom_id, NULL);
  }
  gc_ring(&local->young, dom_id);
  gc_ring(&local->old, dom_id);
  stats.total_gc_pool_time += time_counter() - t0;
}
/* Scan every allocated root of pl with `action` (major-collection scan).
   Stops once all anticipated allocations have been visited. Returns the
   number of slots visited (scanning work).
   Fix: the slot address passed to CALL_GC_ACTION was the garbled token
   `¤t->as_value` (a mangled `&current->as_value`), which does not
   compile; restore the address-of expression. */
static int scan_pool_gen(scanning_action action, void *data, pool *pl)
{
  int allocs_to_find = anticipated_alloc_count(pl);
  int young_hit = 0;
  bxr_slot_ref current = pl->roots;
  while (allocs_to_find) {
    DEBUGassert(current < &pl->roots[POOL_CAPACITY]);
    bxr_slot s = *current;
    if (!is_pool_member(s, pl)) {
      /* Allocated slot: holds an OCaml value, not a free-list link. */
      --allocs_to_find;
      value v = s.as_value;
      if (DEBUG && Is_block(v) && Is_young(v)) ++young_hit;
      CALL_GC_ACTION(action, data, v, &current->as_value);
    }
    ++current;
  }
  stats.young_hit_gen += young_hit;
  return current - pl->roots;
}
/* Minor-collection scan of pl: visit only slots whose value lies in the
   minor heap address range. Returns the number of slots visited.
   Fix: the slot address passed to CALL_GC_ACTION was the garbled token
   `¤t->as_value` (a mangled `&current->as_value`), which does not
   compile; restore the address-of expression. */
static int scan_pool_young(scanning_action action, void *data, pool *pl)
{
#if OCAML_MULTICORE
  /* The +1/-1 adjustments keep the unsigned range test exact at the
     boundaries of the reserved minor-heaps area. */
  uintnat young_start = (uintnat)caml_minor_heaps_start + 1;
  uintnat young_range = (uintnat)caml_minor_heaps_end - 1 - young_start;
#else
  uintnat young_start = (uintnat)Caml_state->young_start;
  uintnat young_range = (uintnat)Caml_state->young_end - young_start;
#endif
  bxr_slot_ref start = pl->roots;
  bxr_slot_ref end = start + POOL_CAPACITY;
  int young_hit = 0;
  bxr_slot_ref current;
  for (current = start; current < end; current++) {
    bxr_slot s = *current;
    value v = s.as_value;
    /* Cheap unsigned range test first; it also filters out free-list
       links, which point inside the pool, not into the minor heap. */
    if ((uintnat)v - young_start <= young_range
        && BXR_LIKELY(Is_block(v))) {
      ++young_hit;
      CALL_GC_ACTION(action, data, v, &current->as_value);
    }
  }
  stats.young_hit_young += young_hit;
  return current - start;
}
/* Scan one pool under its mutex, choosing the minor or general scanner. */
static int scan_pool(scanning_action action, int only_young, void *data,
                     pool *pl)
{
  int work;
  bxr_mutex_lock(&pl->mutex);
  if (only_young)
    work = scan_pool_young(action, data, pl);
  else
    work = scan_pool_gen(action, data, pl);
  bxr_mutex_unlock(&pl->mutex);
  return work;
}
/* Scan every pool of a ring; returns the accumulated scanning work. */
static int scan_ring(scanning_action action, int only_young,
                     void *data, pool **ring)
{
  pool *first = *ring;
  if (first == NULL) return 0;
  int total = 0;
  pool *p = first;
  do {
    total += scan_pool(action, only_young, data, p);
    p = p->next;
  } while (p != first);
  return total;
}
/* Scan the young ring always, and the old ring only for a full scan. */
static int scan_pools(scanning_action action, int only_young,
                      void *data, int dom_id)
{
  pool_rings *local = pools[dom_id];
  int total = scan_ring(action, only_young, data, &local->young);
  if (!only_young)
    total += scan_ring(action, 0, data, &local->old);
  return total;
}
/* Root-scanning entry for one domain: reclaim delayed frees, adopt
   orphans, scan, then promote young pools (minor) or drop the free-pool
   cache (major). Work counters feed the statistics. */
static void scan_roots(scanning_action action, int only_young,
                       void *data, int dom_id)
{
  if (DEBUG) validate_all_pools(dom_id);
  gc_pool_rings(dom_id);
  adopt_orphaned_pools(dom_id);
  int work = scan_pools(action, only_young, data, dom_id);
  if (bxr_in_minor_collection())
    promote_young_pools(dom_id);
  else
    free_pool_ring(&pools[dom_id]->free);
  if (only_young)
    stats.total_scanning_work_minor += work;
  else
    stats.total_scanning_work_major += work;
  if (DEBUG) validate_all_pools(dom_id);
}
/* Monotonic time in nanoseconds, or 0 when no POSIX clock is available. */
static long long time_counter(void)
{
#if defined(POSIX_CLOCK)
  struct timespec now;
  clock_gettime(CLOCK_MONOTONIC, &now);
  return (long long)now.tv_sec * 1000000000LL + (long long)now.tv_nsec;
#else
  return 0;
#endif
}
/* Convert a pool count to KiB (unit == 1) or MiB (unit == 2) given the
   pool size 2^BXR_POOL_LOG_SIZE bytes. */
static long long kib_of_pools(long long count, int unit)
{
  int shift = BXR_POOL_LOG_SIZE - unit * 10;
  return (shift >= 0) ? (count << shift) : (count >> -shift);
}
/* Mean of `total` over `units` as a double (inf/NaN when units is 0). */
static double average(long long total, long long units)
{
  return (double)total / (double)units;
}
/* Dump all statistics to stdout. Uses the POSIX %' thousands-separator
   printf flag; sections are gated on DEBUG and POSIX_CLOCK. */
void boxroot_print_stats()
{
printf("minor collections: %'lld\n"
"major collections (and others): %'lld\n",
stats.minor_collections,
stats.major_collections);
/* Nothing else to report if no pool was ever allocated. */
if (stats.total_alloced_pools == 0) return;
printf("BXR_POOL_LOG_SIZE: %d (%'lld KiB, %'d roots/pool)\n"
"DEBUG: %d\n"
"OCAML_MULTICORE: %d\n"
"BXR_MULTITHREAD: %d\n"
"BXR_FORCE_REMOTE: %d\n",
(int)BXR_POOL_LOG_SIZE, kib_of_pools(1, 1), (int)POOL_CAPACITY,
(int)DEBUG, (int)OCAML_MULTICORE,
(int)BXR_MULTITHREAD, (int)BXR_FORCE_REMOTE);
printf("total allocated pools: %'lld (%'lld MiB)\n"
"peak allocated pools: %'lld (%'lld MiB)\n"
"total emptied pools: %'lld (%'lld MiB)\n"
"total freed pools: %'lld (%'lld MiB)\n",
stats.total_alloced_pools,
kib_of_pools(stats.total_alloced_pools, 2),
stats.peak_pools,
kib_of_pools(stats.peak_pools, 2),
stats.total_emptied_pools,
kib_of_pools(stats.total_emptied_pools, 2),
stats.total_freed_pools,
kib_of_pools(stats.total_freed_pools, 2));
double scanning_work_minor =
average(stats.total_scanning_work_minor, stats.minor_collections);
double scanning_work_major =
average(stats.total_scanning_work_major, stats.major_collections);
long long total_scanning_work =
stats.total_scanning_work_minor + stats.total_scanning_work_major;
#if DEBUG
double young_hits_gen_pct =
average(stats.young_hit_gen * 100, stats.total_scanning_work_major);
#endif
double young_hits_young_pct =
average(stats.young_hit_young * 100, stats.total_scanning_work_minor);
/* The argument list below must line up with the #if DEBUG format line. */
printf("work per minor: %'.0f\n"
"work per major: %'.0f\n"
"total scanning work: %'lld (%'lld minor, %'lld major)\n"
#if DEBUG
"young hits (non-minor collection): %.2f%%\n"
#endif
"young hits (minor collection): %.2f%%\n",
scanning_work_minor,
scanning_work_major,
total_scanning_work, stats.total_scanning_work_minor, stats.total_scanning_work_major,
#if DEBUG
young_hits_gen_pct,
#endif
young_hits_young_pct);
#if defined(POSIX_CLOCK)
/* Times are recorded in ns; report µs. */
double time_per_minor =
average(stats.total_minor_time, stats.minor_collections) / 1000;
double time_per_major =
average(stats.total_major_time, stats.major_collections) / 1000;
double time_per_gc_pool_rings =
average(stats.total_gc_pool_time, stats.total_gc_pool_rings) / 1000;
printf("average time per minor: %'.3fµs\n"
"average time per major: %'.3fµs\n"
"peak time per minor: %'.3fµs\n"
"peak time per major: %'.3fµs\n"
"average time per gc_pool_rings: %'.3fµs\n",
time_per_minor,
time_per_major,
((double)stats.peak_minor_time) / 1000,
((double)stats.peak_major_time) / 1000,
time_per_gc_pool_rings);
#endif
double ring_operations_per_pool =
average(stats.ring_operations, stats.total_alloced_pools);
printf("total boxroot_create_slow: %'lld\n"
"total boxroot_delete_slow: %'lld\n"
"total boxroot_modify_slow: %'lld\n"
"total ring operations: %'lld\n"
"ring operations per pool: %.2f\n"
"total gc_pool_rings: %'lld\n",
stats.total_create_slow,
stats.total_delete_slow,
stats.total_modify_slow,
stats.ring_operations,
ring_operations_per_pool,
stats.total_gc_pool_rings);
#if DEBUG
long long total_create = stats.total_create_young + stats.total_create_old;
long long total_delete = stats.total_delete_young + stats.total_delete_old;
double create_young_pct =
average(stats.total_create_young * 100, total_create);
double delete_young_pct =
average(stats.total_delete_young * 100, total_delete);
printf("total created: %'lld (%.2f%% young)\n"
"total deleted: %'lld (%.2f%% young)\n"
"total modified: %'lld\n",
total_create, create_young_pct,
total_delete, delete_young_pct,
stats.total_modify);
printf("get_pool_header: %'lld\n"
"is_pool_member: %'lld\n",
stats.get_pool_header,
stats.is_pool_member);
#endif
}
/* Hook invoked by the OCaml GC to scan boxroots; also maintains the
   collection counters and timing statistics. */
static void scanning_callback(scanning_action action, int only_young,
                              void *data)
{
  int st = boxroot_status();
  if (st == BOXROOT_NOT_SETUP || st == BOXROOT_TORE_DOWN) return;
  bool in_minor_collection = bxr_in_minor_collection();
  if (in_minor_collection)
    incr(&stats.minor_collections);
  else
    incr(&stats.major_collections);
  int dom_id = Domain_id;
  /* Nothing to scan if this domain never allocated a boxroot. */
  if (pools[dom_id] == NULL) return;
#if !OCAML_MULTICORE
  if (!bxr_check_thread_hooks()) status = BOXROOT_INVALID;
#endif
  long long start = time_counter();
  scan_roots(action, only_young, data, dom_id);
  long long duration = time_counter() - start;
  atomic_llong *total = in_minor_collection ? &stats.total_minor_time
                                            : &stats.total_major_time;
  atomic_llong *peak = in_minor_collection ? &stats.peak_minor_time
                                           : &stats.peak_major_time;
  *total += duration;
  if (duration > *peak) *peak = duration;
}
static void domain_termination_callback()
{
DEBUGassert(OCAML_MULTICORE == 1);
int dom_id = Domain_id;
orphan_pools(dom_id);
}
static mutex_t init_mutex = BXR_MUTEX_INITIALIZER;
/* One-time initialisation, double-checked: fast relaxed read first, then
   re-check under init_mutex before installing the GC hooks. Returns
   false if the library is INVALID or already torn down. */
static bool setup()
{
if (boxroot_status() == BOXROOT_RUNNING) return true;
bool res = true;
bxr_mutex_lock(&init_mutex);
if (status != BOXROOT_NOT_SETUP) {
res = (status == BOXROOT_RUNNING);
goto out;
}
bxr_setup_hooks(&scanning_callback, &domain_termination_callback);
status = BOXROOT_RUNNING;
out:
bxr_mutex_unlock(&init_mutex);
return res;
}
bool boxroot_setup() { return true; }
/* Free every pool of every domain and the orphan rings; the library
   cannot be set up again afterwards (status becomes TORE_DOWN).
   Fix: the original left pools[i] pointing at freed memory; a late
   scanning or domain-termination callback reading pools[dom_id] (e.g.
   orphan_pools) would then dereference a dangling pointer. Clear the
   entry after freeing it. */
void boxroot_teardown()
{
  bxr_mutex_lock(&init_mutex);
  if (status != BOXROOT_RUNNING) goto out;
  status = BOXROOT_TORE_DOWN;
  for (int i = 0; i < Num_domains; i++) {
    pool_rings *ps = pools[i];
    if (ps == NULL) continue;
    free_pool_rings(ps);
    free(ps);
    pools[i] = NULL; /* do not leave a dangling pointer behind */
    set_current_fl(i, &empty_fl);
  }
  free_pool_rings(&orphan);
out:
  bxr_mutex_unlock(&init_mutex);
}