#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/thread_event.h"
#define E(event, condition_unused, is_alloc_event_unused) \
uint64_t event##_new_event_wait(tsd_t *tsd); \
uint64_t event##_postponed_event_wait(tsd_t *tsd); \
void event##_event_handler(tsd_t *tsd, uint64_t elapsed);
ITERATE_OVER_ALL_EVENTS
#undef E
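
/* Signatures for the static per-event elapsed-bytes fetchers defined below. */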
#define E(event, condition_unused, is_alloc_event_unused) \
static uint64_t event##_fetch_elapsed(tsd_t *tsd);
ITERATE_OVER_ALL_EVENTS
#undef E
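
/*
 * Events whose handlers do not make use of the elapsed byte count simply
 * report TE_INVALID_ELAPSED.
 */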
static uint64_t
tcache_gc_fetch_elapsed(tsd_t *tsd) {
	return TE_INVALID_ELAPSED;
}

static uint64_t
tcache_gc_dalloc_fetch_elapsed(tsd_t *tsd) {
	return TE_INVALID_ELAPSED;
}

static uint64_t
prof_sample_fetch_elapsed(tsd_t *tsd) {
	uint64_t last_event = thread_allocated_last_event_get(tsd);
	uint64_t last_sample_event = prof_sample_last_event_get(tsd);
	prof_sample_last_event_set(tsd, last_event);
	return last_event - last_sample_event;
}

static uint64_t
stats_interval_fetch_elapsed(tsd_t *tsd) {
	uint64_t last_event = thread_allocated_last_event_get(tsd);
	uint64_t last_stats_event = stats_interval_last_event_get(tsd);
	stats_interval_last_event_set(tsd, last_event);
	return last_event - last_stats_event;
}

static uint64_t
peak_alloc_fetch_elapsed(tsd_t *tsd) {
	return TE_INVALID_ELAPSED;
}

static uint64_t
peak_dalloc_fetch_elapsed(tsd_t *tsd) {
	return TE_INVALID_ELAPSED;
}
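
/* End of the per-event facilities; the generic event machinery follows. */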

static bool
te_ctx_has_active_events(te_ctx_t *ctx) {
	assert(config_debug);
#define E(event, condition, alloc_event) \
	if (condition && alloc_event == ctx->is_alloc) { \
		return true; \
	}
	ITERATE_OVER_ALL_EVENTS
#undef E
	return false;
}
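
/*
 * Compute the smallest pending wait (in bytes) among all active events of the
 * given type (alloc vs. dalloc); TE_MAX_START_WAIT if none is active.
 */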
static uint64_t
te_next_event_compute(tsd_t *tsd, bool is_alloc) {
	uint64_t wait = TE_MAX_START_WAIT;
#define E(event, condition, alloc_event) \
	if (is_alloc == alloc_event && condition) { \
		uint64_t event_wait = event##_event_wait_get(tsd); \
		assert(event_wait <= TE_MAX_START_WAIT); \
		if (event_wait > 0U && event_wait < wait) { \
			wait = event_wait; \
		} \
	}

	ITERATE_OVER_ALL_EVENTS
#undef E

	assert(wait <= TE_MAX_START_WAIT);
	return wait;
}
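
/*
 * Sanity checks on one event context: last_event <= current_bytes <
 * next_event (modulo wraparound), the fast threshold is consistent with
 * next_event, and next_event never trails the smallest pending wait except
 * for the TE_MAX_INTERVAL clamp.
 */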
static void
te_assert_invariants_impl(tsd_t *tsd, te_ctx_t *ctx) {
	uint64_t current_bytes = te_ctx_current_bytes_get(ctx);
	uint64_t last_event = te_ctx_last_event_get(ctx);
	uint64_t next_event = te_ctx_next_event_get(ctx);
	uint64_t next_event_fast = te_ctx_next_event_fast_get(ctx);

	assert(last_event != next_event);
	if (next_event > TE_NEXT_EVENT_FAST_MAX || !tsd_fast(tsd)) {
		assert(next_event_fast == 0U);
	} else {
		assert(next_event_fast == next_event);
	}

	/* The subtractions are intentionally susceptible to underflow. */
	uint64_t interval = next_event - last_event;
	assert(current_bytes - last_event < interval);

	uint64_t min_wait = te_next_event_compute(tsd, te_ctx_is_alloc(ctx));
	/*
	 * next_event may lag behind the minimum pending wait only right after
	 * tsd initialization (no active events, last_event still 0), or when
	 * the wait had to be clamped down to TE_MAX_INTERVAL.
	 */
	assert((!te_ctx_has_active_events(ctx) && last_event == 0U) ||
	    interval == min_wait ||
	    (interval < min_wait && interval == TE_MAX_INTERVAL));
}

void
te_assert_invariants_debug(tsd_t *tsd) {
	te_ctx_t ctx;
	te_ctx_get(tsd, &ctx, true);
	te_assert_invariants_impl(tsd, &ctx);

	te_ctx_get(tsd, &ctx, false);
	te_assert_invariants_impl(tsd, &ctx);
}
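
/*
 * next_event_fast mirrors next_event when the threshold fits in
 * TE_NEXT_EVENT_FAST_MAX and the tsd is on the fast path; otherwise it is 0,
 * which forces the next event check onto the slow path.
 */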
static void
te_ctx_next_event_fast_update(te_ctx_t *ctx) {
	uint64_t next_event = te_ctx_next_event_get(ctx);
	uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX) ?
	    next_event : 0U;
	te_ctx_next_event_fast_set(ctx, next_event_fast);
}
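
/*
 * Recompute the fast-path thresholds for both the alloc and dalloc event
 * contexts; they are kept disabled whenever the tsd is not nominal.
 */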
void
te_recompute_fast_threshold(tsd_t *tsd) {
	if (tsd_state_get(tsd) != tsd_state_nominal) {
		te_next_event_fast_set_non_nominal(tsd);
		return;
	}

	te_ctx_t ctx;
	te_ctx_get(tsd, &ctx, true);
	te_ctx_next_event_fast_update(&ctx);
	te_ctx_get(tsd, &ctx, false);
	te_ctx_next_event_fast_update(&ctx);

	atomic_fence(ATOMIC_SEQ_CST);
	if (tsd_state_get(tsd) != tsd_state_nominal) {
		/*
		 * Another thread may have moved the tsd out of the nominal
		 * state concurrently; re-check after the fence so the fast
		 * thresholds are not left enabled in that case.
		 */
		te_next_event_fast_set_non_nominal(tsd);
	}
}
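
/*
 * Advance next_event to last_event plus the given wait, clamped to
 * TE_MAX_INTERVAL (which keeps the bytes accumulated between consecutive
 * events from overflowing; see the assertion in tsd_te_init).
 */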
static void
te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx, uint64_t wait) {
	/*
	 * The thresholds can only be adjusted after last_event has been
	 * brought up to the current byte count.
	 */
	assert(te_ctx_current_bytes_get(ctx) == te_ctx_last_event_get(ctx));
	assert(wait <= TE_MAX_START_WAIT);

	uint64_t next_event = te_ctx_last_event_get(ctx) + (wait <=
	    TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL);
	te_ctx_next_event_set(tsd, ctx, next_event);
}
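
/* Clamp an event wait into [TE_MIN_START_WAIT, TE_MAX_START_WAIT]. */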
static uint64_t
te_clip_event_wait(uint64_t event_wait) {
	assert(event_wait > 0U);
	if (TE_MIN_START_WAIT > 1U &&
	    unlikely(event_wait < TE_MIN_START_WAIT)) {
		event_wait = TE_MIN_START_WAIT;
	}
	if (TE_MAX_START_WAIT < UINT64_MAX &&
	    unlikely(event_wait > TE_MAX_START_WAIT)) {
		event_wait = TE_MAX_START_WAIT;
	}
	return event_wait;
}
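
/*
 * The slow-path entry point, reached once the byte counter crosses
 * next_event: charge the bytes accumulated since the last event against each
 * active event's wait, run the handlers whose waits are exhausted, and
 * recompute the thresholds.
 */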
void
te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) {
	/* The triggering size has already been added to the byte counter. */
	uint64_t bytes_after = te_ctx_current_bytes_get(ctx);
	/* The subtraction is intentionally susceptible to underflow. */
	uint64_t accumbytes = bytes_after - te_ctx_last_event_get(ctx);

	te_ctx_last_event_set(ctx, bytes_after);

	bool allow_event_trigger = tsd_nominal(tsd) &&
	    tsd_reentrancy_level_get(tsd) == 0;
	bool is_alloc = ctx->is_alloc;
	uint64_t wait = TE_MAX_START_WAIT;

#define E(event, condition, alloc_event) \
	bool is_##event##_triggered = false; \
	if (is_alloc == alloc_event && condition) { \
		uint64_t event_wait = event##_event_wait_get(tsd); \
		assert(event_wait <= TE_MAX_START_WAIT); \
		if (event_wait > accumbytes) { \
			event_wait -= accumbytes; \
		} else if (!allow_event_trigger) { \
			event_wait = event##_postponed_event_wait(tsd); \
		} else { \
			is_##event##_triggered = true; \
			event_wait = event##_new_event_wait(tsd); \
		} \
		event_wait = te_clip_event_wait(event_wait); \
		event##_event_wait_set(tsd, event_wait); \
		if (event_wait < wait) { \
			wait = event_wait; \
		} \
	}

	ITERATE_OVER_ALL_EVENTS
#undef E

	assert(wait <= TE_MAX_START_WAIT);
	te_adjust_thresholds_helper(tsd, ctx, wait);
	te_assert_invariants(tsd);

	/*
	 * All waits and thresholds are updated above, before any handler
	 * runs, so that allocations made by the handlers see a consistent
	 * event state.
	 */
#define E(event, condition, alloc_event) \
	if (is_alloc == alloc_event && condition && \
	    is_##event##_triggered) { \
		assert(allow_event_trigger); \
		uint64_t elapsed = event##_fetch_elapsed(tsd); \
		event##_event_handler(tsd, elapsed); \
	}

	ITERATE_OVER_ALL_EVENTS
#undef E

	te_assert_invariants(tsd);
}
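
/*
 * Initialize the waits and thresholds for one event type (alloc or dalloc),
 * starting all events fresh from the tsd's current byte count.
 */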
static void
te_init(tsd_t *tsd, bool is_alloc) {
	te_ctx_t ctx;
	te_ctx_get(tsd, &ctx, is_alloc);
	/*
	 * Start from a clean state: all events are measured from the current
	 * byte count.
	 */
	te_ctx_last_event_set(&ctx, te_ctx_current_bytes_get(&ctx));

	uint64_t wait = TE_MAX_START_WAIT;
#define E(event, condition, alloc_event) \
	if (is_alloc == alloc_event && condition) { \
		uint64_t event_wait = event##_new_event_wait(tsd); \
		event_wait = te_clip_event_wait(event_wait); \
		event##_event_wait_set(tsd, event_wait); \
		if (event_wait < wait) { \
			wait = event_wait; \
		} \
	}

	ITERATE_OVER_ALL_EVENTS
#undef E

	te_adjust_thresholds_helper(tsd, &ctx, wait);
}

void
tsd_te_init(tsd_t *tsd) {
	/* Make sure the bytes accumulated between events cannot overflow. */
	assert(TE_MAX_INTERVAL <= UINT64_MAX - SC_LARGE_MAXCLASS + 1);
	te_init(tsd, true);
	te_init(tsd, false);
	te_assert_invariants(tsd);
}