private(LAVD) struct bpf_cpumask __kptr *turbo_cpumask;
private(LAVD) struct bpf_cpumask __kptr *big_cpumask;
private(LAVD) struct bpf_cpumask __kptr *little_cpumask;
private(LAVD) struct bpf_cpumask __kptr *active_cpumask;
private(LAVD) struct bpf_cpumask __kptr *ovrflw_cpumask;
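
/*
 * CPU counts: nr_cpu_ids is filled in before load (const volatile), while
 * the online and big-core counts are maintained at runtime.
 */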
const volatile u64 nr_cpu_ids;
static volatile u64 nr_cpus_onln;
static volatile u64 nr_cpus_big;
struct sys_stat sys_stat;
volatile bool no_preemption;
volatile bool no_wake_sync;
volatile bool no_core_compaction;
volatile bool no_freq_scaling;
volatile bool is_powersave_mode;
volatile bool reinit_cpumask_for_performance;
const volatile bool is_autopilot_on;
const volatile bool is_smt_active;
const volatile u8 verbose;
const volatile u8 preempt_shift;
UEI_DEFINE(uei);
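
/* Per-CPU scheduling context (struct cpu_ctx), one entry per CPU. */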
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__type(key, u32);
__type(value, struct cpu_ctx);
__uint(max_entries, 1);
} cpu_ctx_stor SEC(".maps");
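
/* Per-task scheduling context (struct task_ctx) in BPF task local storage. */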
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct task_ctx);
} task_ctx_stor SEC(".maps");
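
/* Logging helpers: debugln prints at verbose > 0, traceln at verbose > 1. */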
#define debugln(fmt, ...) \
({ \
if (verbose > 0) \
bpf_printk("[%s:%d] " fmt, __func__, __LINE__, \
##__VA_ARGS__); \
})
#define traceln(fmt, ...) \
({ \
if (verbose > 1) \
bpf_printk("[%s:%d] " fmt, __func__, __LINE__, \
##__VA_ARGS__); \
})
#ifndef min
#define min(X, Y) (((X) < (Y)) ? (X) : (Y))
#endif
#ifndef max
#define max(X, Y) (((X) < (Y)) ? (Y) : (X))
#endif
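
/*
 * A monotonic, saturating ("sigmoid"-like) curve approximated as a capped
 * linear function: returns v clamped to max.
 */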
static u64 sigmoid_u64(u64 v, u64 max)
{
return (v > max) ? max : v;
}
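
/* Reverse of sigmoid_u64(): returns max - v, saturating to zero when v >= max. */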
static u64 rsigmoid_u64(u64 v, u64 max)
{
return (v >= max) ? 0 : max - v;
}
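
/* Look up the task_ctx of @p; returns NULL if none has been created yet. */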
static struct task_ctx *get_task_ctx(struct task_struct *p)
{
return bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
}
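
/* cpu_ctx of the CPU this program is currently running on. */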
static struct cpu_ctx *get_cpu_ctx(void)
{
const u32 idx = 0;
return bpf_map_lookup_elem(&cpu_ctx_stor, &idx);
}
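
/* cpu_ctx of a specific CPU id. */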
static struct cpu_ctx *get_cpu_ctx_id(s32 cpu_id)
{
const u32 idx = 0;
return bpf_map_lookup_percpu_elem(&cpu_ctx_stor, &idx, cpu_id);
}
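
/* cpu_ctx of the CPU that task @p is associated with. */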
static struct cpu_ctx *get_cpu_ctx_task(const struct task_struct *p)
{
return get_cpu_ctx_id(scx_bpf_task_cpu(p));
}
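
/* Exponentially weighted moving average: the new value gets a 1/16 weight. */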
static u32 calc_avg32(u32 old_val, u32 new_val)
{
return (old_val - (old_val >> 4)) + (new_val >> 4);
}
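
/* 64-bit variant of calc_avg32(). */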
static u64 calc_avg(u64 old_val, u64 new_val)
{
return (old_val - (old_val >> 4)) + (new_val >> 4);
}
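
/*
 * Asymmetric EWMA: increases are tracked quickly (new value weighted 3/4)
 * while decreases decay slowly (new value weighted 1/8).
 */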
static u64 calc_asym_avg(u64 old_val, u64 new_val)
{
if (old_val < new_val)
return (new_val - (new_val >> 2)) + (old_val >> 2);
else
return (old_val - (old_val >> 3)) + (new_val >> 3);
}
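
/* Update an EWMA of event frequency (per second) from the latest interval. */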
static u64 calc_avg_freq(u64 old_freq, u64 interval)
{
u64 new_freq, ewma_freq;
new_freq = LAVD_TIME_ONE_SEC / interval;
ewma_freq = calc_avg(old_freq, new_freq);
return ewma_freq;
}
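
/* True for kernel threads (PF_KTHREAD). */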
static bool is_kernel_task(struct task_struct *p)
{
return !!(p->flags & PF_KTHREAD);
}
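
/* True for workqueue and io_uring worker threads. */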
static bool is_kernel_worker(struct task_struct *p)
{
return !!(p->flags & (PF_WQ_WORKER | PF_IO_WORKER));
}
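
/* True when the task is pinned to a single CPU. */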
static bool is_pinned(const struct task_struct *p)
{
return p->nr_cpus_allowed == 1;
}
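
/* A task is latency-critical when its lat_cri is at or above the system average. */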
static bool is_lat_cri(struct task_ctx *taskc)
{
return taskc->lat_cri >= sys_stat.avg_lat_cri;
}
static bool is_greedy(struct task_ctx *taskc)
{
return taskc->is_greedy;
}
static bool is_eligible(struct task_ctx *taskc)
{
return !is_greedy(taskc);
}
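
/* Treat a task with an active futex boost as a lock holder. */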
static bool is_lock_holder(struct task_ctx *taskc)
{
return taskc->futex_boost;
}
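
/* A task that has been scheduled at least once has a non-zero time slice. */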
static bool have_scheduled(struct task_ctx *taskc)
{
return taskc->slice_ns != 0;
}
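
/* Nice-based priority in the range [0, 40); smaller means higher priority. */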
static u16 get_nice_prio(struct task_struct *p)
{
u16 prio = p->static_prio - MAX_RT_PRIO;
return prio;
}
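
/* Whether the number of active CPUs already covers all online CPUs. */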
static bool use_full_cpus(void)
{
return sys_stat.nr_active >= nr_cpus_onln;
}
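
/*
 * Pick a set bit from @bitmap, starting the scan at @nuance so that repeated
 * calls spread across different bits; returns -ENOENT if no bit is set.
 */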
static s64 pick_any_bit(u64 bitmap, u64 nuance)
{
u64 i, pos;
bpf_for(i, 0, 64) {
pos = (i + nuance) % 64;
if (bitmap & (1LLU << pos))
return pos;
}
return -ENOENT;
}
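
/* Record whether @cpumask spans big and/or little cores in the task context. */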
static void set_on_core_type(struct task_ctx *taskc,
const struct cpumask *cpumask)
{
bool on_big = false, on_little = false;
struct cpu_ctx *cpuc;
int cpu;
bpf_for(cpu, 0, nr_cpu_ids) {
if (!bpf_cpumask_test_cpu(cpu, cpumask))
continue;
cpuc = get_cpu_ctx_id(cpu);
if (!cpuc) {
scx_bpf_error("Failed to look up cpu_ctx: %d", cpu);
return;
}
if (cpuc->big_core)
on_big = true;
else
on_little = true;
if (on_big && on_little)
break;
}
WRITE_ONCE(taskc->on_big, on_big);
WRITE_ONCE(taskc->on_little, on_little);
}
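
/* Return true with probability x out of y (assumes y > 0 and x <= y). */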
static bool prob_x_out_of_y(u32 x, u32 y)
{
u32 r = bpf_get_prandom_u32() % y;
return r < x;
}