libbpf-rs 0.26.2

libbpf-rs is a safe, idiomatic, and opinionated wrapper around libbpf-sys.
Documentation
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include "runqslower.h"
#include "vmlinux.h"
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

/* Task state value meaning "runnable"; an involuntary context switch. */
#define TASK_RUNNING 0

/* Filter knobs, written by user space into .rodata before load;
 * 0 means "no filtering" for each of them.
 * min_us: only report run-queue latencies strictly greater than this. */
const volatile __u64 min_us = 0;
const volatile pid_t targ_pid = 0;
const volatile pid_t targ_tgid = 0;

// Dummy instance to get skeleton to generate definition for `struct event`
struct event _event = {0};

// Kernel 5.14 changed the state field to __state
// Kernel 5.14 changed the state field to __state
// CO-RE "flavor": the `___pre_5_14` suffix is stripped by libbpf, so this
// relocates against the real `struct task_struct` on kernels < 5.14.
struct task_struct___pre_5_14 {
    long int state;
};

/* Map: pid -> enqueue timestamp (ns), written on wakeup/preemption and
 * consumed on the subsequent sched_switch to that pid. */
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 10240);
    __type(key, u32);
    __type(value, u64);
} start SEC(".maps");

/* Perf buffer used to stream `struct event` records to user space. */
struct {
    /* bpflint: disable=perfbuf-usage */
    __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
    __type(key, u32);
    __type(value, u32);
} events SEC(".maps");

/* record enqueue timestamp */
static __always_inline int trace_enqueue(u32 tgid, u32 pid)
{
    u64 ts;

    if (!pid)
        return 0;
    if (targ_tgid && targ_tgid != tgid)
        return 0;
    if (targ_pid && targ_pid != pid)
        return 0;

    ts = bpf_ktime_get_ns();
    bpf_map_update_elem(&start, &pid, &ts, 0);
    return 0;
}

SEC("tp_btf/sched_wakeup")
int handle__sched_wakeup(u64 *ctx)
{
    /* TP_PROTO(struct task_struct *p) */
    struct task_struct *task = (struct task_struct *)ctx[0];

    /* A wakeup puts the task on the run queue: stamp it. */
    return trace_enqueue(task->tgid, task->pid);
}

SEC("tp_btf/sched_wakeup_new")
int handle__sched_wakeup_new(u64 *ctx)
{
    /* TP_PROTO(struct task_struct *p) */
    struct task_struct *task = (struct task_struct *)ctx[0];

    /* First wakeup of a freshly forked task: stamp it like any enqueue. */
    return trace_enqueue(task->tgid, task->pid);
}

/* Return the task's scheduler state, compatible with kernels before and
 * after 5.14 (which renamed `state` to `__state`).
 * bpf_core_field_exists() is resolved at load time against the running
 * kernel's BTF; the flavor cast relocates `state` on older kernels. */
static inline long get_task_state(struct task_struct *t)
{
    if (bpf_core_field_exists(t->__state))
        return t->__state;

    return ((struct task_struct___pre_5_14 *)t)->state;
}

SEC("tp_btf/sched_switch")
int handle__sched_switch(u64 *ctx)
{
    /* TP_PROTO(bool preempt, struct task_struct *prev,
     *        struct task_struct *next)
     */
    struct task_struct *prev = (struct task_struct *)ctx[1];
    struct task_struct *next = (struct task_struct *)ctx[2];
    struct event event = {};
    u64 *enqueued_ns, delta_us;
    u32 next_pid;

    /* ivcsw: a still-runnable prev was preempted — treat like an enqueue
     * event and store its timestamp */
    if (get_task_state(prev) == TASK_RUNNING)
        trace_enqueue(prev->tgid, prev->pid);

    next_pid = next->pid;

    /* fetch the enqueue timestamp for the incoming task */
    enqueued_ns = bpf_map_lookup_elem(&start, &next_pid);
    if (!enqueued_ns)
        return 0; /* missed enqueue */

    /* run-queue latency in microseconds */
    delta_us = (bpf_ktime_get_ns() - *enqueued_ns) / 1000;
    if (min_us && delta_us <= min_us)
        return 0;

    event.pid = next_pid;
    event.delta_us = delta_us;
    bpf_probe_read_kernel_str(&event.task, sizeof(event.task), next->comm);

    /* ship the record to user space */
    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event,
                          sizeof(event));

    bpf_map_delete_elem(&start, &next_pid);
    return 0;
}

/* GPL license declaration: required for GPL-only BPF helpers. */
char LICENSE[] SEC("license") = "GPL";