#include <rt/trace.h>
#include <rt/atomic.h>
#include <rt/event.h>
#include <rt/log.h>
#include <rt/mutex.h>
#include <rt/queue.h>
#include <rt/sem.h>
#include <rt/syscall.h>
#include <rt/task.h>
#if RT_LOG_ENABLE
#include <inttypes.h>
#endif
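// Lock-free trace buffer: producers in any context (tasks or interrupts)
// claim slots from a fixed ring, fill them in, and publish them; a single
// consumer task (rt_trace_daemon) drains published slots in sequence order
// and hands each one to rt_trace_event_write.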
#if RT_TRACE_ENABLE
#ifndef RT_TRACE_NUM_EVENTS
#define RT_TRACE_NUM_EVENTS 128
#endif
#if RT_TRACE_NUM_EVENTS < 32
#error "Must have at least 32 trace event slots."
#endif
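// Event storage and counters: rt_trace_seq hands out global sequence
// numbers that double as ring indices, and rt_trace_missed counts events
// dropped because their slot had not been drained yet.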
static struct rt_trace_event rt_trace_events[RT_TRACE_NUM_EVENTS];
static rt_atomic_uint32_t rt_trace_seq = 0;
static rt_atomic_size_t rt_trace_missed = 0;
#define RT_TRACE_SEND_THRESHOLD (RT_TRACE_NUM_EVENTS / 2)
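// Claim the next ring slot. The compare-exchange takes the slot from EMPTY
// to IN_PROGRESS; if the slot is still occupied (the daemon has not drained
// it yet), the event is counted as missed and dropped rather than blocking
// the caller, which may be an interrupt handler.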
static struct rt_trace_event *rt_trace_pop(void)
{
const uint32_t cycle = rt_cycle();
const uint32_t seq =
rt_atomic_fetch_add(&rt_trace_seq, 1, RT_ATOMIC_RELAXED);
struct rt_trace_event *const ev =
&rt_trace_events[seq % RT_TRACE_NUM_EVENTS];
enum rt_trace_event_state state = RT_TRACE_EVENT_STATE_EMPTY;
if (!rt_atomic_compare_exchange(&ev->state, &state,
RT_TRACE_EVENT_STATE_IN_PROGRESS,
RT_ATOMIC_RELAXED, RT_ATOMIC_RELAXED))
{
rt_atomic_fetch_add(&rt_trace_missed, 1, RT_ATOMIC_RELAXED);
return NULL;
}
ev->cycle = cycle;
ev->seq = seq;
return ev;
}
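// Publish a claimed slot. The release store pairs with the acquire load in
// rt_trace_daemon, so the daemon only reads the event fields after seeing
// the slot marked FULL. The sequence number is read before publishing
// because the daemon may recycle the slot immediately afterward. The daemon
// is woken once per RT_TRACE_SEND_THRESHOLD events rather than per event.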
static RT_SEM_BINARY(rt_trace_send_sem);
static void rt_trace_push(struct rt_trace_event *ev)
{
const bool should_post = ((ev->seq + 1) % RT_TRACE_SEND_THRESHOLD) == 0;
rt_atomic_store(&ev->state, RT_TRACE_EVENT_STATE_FULL, RT_ATOMIC_RELEASE);
if (should_post)
{
rt_sem_post(&rt_trace_send_sem);
}
}
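// Wake the daemon immediately instead of waiting for the post threshold.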
void rt_trace_flush(void)
{
rt_sem_post(&rt_trace_send_sem);
}
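// Each hook below records one event type: claim a slot, fill in the type
// tag and type-specific payload, publish.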
void rt_trace_syscall_run(enum rt_syscall syscall, uintptr_t arg0,
uintptr_t arg1, uintptr_t arg2)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_SYSCALL_RUN;
ev->data.syscall_run.syscall = syscall;
ev->data.syscall_run.arg0 = arg0;
ev->data.syscall_run.arg1 = arg1;
ev->data.syscall_run.arg2 = arg2;
rt_trace_push(ev);
}
}
void rt_trace_syscall_run_pending(const struct rt_syscall_record *record)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_SYSCALL_RUN_PENDING;
ev->data.syscall_pendable.args = record->args;
ev->data.syscall_pendable.syscall = record->syscall;
rt_trace_push(ev);
}
}
void rt_trace_syscall_pend(const struct rt_syscall_record *record)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_SYSCALL_PEND;
ev->data.syscall_pendable.args = record->args;
ev->data.syscall_pendable.syscall = record->syscall;
rt_trace_push(ev);
}
}
void rt_trace_first_task(const struct rt_task *task)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_FIRST_TASK;
ev->data.first_task = task;
rt_trace_push(ev);
}
}
void rt_trace_active_task(const struct rt_task *old_task,
const struct rt_task *new_task)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_ACTIVE_TASK;
ev->data.active_task.old_task = old_task;
ev->data.active_task.new_task = new_task;
rt_trace_push(ev);
}
}
void rt_trace_task_state(const struct rt_task *task,
enum rt_task_state old_state,
enum rt_task_state new_state)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_TASK_STATE;
ev->data.task_state.task = task;
ev->data.task_state.old_state = old_state;
ev->data.task_state.new_state = new_state;
rt_trace_push(ev);
}
}
void rt_trace_tick(unsigned long tick)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_TICK;
ev->data.tick = tick;
rt_trace_push(ev);
}
}
void rt_trace_interrupt(int32_t interrupt_number)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_INTERRUPT;
ev->data.interrupt_number = interrupt_number;
rt_trace_push(ev);
}
}
void rt_trace_sem_update(const struct rt_sem *sem, int old_value, int new_value)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_SEM_UPDATE;
ev->data.sem.sem = sem;
ev->data.sem.old_value = old_value;
ev->data.sem.new_value = new_value;
rt_trace_push(ev);
}
}
void rt_trace_event_wait(const struct rt_event *event, uint32_t bits,
uint32_t wait)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_EVENT_WAIT;
ev->data.event.event = event;
ev->data.event.bits = bits;
ev->data.event.wait_set = wait;
rt_trace_push(ev);
}
}
void rt_trace_event_set(const struct rt_event *event, uint32_t bits,
uint32_t set)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_EVENT_SET;
ev->data.event.event = event;
ev->data.event.bits = bits;
ev->data.event.wait_set = set;
rt_trace_push(ev);
}
}
void rt_trace_mutex_lock(const struct rt_mutex *mutex, uintptr_t holder)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_MUTEX_LOCK;
ev->data.mutex.mutex = mutex;
ev->data.mutex.holder = holder;
rt_trace_push(ev);
}
}
void rt_trace_mutex_lock_fail(const struct rt_mutex *mutex, uintptr_t failer,
uintptr_t holder)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_MUTEX_LOCK_FAIL;
ev->data.mutex_lock_fail.mutex = mutex;
ev->data.mutex_lock_fail.failer = failer;
ev->data.mutex_lock_fail.holder = holder;
rt_trace_push(ev);
}
}
void rt_trace_mutex_unlock(const struct rt_mutex *mutex, uintptr_t holder)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_MUTEX_UNLOCK;
ev->data.mutex.mutex = mutex;
ev->data.mutex.holder = holder;
rt_trace_push(ev);
}
}
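// Shared recorder for the queue events; seq is the queue's packed sequence
// word, split into generation and index by the default writer below.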
static void rt_trace_queue(const struct rt_queue *queue, size_t seq,
enum rt_trace_event_type type)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = type;
ev->data.queue.queue = queue;
ev->data.queue.seq = seq;
rt_trace_push(ev);
}
}
void rt_trace_queue_push(const struct rt_queue *queue, size_t enq)
{
rt_trace_queue(queue, enq, RT_TRACE_EVENT_QUEUE_PUSH);
}
void rt_trace_queue_push_skipped(const struct rt_queue *queue, size_t enq)
{
rt_trace_queue(queue, enq, RT_TRACE_EVENT_QUEUE_PUSH_SKIPPED);
}
void rt_trace_queue_pop(const struct rt_queue *queue, size_t deq)
{
rt_trace_queue(queue, deq, RT_TRACE_EVENT_QUEUE_POP);
}
void rt_trace_queue_pop_skip(const struct rt_queue *queue, size_t deq)
{
rt_trace_queue(queue, deq, RT_TRACE_EVENT_QUEUE_POP_SKIP);
}
void rt_trace_queue_peek(const struct rt_queue *queue, size_t deq)
{
rt_trace_queue(queue, deq, RT_TRACE_EVENT_QUEUE_PEEK);
}
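// Record a terminal END event, force a flush, and yield so the daemon can
// run and drain the buffer.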
void rt_trace_end(void)
{
struct rt_trace_event *const ev = rt_trace_pop();
if (ev != NULL)
{
ev->type = RT_TRACE_EVENT_END;
rt_trace_push(ev);
}
rt_trace_flush();
rt_task_yield();
}
#if RT_LOG_ENABLE
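// Default writer: formats each event as text via rt_logf. It is declared
// weak so applications can override it, e.g. with a binary or off-target
// writer; returning false tells the daemon to stop draining for now.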
static inline const char *mutex_holder_name(uintptr_t holder)
{
holder &= RT_MUTEX_HOLDER_MASK;
return holder == RT_MUTEX_HOLDER_INTERRUPT
? "interrupt"
: ((const struct rt_task *)holder)->name;
}
__attribute__((weak)) bool
rt_trace_event_write(const struct rt_trace_event *event)
{
rt_logf("%10" PRIu32 ",@%10" PRIu32 ": ", event->seq, event->cycle);
switch (event->type)
{
case RT_TRACE_EVENT_SYSCALL_RUN:
{
const uintptr_t arg0 = event->data.syscall_run.arg0;
const uintptr_t arg1 = event->data.syscall_run.arg1;
const uintptr_t arg2 = event->data.syscall_run.arg2;
rt_logf("syscall: ");
switch (event->data.syscall_run.syscall)
{
case RT_SYSCALL_SLEEP:
rt_logf("sleep(%" PRIuPTR ")\n", arg0);
break;
case RT_SYSCALL_SLEEP_PERIODIC:
rt_logf("sleep_periodic(%" PRIuPTR ", %" PRIuPTR ")\n", arg0, arg1);
break;
case RT_SYSCALL_SEM_WAIT:
rt_logf("sem_wait(%p)\n", (void *)arg0);
break;
case RT_SYSCALL_SEM_TIMEDWAIT:
rt_logf("sem_timedwait(%p, %" PRIuPTR ")\n", (void *)arg0, arg1);
break;
case RT_SYSCALL_SEM_POST:
if (arg1 == 1)
{
rt_logf("sem_post(%p)\n", (void *)arg0);
}
else
{
rt_logf("sem_post_n(%p, %" PRIuPTR ")\n", (void *)arg0, arg1);
}
break;
case RT_SYSCALL_MUTEX_LOCK:
rt_logf("mutex_lock(%p)\n", (void *)arg0);
break;
case RT_SYSCALL_MUTEX_TIMEDLOCK:
rt_logf("mutex_timedlock(%p, %" PRIuPTR ")\n", (void *)arg0, arg1);
break;
case RT_SYSCALL_MUTEX_UNLOCK:
rt_logf("mutex_unlock(%p)\n", (void *)arg0);
break;
case RT_SYSCALL_EVENT_WAIT:
rt_logf("event_wait(%p)\n", (void *)arg0);
break;
case RT_SYSCALL_EVENT_TIMEDWAIT:
rt_logf("event_timedwait(%p, %" PRIuPTR ")\n", (void *)arg0, arg2);
break;
case RT_SYSCALL_EVENT_SET:
rt_logf("event_set(%p)\n", (void *)arg0);
break;
case RT_SYSCALL_YIELD:
rt_logf("yield()\n");
break;
case RT_SYSCALL_EXIT:
rt_logf("exit()\n");
break;
}
break;
}
case RT_TRACE_EVENT_SYSCALL_PEND:
case RT_TRACE_EVENT_SYSCALL_RUN_PENDING:
{
if (event->type == RT_TRACE_EVENT_SYSCALL_PEND)
{
rt_logf("pend ");
}
else
{
rt_logf("run pending ");
}
switch (event->data.syscall_pendable.syscall)
{
case RT_SYSCALL_PENDABLE_SEM_POST:
{
struct rt_sem *const sem =
event->data.syscall_pendable.args.sem_post.sem;
const int n = event->data.syscall_pendable.args.sem_post.n;
if (n == 1)
{
rt_logf("sem_post(%p)\n", (void *)sem);
}
else
{
rt_logf("sem_post_n(%p, %d)\n", (void *)sem, n);
}
break;
}
case RT_SYSCALL_PENDABLE_EVENT_SET:
{
struct rt_event *const ev =
event->data.syscall_pendable.args.event_set.event;
rt_logf("event_set(%p)\n", (void *)ev);
break;
}
case RT_SYSCALL_PENDABLE_TICK:
rt_logf("tick()\n");
break;
}
break;
}
case RT_TRACE_EVENT_FIRST_TASK:
{
const struct rt_task *const task =
(const struct rt_task *)event->data.first_task;
rt_logf("first active task: %s\n", task->name);
break;
}
case RT_TRACE_EVENT_ACTIVE_TASK:
{
const struct rt_task *const old_task =
(const struct rt_task *)event->data.active_task.old_task;
const struct rt_task *const new_task =
(const struct rt_task *)event->data.active_task.new_task;
rt_logf("active task: %s -> %s\n", old_task->name, new_task->name);
break;
}
case RT_TRACE_EVENT_TASK_STATE:
{
const struct rt_task *const task =
(const struct rt_task *)event->data.task_state.task;
static const char *const state_names[] = {
"ready",
"blocked on sem_wait",
"blocked on sem_timedwait",
"blocked on mutex_lock",
"blocked on mutex_timedlock",
"blocked on event_wait",
"blocked on event_timedwait",
"asleep",
"exited",
};
rt_static_assert(sizeof state_names / sizeof state_names[0] ==
RT_TASK_STATE_NUM_STATES,
"incorrect state_names length");
rt_logf("task %s: %s -> %s\n", task->name,
state_names[event->data.task_state.old_state],
state_names[event->data.task_state.new_state]);
break;
}
case RT_TRACE_EVENT_TICK:
rt_logf("tick %lu\n", event->data.tick);
break;
case RT_TRACE_EVENT_INTERRUPT:
rt_logf("interrupt %" PRId32 "\n", event->data.interrupt_number);
break;
case RT_TRACE_EVENT_SEM_UPDATE:
{
const int old = event->data.sem.old_value;
const int new = event->data.sem.new_value;
rt_logf("sem %p %s: %d -> %d\n", (const void *)event->data.sem.sem,
(old > new) ? "sub" : "add", old, new);
break;
}
case RT_TRACE_EVENT_EVENT_WAIT:
{
const uint32_t bits = event->data.event.bits;
const uint32_t wait = event->data.event.wait_set;
const bool all = (wait & RT_EVENT_WAIT_ALL) != 0;
const bool noclear = (wait & RT_EVENT_WAIT_NOCLEAR) != 0;
rt_logf("event %p: 0x%" PRIx32 ", wait 0x%" PRIx32 ", %s%s",
(const void *)event->data.event.event,
bits & ~RT_EVENT_WAIT_RESERVED, wait & ~RT_EVENT_WAIT_RESERVED,
all ? "all" : "any", noclear ? ", no clear" : "");
if (rt_event_bits_match(bits, wait))
{
rt_logf(", success");
if (!noclear)
{
rt_logf(" -> 0x%" PRIx32,
(bits & ~wait) & ~RT_EVENT_WAIT_RESERVED);
}
}
else
{
rt_logf(", fail");
}
rt_logf("\n");
break;
}
case RT_TRACE_EVENT_EVENT_SET:
{
const uint32_t bits = event->data.event.bits;
const uint32_t set = event->data.event.wait_set;
rt_logf("event %p: 0x%" PRIx32 ", set 0x%" PRIx32 " -> 0x%" PRIx32 "\n",
(const void *)event->data.event.event,
bits & ~RT_EVENT_WAIT_RESERVED, set & ~RT_EVENT_WAIT_RESERVED,
(bits | set) & ~RT_EVENT_WAIT_RESERVED);
break;
}
case RT_TRACE_EVENT_MUTEX_LOCK:
rt_logf("mutex %p locked by %s\n",
(const void *)event->data.mutex.mutex,
mutex_holder_name(event->data.mutex.holder));
break;
case RT_TRACE_EVENT_MUTEX_LOCK_FAIL:
rt_logf("mutex %p lock failed by %s, held by %s\n",
(const void *)event->data.mutex_lock_fail.mutex,
mutex_holder_name(event->data.mutex_lock_fail.failer),
mutex_holder_name(event->data.mutex_lock_fail.holder));
break;
case RT_TRACE_EVENT_MUTEX_UNLOCK:
rt_logf("mutex %p unlocked by %s\n",
(const void *)event->data.mutex.mutex,
mutex_holder_name(event->data.mutex.holder));
break;
case RT_TRACE_EVENT_QUEUE_PUSH:
case RT_TRACE_EVENT_QUEUE_PUSH_SKIPPED:
case RT_TRACE_EVENT_QUEUE_POP:
case RT_TRACE_EVENT_QUEUE_POP_SKIP:
case RT_TRACE_EVENT_QUEUE_PEEK:
{
static const char *const queue_op_names[] = {
"push", "push skipped", "pop", "pop skip", "peek",
};
rt_static_assert(sizeof queue_op_names / sizeof queue_op_names[0] ==
RT_TRACE_EVENT_QUEUE_PEEK -
RT_TRACE_EVENT_QUEUE_PUSH + 1,
"incorrect queue_op_names length");
const size_t seq = event->data.queue.seq;
rt_logf("queue %p %s gen %u, index %zx\n",
(const void *)event->data.queue.queue,
queue_op_names[event->type - RT_TRACE_EVENT_QUEUE_PUSH],
(unsigned)(rt_queue_qsgen(seq) >> RT_QUEUE_STATE_BITS), // cast to match %u
rt_queue_qindex(seq));
break;
}
case RT_TRACE_EVENT_MISSED:
rt_logf("missed %zu trace events\n", event->data.missed);
break;
case RT_TRACE_EVENT_END:
rt_logf("trace end\n");
rt_log_flush();
break;
}
return true;
}
#else
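// Without RT_LOG_ENABLE there is no default formatter; this weak stub
// discards events, and applications may override it to capture traces.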
__attribute__((weak)) bool
rt_trace_event_write(const struct rt_trace_event *event)
{
(void)event;
return true;
}
#endif
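// Consumer loop: runs in its own task and blocks until woken, reports any
// accumulated missed-event count as a synthetic MISSED event, then drains
// consecutive FULL slots in order, stopping at the first slot that is not
// yet published so sequence order is preserved. Each slot is returned to
// EMPTY once it has been written out.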
void rt_trace_daemon(void)
{
rt_task_drop_privilege();
size_t deq = 0;
bool end = false;
while (!end)
{
rt_sem_wait(&rt_trace_send_sem);
const size_t missed =
rt_atomic_load(&rt_trace_missed, RT_ATOMIC_RELAXED);
if (missed > 0)
{
struct rt_trace_event missed_event;
missed_event.seq = rt_atomic_load(&rt_trace_seq, RT_ATOMIC_RELAXED);
missed_event.cycle = rt_cycle();
missed_event.type = RT_TRACE_EVENT_MISSED;
missed_event.data.missed = missed;
rt_trace_event_write(&missed_event);
rt_atomic_fetch_sub(&rt_trace_missed, missed, RT_ATOMIC_RELAXED);
}
for (size_t i = 0; i < RT_TRACE_NUM_EVENTS; ++i)
{
struct rt_trace_event *const ev =
&rt_trace_events[deq % RT_TRACE_NUM_EVENTS];
if (rt_atomic_load(&ev->state, RT_ATOMIC_ACQUIRE) !=
RT_TRACE_EVENT_STATE_FULL)
{
break;
}
if (!rt_trace_event_write(ev))
{
break;
}
if (ev->type == RT_TRACE_EVENT_END)
{
end = true;
}
rt_atomic_store(&ev->state, RT_TRACE_EVENT_STATE_EMPTY,
RT_ATOMIC_RELAXED);
++deq;
}
}
}
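// Usage sketch: rt_trace_daemon must run in a task of its own so it can
// block on rt_trace_send_sem. Assuming a task-creation macro along the
// lines of RT_TASK(fn, stack_size, priority) from <rt/task.h> (the macro
// name, stack size, and priority here are illustrative assumptions, not
// part of this file):
//
//     RT_TASK(rt_trace_daemon, RT_STACK_MIN, 2);
//
// Application code then calls the rt_trace_* hooks as events occur and
// rt_trace_end() once tracing should stop.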
#endif