#include <rt/abort.h>
#include <rt/atomic.h>
#include <rt/container.h>
#include <rt/context.h>
#include <rt/cycle.h>
#include <rt/event.h>
#include <rt/exit.h>
#include <rt/idle.h>
#include <rt/list.h>
#include <rt/log.h>
#include <rt/mpu.h>
#include <rt/mutex.h>
#include <rt/panic.h>
#include <rt/sem.h>
#include <rt/start.h>
#include <rt/syscall.h>
#include <rt/task.h>
#include <rt/tick.h>
#include <rt/tls.h>
#include <rt/trace.h>
#include <rt/trap.h>
static inline struct rt_task *task_from_list(const struct rt_list *l)
{
return rt_container_of(l, struct rt_task, list);
}
static inline struct rt_task *task_from_sleep_list(const struct rt_list *l)
{
return rt_container_of(l, struct rt_task, sleep_list);
}
static inline struct rt_mutex *mutex_from_list(const struct rt_list *l)
{
return rt_container_of(l, struct rt_mutex, list);
}
static bool task_priority_less_than(const struct rt_list *a,
const struct rt_list *b)
{
return task_from_list(a)->priority < task_from_list(b)->priority;
}
static void insert_by_priority(struct rt_list *list, struct rt_task *task)
{
rt_list_insert_by(list, &task->list, task_priority_less_than);
}
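// A set bit in rt_ready_bits means that priority level has at least one
// ready task; rt_ready_lists[priority] points at the list node of the task
// at the front of that level, and that node also serves as the level's
// circular-list head.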
RT_MPU_PRIV_BSS(rt_ready_bits)
static uint32_t rt_ready_bits = 0;
RT_MPU_PRIV_BSS(rt_ready_lists)
static struct rt_list *rt_ready_lists[RT_TASK_PRIORITY_MAX + 1];
RT_MPU_PRIV_DATA(rt_global_task_list)
static RT_LIST(rt_global_task_list);
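// The lowest set bit of rt_ready_bits is the best (numerically smallest)
// ready priority. Use __builtin_ctz when enabled, otherwise a de Bruijn
// multiply-and-lookup count of trailing zeros.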
static uint32_t min_ready_priority(void)
{
#if RT_TASK_READY_CTZ_ENABLE
return (uint32_t)__builtin_ctz(rt_ready_bits);
#else
static const unsigned char debruijn_ctz[32] = {
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
};
const uint32_t min_bit = rt_ready_bits & -rt_ready_bits;
return (uint32_t)debruijn_ctz[(min_bit * UINT32_C(0x077CB531)) >> 27];
#endif
}
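// Mutexes held by a task are kept sorted by the priority of each mutex's
// highest-priority waiter, so the front of the task's mutex_list determines
// the priority the holder inherits.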
static bool mutex_priority_less_than(const struct rt_list *a,
const struct rt_list *b)
{
const struct rt_mutex *const ma = mutex_from_list(a);
const struct rt_mutex *const mb = mutex_from_list(b);
return task_from_list(rt_list_front(&ma->wait_list))->priority <
task_from_list(rt_list_front(&mb->wait_list))->priority;
}
static void insert_mutex_by_priority(struct rt_list *list,
struct rt_mutex *mutex)
{
rt_list_insert_by(list, &mutex->list, mutex_priority_less_than);
}
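// Intrusive stack of syscall records pushed from tasks or interrupts and
// drained by rt_syscall_run_pending.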
RT_MPU_PRIV_BSS(rt_pending_syscalls)
static rt_atomic(struct rt_syscall_record *) rt_pending_syscalls = NULL;
RT_TASK(rt_idle, RT_STACK_MIN, RT_TASK_PRIORITY_IDLE);
struct rt_task *rt_active_task = NULL;
void rt_task_yield(void)
{
rt_syscall_0(RT_SYSCALL_YIELD);
}
__attribute__((noreturn)) void rt_task_exit(void)
{
rt_syscall_0(RT_SYSCALL_EXIT);
rt_abort();
}
struct rt_task *rt_task_self(void)
{
return rt_active_task;
}
const char *rt_task_name(void)
{
return rt_active_task->name;
}
static void task_set_state(struct rt_task *task, enum rt_task_state new_state)
{
rt_trace_task_state(task, task->state, new_state);
task->state = new_state;
}
static void task_add_to_ready_list(struct rt_task *task)
{
struct rt_list *const list = rt_ready_lists[task->priority];
if (list == NULL)
{
rt_ready_lists[task->priority] = &task->list;
rt_ready_bits |= UINT32_C(1) << task->priority;
}
else
{
rt_list_push_back(list, &task->list);
}
}
void rt_task_init(struct rt_task *task)
{
rt_list_push_back(&rt_global_task_list, &task->global_task_list);
task_add_to_ready_list(task);
}
static void task_ready(struct rt_task *task)
{
task_set_state(task, RT_TASK_STATE_READY);
task_add_to_ready_list(task);
}
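// Remove a task from the ready structures: clear the priority bit if it was
// the only ready task at its level, otherwise advance the level's head past
// it if it was at the front.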
static void task_unready(struct rt_task *task)
{
if (rt_list_is_empty(&task->list))
{
rt_ready_lists[task->priority] = NULL;
rt_ready_bits &= ~(UINT32_C(1) << task->priority);
}
else
{
if (rt_ready_lists[task->priority] == &task->list)
{
rt_ready_lists[task->priority] = task->list.next;
}
rt_list_remove(&task->list);
}
}
static void task_wait(struct rt_task *task, struct rt_list *list)
{
task_unready(task);
insert_by_priority(list, task);
}
RT_MPU_PRIV_BSS(rt_context_prev)
void **rt_context_prev;
struct rt_task *rt_start_sched(void)
{
#if RT_CYCLE_ENABLE
rt_cycle_init();
#endif
rt_task_cycle_resume();
struct rt_task *const first_task =
task_from_list(rt_ready_lists[min_ready_priority()]);
rt_trace_first_task(first_task);
rt_active_task = first_task;
#if RT_TASK_LOCAL_STORAGE_ENABLE
rt_tls_set(first_task->tls);
#endif
return first_task;
}
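// Choose the highest-priority ready task and prepare the context switch;
// return NULL if the active task is still the best choice.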
static struct rt_task *sched(void)
{
struct rt_task *const next_task =
task_from_list(rt_ready_lists[min_ready_priority()]);
if (next_task == rt_active_task)
{
return NULL;
}
rt_context_prev = &rt_active_task->ctx;
rt_trace_active_task(rt_active_task, next_task);
rt_active_task = next_task;
#if RT_TASK_LOCAL_STORAGE_ENABLE
rt_tls_set(next_task->tls);
#endif
return next_task;
}
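// rt_woken_tick is the tick through which sleeping tasks have already been
// woken; wake ticks are compared relative to it so unsigned wraparound
// still orders the sleep list correctly.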
RT_MPU_PRIV_BSS(rt_woken_tick)
static unsigned long rt_woken_tick;
static bool wake_tick_less_than(const struct rt_list *a,
const struct rt_list *b)
{
return (task_from_sleep_list(a)->wake_tick - rt_woken_tick) <
(task_from_sleep_list(b)->wake_tick - rt_woken_tick);
}
RT_MPU_PRIV_DATA(rt_sleep_list)
static RT_LIST(rt_sleep_list);
static void sleep_until(struct rt_task *task, unsigned long wake_tick)
{
task->wake_tick = wake_tick;
rt_list_insert_by(&rt_sleep_list, &task->sleep_list, wake_tick_less_than);
}
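// A negative semaphore value counts tasks that must stay blocked; wake
// waiters from the front of the wait list until only that many remain.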
static void wake_sem_waiters(struct rt_sem *sem)
{
int waiters = -rt_atomic_load(&sem->value, RT_ATOMIC_ACQUIRE);
if (waiters < 0)
{
waiters = 0;
}
while (sem->num_waiters > (size_t)waiters)
{
struct rt_task *const task =
task_from_list(rt_list_front(&sem->wait_list));
rt_list_remove(&task->list);
task->blocker.sem = NULL;
if (task->state == RT_TASK_STATE_BLOCKED_ON_SEM_TIMEDWAIT)
{
rt_list_remove(&task->sleep_list);
*task->syscall_return.success = true;
task->syscall_return.success = NULL;
}
task_ready(task);
--sem->num_waiters;
}
}
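// Wake every waiter whose requested bits can now be satisfied, storing the
// resulting bits through its syscall return pointer, and clear the waited
// marker once no waiters remain.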
static void wake_event_waiters(struct rt_event *event)
{
struct rt_list *const list = &event->wait_list;
for (struct rt_list *node = rt_list_front(list); node != list;)
{
struct rt_list *const next = node->next;
struct rt_task *const task = task_from_list(node);
const uint32_t wait = *task->syscall_return.bits;
const uint32_t bits = rt_event_trywait(event, wait);
if (rt_event_bits_match(bits, wait))
{
rt_list_remove(node);
task->blocker.event = NULL;
*task->syscall_return.bits = bits;
task->syscall_return.bits = NULL;
if (task->state == RT_TASK_STATE_BLOCKED_ON_EVENT_TIMEDWAIT)
{
rt_list_remove(&task->sleep_list);
}
task_ready(task);
}
node = next;
}
if (rt_list_is_empty(list))
{
rt_atomic_fetch_and(&event->bits, ~RT_EVENT_WAITED_MASK,
RT_ATOMIC_RELAXED);
}
}
static bool trylock(struct rt_mutex *mutex, uintptr_t new_holder)
{
uintptr_t e = RT_MUTEX_UNLOCKED;
return rt_atomic_compare_exchange(&mutex->holder, &e, new_holder,
RT_ATOMIC_ACQUIRE, RT_ATOMIC_RELAXED);
}
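// Try to hand the mutex directly to its highest-priority waiter; if more
// waiters remain, keep the waited bit set and track the mutex in the new
// holder's mutex_list for priority donation.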
static void wake_mutex_waiter(struct rt_mutex *mutex)
{
if (rt_list_is_empty(&mutex->wait_list))
{
return;
}
struct rt_task *const task =
task_from_list(rt_list_front(&mutex->wait_list));
const bool has_other_waiters = task->list.next != &mutex->wait_list;
uintptr_t new_holder = (uintptr_t)task;
if (has_other_waiters)
{
new_holder |= RT_MUTEX_WAITED_MASK;
}
if (trylock(mutex, new_holder))
{
rt_list_remove(&task->list);
task->blocker.mutex = NULL;
if (task->state == RT_TASK_STATE_BLOCKED_ON_MUTEX_TIMEDLOCK)
{
rt_list_remove(&task->sleep_list);
*task->syscall_return.success = true;
task->syscall_return.success = NULL;
}
task_ready(task);
if (has_other_waiters)
{
insert_mutex_by_priority(&task->mutex_list, mutex);
}
}
}
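// Recompute a task's effective priority from its base priority and the
// highest-priority waiter on any mutex it holds, then re-sort the task in
// whatever list it currently occupies. If the task is itself blocked on a
// mutex, return that mutex so donation can continue along the chain.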
static struct rt_mutex *task_donate(struct rt_task *task)
{
uint32_t priority = task->base_priority;
if (!rt_list_is_empty(&task->mutex_list))
{
struct rt_mutex *const next_mutex =
mutex_from_list(rt_list_front(&task->mutex_list));
const uint32_t donated_priority =
task_from_list(rt_list_front(&next_mutex->wait_list))->priority;
if (priority > donated_priority)
{
priority = donated_priority;
}
}
if (priority == task->priority)
{
return NULL;
}
if (task->state == RT_TASK_STATE_READY)
{
task_unready(task);
task->priority = priority;
task_add_to_ready_list(task);
}
else if ((task->state == RT_TASK_STATE_BLOCKED_ON_SEM_WAIT) ||
(task->state == RT_TASK_STATE_BLOCKED_ON_SEM_TIMEDWAIT))
{
task->priority = priority;
rt_list_remove(&task->list);
insert_by_priority(&task->blocker.sem->wait_list, task);
}
else if ((task->state == RT_TASK_STATE_BLOCKED_ON_MUTEX_LOCK) ||
(task->state == RT_TASK_STATE_BLOCKED_ON_MUTEX_TIMEDLOCK))
{
task->priority = priority;
rt_list_remove(&task->list);
insert_by_priority(&task->blocker.mutex->wait_list, task);
return task->blocker.mutex;
}
else if ((task->state == RT_TASK_STATE_BLOCKED_ON_EVENT_WAIT) ||
(task->state == RT_TASK_STATE_BLOCKED_ON_EVENT_TIMEDWAIT))
{
task->priority = priority;
rt_list_remove(&task->list);
insert_by_priority(&task->blocker.event->wait_list, task);
}
return NULL;
}
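// Propagate priority donation from a mutex's waiters to its holder: re-sort
// the mutex among the holder's held mutexes, recompute the holder's
// priority, and if the holder is itself blocked on another mutex, continue
// along that chain.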
static void mutex_donate(struct rt_mutex *mutex)
{
do
{
uintptr_t holder = rt_atomic_load(&mutex->holder, RT_ATOMIC_RELAXED);
if ((holder == RT_MUTEX_UNLOCKED) ||
(holder == RT_MUTEX_HOLDER_INTERRUPT))
{
return;
}
struct rt_task *const task =
(struct rt_task *)(holder & RT_MUTEX_HOLDER_MASK);
if (!rt_list_is_empty(&mutex->wait_list))
{
rt_list_remove(&mutex->list);
insert_mutex_by_priority(&task->mutex_list, mutex);
}
mutex = task_donate(task);
} while (mutex != NULL);
}
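// Catch rt_woken_tick up to the current tick count, waking each sleeper
// whose wake tick has been reached. Timed-out semaphore and mutex waits
// report failure through their success pointers; timed-out event waits
// receive whatever bits rt_event_trywait could take.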
static void tick_syscall(void)
{
const unsigned long ticks_to_advance = rt_tick_count() - rt_woken_tick;
while (!rt_list_is_empty(&rt_sleep_list))
{
struct rt_task *const task =
task_from_sleep_list(rt_list_front(&rt_sleep_list));
if (ticks_to_advance < (task->wake_tick - rt_woken_tick))
{
break;
}
if (task->state == RT_TASK_STATE_BLOCKED_ON_SEM_TIMEDWAIT)
{
rt_list_remove(&task->list);
struct rt_sem *const sem = task->blocker.sem;
task->blocker.sem = NULL;
rt_sem_add_n(sem, 1);
--sem->num_waiters;
wake_sem_waiters(sem);
*task->syscall_return.success = false;
task->syscall_return.success = NULL;
}
else if (task->state == RT_TASK_STATE_BLOCKED_ON_MUTEX_TIMEDLOCK)
{
rt_list_remove(&task->list);
struct rt_mutex *const mutex = task->blocker.mutex;
task->blocker.mutex = NULL;
if (rt_list_is_empty(&mutex->wait_list))
{
rt_atomic_fetch_and(&mutex->holder, RT_MUTEX_HOLDER_MASK,
RT_ATOMIC_RELAXED);
rt_list_remove(&mutex->list);
}
mutex_donate(mutex);
*task->syscall_return.success = false;
task->syscall_return.success = NULL;
}
else if (task->state == RT_TASK_STATE_BLOCKED_ON_EVENT_TIMEDWAIT)
{
rt_list_remove(&task->list);
struct rt_event *const event = task->blocker.event;
task->blocker.event = NULL;
*task->syscall_return.bits =
rt_event_trywait(event, *task->syscall_return.bits);
task->syscall_return.bits = NULL;
if (rt_list_is_empty(&event->wait_list))
{
rt_atomic_fetch_and(&event->bits, ~RT_EVENT_WAITED_MASK,
RT_ATOMIC_RELAXED);
}
}
rt_list_remove(&task->sleep_list);
task_ready(task);
}
rt_woken_tick += ticks_to_advance;
}
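// rt_tick_advance bumps the tick count and pends a tick syscall. A single
// static record is reused; its pending flag prevents pushing it again
// before the previous tick syscall has run.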
static rt_atomic_ulong rt_tick;
void rt_tick_advance(void)
{
const unsigned long old_tick =
rt_atomic_fetch_add(&rt_tick, 1, RT_ATOMIC_RELAXED);
rt_trace_tick(old_tick + 1);
RT_MPU_PRIV_DATA(rt_tick_record)
static struct rt_syscall_record rt_tick_record = {
.syscall = RT_SYSCALL_PENDABLE_TICK,
.pending = RT_ATOMIC_FLAG_INIT,
};
if (!rt_atomic_flag_test_and_set(&rt_tick_record.pending,
RT_ATOMIC_ACQUIRE))
{
rt_syscall_push(&rt_tick_record);
rt_syscall_pend();
}
}
unsigned long rt_tick_count(void)
{
return rt_atomic_load(&rt_tick, RT_ATOMIC_RELAXED);
}
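// Lock-free push of a syscall record onto the pending stack using a
// compare-and-swap loop.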
void rt_syscall_push(struct rt_syscall_record *record)
{
rt_trace_syscall_pend(record);
record->next = rt_atomic_load(&rt_pending_syscalls, RT_ATOMIC_RELAXED);
while (!rt_atomic_compare_exchange_weak(&rt_pending_syscalls, &record->next,
record, RT_ATOMIC_RELEASE,
RT_ATOMIC_RELAXED))
{
}
}
void rt_task_cycle_pause(void)
{
#if RT_TASK_CYCLE_ENABLE
const uint32_t task_cycles = rt_cycle() - rt_active_task->start_cycle;
rt_active_task->total_cycles += task_cycles;
#endif
}
void rt_task_cycle_resume(void)
{
#if RT_TASK_CYCLE_ENABLE
rt_active_task->start_cycle = rt_cycle();
#endif
}
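// Rotate the current priority level's ready list so the next task at the
// same priority runs.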
static void yield(void)
{
struct rt_list **const list = &rt_ready_lists[min_ready_priority()];
*list = (*list)->next;
}
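// Execute a syscall on behalf of the active task and return the task to
// switch to, or NULL if the active task should keep running.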
struct rt_task *rt_syscall_run(enum rt_syscall syscall, uintptr_t arg0,
uintptr_t arg1, uintptr_t arg2)
{
rt_task_cycle_pause();
rt_trace_syscall_run(syscall, arg0, arg1, arg2);
switch (syscall)
{
case RT_SYSCALL_SLEEP:
{
const unsigned long ticks = arg0;
task_set_state(rt_active_task, RT_TASK_STATE_ASLEEP);
task_unready(rt_active_task);
sleep_until(rt_active_task, rt_woken_tick + ticks);
break;
}
case RT_SYSCALL_SLEEP_PERIODIC:
{
const unsigned long last_wake_tick = arg0, period = arg1,
ticks_since_last_wake =
rt_woken_tick - last_wake_tick;
if (ticks_since_last_wake < period)
{
task_set_state(rt_active_task, RT_TASK_STATE_ASLEEP);
task_unready(rt_active_task);
sleep_until(rt_active_task, last_wake_tick + period);
}
break;
}
case RT_SYSCALL_SEM_WAIT:
{
struct rt_sem *const sem = (struct rt_sem *)arg0;
task_set_state(rt_active_task, RT_TASK_STATE_BLOCKED_ON_SEM_WAIT);
rt_active_task->blocker.sem = sem;
task_wait(rt_active_task, &sem->wait_list);
++sem->num_waiters;
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_SEM_TIMEDWAIT:
{
struct rt_sem *const sem = (struct rt_sem *)arg0;
const unsigned long ticks = arg1;
bool *const success = (bool *)arg2;
task_set_state(rt_active_task, RT_TASK_STATE_BLOCKED_ON_SEM_TIMEDWAIT);
rt_active_task->blocker.sem = sem;
rt_active_task->syscall_return.success = success;
task_wait(rt_active_task, &sem->wait_list);
sleep_until(rt_active_task, rt_woken_tick + ticks);
++sem->num_waiters;
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_SEM_POST:
{
struct rt_sem *const sem = (struct rt_sem *)arg0;
rt_sem_add_n(sem, (int)arg1);
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_MUTEX_LOCK:
{
struct rt_mutex *const mutex = (struct rt_mutex *)arg0;
if (trylock(mutex, (uintptr_t)rt_active_task))
{
rt_trace_mutex_lock(mutex, (uintptr_t)rt_active_task);
break;
}
rt_atomic_fetch_or(&mutex->holder, RT_MUTEX_WAITED_MASK,
RT_ATOMIC_RELAXED);
task_set_state(rt_active_task, RT_TASK_STATE_BLOCKED_ON_MUTEX_LOCK);
rt_active_task->blocker.mutex = mutex;
task_wait(rt_active_task, &mutex->wait_list);
mutex_donate(mutex);
break;
}
case RT_SYSCALL_MUTEX_TIMEDLOCK:
{
struct rt_mutex *const mutex = (struct rt_mutex *)arg0;
const unsigned long ticks = arg1;
bool *const success = (bool *)arg2;
if (trylock(mutex, (uintptr_t)rt_active_task))
{
*success = true;
break;
}
rt_atomic_fetch_or(&mutex->holder, RT_MUTEX_WAITED_MASK,
RT_ATOMIC_RELAXED);
task_set_state(rt_active_task,
RT_TASK_STATE_BLOCKED_ON_MUTEX_TIMEDLOCK);
rt_active_task->blocker.mutex = mutex;
rt_active_task->syscall_return.success = success;
task_wait(rt_active_task, &mutex->wait_list);
sleep_until(rt_active_task, rt_woken_tick + ticks);
mutex_donate(mutex);
break;
}
case RT_SYSCALL_MUTEX_UNLOCK:
{
struct rt_mutex *const mutex = (struct rt_mutex *)arg0;
rt_atomic_store(&mutex->holder, RT_MUTEX_UNLOCKED, RT_ATOMIC_RELEASE);
rt_list_remove(&mutex->list);
task_donate(rt_active_task);
wake_mutex_waiter(mutex);
break;
}
case RT_SYSCALL_EVENT_WAIT:
{
struct rt_event *const event = (struct rt_event *)arg0;
uint32_t *const bits = (uint32_t *)arg1;
task_set_state(rt_active_task, RT_TASK_STATE_BLOCKED_ON_EVENT_WAIT);
rt_active_task->blocker.event = event;
rt_active_task->syscall_return.bits = bits;
task_wait(rt_active_task, &event->wait_list);
rt_atomic_fetch_or(&event->bits, RT_EVENT_WAITED_MASK,
RT_ATOMIC_RELAXED);
wake_event_waiters(event);
break;
}
case RT_SYSCALL_EVENT_TIMEDWAIT:
{
struct rt_event *const event = (struct rt_event *)arg0;
uint32_t *const bits = (uint32_t *)arg1;
const unsigned long ticks = arg2;
task_set_state(rt_active_task,
RT_TASK_STATE_BLOCKED_ON_EVENT_TIMEDWAIT);
rt_active_task->blocker.event = event;
rt_active_task->syscall_return.bits = bits;
task_wait(rt_active_task, &event->wait_list);
sleep_until(rt_active_task, rt_woken_tick + ticks);
rt_atomic_fetch_or(&event->bits, RT_EVENT_WAITED_MASK,
RT_ATOMIC_RELAXED);
wake_event_waiters(event);
break;
}
case RT_SYSCALL_EVENT_SET:
{
struct rt_event *const event = (struct rt_event *)arg0;
wake_event_waiters(event);
break;
}
case RT_SYSCALL_YIELD:
yield();
break;
case RT_SYSCALL_EXIT:
task_set_state(rt_active_task, RT_TASK_STATE_EXITED);
task_unready(rt_active_task);
break;
}
struct rt_task *const new_task = sched();
rt_task_cycle_resume();
return new_task;
}
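// Drain and execute the stack of pending syscall records pushed by
// rt_syscall_push, then reschedule.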
struct rt_task *rt_syscall_run_pending(void)
{
rt_task_cycle_pause();
struct rt_syscall_record *record =
rt_atomic_exchange(&rt_pending_syscalls, NULL, RT_ATOMIC_ACQUIRE);
while (record != NULL)
{
rt_trace_syscall_run_pending(record);
struct rt_syscall_record *next_record = record->next;
switch (record->syscall)
{
case RT_SYSCALL_PENDABLE_SEM_POST:
{
struct rt_sem *const sem = record->args.sem_post.sem;
rt_sem_add_n(sem, record->args.sem_post.n);
rt_atomic_flag_clear(&record->pending, RT_ATOMIC_RELEASE);
wake_sem_waiters(sem);
break;
}
case RT_SYSCALL_PENDABLE_EVENT_SET:
{
struct rt_event *const event = record->args.event_set.event;
rt_atomic_flag_clear(&record->pending, RT_ATOMIC_RELEASE);
wake_event_waiters(event);
break;
}
case RT_SYSCALL_PENDABLE_TICK:
rt_atomic_flag_clear(&record->pending, RT_ATOMIC_RELEASE);
tick_syscall();
yield();
break;
}
record = next_record;
}
struct rt_task *const new_task = sched();
rt_task_cycle_resume();
return new_task;
}
static rt_atomic(const char *) rt_panic_msg;
__attribute__((weak, noreturn)) void rt_panic(const char *msg)
{
rt_atomic_store(&rt_panic_msg, msg, RT_ATOMIC_SEQ_CST);
rt_abort();
}
__attribute__((weak)) void rt_assert(bool condition, const char *msg)
{
if (!condition)
{
rt_panic(msg);
}
}
__attribute__((weak, noreturn)) void rt_exit(void)
{
rt_trace_end();
rt_log_flush();
rt_trap();
}