rt 0.17.0

A real-time operating system capable of full preemption

#include <rt/mutex.h>

#include <rt/assert.h>
#include <rt/interrupt.h>
#include <rt/syscall.h>
#include <rt/task.h>
#include <rt/trace.h>

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
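
/* The holder word encodes the whole lock state: RT_MUTEX_UNLOCKED when free;
 * otherwise the holder, extracted with RT_MUTEX_HOLDER_MASK (a task pointer,
 * or RT_MUTEX_HOLDER_INTERRUPT, since all interrupts share one identity),
 * plus a waited bit (RT_MUTEX_WAITED_MASK) that is set while tasks are
 * blocked on the mutex. Recursive mutexes count repeat acquisitions by the
 * holder in the separate level field; non-recursive mutexes mark it with
 * RT_MUTEX_LEVEL_NONRECURSIVE. */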

static bool trylock(struct rt_mutex *mutex, uintptr_t new_holder)
{
    uintptr_t e = RT_MUTEX_UNLOCKED;
    if (rt_atomic_compare_exchange(&mutex->holder, &e, new_holder,
                                   RT_ATOMIC_ACQUIRE, RT_ATOMIC_RELAXED))
    {
        rt_trace_mutex_lock(mutex, new_holder);
        return true;
    }
    const uintptr_t holder = e & RT_MUTEX_HOLDER_MASK;
    if (holder == new_holder)
    {
        // Already holding this mutex.
        rt_assert(mutex->level != RT_MUTEX_LEVEL_NONRECURSIVE,
                  "recursively locked a non-recursive mutex");
        ++mutex->level;
        return true;
    }
    rt_trace_mutex_lock_fail(mutex, new_holder, holder);
    return false;
}

static inline uintptr_t task_or_interrupt_ptr(void)
{
    return rt_interrupt_is_active() ? RT_MUTEX_HOLDER_INTERRUPT
                                    : (uintptr_t)rt_task_self();
}

bool rt_mutex_trylock(struct rt_mutex *mutex)
{
    return trylock(mutex, task_or_interrupt_ptr());
}
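
/* Usage sketch (illustrative; "buf_lock" and its initialization are
 * hypothetical and live outside this file):
 *
 *     if (rt_mutex_trylock(&buf_lock))
 *     {
 *         // ... touch the shared data ...
 *         rt_mutex_unlock(&buf_lock);
 *     }
 *
 * Unlike rt_mutex_lock, this never blocks, so it is also safe to call from an
 * interrupt handler. */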

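/* Block until the mutex is acquired. The lock-free trylock above is the fast
 * path; on contention, a system call lets the scheduler suspend the task
 * until the holder releases the mutex. */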
void rt_mutex_lock(struct rt_mutex *mutex)
{
    rt_assert(!rt_interrupt_is_active(), "mutex lock from an interrupt");
    if (!trylock(mutex, (uintptr_t)rt_task_self()))
    {
        rt_syscall_mutex_lock(mutex);
    }
}

bool rt_mutex_timedlock(struct rt_mutex *mutex, unsigned long ticks)
{
    rt_assert(!rt_interrupt_is_active() || (ticks == 0),
              "mutex timedlock from an interrupt");
    return trylock(mutex, task_or_interrupt_ptr()) ||
           ((ticks != 0) && rt_syscall_mutex_timedlock(mutex, ticks));
}
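
/* Timeout usage sketch (illustrative; "sensor_lock" is hypothetical, and the
 * duration of a tick is platform-defined):
 *
 *     if (!rt_mutex_timedlock(&sensor_lock, 100))
 *     {
 *         // timed out; skip this sample
 *     }
 *
 * With ticks == 0 this degenerates to rt_mutex_trylock, which is why the
 * assert above allows that case from an interrupt. */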

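/* Release the mutex from the current task or interrupt. A recursive release
 * only decrements the level; the final release clears the holder word on the
 * fast path, or makes a system call to wake a waiting task. */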
void rt_mutex_unlock(struct rt_mutex *mutex)
{
    uintptr_t holder = rt_atomic_load(&mutex->holder, RT_ATOMIC_RELAXED);
    rt_assert((holder & RT_MUTEX_HOLDER_MASK) == task_or_interrupt_ptr(),
              "unlock while not holding the mutex");

    if (mutex->level > 0)
    {
        // Mutex is recursive and has been locked more than once.
        --mutex->level;
        return;
    }

    /* We've already loaded mutex->holder, and if the waited bit is set, we can
     * go directly to the syscall path; otherwise, we need to compare-and-swap,
     * because the waited bit might have been set since the load occurred.
     * NOTE: the reverse is also true, that the waited bit might be set at
     * first, but cleared by the time we get here if a timedlock timed out.
     * Making a syscall in that case is okay though, and avoiding it would
     * require doing yet another load of mutex->holder. */
    if (((holder & RT_MUTEX_WAITED_MASK) == 0) &&
        rt_atomic_compare_exchange(&mutex->holder, &holder, RT_MUTEX_UNLOCKED,
                                   RT_ATOMIC_RELEASE, RT_ATOMIC_RELAXED))
    {
        rt_trace_mutex_unlock(mutex, holder);
    }
    else
    {
        /* This path should only be taken by a task: an interrupt can hold the
         * mutex only after a successful trylock, and no task can begin
         * waiting while that interrupt is active, so an interrupt's unlock
         * always succeeds on the fast path above. */
        rt_syscall_mutex_unlock(mutex);
    }
}
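
/* Recursive locking sketch (illustrative; "m" is hypothetical and assumed to
 * be initialized outside this file as a recursive mutex starting at level 0):
 *
 *     rt_mutex_lock(&m);   // fast path: holder is set, level stays 0
 *     rt_mutex_lock(&m);   // same holder: level becomes 1
 *     rt_mutex_unlock(&m); // level back to 0; the mutex is still held
 *     rt_mutex_unlock(&m); // final release: the holder word is cleared
 */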