/* rt 0.17.0 — a real-time operating system capable of full preemption.
 * See the project documentation for details. */
#include <rt/sem.h>

#include <rt/interrupt.h>
#include <rt/task.h>
#include <rt/trace.h>

/* Saturating add: return value + n clamped to max. Comparing against
 * max - n (rather than computing value + n first) avoids signed overflow
 * when the sum would exceed max. */
static int new_value(int value, int n, int max)
{
    return (value > max - n) ? max : value + n;
}

/* Post n counts from interrupt context by pending the semaphore's
 * dedicated post system-call record. The order of operations here is
 * significant: the pending flag is claimed first, then the argument is
 * stored, then the record is pushed and the system call is pended. */
static void sem_post_pend(struct rt_sem *sem, int n)
{
    /* If the semaphore's post record is already pending, don't attempt to
     * use it again. The interrupt that is using it will still cause the
     * post to occur, so no posts are missed in this case. Instead, just
     * add to the semaphore value directly. The system call will run
     * after this increment has taken effect. */
    if (rt_atomic_flag_test_and_set(&sem->post_record.pending,
                                    RT_ATOMIC_ACQUIRE))
    {
        rt_sem_add_n(sem, n);
        return;
    }
    /* The flag was clear, so this caller now owns the record: store the
     * post count and queue the system call. */
    sem->post_record.args.sem_post.n = n;
    rt_syscall_push(&sem->post_record);
    rt_syscall_pend();
}

/* Attempt to post n counts by adding to the semaphore value directly,
 * with no system call. Returns true on success, or false when the post
 * must instead go through a system call because there are waiters. */
static bool sem_post_fast(struct rt_sem *sem, int n)
{
    int old = rt_atomic_load(&sem->value, RT_ATOMIC_RELAXED);
    for (;;)
    {
        /* A negative value means there are waiters, and the post needs to
         * happen in a system call. Adding to the semaphore value directly
         * when there are waiters can result in priority inversion if a
         * context switch occurs before wakes are resolved but after the
         * value is incremented, and the semaphore is decremented on the
         * fast path by another task that is lower priority than a
         * previous waiter. */
        if (old < 0)
        {
            return false;
        }
        const int incremented = new_value(old, n, sem->max_value);
        if (rt_atomic_compare_exchange_weak(&sem->value, &old, incremented,
                                            RT_ATOMIC_RELEASE,
                                            RT_ATOMIC_RELAXED))
        {
            rt_trace_sem_update(sem, old, incremented);
            return true;
        }
    }
}

/* Post n counts, selecting the posting mechanism based on the current
 * execution context. */
void rt_sem_post_n(struct rt_sem *sem, int n)
{
    if (sem_post_fast(sem, n))
    {
        return;
    }
    /* The fast path failed, so the post must be resolved by a system
     * call. In an interrupt, we need to use the post system call record
     * attached to the semaphore because the interrupt will return before
     * system calls are handled and the stack frame won't be live
     * anymore. */
    if (rt_interrupt_is_active())
    {
        sem_post_pend(sem, n);
    }
    else
    {
        rt_syscall_sem_post(sem, n);
    }
}

/* Post n counts from task context. The caller guarantees no interrupt is
 * active, so the slow path can always use a direct system call. */
void rt_sem_post_n_from_task(struct rt_sem *sem, int n)
{
    if (sem_post_fast(sem, n))
    {
        return;
    }
    rt_syscall_sem_post(sem, n);
}

/* Post n counts from interrupt context. The slow path must use the
 * semaphore's post record rather than a stack-based system call. */
void rt_sem_post_n_from_interrupt(struct rt_sem *sem, int n)
{
    if (sem_post_fast(sem, n))
    {
        return;
    }
    sem_post_pend(sem, n);
}

/* Post a single count; the execution context is detected automatically. */
void rt_sem_post(struct rt_sem *sem)
{
    rt_sem_post_n(sem, 1);
}

/* Post a single count from task context. */
void rt_sem_post_from_task(struct rt_sem *sem)
{
    rt_sem_post_n_from_task(sem, 1);
}

/* Post a single count from interrupt context. */
void rt_sem_post_from_interrupt(struct rt_sem *sem)
{
    rt_sem_post_n_from_interrupt(sem, 1);
}

/* Take one count only if one is immediately available. Returns true on
 * success; never blocks. */
bool rt_sem_trywait(struct rt_sem *sem)
{
    int old = rt_atomic_load(&sem->value, RT_ATOMIC_RELAXED);
    for (;;)
    {
        /* Give up rather than driving the value negative, which would
         * register this caller as a waiter. */
        if (old <= 0)
        {
            return false;
        }
        if (rt_atomic_compare_exchange_weak(&sem->value, &old, old - 1,
                                            RT_ATOMIC_ACQUIRE,
                                            RT_ATOMIC_RELAXED))
        {
            rt_trace_sem_update(sem, old, old - 1);
            return true;
        }
    }
}

/* Take one count, blocking in a system call if none is available. */
void rt_sem_wait(struct rt_sem *sem)
{
    const int old = rt_atomic_fetch_sub(&sem->value, 1, RT_ATOMIC_ACQUIRE);
    rt_trace_sem_update(sem, old, old - 1);
    if (old <= 0)
    {
        /* The value is now negative: this caller is a waiter and must
         * block until a post wakes it. */
        rt_syscall_sem_wait(sem);
    }
}

/* Take one count, blocking for at most ticks. Returns whether the
 * semaphore was acquired. A zero timeout degenerates to a non-blocking
 * attempt. */
bool rt_sem_timedwait(struct rt_sem *sem, unsigned long ticks)
{
    if (ticks == 0)
    {
        return rt_sem_trywait(sem);
    }
    const int old = rt_atomic_fetch_sub(&sem->value, 1, RT_ATOMIC_ACQUIRE);
    rt_trace_sem_update(sem, old, old - 1);
    if (old <= 0)
    {
        /* Now registered as a waiter; block with a timeout. */
        return rt_syscall_sem_timedwait(sem, ticks);
    }
    return true;
}

/* Add n directly to the semaphore value, saturating at max_value. Used
 * from sem_post_pend when the post record is already in flight, so a
 * system call is guaranteed to run after this increment takes effect. */
void rt_sem_add_n(struct rt_sem *sem, int n)
{
    int old = rt_atomic_load(&sem->value, RT_ATOMIC_RELAXED);
    for (;;)
    {
        const int bumped = new_value(old, n, sem->max_value);
        if (rt_atomic_compare_exchange_weak(&sem->value, &old, bumped,
                                            RT_ATOMIC_RELEASE,
                                            RT_ATOMIC_RELAXED))
        {
            rt_trace_sem_update(sem, old, bumped);
            return;
        }
    }
}