/*
 * rt 0.17.0
 *
 * A real-time operating system capable of full preemption.
 * Event flag implementation; see the project documentation for the API.
 */
#include <rt/event.h>

#include <rt/interrupt.h>
#include <rt/syscall.h>
#include <rt/tick.h>
#include <rt/trace.h>

/* Strip the reserved wait-control flags, leaving only user-visible
 * event bits. */
static inline uint32_t event_bits(uint32_t bits)
{
    const uint32_t reserved = RT_EVENT_WAIT_RESERVED;
    return bits & ~reserved;
}

/* True if any task is currently recorded as waiting on these bits. */
static inline bool event_is_waited(uint32_t bits)
{
    const uint32_t waiters = bits & RT_EVENT_WAITED_MASK;
    return waiters != 0;
}

/* Decide whether the sampled event bits satisfy a wait specification:
 * with RT_EVENT_WAIT_ALL every requested bit must be set, otherwise any
 * one requested bit suffices. */
bool rt_event_bits_match(uint32_t bits, uint32_t wait)
{
    const uint32_t wanted = event_bits(wait);
    const uint32_t satisfied = bits & wanted;
    const bool wait_all = (wait & RT_EVENT_WAIT_ALL) != 0;
    return wait_all ? (satisfied == wanted) : (satisfied != 0);
}

/* Atomically clear the given bits and return the user-visible bits that
 * were set beforehand. */
uint32_t rt_event_clear(struct rt_event *event, uint32_t clear)
{
    const uint32_t prior =
        rt_atomic_fetch_and(&event->bits, ~clear, RT_ATOMIC_ACQ_REL);
    return event_bits(prior);
}

/* Sample the current user-visible event bits without modifying them. */
uint32_t rt_event_get(const struct rt_event *event)
{
    const uint32_t current = rt_atomic_load(&event->bits, RT_ATOMIC_ACQUIRE);
    return event_bits(current);
}

/* Queue this event's set record for syscall processing, unless a set is
 * already pending (the test-and-set flag de-duplicates submissions). */
static void event_set_pend(struct rt_event *event)
{
    const bool already_pending = rt_atomic_flag_test_and_set(
        &event->set_record.pending, RT_ATOMIC_ACQUIRE);
    if (already_pending)
    {
        return;
    }
    rt_syscall_push(&event->set_record);
    rt_syscall_pend();
}

/* OR the new bits into the event and trace the operation; returns the
 * full bit word (including waited flags) as it was before the set. */
static uint32_t event_set_fast(struct rt_event *event, uint32_t set)
{
    const uint32_t prior =
        rt_atomic_fetch_or(&event->bits, set, RT_ATOMIC_ACQ_REL);
    rt_trace_event_set(event, prior, set);
    return prior;
}

/* Set event bits from any context. If tasks are waiting, wake them: from
 * an interrupt the wake is deferred through the pend mechanism; from a
 * task it goes through the set syscall directly. Returns the user-visible
 * bits prior to the set (or the syscall's result on the task path). */
uint32_t rt_event_set(struct rt_event *event, uint32_t set)
{
    const uint32_t prior = event_set_fast(event, set);
    if (event_is_waited(prior))
    {
        if (!rt_interrupt_is_active())
        {
            return rt_syscall_event_set(event);
        }
        event_set_pend(event);
    }
    return event_bits(prior);
}

/* Set event bits from task context only; waiters are woken through the
 * set syscall without checking for interrupt context. */
uint32_t rt_event_set_from_task(struct rt_event *event, uint32_t set)
{
    const uint32_t prior = event_set_fast(event, set);
    return event_is_waited(prior) ? rt_syscall_event_set(event)
                                  : event_bits(prior);
}

/* Set event bits from interrupt context only; waiters are woken via the
 * deferred pend path. Returns the user-visible bits prior to the set. */
uint32_t rt_event_set_from_interrupt(struct rt_event *event, uint32_t set)
{
    const uint32_t prior = event_set_fast(event, set);
    if (!event_is_waited(prior))
    {
        return event_bits(prior);
    }
    event_set_pend(event);
    return event_bits(prior);
}

/* Wait for the requested bits, blocking indefinitely via syscall if the
 * fast non-blocking attempt does not already satisfy the wait. */
uint32_t rt_event_wait(struct rt_event *event, uint32_t wait)
{
    const uint32_t sampled = rt_event_trywait(event, wait);
    return rt_event_bits_match(sampled, wait)
               ? sampled
               : rt_syscall_event_wait(event, wait);
}

/*
 * Attempt to satisfy a wait without blocking.
 *
 * Unless the wait requests RT_EVENT_WAIT_NOCLEAR, the waited-for bits are
 * atomically consumed (cleared) when the wait condition holds; otherwise
 * the bits are only observed. Returns the sampled user-visible bits (from
 * before any clearing); the caller checks them against the wait with
 * rt_event_bits_match.
 */
uint32_t rt_event_trywait(struct rt_event *event, uint32_t wait)
{
    uint32_t bits;
    if ((wait & RT_EVENT_WAIT_NOCLEAR) == 0)
    {
        bits = rt_atomic_load(&event->bits, RT_ATOMIC_RELAXED);
        const uint32_t waitbits = event_bits(wait);
        /* While the condition holds, try to clear the waited-for bits with
         * a CAS. The weak compare-exchange reloads `bits` on failure, so
         * each iteration re-evaluates the match against the fresh value;
         * the loop exits when the condition fails or the CAS succeeds. */
        while (rt_event_bits_match(bits, wait) &&
               !rt_atomic_compare_exchange_weak(&event->bits, &bits,
                                                bits & ~waitbits,
                                                RT_ATOMIC_ACQ_REL,
                                                RT_ATOMIC_RELAXED))
        {
        }
    }
    else
    {
        /* A no-clear wait only needs to observe the current bits. */
        bits = rt_atomic_load(&event->bits, RT_ATOMIC_ACQUIRE);
    }
    rt_trace_event_wait(event, bits, wait);
    return event_bits(bits);
}

/* Wait for the requested bits with a tick-based timeout, falling back to
 * the timed-wait syscall when the fast attempt does not succeed. */
uint32_t rt_event_timedwait(struct rt_event *event, uint32_t wait,
                            unsigned long ticks)
{
    const uint32_t sampled = rt_event_trywait(event, wait);
    if (!rt_event_bits_match(sampled, wait))
    {
        return rt_syscall_event_timedwait(event, wait, ticks);
    }
    return sampled;
}