/* rt 0.19.1
 *
 * A real-time operating system capable of full preemption.
 * (Documentation header.)
 */
#pragma once

#include <rt/arch/control.h>
#include <rt/arch/dcb.h>
#include <rt/arch/dwt.h>
#include <rt/arch/scb.h>

#include "exc_return.h"

#define PSR_THUMB (UINT32_C(1) << 24)

/* Saved register context for a task, stored on the task's stack.
 * NOTE(review): the field order appears to mirror the order in which the
 * context-switch code saves/restores registers and the hardware-stacked
 * exception frame (r0-r3, r12, lr, pc, psr) — do not reorder fields without
 * checking the assembly that consumes this layout. */
struct context
{
#if !RT_ARM_V6M && RT_MPU_TASK_REGIONS_ENABLE
    /* Only track per-task privilege if per-task MPU regions are enabled and
     * the architecture version is 7 or greater. Dropping privilege prevents
     * masking interrupts, which is required for atomic operations on v6-m. */
    uint32_t control;
#endif // !RT_ARM_V6M && RT_MPU_TASK_REGIONS_ENABLE

#if RT_ARM_V8M
    /* v8-M stack-limit register, saved/restored per task. */
    uint32_t psplim;
#endif // RT_ARM_V8M

    /* Callee-saved registers, saved by software on context switch. */
    uint32_t r4, r5, r6, r7, r8, r9, r10, r11;

#if RT_ARM_FP
    /* Only use a per-task exception return value if floating-point is enabled,
     * because otherwise the exception return value is always the same. This
     * is the lr value on exception entry, so place it after r4-r11 so it can
     * be saved/restored along with those registers. */
    uint32_t exc_return;
#endif // RT_ARM_FP

    /* Hardware-stacked exception frame. */
    uint32_t r0, r1, r2, r3, r12, lr, pc, psr;
};

/* Initialize the architecture-specific fields of a freshly-created task
 * context. The generic fields (r0-r12, lr, pc) are set up elsewhere. */
static void profile_context_init(struct context *ctx)
{
    /* The T (thumb) bit must be set in the saved psr; M-profile cores only
     * execute thumb code, and returning to a frame without it faults. */
    ctx->psr = PSR_THUMB;

#if RT_ARM_FP
    /* New tasks have no FP state yet, so begin with the no-FP exception
     * return value; it is updated if the task later uses floating point. */
    ctx->exc_return = (uint32_t)EXC_RETURN_TASK_NOFP;
#endif // RT_ARM_FP

#if !RT_ARM_V6M && RT_MPU_TASK_REGIONS_ENABLE
    /* Tasks start privileged. The SPSEL bit is RAZ/WI in handler mode where
     * context switches occur. The exception return value specifies which stack
     * pointer is used when returning to thread mode. */
    ctx->control = 0;
#endif // !RT_ARM_V6M && RT_MPU_TASK_REGIONS_ENABLE
}

/* Permanently drop the calling task to unprivileged execution by setting
 * CONTROL.nPRIV. Only compiled in when per-task MPU regions are enabled on
 * v7-M or later; on v6-M tasks must stay privileged (see struct context). */
void rt_task_drop_privilege(void)
{
#if !RT_ARM_V6M && RT_MPU_TASK_REGIONS_ENABLE
    /* Read-modify-write CONTROL so other bits (e.g. FPCA, SPSEL) survive. */
    uint32_t control;
    __asm__ __volatile__("mrs %0, control" : "=r"(control));
    /* dsb before the write and isb after it, so prior memory accesses
     * complete while still privileged and subsequent instructions are
     * fetched with the new privilege level. */
    __asm__("dsb; msr control, %0; isb"
            :
            : "r"(control | CONTROL_NPRIV)
            : "memory");
#endif // !RT_ARM_V6M && RT_MPU_TASK_REGIONS_ENABLE
}

/* Return true when executing inside an exception handler. IPSR holds the
 * active exception number and reads as zero in thread mode, so any nonzero
 * value means handler context. */
bool rt_interrupt_is_active(void)
{
    uint32_t exception_number;
    __asm__ __volatile__("mrs %0, ipsr" : "=r"(exception_number));
    return exception_number != UINT32_C(0);
}

/* On M-Profile, the exception entry process saves the volatile registers, so
 * no registers are clobbered by svc. */
void rt_syscall_0(enum rt_syscall syscall)
{
    register enum rt_syscall r0 __asm__("r0") = syscall;
    __asm__("svc 0" : : "r"(r0) : "memory");
}

/* Issue a one-argument syscall: syscall number in r0, argument in r1. The
 * register-variable bindings pin the ABI the SVC handler expects. */
void rt_syscall_1(enum rt_syscall syscall, uintptr_t arg0)
{
    register enum rt_syscall r0 __asm__("r0") = syscall;
    register uintptr_t r1 __asm__("r1") = arg0;
    __asm__("svc 0" : : "r"(r0), "r"(r1) : "memory");
}

/* Issue a two-argument syscall: syscall number in r0, arguments in r1-r2. */
void rt_syscall_2(enum rt_syscall syscall, uintptr_t arg0, uintptr_t arg1)
{
    register enum rt_syscall r0 __asm__("r0") = syscall;
    register uintptr_t r1 __asm__("r1") = arg0;
    register uintptr_t r2 __asm__("r2") = arg1;
    __asm__("svc 0" : : "r"(r0), "r"(r1), "r"(r2) : "memory");
}

/* Issue a three-argument syscall: syscall number in r0, arguments in r1-r3. */
void rt_syscall_3(enum rt_syscall syscall, uintptr_t arg0, uintptr_t arg1,
                  uintptr_t arg2)
{
    register enum rt_syscall r0 __asm__("r0") = syscall;
    register uintptr_t r1 __asm__("r1") = arg0;
    register uintptr_t r2 __asm__("r2") = arg1;
    register uintptr_t r3 __asm__("r3") = arg2;
    __asm__("svc 0" : : "r"(r0), "r"(r1), "r"(r2), "r"(r3) : "memory");
}

/* Request a deferred syscall by setting PendSV; the PendSV handler runs once
 * no higher-priority exception is active. */
void rt_syscall_pend(void)
{
    SCB->icsr = SCB_ICSR_PENDSVSET;
}

/* Enable the DWT cycle counter used by rt_cycle(). Weak so a port or
 * application can substitute its own cycle source. The DWT is not available
 * on v6-M, so this is a no-op there. */
__attribute__((weak)) void rt_cycle_init(void)
{
#if !RT_ARM_V6M && RT_CYCLE_ENABLE
    // Enable the cycle counter.
    DCB->demcr |= DCB_DEMCR_TRCENA; // grant trace/DWT access
    DWT->lar = DWT_LAR_UNLOCK;      // unlock DWT registers where a lock exists
    DWT->ctrl |= DWT_CTRL_CYCCNTENA;
#endif // !RT_ARM_V6M && RT_CYCLE_ENABLE
}

/* Return the current DWT cycle count, or 0 when the counter is unavailable
 * (v6-M) or disabled. Weak so a port can provide an alternative source. */
__attribute__((weak)) uint32_t rt_cycle(void)
{
#if !RT_ARM_V6M && RT_CYCLE_ENABLE
    return DWT->cyccnt;
#else
    return 0;
#endif
}

/* NOTE(review): rt_tp appears to be the software thread pointer saved and
 * restored per task for thread-local storage — confirm against the
 * context-switch code and the TLS access helpers. */
void *rt_tp;

/* Set the current task's thread-local storage base pointer. */
void rt_tls_set(void *tls)
{
    rt_tp = tls;
}