/* rt 0.19.1
 *
 * A real-time operating system capable of full preemption.
 * Documentation: see the rt project documentation.
 */
#include <rt/mpu.h>
#include <rt/task.h>

#include <rt/arch/mstatus.h>
#include <rt/arch/mix.h>
#include <rt/arch/context.h>

#include <rt/arch/msi.h>
#include <rt/arch/pseudo.S>

/* Program the PMP with the task regions of the task structure in a0.
 * Compiled out entirely when RT_MPU_TASK_REGIONS_ENABLE is 0.
 * The pmpcfg registers are cleared first so that no stale configuration
 * applies while the pmpaddr registers are being rewritten; the new
 * configurations are written only after the addresses are in place.
 * NOTE(review): the trailing fence presumably orders the CSR updates
 * against subsequent memory accesses — confirm against the target
 * core's PMP synchronization requirements. */
.macro mpuconfigure
#if RT_MPU_TASK_REGIONS_ENABLE
    pmpcfgclear RT_MPU_TASK_REGION_START_ID, RT_MPU_NUM_TASK_REGIONS
    pmpaddrset RT_MPU_TASK_REGION_START_ID, RT_MPU_NUM_TASK_REGIONS
    pmpcfgset RT_MPU_TASK_REGION_START_ID, RT_MPU_NUM_TASK_REGIONS
    fence
#endif
.endm

/* Recursively write \n pmpaddr CSRs starting at region index \r, from
 * the task structure pointed to by a0. Each step loads four words
 * (into t0-t3 via lwm) from RT_MPU_TASK_PMPADDR_OFFSET + 4*\r, writes
 * pmpaddr[\r .. \r+3], then recurses with (\r+4, \n-4).
 * NOTE(review): assumes \n is a multiple of 4 — the terminating .if
 * tests \n == 0 exactly and every step consumes four regions. */
.macro pmpaddrset r, n
    .if \n == 0
    .exitm
    .endif
    lwm t, 0, 3, a0, (RT_MPU_TASK_PMPADDR_OFFSET + 4*\r)
    csrw RT_MPU_CSR_PMPADDR0 + \r, t0
    csrw RT_MPU_CSR_PMPADDR0 + \r + 1, t1
    csrw RT_MPU_CSR_PMPADDR0 + \r + 2, t2
    csrw RT_MPU_CSR_PMPADDR0 + \r + 3, t3
    pmpaddrset (\r + 4), (\n - 4)
.endm

/* Recursively clear the pmpcfg CSRs covering \n regions starting at
 * region index \r. Four 8-bit region configs pack into each 32-bit
 * pmpcfg register, hence the \r / 4 CSR index and the step of 4.
 * NOTE(review): assumes \r and \n are multiples of 4 — the terminating
 * .if tests \n == 0 exactly. */
.macro pmpcfgclear r, n
    .if \n == 0
    .exitm
    .endif
    csrw RT_MPU_CSR_PMPCFG0 + (\r / 4), zero
    /* Recurse into pmpcfgclear itself for the remaining regions. The
     * previous tail call into pmpcfgset was a bug: it loaded and
     * applied the new region configurations before mpuconfigure had
     * programmed the pmpaddr registers. */
    pmpcfgclear (\r + 4), (\n - 4)
.endm

/* Recursively write the pmpcfg CSRs covering \n regions starting at
 * region index \r, from the task structure pointed to by a0. The byte
 * offset advances by \r because four 8-bit region configs occupy one
 * 32-bit word; the CSR index is \r / 4 for the same reason.
 * NOTE(review): assumes \r and \n are multiples of 4 — the terminating
 * .if tests \n == 0 exactly. */
.macro pmpcfgset r, n
    .if \n == 0
    .exitm
    .endif
    lw t0, (RT_MPU_TASK_PMPCFG_OFFSET + \r)(a0)
    csrw RT_MPU_CSR_PMPCFG0 + (\r / 4), t0
    pmpcfgset (\r + 4), (\n - 4)
.endm

/* Common system-call trap body, shared by the ecall path and the MSI
 * paths. On entry (via rt_trap_handler or rt_msi_handler) the volatile
 * frame has already been allocated and t0-t2 saved at sp+0..8.
 *
 * \mepc_adjust  amount added to the saved mepc: 4 for ecall (resume
 *               past the ecall instruction), 0 for interrupts.
 * \syscall_run  routine that runs the system call(s); returns the next
 *               task in a0, or 0 when no context switch is needed.
 */
.macro syscall_handler mepc_adjust, syscall_run
    // Record trap level 0. NOTE(review): entry writes 0 and exit writes
    // -1 below — confirm the encoding against rt_trap_level's C
    // definition.
    lui t1, %hi(rt_trap_level)
    sw zero, %lo(rt_trap_level)(t1)
    // Keep the interrupted stack pointer in t0; run the system call on
    // the main stack. NOTE(review): `la` yields the *address* of
    // rt_main_sp — presumably a symbol placed at the top of the main
    // stack; confirm in the linker script.
    mv t0, sp
    la sp, rt_main_sp
    // Mask the machine software interrupt, capture the pre-trap
    // mepc/mstatus, and re-enable global interrupts (csrrs sets
    // MSTATUS_MIE and returns the prior mstatus, in which MIE is clear
    // because the trap hardware moved it into MPIE).
    csrc mie, MIx_MSI
    csrr t1, mepc
    csrrs t2, mstatus, MSTATUS_MIE
.if \mepc_adjust != 0
    add t1, t1, \mepc_adjust
.endif
    // Complete the volatile frame on the old stack (t0):
    // +12 mepc(t1), +16 mstatus(t2), +20 a0-a7, +52 t3-t6, +68 ra.
    swm t, 1, 2, t0, 12
    swm a, 0, 7, t0, 20
    swm t, 3, 6, t0, 52
    sw ra, 68(t0)
    // Stash the old stack pointer across the call in mscratch.
    csrw mscratch, t0
    jal \syscall_run
    csrr t0, mscratch
    // a0 == 0: no context switch; skip the non-volatile save/restore.
    beqz a0, \@f
    // Extend the frame downward with the callee-saved registers.
    add t0, t0, -NONVOLATILE_CONTEXT_SIZE
    swm s, 0, 11, t0, 0

    // Store the stack pointer with the saved context.
    lui t1, %hi(rt_context_prev)
    lw t1, %lo(rt_context_prev)(t1)
    sw t0, 0(t1)

    mpuconfigure

    // Load the next task's saved context pointer.
    lw a0, RT_TASK_CTX_OFFSET(a0)

    /* Holding a memory reservation across a synchronous system call is
     * technically possible, but rt_syscall's inline assembly has a memory clobber
     * on it, so this shouldn't happen in real code. Therefore, don't bother
     * clearing the reservation.
     * NOTE: during an asynchronous system call, the memory reservation of the
     * current task needs to be cleared. Depending on the core's implementation of
     * Zalrsc, this may happen automatically when the exception occurs, or may
     * happen with any store. If the reservation can only be cleared with another
     * sc, then we need to do one here. */

    // Restore the next task's callee-saved registers.
    lwm s, 0, 11, a0, 0

    // Switch to the new task stack.
    add t0, a0, NONVOLATILE_CONTEXT_SIZE
\@: lwm a, 0, 7, t0, 20
    lwm t, 3, 6, t0, 52
    lw ra, 68(t0)
    /* Restore mstatus before mepc so that interrupts are disabled first.
     * Otherwise mepc could be clobbered after it is restored. */
    lw t1, 16(t0)
    csrw mstatus, t1
    lw t2, 12(t0)
    // Unmask the machine software interrupt again before returning.
    csrs mie, MIx_MSI
    csrw mepc, t2
    // rt_trap_level = -1: leaving the trap.
    li t2, -1
    lui t1, %hi(rt_trap_level)
    sw t2, %lo(rt_trap_level)(t1)
    /* Adjust the stack pointer as it is set and before t0-t2 are restored.
     * This is safe because interrupts are disabled here. */
    add sp, t0, VOLATILE_CONTEXT_SIZE
    lwm t, 0, 2, sp, -VOLATILE_CONTEXT_SIZE
    mret
.endm


    .section .text.rt_ecall_handler, "ax", %progbits
    .global rt_ecall_handler
    .type rt_ecall_handler, %function
/* Environment-call trap entry, dispatched via rt_trap_handler (which
 * has already saved t0-t2 in the volatile frame). mepc points at the
 * ecall instruction itself, so advance it by 4 to resume past it. */
rt_ecall_handler:
    syscall_handler 4, rt_syscall_run

    /* .size added for consistency with the other functions in this
     * file (rt_start, rt_task_entry, rt_trap_handler). */
    .size rt_ecall_handler, .-rt_ecall_handler


/* Machine software interrupt body: acknowledge the MSI by clearing the
 * pending word at MSIP_BASE, then run any pending system calls. mepc
 * is not adjusted (0) because on an interrupt mepc already holds the
 * address of the instruction to resume. */
.macro msi_handler
    lui t1, %hi(MSIP_BASE)
    sw zero, %lo(MSIP_BASE)(t1)
    syscall_handler 0, rt_syscall_run_pending
.endm


    .section .text.rt_msi_handler, "ax", %progbits
    .global rt_msi_handler
    .type rt_msi_handler, %function
/* Direct machine-software-interrupt vector entry. Unlike
 * rt_msi_trap_handler, this is reached without going through
 * rt_trap_handler, so it must allocate the volatile frame and save
 * t0-t2 itself before running the common msi_handler body. */
rt_msi_handler:
    add sp, sp, -VOLATILE_CONTEXT_SIZE
    swm t, 0, 2, sp, 0
    msi_handler

    /* .size added for consistency with the other functions in this
     * file (rt_start, rt_task_entry, rt_trap_handler). */
    .size rt_msi_handler, .-rt_msi_handler


    .section .text.rt_msi_trap_handler, "ax", %progbits
    .global rt_msi_trap_handler
    .type rt_msi_trap_handler, %function
/* MSI entry used when dispatched through rt_trap_handler, which has
 * already allocated the volatile frame and saved t0-t2, so no save is
 * needed here. */
rt_msi_trap_handler:
    msi_handler

    /* .size added for consistency with the other functions in this
     * file (rt_start, rt_task_entry, rt_trap_handler). */
    .size rt_msi_trap_handler, .-rt_msi_trap_handler


    .section .text.rt_start, "ax", %progbits
    .global rt_start
    .type rt_start, %function
/* Start the scheduler and drop into the first task via mret.
 * rt_start_sched returns the first task in a0; its saved context
 * pointer is read from RT_TASK_CTX_OFFSET(a0). */
rt_start:
    jal rt_start_sched
    mpuconfigure
    // Switch to the first task's stack and go to the top of it.
    lw sp, RT_TASK_CTX_OFFSET(a0)
    add sp, sp, CONTEXT_SIZE
    // Load mepc, mstatus, and the arguments to rt_task_entry.
    // Offsets match the volatile-frame layout written by
    // syscall_handler: +12 mepc, +16 mstatus, +20 a0, +24 a1.
    lw t1, -VOLATILE_CONTEXT_SIZE+12(sp)
    lw t2, -VOLATILE_CONTEXT_SIZE+16(sp)
    lw a0, -VOLATILE_CONTEXT_SIZE+20(sp)
    lw a1, -VOLATILE_CONTEXT_SIZE+24(sp)
    // NOTE(review): mstatus is written after mepc here, unlike the
    // restore path in syscall_handler — presumably safe only because
    // the initial task mstatus has MIE clear, so no interrupt can fire
    // before mret; confirm against the initial context setup.
    csrs mie, MIx_MSI
    csrw mepc, t1
    csrw mstatus, t2
    mret

    .size rt_start, .-rt_start


    .section .text.rt_task_entry, "ax", %progbits
    .global rt_task_entry
    .type rt_task_entry, %function
/* First-execution trampoline for a task. The initial context delivers
 * the task argument in a0 and the task function in a1 (see the a0/a1
 * loads in rt_start), so `jalr a1` calls fn(arg). If the task function
 * ever returns, fall into rt_task_exit. */
rt_task_entry:
    jalr a1
    j rt_task_exit

    .size rt_task_entry, .-rt_task_entry


    .section .text.rt_trap_handler, "ax", %progbits
    .global rt_trap_handler
    .type rt_trap_handler, %function
/* Generic trap entry: allocate the volatile frame, save t0-t2, then
 * tail-dispatch through the word table rt_trap_vector indexed by the
 * trap cause. */
rt_trap_handler:
    add sp, sp, -VOLATILE_CONTEXT_SIZE
    swm t, 0, 2, sp, 0
    // Read mcause and write it to zero in one csrrw.
    csrrw t2, mcause, zero
    // Scale the cause by 4 to index the 32-bit entries of
    // rt_trap_vector. On RV32 the left shift also discards mcause's
    // interrupt bit (bit 31). NOTE(review): an interrupt and an
    // exception with the same cause code therefore share a table slot
    // — presumably intended; confirm against rt_trap_vector's layout.
    sll t0, t2, 2
    lui t1, %hi(rt_trap_vector)
    add t0, t0, t1
    lw t0, %lo(rt_trap_vector)(t0)
    jr t0

    .size rt_trap_handler, .-rt_trap_handler