@ vim:ft=arm
/* The interrupt management code for the syscall handler depends on both the
 * type of interrupt controller and the source of the syscall interrupt, so we
 * provide a way to extend this code with assembly macros.
 * The vic_svc_start/vic_syscall_irq_start macros need to mask and clear the
 * syscall interrupt such that another syscall cannot occur when IRQs are
 * re-enabled. The matching vic_svc_finish/vic_syscall_irq_finish macros must
 * unmask the syscall interrupt. All are run with IRQs disabled. */
#include <rt/mpu.h>
#include <rt/task.h>
#include <rt/arch/fp.h>
#include <rt/arch/mode.h>
#include <rt/arch/profile.h>
#include <rt/arch/coprocessor.h>
#include <rt/arch/pseudo.S>
.syntax unified
#ifdef __thumb__
.thumb
#else // arm
.arm
#endif // __thumb__
#if RT_ARM_FP
// Size of the saved fpexc word stored with each task's integer context.
#define FPEXC_SIZE 4
#if RT_ARM_FP_D32
// fpscr + 32 dp registers
#define FP_REGS_SIZE (4 + (32 * 8))
/* vpush/vpop can encode at most 16 consecutive d-registers, so d16-d31 need
 * a separate instruction from d0-d15. */
#define pushfp vpush {d16-d31}; vpush {d0-d15}
#define popfp vpop {d0-d15}; vpop {d16-d31}
#else // !RT_ARM_FP_D32
// fpscr + 16 dp registers
#define FP_REGS_SIZE (4 + (16 * 8))
#define pushfp vpush {d0-d15}
#define popfp vpop {d0-d15}
#endif // RT_ARM_FP_D32
#else // !RT_ARM_FP
// No FP support, so no fpexc word is stored in the context.
#define FPEXC_SIZE 0
#endif // RT_ARM_FP
#define NV_INT_REGS_SIZE (8 * 4) // r4-r11
#define V_INT_REGS_SIZE (8 * 4) // r0-r3, r12, lr, cpsr, pc
/* Distance below sp of the lowest address a context save will write, with and
 * without an active FP context. Used by the user-mode probe stores (strt) in
 * swapcontext to verify the task may write its own stack before saving. */
#define FP_STACK_PROBE_OFFSET (NV_INT_REGS_SIZE + FPEXC_SIZE + FP_REGS_SIZE)
#define NOFP_STACK_PROBE_OFFSET (NV_INT_REGS_SIZE + FPEXC_SIZE)
/* Program the MPU task regions from the task pointed to by r0 (its region
 * config table is at RT_MPU_TASK_CONFIG_OFFSET). Each region is disabled (by
 * writing a zero size/limit) before its new base is written, so a region is
 * never enabled with a half-updated configuration. Clobbers r1-r7; callers
 * only use it where those registers are dead (after the outgoing context has
 * been saved, or at scheduler start). */
.macro mpuconfigure
#if RT_MPU_TASK_REGIONS_ENABLE
// r1 = pointer into the task's MPU region config table
add r1, r0, RT_MPU_TASK_CONFIG_OFFSET
// r2 = current region ID, r6 = one past the last task region ID, r7 = 0
movs r2, RT_MPU_TASK_REGION_START_ID
adds r6, r2, RT_MPU_NUM_TASK_REGIONS
movs r7, 0
.Lmpusetloop\@:
#if __ARM_ARCH == 7
// ARMv7-R PMSA: 3 words per region, programmed via DRBAR/DRACR/DRSR.
ldmia r1!, {r3, r4, r5}
rgnr_set r2 // region number
drsr_set r7 // disable the region first
drbar_set r3 // base address
dracr_set r4 // access control
drsr_set r5 // size and enable
#elif __ARM_ARCH == 8
// ARMv8-R PMSA: 2 words per region, programmed via PRBAR/PRLAR.
ldmia r1!, {r3, r4}
prselr_set r2 // region number
prlar_set r7 // disable the region first
prbar_set r3 // base address
prlar_set r4 // limit address
#endif // __ARM_ARCH
add r2, 1
cmp r2, r6
bne .Lmpusetloop\@
// Ensure the new MPU configuration takes effect before any further accesses.
dsb
#endif // RT_MPU_TASK_REGIONS_ENABLE
.endm
/* Switch from the current task's context to the next one. On entry, r0
 * points to the next task (whose saved stack pointer is at
 * RT_TASK_CTX_OFFSET) and rt_context_prev points to the location where the
 * outgoing task's stack pointer must be stored. Both call sites run this in
 * SYS mode on the task stack, with the volatile registers already dealt with.
 * Saves r4-r11 (plus fpexc/fpscr/d-registers when FP is active), reprograms
 * the MPU, then restores the same set from the incoming task's stack. */
.macro swapcontext
#if RT_ARM_FP
// Check if the task has a floating-point context.
vmrs r1, fpexc
tst r1, RT_FPEXC_EN
#if RT_MPU_TASK_REGIONS_ENABLE
/* Before saving more context to the stack, attempt to write to it with
 * user-mode permissions to ensure that the active task is allowed to. */
// Probe at the lowest address the save will reach; the offset depends on
// whether an FP context will also be pushed.
ite ne
subne r12, sp, FP_STACK_PROBE_OFFSET
subeq r12, sp, NOFP_STACK_PROBE_OFFSET
strt r1, [r12]
#endif // RT_MPU_TASK_REGIONS_ENABLE
beq .Lskip_fp_save\@
pushfp
vmrs r2, fpscr
push {r2}
.Lskip_fp_save\@:
push {r1, r4-r11} // fpexc, callee-saved registers
#else // !RT_ARM_FP
#if RT_MPU_TASK_REGIONS_ENABLE
/* Probe the lowest address the context save will write with a user-mode
 * store, so a task without write permission faults here rather than having
 * its context stored somewhere it shouldn't reach. */
sub r12, sp, NOFP_STACK_PROBE_OFFSET
strt r4, [r12]
#endif // RT_MPU_TASK_REGIONS_ENABLE
push {r4-r11}
#endif // RT_ARM_FP
// Store the stack pointer with the saved context.
mov32 r2, rt_context_prev
ldr r2, [r2]
str sp, [r2]
// Reprogram the MPU task regions for the incoming task (r0).
mpuconfigure
// Switch to the new task stack.
ldr sp, [r0, RT_TASK_CTX_OFFSET]
#if RT_ARM_FP
// Restore fpexc and the callee-saved registers; only restore the FP
// registers if the incoming task had an active FP context when saved.
pop {r1, r4-r11}
vmsr fpexc, r1
tst r1, RT_FPEXC_EN
beq .Lskip_fp_restore\@
pop {r2}
vmsr fpscr, r2
popfp
.Lskip_fp_restore\@:
#else // !RT_ARM_FP
pop {r4-r11}
#endif // RT_ARM_FP
.endm
#include <vic.S>
.section .text.rt_svc_handler, "ax", %progbits
.global rt_svc_handler
.type rt_svc_handler, %function
/* svc exception handler: runs the requested syscall with IRQs enabled in SVC
 * mode, and performs a context switch if rt_syscall_run returns non-zero.
 * The task's exception return state is kept on its own SYS-mode stack so it
 * can be saved/restored as part of the task context. */
rt_svc_handler:
#if RT_MPU_TASK_REGIONS_ENABLE
/* Probe the task (SYS-mode) stack with a user-mode store before srsdb
 * writes its 8-byte return state there, so a task that lacks permission
 * faults instead of corrupting memory. */
cps MODE_SYS
sub r12, sp, 8
strt r0, [r12]
cps MODE_SVC
#endif // RT_MPU_TASK_REGIONS_ENABLE
// Push the return address and spsr onto the task (SYS-mode) stack.
srsdb sp!, MODE_SYS
/* The task passed syscall arguments in r0-r3 and expects them to be
 * clobbered, along with r12 and lr, so none of them need to be saved, and
 * r12 and lr can be used in vic_svc_start. */
vic_svc_start
cpsie i
bl rt_syscall_run
// Non-zero r0 is the task to switch to (consumed by swapcontext below).
cbnz r0, 0f
/* If no context switch is required, we can resume the current task without
 * restoring volatile registers. */
cpsid i, MODE_SYS
vic_svc_finish
rfeia sp!
0: cps MODE_SYS
// Reserve space for volatile registers, but no need to save them.
sub sp, 24
/* Holding an exclusive monitor across a system call is technically
 * possible, but rt_syscall's svc inline assembly has a memory clobber on
 * it, so this shouldn't happen in real code. Therefore, don't bother
 * clearing the reservation. */
swapcontext
cpsid i
vic_svc_finish
// Load the incoming task's volatile registers, then its return state.
pop {r0-r3, r12, lr}
rfeia sp!
.size rt_svc_handler, .-rt_svc_handler
/* Common return path for the syscall IRQ handler: disable IRQs, let the
 * interrupt controller unmask the syscall interrupt, then restore the
 * volatile registers and return state from the SYS-mode stack. */
.macro syscall_irq_return
cpsid i
vic_syscall_irq_finish
pop {r0-r3, r12, lr}
rfeia sp!
.endm
.section .text.rt_syscall_irq_handler, "ax", %progbits
.global rt_syscall_irq_handler
.type rt_syscall_irq_handler, %function
.balign 4
/* Handler for syscalls delivered via a dedicated interrupt rather than an
 * svc instruction. Unlike rt_svc_handler, the interrupt is asynchronous to
 * the task, so r0-r3, r12, and lr must be saved and restored. */
rt_syscall_irq_handler:
#if RT_MPU_TASK_REGIONS_ENABLE
/* Probe the task (SYS-mode) stack with a user-mode store before saving
 * context there. V_INT_REGS_SIZE covers everything written below: the
 * srsdb return state plus the pushed volatile registers. */
cps MODE_SYS
sub sp, V_INT_REGS_SIZE
#ifdef __thumb__
// Thumb strt has no post-indexed form, so adjust sp in a separate step.
strt r0, [sp]
add sp, V_INT_REGS_SIZE
#else // arm
strt r0, [sp], V_INT_REGS_SIZE
#endif // __thumb__
cps MODE_IRQ
#endif // RT_MPU_TASK_REGIONS_ENABLE
// For IRQs, lr is 4 past the preferred return address; adjust before saving.
sub lr, 4
srsdb sp!, MODE_SYS
cps MODE_SYS
// Save the interrupted task's volatile registers on its own stack.
push {r0-r3, r12, lr}
vic_syscall_irq_start
// Run the pending syscalls in SVC mode with IRQs re-enabled.
cpsie i, MODE_SVC
bl rt_syscall_run_pending
cps MODE_SYS
// Non-zero r0 is the task to switch to (consumed by swapcontext below).
cbnz r0, 0f
syscall_irq_return
/* rt_syscall_run_pending always uses an atomic instruction, so the
 * exclusive reservation will already be cleared at this point. */
0: swapcontext
syscall_irq_return
.size rt_syscall_irq_handler, .-rt_syscall_irq_handler
.section .text.rt_start, "ax", %progbits
.global rt_start
.type rt_start, %function
/* Start the scheduler and launch the first task. rt_start_sched returns the
 * first task in r0; its initial context (laid out as swapcontext saves it)
 * is unwound from the task stack and entered via exception return. */
rt_start:
bl rt_start_sched
cps MODE_SYS
// Configure the MPU task regions for the first task (r0).
mpuconfigure
ldr sp, [r0, RT_TASK_CTX_OFFSET]
#if RT_ARM_FP
// Load the initial FPEXC and skip past non-volatile registers.
ldr r1, [sp], 36
vmsr fpexc, r1
#else // !RT_ARM_FP
// Skip past non-volatile registers.
add sp, 32
#endif // RT_ARM_FP
// Load the arguments to rt_task_entry and skip past {r2, r3, r12, lr}.
ldrd r0, r1, [sp], 24
rfeia sp!
.size rt_start, .-rt_start
.section .text.rt_task_entry, "ax", %progbits
.global rt_task_entry
.type rt_task_entry, %function
/* Common task entry trampoline: r0 and r1 are loaded from the initial task
 * context (see rt_start); r1 is the task function, called with r0 as its
 * argument, and the task exits via rt_task_exit when it returns. */
rt_task_entry:
blx r1
/* The call to rt_task_exit is a tail call to a non-returning function, so
 * b or bl could be used, but bl can be fixed up by the linker to blx in
 * case a mode switch is needed between rt_task_entry and rt_task_exit. */
bl rt_task_exit
.size rt_task_entry, .-rt_task_entry