; push_volatile: save the volatile (caller-saved) register set to the stack.
; The C28x stack grows upward: both 'push' and 'mov32 *sp++' store at SP and
; post-increment, so pop_volatile must restore in exactly the reverse order.
push_volatile .macro
push ar1h:ar0h ; ar0h/ar1h can only be pushed as a pair (see rt_user1_handler)
push xar4
push xar5
push xar6
push xar7
push xt
.if .TMS320C2800_FPU32 ; FPU registers exist only on FPU-equipped targets
push rb ; repeat-block register
mov32 *sp++, r0h ; high (single-precision) halves of the volatile FPU regs
mov32 *sp++, r1h
mov32 *sp++, r2h
mov32 *sp++, r3h
.if .TMS320C2800_FPU64 ; double-precision targets also carry the low halves
mov32 *sp++, r0l
mov32 *sp++, r1l
mov32 *sp++, r2l
mov32 *sp++, r3l
.endif ; .TMS320C2800_FPU64
.endif ; .TMS320C2800_FPU32
.endm
; pop_volatile: restore the volatile register set saved by push_volatile,
; in exactly the reverse order of the pushes.
pop_volatile .macro
.if .TMS320C2800_FPU32
.if .TMS320C2800_FPU64
mov32 r3l, *--sp ; double-precision low halves first (they were pushed last)
mov32 r2l, *--sp
mov32 r1l, *--sp
mov32 r0l, *--sp
.endif ; .TMS320C2800_FPU64
mov32 r3h, *--sp
mov32 r2h, *--sp
mov32 r1h, *--sp
mov32 r0h, *--sp
pop rb
.endif ; .TMS320C2800_FPU32
pop xt
pop xar7
pop xar6
pop xar5
pop xar4
pop ar1h:ar0h
.endm
; push_non_volatile: save the non-volatile (callee-saved) register set.
; Mirrored exactly, in reverse, by pop_non_volatile.
push_non_volatile .macro
push xar2
push xar3
push rpc ; return-PC register, so nested lcr/lretr survive the switch
.if .TMS320C2800_FPU32
mov32 *sp++, r4h ; non-volatile FPU registers (single-precision halves)
mov32 *sp++, r5h
mov32 *sp++, r6h
mov32 *sp++, r7h
.if .TMS320C2800_FPU64
mov32 *sp++, r4l ; double-precision low halves
mov32 *sp++, r5l
mov32 *sp++, r6l
mov32 *sp++, r7l
.endif ; .TMS320C2800_FPU64
mov32 *sp++, stf ; FPU status register
.endif ; .TMS320C2800_FPU32
.endm
; pop_non_volatile: restore the non-volatile register set saved by
; push_non_volatile, in exactly the reverse order of the pushes.
pop_non_volatile .macro
.if .TMS320C2800_FPU32
mov32 stf, *--sp ; FPU status register first (it was pushed last)
.if .TMS320C2800_FPU64
mov32 r7l, *--sp
mov32 r6l, *--sp
mov32 r5l, *--sp
mov32 r4l, *--sp
.endif ; .TMS320C2800_FPU64
mov32 r7h, *--sp
mov32 r6h, *--sp
mov32 r5h, *--sp
mov32 r4h, *--sp
.endif ; .TMS320C2800_FPU32
pop rpc
pop xar3
pop xar2
.endm
; swapcontext: save the current task's non-volatile context, record its
; stack pointer, switch onto the next task's stack, and restore that task's
; non-volatile context.
; In: xar4 = pointer to the next task's context; its saved stack pointer
;     lives at word offset 8 (must match the scheduler's context layout in
;     C -- TODO confirm against the rt context struct definition).
swapcontext .macro
push_non_volatile
; Store the new stack pointer with the saved context.
.ref rt_context_prev
movl xar6, #rt_context_prev
movl xar6, *xar6 ; xar6 = *rt_context_prev: where to record our sp
mov al, sp
mov *xar6, al ; save this task's sp into its context
; Switch to the new task's stack.
movb xar0, #8 ; word offset of the saved sp within a context
mov al, *+xar4[ar0]
mov sp, al
pop_non_volatile
.endm
.sect ".text:rt_user1_handler"
.global rt_user1_handler
.asmfunc
; rt_user1_handler: software-interrupt (trap #TRAP_USER1) entry used to
; perform a context switch requested by a synchronous system call.
; Entered from the syscall macro with xar4 = the context to switch to.
rt_user1_handler:
; The user1 software interrupt is used to switch contexts, if required,
; after a synchronous system call.
; NOTE: the pending syscall (datalog) interrupt will still be masked here,
; but we can re-enable all other interrupts.
eint
; Allocate space on the stack for the rest of the volatile context, but
; saving it is not necessary. ar1h:ar0h is special because ar1h is
; non-volatile, ar0h is volatile, but they can only be pushed together.
; ar0h is already clobbered by not saving it before calling rt_syscall_run,
; but ar1h must be saved here now that we know a context switch is needed.
push ar1h:ar0h
; Each adjustment below equals the size of the push_volatile frame minus
; the ar1h:ar0h pair pushed above, so that pop_volatile later unwinds a
; consistently-shaped frame.
.if .TMS320C2800_FPU32
.if .TMS320C2800_FPU64
addb sp, #28 ; xar4-7, xt, rb, r0h-r3h, r0l-r3l (integer + sp + dp FPU)
.else ; !.TMS320C2800_FPU64
addb sp, #20 ; xar4-7, xt, rb, r0h-r3h (integer + sp FPU)
.endif ; .TMS320C2800_FPU64
.else ; !.TMS320C2800_FPU32
addb sp, #10 ; xar4-7, xt (integer only)
.endif ; .TMS320C2800_FPU32
swapcontext
pop_volatile ; restores the NEW task's volatile context from its stack
dint ; Errata 4.1.2, caution while using nested interrupts.
iret
.endasmfunc
IFR_DLOGINT .set (1 << 14) ; DLOGINT bit position in the IER/IFR registers
TRAP_USER1 .set 20 ; software-interrupt number used to request a switch
.ref rt_syscall_run
; syscall: shared tail of the rt_syscall_* entry points.
; In: ar4 = syscall number, acc = arg0; arg1/arg2 occupy the two 32-bit
; stack slots allocated/filled by each entry point before invoking this.
syscall .macro
and ier, #~IFR_DLOGINT ; mask the datalog (deferred-syscall) interrupt
lcr rt_syscall_run
subb sp, #4 ; release the arg1/arg2 stack slots
movl acc, xar4 ; xar4 = next context, or 0 for none (movl sets Z) -- TODO confirm
sbf noswitch?, eq ; no context switch needed; skip the trap
trap #TRAP_USER1 ; switch contexts via rt_user1_handler
noswitch?:
or ier, #IFR_DLOGINT ; unmask the datalog interrupt again
lretr
.endm
.sect ".text:rt_syscall"
.global rt_syscall_0
.global rt_syscall_1
.asmfunc
; rt_syscall_0 / rt_syscall_1: syscall entry points taking zero or one
; argument. rt_syscall_0 receives the syscall number in al; rt_syscall_1
; receives it in ar4 with arg0 in acc.
rt_syscall_0:
; rt_syscall_0 has the syscall number in al instead of ar4, so move it here.
mov ar4, al
; Fall through to rt_syscall_1.
rt_syscall_1:
; Syscall number is in ar4, arg0 is in acc.
; Allocate space for arg1 and arg2, but they're not used.
addb sp, #4
syscall
.endasmfunc
.global rt_syscall_2
.asmfunc
; rt_syscall_2: syscall entry point with arg1 passed on the caller's stack.
rt_syscall_2:
; rt_syscall_2 has arg1 on the stack, so we need to move it to where
; rt_syscall_run expects it, and make space for arg2.
movl xar6, *-sp[4] ; fetch arg1 from the caller's outgoing-argument slot
addb sp, #4 ; allocate the arg1/arg2 slots for rt_syscall_run
movl *-sp[2], xar6 ; place arg1 in its expected slot in the new frame
syscall
.endasmfunc
.global rt_syscall_3
.asmfunc
; rt_syscall_3: syscall entry point with arg1 and arg2 on the caller's stack.
rt_syscall_3:
; rt_syscall_3 has arg1 and arg2 on the stack, so we need to move them.
movl xar7, *-sp[6] ; arg1
movl xar6, *-sp[4] ; arg2
push xar7 ; re-push both in the layout rt_syscall_run expects
push xar6
syscall
.endasmfunc
.sect ".text:rt_datalog_handler"
.global rt_datalog_handler
.asmfunc
.ref rt_syscall_run_pending
; rt_datalog_handler: interrupt handler that runs pending (deferred)
; syscalls and performs a context switch if one becomes necessary.
rt_datalog_handler:
eint ; Re-enable interrupts. (This interrupt is masked still.)
push_volatile
; Establish the execution state the C compiler assumes before calling C
; code from an interrupt taken in unknown state: even-aligned sp, SPM=0,
; PAGE0/OVM clear, C28x addressing mode. (rt_user1_handler skips this,
; presumably because it is only reached via trap from compiled code.)
asp
spm 0
clrc page0, ovm
clrc amode
lcr rt_syscall_run_pending
nasp ; undo the asp alignment so the pops below mirror the pushes
movl acc, xar4 ; next context returned in xar4 (movl sets Z) -- TODO confirm
sbf noswitch?, eq ; If there's no new context to switch to, return early.
swapcontext
noswitch?:
pop_volatile
dint ; Errata 4.1.2, caution while using nested interrupts.
iret
.endasmfunc
.sect ".text:rt_start"
.global rt_start
.asmfunc
.ref rt_start_sched
; rt_start: start the scheduler and drop into the first task by loading
; its saved stack pointer and unwinding a full context frame
; (non-volatile then volatile), finishing with iret as if returning from
; an interrupt.
rt_start:
lcr rt_start_sched ; returns the first task's context in xar4 -- TODO confirm
movb xar0, #8 ; word offset of the saved sp within a context (as in swapcontext)
mov al, *+xar4[ar0]
mov sp, al ; switch onto the first task's stack
pop_non_volatile
pop_volatile
iret
.endasmfunc
.sect ".text:rt_task_entry"
.global rt_task_entry
.asmfunc
; rt_task_entry: common first frame of every task. Calls the task function
; and, when it returns, branches to rt_task_exit.
rt_task_entry:
lcr *xar4 ; xar4 presumably seeded with the task function pointer at context init -- TODO confirm
.ref rt_task_exit
lb rt_task_exit ; tail branch (no return address pushed); exit never comes back here
.endasmfunc