/// Saved execution context of one thread.
///
/// `#[repr(C)]` because the assembly in `execute`/`execute_naked`
/// addresses the fields by fixed byte offsets: offset 0 is `sctx`,
/// offset `n * 8` holds register `x<n>` for `n` in 1..=31, and offset
/// `32 * 8` holds `sepc`.
#[repr(C)]
pub struct Thread {
    /// Slot used by `execute_naked` to stash the caller's (kernel)
    /// stack pointer while the thread runs (offset 0).
    sctx: usize,
    /// General-purpose registers x1..=x31; `x[n - 1]` holds `x<n>`
    /// (`x0` is hardwired to zero and has no slot).
    x: [usize; 31],
    /// Program counter the thread resumes at (written to the `sepc`
    /// CSR before `sret`).
    sepc: usize,
}
#[allow(unused)]
impl Thread {
    /// Creates a context that starts executing at `sepc`, with the
    /// `sctx` slot and every saved general-purpose register zeroed.
    #[inline]
    pub const fn new(sepc: usize) -> Self {
        Self {
            sctx: 0,
            x: [0; 31],
            sepc,
        }
    }

    /// Returns the saved value of register `x<n>`.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0 or greater than 31 (`x0` is hardwired to
    /// zero and is not stored).
    #[inline]
    pub fn x(&self, n: usize) -> usize {
        self.x[n - 1]
    }

    /// Mutable access to the saved value of register `x<n>`.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0 or greater than 31.
    #[inline]
    pub fn x_mut(&mut self, n: usize) -> &mut usize {
        &mut self.x[n - 1]
    }

    /// Returns argument register `a<n>` (`a0..=a7` alias `x10..=x17`).
    #[inline]
    pub fn a(&self, n: usize) -> usize {
        self.x(n + 10)
    }

    /// Mutable access to argument register `a<n>`.
    #[inline]
    pub fn a_mut(&mut self, n: usize) -> &mut usize {
        self.x_mut(n + 10)
    }

    /// Returns the saved stack pointer (`sp` is `x2`).
    #[inline]
    pub fn sp(&self) -> usize {
        self.x(2)
    }

    /// Mutable access to the saved stack pointer.
    #[inline]
    pub fn sp_mut(&mut self) -> &mut usize {
        self.x_mut(2)
    }

    /// Advances `sepc` past the trapping instruction, e.g. to resume
    /// after a handled `ecall`.
    // NOTE(review): assumes a 4-byte instruction; a compressed (RVC)
    // instruction would need +2 — confirm against the trap cause.
    #[inline]
    pub fn move_next(&mut self) {
        self.sepc = self.sepc.wrapping_add(4);
    }

    /// Switches to this context, runs it until the next trap, and
    /// returns the `sstatus` value observed after the trap.
    ///
    /// Parks a pointer to `self` in `sscratch`, loads `sepc`/`sstatus`,
    /// and `call`s [`execute_naked`], which performs the register swap
    /// and `sret`s into the thread; the trap path stores the thread's
    /// registers back into `self` and `ret`s here, after which the
    /// post-trap `sepc` is written back into `self.sepc`.
    ///
    /// # Safety
    ///
    /// Must be called in supervisor mode with this hart's `sscratch`
    /// and `stvec` CSRs free for exclusive use; `self` must not move
    /// while the thread runs, and the saved `sp`/`sepc` must be valid.
    #[inline]
    pub unsafe fn execute(&mut self) -> usize {
        let mut sstatus: usize;
        core::arch::asm!("csrr {}, sstatus", out(reg) sstatus);
        // sstatus.SPP = 1: `sret` keeps the hart in supervisor mode.
        const SSTATUS_SPP: usize = 1 << 8;
        // sstatus.SPIE = 1: interrupts are re-enabled after `sret`.
        const SSTATUS_SPIE: usize = 1 << 5;
        sstatus |= SSTATUS_SPP | SSTATUS_SPIE;
        core::arch::asm!(
            " csrw sscratch, {sscratch}
csrw sepc , {sepc}
csrw sstatus , {sstatus}
addi sp, sp, -8
sd ra, (sp)
call {execute_naked}
ld ra, (sp)
addi sp, sp, 8
csrr {sepc} , sepc
csrr {sstatus}, sstatus
",
            sscratch = in(reg) self,
            sepc = inlateout(reg) self.sepc,
            sstatus = inlateout(reg) sstatus,
            execute_naked = sym execute_naked,
        );
        sstatus
    }
}
/// Naked trampoline that performs the actual context switch.
///
/// Reached via `call` from `Thread::execute` with `sscratch` holding a
/// pointer to the `Thread` context; re-entered at local label `1:`
/// (installed into `stvec`) when the running thread traps.
#[naked]
unsafe extern "C" fn execute_naked() {
core::arch::asm!(
// Assembler macros: SAVE/LOAD move x<n> to/from offset n*8(sp);
// SAVE_ALL/LOAD_ALL cover x1 and x3..=x31 (29 regs via .rept).
// x2 (sp) is handled separately; x0 is hardwired to zero.
r" .altmacro
.macro SAVE n
sd x\n, \n*8(sp)
.endm
.macro SAVE_ALL
sd x1, 1*8(sp)
.set n, 3
.rept 29
SAVE %n
.set n, n+1
.endr
.endm
.macro LOAD n
ld x\n, \n*8(sp)
.endm
.macro LOAD_ALL
ld x1, 1*8(sp)
.set n, 3
.rept 29
LOAD %n
.set n, n+1
.endr
.endm
",
" .option push
.option nopic
",
// Save the kernel's registers in a fresh 32*8-byte stack frame.
" addi sp, sp, -32*8
SAVE_ALL
",
// Point stvec at the trap entry `1:` below.
" la t0, 1f
csrw stvec, t0
",
// t0 = Thread pointer (from sscratch); stash the kernel sp in its
// first field (offset 0, `sctx`), then switch sp to the struct itself.
" csrr t0, sscratch
sd sp, (t0)
mv sp, t0
",
// Restore the thread's registers from the struct; its own sp lives in
// the x2 slot at offset 2*8.
" LOAD_ALL
ld sp, 2*8(sp)
",
// Enter the thread (sepc/sstatus were set by `Thread::execute`).
" sret",
// Trap entry must be aligned for stvec.
" .align 2",
// Swap sp <-> sscratch: sp now points at the Thread struct, sscratch
// temporarily holds the thread's sp.
"1: csrrw sp, sscratch, sp",
// Save the thread's registers into the struct, swap again so sscratch
// holds the Thread pointer once more, and record the thread's sp (now
// in t0) into the x2 slot.
" SAVE_ALL
csrrw t0, sscratch, sp
sd t0, 2*8(sp)
",
// Reload the kernel sp that was saved at offset 0 (`sctx`).
" ld sp, (sp)",
// Restore the kernel's registers and drop the stack frame.
" LOAD_ALL
addi sp, sp, 32*8
",
// Return to the `call` site inside `Thread::execute`.
" ret",
" .option pop",
options(noreturn)
)
}