use core::sync::atomic::{AtomicU32, Ordering};
use sbi::SbiRet;
use sbi_spec::hsm::{HART_STATE_STARTED, HART_STATE_STOPPED, HART_STATE_SUSPENDED};
/// Progress events reported by the HSM extension test through its callback.
#[derive(Clone, Debug)]
pub enum Case<'a> {
    /// The SEE does not implement the HSM extension.
    NotExist,
    /// The HSM test has started.
    Begin,
    /// A non-primary hart was already running before the test; it cannot take part.
    HartStartedBeforeTest(usize),
    /// No stopped hart was found in the mask, so nothing could be tested.
    NoStoppedHart,
    /// A batch of harts (listed in the slice) is about to be exercised.
    BatchBegin(&'a [usize]),
    /// The given hart reached the started state.
    HartStarted(usize),
    /// `hart_start` failed for the given hart with the returned error.
    HartStartFailed { hartid: usize, ret: SbiRet },
    /// The given hart entered non-retentive suspend.
    HartSuspendedNonretentive(usize),
    /// The given hart resumed from non-retentive suspend.
    HartResumed(usize),
    /// The given hart entered retentive suspend.
    HartSuspendedRetentive(usize),
    /// The given hart stopped.
    HartStopped(usize),
    /// The batch listed in the slice completed successfully.
    BatchPass(&'a [usize]),
    /// The whole HSM test passed.
    Pass,
}
/// Runs the HSM extension test on every secondary hart selected by
/// `hart_mask`/`hart_mask_base`, reporting progress through `f`.
///
/// Bit `i` of `hart_mask` selects hart `hart_mask_base + i`. Stopped harts
/// are collected into batches of `TEST_BATCH_SIZE` and exercised by
/// `test_batch`. The primary hart is skipped; selected harts that are not
/// stopped are reported as [`Case::HartStartedBeforeTest`]. On a batch
/// failure the function returns early (the failure was already reported).
pub fn test(
    primary_hart_id: usize,
    mut hart_mask: usize,
    hart_mask_base: usize,
    mut f: impl FnMut(Case),
) {
    if sbi::probe_extension(sbi::Hsm).is_unavailable() {
        f(Case::NotExist);
        return;
    }
    f(Case::Begin);
    let mut batch = [0usize; TEST_BATCH_SIZE];
    let mut batch_count = 0;
    let mut batch_size = 0;
    let mut hartid = hart_mask_base;
    while hart_mask != 0 {
        // Skip cleared bits FIRST so that `hartid` always names a hart that is
        // actually selected by the mask. (The previous code advanced only after
        // processing, so with a sparse mask it tested harts whose bit was 0 and
        // skipped harts whose bit was 1.)
        let skip = hart_mask.trailing_zeros() as usize;
        hart_mask >>= skip;
        hartid += skip;
        if hartid != primary_hart_id {
            if sbi::hart_get_status(hartid) == STOPPED {
                batch[batch_size] = hartid;
                batch_size += 1;
                if batch_size == TEST_BATCH_SIZE {
                    if test_batch(&batch, &mut f) {
                        batch_count += 1;
                        batch_size = 0;
                    } else {
                        return;
                    }
                }
            } else {
                f(Case::HartStartedBeforeTest(hartid));
            }
        }
        // Consume the bit just handled. `hart_mask` is non-zero here, so both
        // shifts above are < bit-width (no shift-overflow panic for a mask
        // whose only set bit is the top bit).
        hart_mask >>= 1;
        hartid += 1;
    }
    // Flush the final partial batch, then report the overall verdict.
    if batch_size > 0 {
        if test_batch(&batch[..batch_size], &mut f) {
            f(Case::Pass);
        }
    } else if batch_count > 0 {
        f(Case::Pass);
    } else {
        f(Case::NoStoppedHart)
    }
}
/// Expected `hart_get_status` return value for a running hart.
const STARTED: SbiRet = SbiRet::success(HART_STATE_STARTED);
/// Expected `hart_get_status` return value for a stopped hart.
const STOPPED: SbiRet = SbiRet::success(HART_STATE_STOPPED);
/// Expected `hart_get_status` return value for a suspended hart.
const SUSPENDED: SbiRet = SbiRet::success(HART_STATE_SUSPENDED);
/// Harts exercised per batch — one `ItemPerHart` slot per batch entry.
const TEST_BATCH_SIZE: usize = 4;
/// Per-hart stack + handshake storage, one 512-byte slot per batch entry.
/// NOTE(review): this `static mut` is touched by the primary hart and, via raw
/// pointers, by the harts under test; soundness relies entirely on the
/// handshake protocol — verify, or migrate to `SyncUnsafeCell`/atomics.
static mut STACK: [ItemPerHart; TEST_BATCH_SIZE] = [ItemPerHart::ZERO; TEST_BATCH_SIZE];
/// One 512-byte, 512-byte-aligned slot per hart under test: two handshake
/// words followed by bytes used as that hart's stack (`set_stack` points sp
/// at the end of the slot, i.e. base + 512, and the stack grows downwards).
/// Layout is ABI: the `addi sp, a1, 512` in `set_stack` depends on it.
#[repr(C, align(512))]
struct ItemPerHart {
    // Progress of this hart: STAGE_IDLE / STAGE_STARTED / STAGE_RESUMED.
    stage: AtomicU32,
    // One-shot go-ahead flag raised by the primary hart, consumed by wait_signal.
    signal: AtomicU32,
    // Stack storage; 4 + 4 + 504 = 512 bytes so align(512) slots tile exactly.
    stack: [u8; 504],
}
/// Slot unused or freshly reset by `ItemPerHart::reset`.
const STAGE_IDLE: u32 = 0;
/// Hart entered `rust_main` for the first time after `hart_start`.
const STAGE_STARTED: u32 = 1;
/// Hart re-entered `rust_main` after resuming from non-retentive suspend.
const STAGE_RESUMED: u32 = 2;
impl ItemPerHart {
    /// All-zero initializer: idle stage, no signal, zeroed stack.
    #[allow(clippy::declare_interior_mutable_const)]
    const ZERO: Self = Self {
        stage: AtomicU32::new(STAGE_IDLE),
        signal: AtomicU32::new(0),
        stack: [0; 504],
    };

    /// Puts the slot back to the idle stage and returns a raw pointer to it,
    /// suitable as the opaque argument for `hart_start`.
    #[inline]
    fn reset(&mut self) -> *const ItemPerHart {
        self.stage.store(STAGE_IDLE, Ordering::Relaxed);
        self as _
    }

    /// Spins until the hart owning this slot reports STAGE_STARTED.
    #[inline]
    fn wait_start(&self) {
        loop {
            if self.stage.load(Ordering::Relaxed) == STAGE_STARTED {
                break;
            }
            core::hint::spin_loop();
        }
    }

    /// Spins until the hart owning this slot reports STAGE_RESUMED.
    #[inline]
    fn wait_resume(&self) {
        loop {
            if self.stage.load(Ordering::Relaxed) == STAGE_RESUMED {
                break;
            }
            core::hint::spin_loop();
        }
    }

    /// Raises the one-shot go-ahead flag for the hart owning this slot.
    #[inline]
    fn send_signal(&self) {
        self.signal.store(1, Ordering::Release);
    }

    /// Spins until the flag is raised, consuming it (swap 1 -> 0) on success.
    #[inline]
    fn wait_signal(&self) {
        loop {
            match self
                .signal
                .compare_exchange(1, 0, Ordering::Relaxed, Ordering::Relaxed)
            {
                Ok(_) => break,
                Err(_) => core::hint::spin_loop(),
            }
        }
    }
}
/// Drives one batch of harts through the full HSM life cycle:
/// start -> non-retentive suspend -> resume -> retentive suspend -> stop,
/// reporting each step through `f`. Returns `true` when the whole batch
/// passed, `false` as soon as any `hart_start` fails.
/// The order of polls/signals below is the handshake protocol with
/// `rust_main` running on each started hart — do not reorder.
fn test_batch(batch: &[usize], mut f: impl FnMut(Case)) -> bool {
    f(Case::BatchBegin(batch));
    // Start every hart, handing each its own STACK slot (stack + flags).
    for (i, hartid) in batch.iter().copied().enumerate() {
        let ptr = unsafe { STACK[i].reset() };
        let ret = sbi::hart_start(hartid, test_entry as _, ptr as _);
        if ret.is_err() {
            f(Case::HartStartFailed { hartid, ret });
            return false;
        }
    }
    // Phase 1: each hart runs, then parks itself in non-retentive suspend.
    for (i, hartid) in batch.iter().copied().enumerate() {
        let item = unsafe { &mut STACK[i] };
        while sbi::hart_get_status(hartid) != STARTED {
            core::hint::spin_loop();
        }
        f(Case::HartStarted(hartid));
        // Hart sets STAGE_STARTED, then blocks on our signal before suspending.
        item.wait_start();
        item.send_signal();
        while sbi::hart_get_status(hartid) != SUSPENDED {
            core::hint::spin_loop();
        }
        f(Case::HartSuspendedNonretentive(hartid));
    }
    // Wake the whole batch at once with a single IPI whose mask is relative
    // to batch[0]. Assumes batch is sorted ascending (it is built that way
    // in `test`) so `hartid - batch[0]` cannot underflow — TODO confirm.
    let mut mask = 1usize;
    for hartid in &batch[1..] {
        mask |= 1 << (hartid - batch[0]);
    }
    sbi::send_ipi(mask, batch[0]);
    // Phase 2: each hart resumes, parks in retentive suspend, is woken by a
    // per-hart IPI, and finally stops itself.
    for (i, hartid) in batch.iter().copied().enumerate() {
        let item = unsafe { &mut STACK[i] };
        while sbi::hart_get_status(hartid) != STARTED {
            core::hint::spin_loop();
        }
        f(Case::HartResumed(hartid));
        // Hart sets STAGE_RESUMED, then blocks on our signal before suspending.
        item.wait_resume();
        item.send_signal();
        while sbi::hart_get_status(hartid) != SUSPENDED {
            core::hint::spin_loop();
        }
        f(Case::HartSuspendedRetentive(hartid));
        // Retentive suspend resumes in place; the hart then calls hart_stop.
        sbi::send_ipi(1, hartid);
        while sbi::hart_get_status(hartid) != STOPPED {
            core::hint::spin_loop();
        }
        f(Case::HartStopped(hartid));
    }
    f(Case::BatchPass(batch));
    true
}
/// Entry point for a freshly started (or non-retentively resumed) hart:
/// a0 = hartid, a1 = pointer to this hart's `ItemPerHart` slot.
/// Clears `sie`, switches sp onto the slot's private stack via `set_stack`,
/// then tail-jumps to `rust_main` (a0/a1 still hold hartid/opaque).
#[naked]
unsafe extern "C" fn test_entry(hartid: usize, opaque: *mut ItemPerHart) -> ! {
    core::arch::asm!(
        // Disable all S-level interrupt enables before touching the stack.
        "csrw sie, zero", "call {set_stack}", "j {rust_main}", set_stack = sym set_stack,
        rust_main = sym rust_main,
        options(noreturn),
    )
}
/// Points sp at the end of the 512-byte `ItemPerHart` passed in a1
/// (the `stack` field grows downwards from base + 512), then returns.
/// Must stay in sync with `ItemPerHart`'s size/alignment of 512.
#[naked]
unsafe extern "C" fn set_stack(hart_id: usize, ptr: *const ItemPerHart) {
    core::arch::asm!("addi sp, a1, 512", "ret", options(noreturn));
}
/// Per-hart test payload, entered from `test_entry` on the hart's private stack.
///
/// First entry (stage IDLE -> STARTED): wait for the primary hart's signal,
/// then suspend non-retentively with `test_entry` as the resume point.
/// Second entry (stage already STARTED): publish STAGE_RESUMED, wait again,
/// suspend retentively (execution continues in place after the wake-up IPI),
/// then stop this hart. Every suspend/stop is expected not to return on
/// success, so any return value reaching `unreachable!` is a test failure.
#[inline(never)]
extern "C" fn rust_main(hart_id: usize, opaque: *mut ItemPerHart) -> ! {
    let item = unsafe { &mut *opaque };
    match item.stage.compare_exchange(
        STAGE_IDLE,
        STAGE_STARTED,
        Ordering::AcqRel,
        Ordering::Acquire,
    ) {
        Ok(_) => {
            item.wait_signal();
            let ret = sbi::hart_suspend(sbi::NonRetentive, test_entry as _, opaque as _);
            unreachable!("suspend [{hart_id}] but {ret:?}")
        }
        Err(STAGE_STARTED) => {
            item.stage.store(STAGE_RESUMED, Ordering::Release);
            item.wait_signal();
            // Retentive suspend returns here after the IPI; its result is
            // intentionally ignored — the stop below is what we verify.
            let _ = sbi::hart_suspend(sbi::Retentive, test_entry as _, opaque as _);
            let ret = sbi::hart_stop();
            // This `ret` comes from `hart_stop`, so report it as a stop failure
            // (the old message misleadingly said "suspend").
            unreachable!("stop [{hart_id}] but {ret:?}")
        }
        // `reset` only ever leaves IDLE; the two arms above cover all
        // reachable stages at entry.
        Err(_) => unreachable!(),
    }
}