#[cfg(feature = "priority_boost")]
use core::sync::atomic::Ordering;
use core::{fmt, marker::PhantomData};
use num_traits::ToPrimitive;
use r3_core::{
closure::ClosureEnv,
kernel::{
raw::KernelBase, ActivateTaskError, ExitTaskError, GetCurrentTaskError,
GetTaskPriorityError, Hunk, InterruptTaskError, ParkError, ParkTimeoutError,
SetTaskPriorityError, SleepError, UnparkExactError, WaitTimeoutError,
},
time::Duration,
utils::Init,
};
use crate::{
error::NoAccessError, klock, mutex, state, timeout, wait, Id, KernelCfg1, KernelTraits,
PortThreading, System,
};
#[doc(hidden)]
pub mod readyqueue;
use self::readyqueue::Queue as _;
/// The ID type used to refer to tasks. Task IDs are one-based (see
/// `System::task_cb`, which subtracts one to index the control block pool).
pub(super) type TaskId = Id;
/// Task-related system service implementations.
impl<Traits: KernelTraits> System<Traits> {
    /// Look up the task control block for the task identified by `this`.
    ///
    /// Task IDs are one-based: ID `1` maps to index `0` of the control
    /// block pool. An out-of-range ID yields the error produced by
    /// [`crate::bad_id`].
    ///
    /// # Safety
    ///
    /// NOTE(review): the unsafety stems from the call to `crate::bad_id`;
    /// confirm its contract before adding new callers.
    #[inline]
    unsafe fn task_cb(this: TaskId) -> Result<&'static TaskCb<Traits>, NoAccessError> {
        Traits::get_task_cb(this.get() - 1).ok_or_else(|| unsafe { crate::bad_id::<Traits>() })
    }

    /// Get the ID of the currently running task.
    ///
    /// Returns `BadContext` when not called from a task context.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub(super) fn task_current() -> Result<TaskId, GetCurrentTaskError> {
        if !Traits::is_task_context() {
            return Err(GetCurrentTaskError::BadContext);
        }
        let mut lock = klock::lock_cpu::<Traits>()?;
        let task_cb = Traits::state().running_task(lock.borrow_mut()).unwrap();
        // Recover the task's pool index from its control block address,
        // then convert it to a one-based ID.
        let offset =
            unsafe { (task_cb as *const TaskCb<_>).offset_from(Traits::task_cb_pool().as_ptr()) };
        let task = Id::new(offset as usize + 1).unwrap();
        Ok(task)
    }

    /// Activate the task `this` (transition it out of the `Dormant` state).
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub(super) fn task_activate(this: TaskId) -> Result<(), ActivateTaskError> {
        let lock = klock::lock_cpu::<Traits>()?;
        // SAFETY: see `Self::task_cb`
        let task_cb = unsafe { Self::task_cb(this)? };
        activate(lock, task_cb)
    }

    /// Interrupt any ongoing wait operation of the task `this`, making
    /// the wait fail with `WaitTimeoutError::Interrupted`.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub(super) fn task_interrupt(this: TaskId) -> Result<(), InterruptTaskError> {
        let mut lock = klock::lock_cpu::<Traits>()?;
        // SAFETY: see `Self::task_cb`
        let task_cb = unsafe { Self::task_cb(this)? };
        wait::interrupt_task(
            lock.borrow_mut(),
            task_cb,
            Err(WaitTimeoutError::Interrupted),
        )?;
        // The woken task might outrank the current one.
        unlock_cpu_and_check_preemption(lock);
        Ok(())
    }

    /// Unpark the task `this`. Fails with `QueueOverflow` if the task
    /// already has a pending park token.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub(super) fn task_unpark_exact(this: TaskId) -> Result<(), UnparkExactError> {
        let lock = klock::lock_cpu::<Traits>()?;
        // SAFETY: see `Self::task_cb`
        let task_cb = unsafe { Self::task_cb(this)? };
        unpark_exact(lock, task_cb)
    }

    /// Change the base priority of the task `this`.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub(super) fn task_set_priority(
        this: TaskId,
        priority: usize,
    ) -> Result<(), SetTaskPriorityError> {
        let lock = klock::lock_cpu::<Traits>()?;
        // SAFETY: see `Self::task_cb`
        let task_cb = unsafe { Self::task_cb(this)? };
        set_task_base_priority(lock, task_cb, priority)
    }

    /// Get the base priority of the task `this`.
    ///
    /// Fails with `BadObjectState` while the task is `Dormant`.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub(super) fn task_priority(this: TaskId) -> Result<usize, GetTaskPriorityError> {
        let lock = klock::lock_cpu::<Traits>()?;
        // SAFETY: see `Self::task_cb`
        let task_cb = unsafe { Self::task_cb(this)? };
        if *task_cb.st.read(&*lock) == TaskSt::Dormant {
            Err(GetTaskPriorityError::BadObjectState)
        } else {
            Ok(task_cb.base_priority.read(&*lock).to_usize().unwrap())
        }
    }

    /// Get the effective priority of the task `this` (the base priority
    /// as adjusted by `mutex::evaluate_task_effective_priority`).
    ///
    /// Fails with `BadObjectState` while the task is `Dormant`.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub(super) fn task_effective_priority(this: TaskId) -> Result<usize, GetTaskPriorityError> {
        let lock = klock::lock_cpu::<Traits>()?;
        // SAFETY: see `Self::task_cb`
        let task_cb = unsafe { Self::task_cb(this)? };
        if *task_cb.st.read(&*lock) == TaskSt::Dormant {
            Err(GetTaskPriorityError::BadObjectState)
        } else {
            Ok(task_cb.effective_priority.read(&*lock).to_usize().unwrap())
        }
    }
}
/// A memory region used as a task's stack.
pub struct StackHunk<Traits> {
    _phantom: PhantomData<Traits>,
    // Offset of the backing hunk (reconstructed by `StackHunk::hunk`).
    hunk_offset: usize,
    // Stack size in bytes. The most significant bit (`STACK_HUNK_AUTO`)
    // is used as a flag marking automatically allocated stacks; see
    // `StackHunk::auto` and `StackHunk::auto_size`.
    len: usize,
}
/// Flag bit (the most significant bit of `usize`) stored in
/// `StackHunk::len` to mark an automatically allocated stack.
const STACK_HUNK_AUTO: usize = (isize::MIN) as usize;
// SAFETY: `StackHunk` only stores two `usize`s and a `PhantomData`; it
// contains no actual `Traits` value, so sharing it between threads cannot
// cause a data race. (The manual impl avoids requiring `Traits: Sync`
// through the `PhantomData<Traits>` field.)
unsafe impl<Traits: KernelTraits> Sync for StackHunk<Traits> {}
impl<Traits: KernelTraits> fmt::Debug for StackHunk<Traits> {
    /// Format as `StackHunk(<pointer>)`, showing only the stack's start
    /// address.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let start = self.hunk().as_ptr();
        let mut tuple = f.debug_tuple("StackHunk");
        tuple.field(&start);
        tuple.finish()
    }
}
// `Clone` and `Copy` are implemented manually so they are available for
// any `Traits`; `#[derive]` would conservatively add `Traits: Clone` /
// `Traits: Copy` bounds because of the `PhantomData<Traits>` field.
impl<Traits: KernelTraits> Clone for StackHunk<Traits> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<Traits: KernelTraits> Copy for StackHunk<Traits> {}
impl<Traits: KernelTraits> Init for StackHunk<Traits> {
    /// A zero-length `StackHunk` referring to offset zero.
    const INIT: Self = Self {
        _phantom: PhantomData,
        hunk_offset: 0,
        len: 0,
    };
}
impl<Traits: KernelTraits> StackHunk<Traits> {
    /// Reconstruct the [`Hunk`] this `StackHunk` refers to.
    #[inline]
    const fn hunk(&self) -> Hunk<System<Traits>> {
        Hunk::from_offset(self.hunk_offset)
    }

    /// Construct a `StackHunk` from an existing hunk and a length in
    /// bytes. `len` must not have the `STACK_HUNK_AUTO` bit set.
    pub(crate) const fn from_hunk(hunk: Hunk<System<Traits>>, len: usize) -> Self {
        assert!((len & STACK_HUNK_AUTO) == 0, "too large");
        Self {
            _phantom: PhantomData,
            hunk_offset: hunk.offset(),
            len,
        }
    }

    /// Construct a `StackHunk` representing an automatically allocated
    /// stack of `len` bytes, tagged by setting the `STACK_HUNK_AUTO` bit
    /// in `len`.
    pub(crate) const fn auto(len: usize) -> Self {
        assert!((len & STACK_HUNK_AUTO) == 0, "too large");
        Self {
            _phantom: PhantomData,
            hunk_offset: 0,
            len: STACK_HUNK_AUTO | len,
        }
    }

    /// If `self` was created by [`Self::auto`], return the requested
    /// size; otherwise return `None`.
    pub(crate) const fn auto_size(self) -> Option<usize> {
        match self.len & STACK_HUNK_AUTO {
            0 => None,
            _ => Some(self.len & !STACK_HUNK_AUTO),
        }
    }
}
impl<Traits: KernelTraits> StackHunk<Traits> {
    /// Get a raw pointer (with length) to the stack region.
    ///
    /// NOTE(review): for a `StackHunk` created by `auto`, `len` still
    /// carries the `STACK_HUNK_AUTO` flag bit and `hunk_offset` is zero —
    /// presumably this method is only called after auto entries have been
    /// resolved to concrete hunks; confirm with callers.
    #[inline]
    pub fn as_ptr(&self) -> *mut [u8] {
        core::ptr::slice_from_raw_parts_mut(self.hunk().as_ptr(), self.len)
    }
}
/// The task control block.
#[repr(C)]
pub struct TaskCb<
    Traits: PortThreading,
    PortTaskState: 'static = <Traits as PortThreading>::PortTaskState,
    TaskPriority: 'static = <Traits as KernelCfg1>::TaskPriority,
    TaskReadyQueueData: 'static = <<Traits as KernelCfg1>::TaskReadyQueue as readyqueue::Queue<
        Traits,
    >>::PerTaskData,
> {
    // Port-specific task state. NOTE(review): contents are defined by the
    // port layer; not visible from this module.
    pub port_task_state: PortTaskState,
    // The task's static attributes (entry point, stack, initial priority).
    pub attr: &'static TaskAttr<Traits, TaskPriority>,
    // The base priority, set via `set_task_base_priority`.
    pub(super) base_priority: klock::CpuLockCell<Traits, TaskPriority>,
    // The effective priority, derived from `base_priority` by
    // `mutex::evaluate_task_effective_priority`.
    pub(super) effective_priority: klock::CpuLockCell<Traits, TaskPriority>,
    // The current task state.
    pub(super) st: klock::CpuLockCell<Traits, TaskSt>,
    // Token consumed by `park_current_task[_timeout]` and set by
    // `unpark_exact` when the task is not currently parked.
    pub(super) park_token: klock::CpuLockCell<Traits, bool>,
    // Per-task storage used by the ready queue implementation.
    pub(super) ready_queue_data: TaskReadyQueueData,
    // Wait-operation bookkeeping for this task.
    pub(super) wait: wait::TaskWait<Traits>,
    // The most recently acquired, still-held mutex, if any; drained by
    // `mutex::abandon_held_mutexes` on task exit.
    pub(super) last_mutex_held: klock::CpuLockCell<Traits, Option<&'static mutex::MutexCb<Traits>>>,
}
impl<
        Traits: KernelTraits,
        PortTaskState: fmt::Debug + 'static,
        TaskPriority: fmt::Debug + 'static,
        TaskReadyQueueData: fmt::Debug + 'static,
    > fmt::Debug for TaskCb<Traits, PortTaskState, TaskPriority, TaskReadyQueueData>
{
    /// Render all control block fields, starting with the block's own
    /// address.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Emit the fields one statement at a time instead of one long
        // method chain; output is identical.
        let mut d = f.debug_struct("TaskCb");
        d.field("self", &(self as *const _));
        d.field("port_task_state", &self.port_task_state);
        d.field("attr", self.attr);
        d.field("base_priority", &self.base_priority);
        d.field("effective_priority", &self.effective_priority);
        d.field("st", &self.st);
        d.field("ready_queue_data", &self.ready_queue_data);
        d.field("wait", &self.wait);
        // `last_mutex_held` is shown as a raw pointer to avoid recursing
        // into the mutex control block.
        d.field(
            "last_mutex_held",
            &self
                .last_mutex_held
                .debug_fmt_with(|x, f| x.map(|x| x as *const _).fmt(f)),
        );
        d.field("park_token", &self.park_token);
        d.finish()
    }
}
/// The static attributes of a task.
pub struct TaskAttr<
    Traits: KernelCfg1,
    TaskPriority: 'static = <Traits as KernelCfg1>::TaskPriority,
> {
    // The task's entry point.
    pub entry_point: unsafe extern "C" fn(ClosureEnv),
    // The environment value passed to `entry_point`.
    pub entry_param: ClosureEnv,
    // The task's stack region.
    pub stack: StackHunk<Traits>,
    // The initial base priority, copied into the control block by
    // `activate`.
    pub priority: TaskPriority,
}
impl<Traits: KernelTraits, TaskPriority: fmt::Debug> fmt::Debug for TaskAttr<Traits, TaskPriority> {
    /// Render all task attributes.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Statement-per-field form of the original builder chain.
        let mut out = f.debug_struct("TaskAttr");
        out.field("entry_point", &self.entry_point);
        out.field("entry_param", &self.entry_param);
        out.field("stack", &self.stack);
        out.field("priority", &self.priority);
        out.finish()
    }
}
/// Task state.
#[doc(hidden)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskSt {
    /// The task is not active.
    Dormant,
    /// The task is in the ready queue, eligible to be scheduled.
    Ready,
    /// The task is the currently running task.
    Running,
    /// The task is blocked in a wait operation.
    Waiting,
    /// Transient state: the task will be initialized and made `Ready`
    /// during kernel initialization (see `init_task`).
    PendingActivation,
}
impl Init for TaskSt {
    /// Tasks start out `Dormant`.
    const INIT: Self = Self::Dormant;
}
/// Terminate the current task, transitioning it into the `Dormant` state.
///
/// On success this never returns; control is handed over to
/// `Traits::exit_and_dispatch`. Returns `BadContext` when not called from
/// a task context.
///
/// # Safety
///
/// NOTE(review): inherits the safety contracts of `enter_cpu_lock`,
/// `assume_cpu_lock`, and `exit_and_dispatch`; confirm against the port
/// documentation before adding callers.
pub(super) unsafe fn exit_current_task<Traits: KernelTraits>() -> Result<!, ExitTaskError> {
    if !Traits::is_task_context() {
        return Err(ExitTaskError::BadContext);
    }

    // Acquire CPU Lock, activating it first if it isn't already active.
    let mut lock = unsafe {
        if !Traits::is_cpu_lock_active() {
            Traits::enter_cpu_lock();
        }
        klock::assume_cpu_lock::<Traits>()
    };

    // Deactivate Priority Boost unconditionally.
    #[cfg(feature = "priority_boost")]
    {
        Traits::state()
            .priority_boost
            .store(false, Ordering::Release);
    }

    let running_task = Traits::state().running_task(lock.borrow_mut()).unwrap();

    // Release every mutex the task still holds.
    mutex::abandon_held_mutexes(lock.borrow_mut(), running_task);
    debug_assert!(running_task.last_mutex_held.read(&*lock).is_none());

    // Transition the task into the Dormant state and clear `running_task`.
    assert_eq!(*running_task.st.read(&*lock), TaskSt::Running);
    running_task.st.replace(&mut *lock, TaskSt::Dormant);
    Traits::state().running_task.replace(&mut *lock, None);

    // Skip the guard's destructor so CPU Lock stays active when
    // `exit_and_dispatch` takes over.
    core::mem::forget(lock);

    // SAFETY: NOTE(review): relies on `exit_and_dispatch` never returning
    // (this function's return type is `!` on this path); confirm against
    // the port documentation.
    unsafe {
        Traits::exit_and_dispatch(running_task);
    }
}
/// Startup initialization for one task: a task found in the
/// `PendingActivation` state has its execution state initialized and is
/// inserted into the ready queue.
pub(super) fn init_task<Traits: KernelTraits>(
    lock: klock::CpuLockTokenRefMut<'_, Traits>,
    task_cb: &'static TaskCb<Traits>,
) {
    let pending = *task_cb.st.read(&*lock) == TaskSt::PendingActivation;
    if pending {
        // SAFETY: NOTE(review): inherits `initialize_task_state`'s
        // contract; see also `make_ready`.
        unsafe { Traits::initialize_task_state(task_cb) };
        unsafe { make_ready(lock, task_cb) };
    }
}
/// Activate the task `task_cb`.
///
/// Returns `QueueOverflow` if the task is not currently `Dormant`.
fn activate<Traits: KernelTraits>(
    mut lock: klock::CpuLockGuard<Traits>,
    task_cb: &'static TaskCb<Traits>,
) -> Result<(), ActivateTaskError> {
    if *task_cb.st.read(&*lock) != TaskSt::Dormant {
        return Err(ActivateTaskError::QueueOverflow);
    }

    // Discard any park token left over from a previous run.
    task_cb.park_token.replace(&mut *lock, false);

    // Initialize the port-specific execution state.
    // SAFETY: NOTE(review): inherits `initialize_task_state`'s contract.
    unsafe { Traits::initialize_task_state(task_cb) };

    // Reset both priorities to the statically configured initial priority.
    task_cb
        .base_priority
        .replace(&mut *lock, task_cb.attr.priority);
    task_cb
        .effective_priority
        .replace(&mut *lock, task_cb.attr.priority);

    // SAFETY: the task's execution state was initialized just above (see
    // `make_ready`'s safety note).
    unsafe { make_ready(lock.borrow_mut(), task_cb) };

    // The newly activated task might outrank the current one.
    unlock_cpu_and_check_preemption(lock);
    Ok(())
}
/// Transition the task into the `Ready` state and insert it into the
/// ready queue.
///
/// # Safety
///
/// NOTE(review): callers invoke this either immediately after
/// `Traits::initialize_task_state` or on a task that was already running —
/// presumably an initialized execution state is the precondition (the
/// inner unsafety comes from `Queue::push_back_task`); confirm against the
/// ready queue's documentation.
pub(super) unsafe fn make_ready<Traits: KernelTraits>(
    mut lock: klock::CpuLockTokenRefMut<'_, Traits>,
    task_cb: &'static TaskCb<Traits>,
) {
    // Make the task Ready...
    task_cb.st.replace(&mut *lock, TaskSt::Ready);

    // ...and enqueue it.
    unsafe {
        <Traits>::state()
            .task_ready_queue
            .push_back_task(lock.into(), task_cb);
    }
}
/// Release CPU Lock and invoke the dispatcher (`Traits::yield_cpu`) if a
/// ready task outranks the currently running one.
pub(super) fn unlock_cpu_and_check_preemption<Traits: KernelTraits>(
    mut lock: klock::CpuLockGuard<Traits>,
) {
    // While Priority Boost is active, preemption is suppressed entirely.
    // The running task must still be Running in that case.
    if System::<Traits>::raw_is_priority_boost_active() {
        debug_assert_eq!(
            *Traits::state()
                .running_task(lock.borrow_mut())
                .unwrap()
                .st
                .read(&*lock),
            TaskSt::Running
        );
        return;
    }

    // The effective priority of the current task, or `usize::MAX` when no
    // task is effectively Running. (Numerically lower values are higher
    // priorities, as the exclusive range below shows.)
    let prev_task_priority =
        if let Some(running_task) = Traits::state().running_task(lock.borrow_mut()) {
            if *running_task.st.read(&*lock) == TaskSt::Running {
                running_task
                    .effective_priority
                    .read(&*lock)
                    .to_usize()
                    .unwrap()
            } else {
                usize::MAX
            }
        } else {
            usize::MAX
        };

    // Is any ready task strictly higher-priority than the current one?
    let has_preempting_task = Traits::state()
        .task_ready_queue
        .has_ready_task_in_priority_range(lock.borrow_mut().into(), ..prev_task_priority);

    // Release CPU Lock before invoking the dispatcher.
    drop(lock);

    if has_preempting_task {
        // SAFETY: NOTE(review): inherits `yield_cpu`'s contract (invoked
        // here with CPU Lock released); confirm against the port docs.
        unsafe { Traits::yield_cpu() };
    }
}
/// Choose the next task to run and update `running_task` accordingly.
///
/// A previously Running task that loses the processor is pushed back to
/// the ready queue.
#[inline]
pub(super) fn choose_next_running_task<Traits: KernelTraits>(
    mut lock: klock::CpuLockTokenRefMut<Traits>,
) {
    // While Priority Boost is active, no rescheduling takes place; the
    // running task must still be Running.
    if System::<Traits>::raw_is_priority_boost_active() {
        debug_assert_eq!(
            *Traits::state()
                .running_task(lock.borrow_mut())
                .unwrap()
                .st
                .read(&*lock),
            TaskSt::Running
        );
        return;
    }

    // The effective priority of the current task, or `usize::MAX` when no
    // task is effectively Running (e.g., it just blocked or exited).
    let prev_running_task = Traits::state().running_task(lock.borrow_mut());
    let prev_task_priority = if let Some(running_task) = prev_running_task {
        if *running_task.st.read(&*lock) == TaskSt::Running {
            running_task
                .effective_priority
                .read(&*lock)
                .to_usize()
                .unwrap()
        } else {
            usize::MAX
        }
    } else {
        usize::MAX
    };

    // Ask the ready queue whether a task should take over from one with
    // priority `prev_task_priority`.
    let decision = Traits::state()
        .task_ready_queue
        .pop_front_task(lock.borrow_mut().into(), prev_task_priority);

    let next_running_task = match decision {
        readyqueue::ScheduleDecision::SwitchTo(task) => task,
        readyqueue::ScheduleDecision::Keep => {
            // Keep the current task. `Keep` implies there was a Running
            // task to keep.
            debug_assert_ne!(prev_task_priority, usize::MAX);
            return;
        }
    };

    if let Some(task) = next_running_task {
        task.st.replace(&mut *lock, TaskSt::Running);

        // No further work if the same task was chosen again.
        if ptr_from_option_ref(prev_running_task) == task {
            return;
        }
    }

    // The previous task (if any) is losing the processor.
    if let Some(running_task) = prev_running_task {
        debug_assert_ne!(
            ptr_from_option_ref(prev_running_task),
            ptr_from_option_ref(next_running_task),
        );

        match running_task.st.read(&*lock) {
            TaskSt::Running => {
                // Still runnable — return it to the ready queue.
                // SAFETY: the task was running, so its execution state is
                // initialized (see `make_ready`'s safety note).
                unsafe { make_ready(lock.borrow_mut(), running_task) };
            }
            TaskSt::Waiting => {
                // Blocked; it is not re-queued here.
            }
            TaskSt::Ready => {
                // Already placed in the ready queue elsewhere.
            }
            _ => unreachable!(),
        }
    }

    Traits::state()
        .running_task
        .replace(&mut *lock, next_running_task);
}
/// Convert `Option<&T>` to a raw pointer: `None` becomes the null
/// pointer, `Some(r)` becomes `r`'s address.
#[inline]
fn ptr_from_option_ref<T>(x: Option<&T>) -> *const T {
    x.map_or(core::ptr::null(), |r| r as *const T)
}
/// Transition the current task into the `Waiting` state and spin —
/// repeatedly releasing CPU Lock and yielding the processor — until some
/// other context sets the task's state back to `Running`.
pub(super) fn wait_until_woken_up<Traits: KernelTraits>(
    mut lock: klock::CpuLockTokenRefMut<'_, Traits>,
) {
    debug_assert_eq!(state::expect_waitable_context::<Traits>(), Ok(()));

    // Transition the current task into the Waiting state.
    let running_task = Traits::state().running_task(lock.borrow_mut()).unwrap();
    assert_eq!(*running_task.st.read(&*lock), TaskSt::Running);
    running_task.st.replace(&mut *lock, TaskSt::Waiting);

    loop {
        // Temporarily release CPU Lock so the wake-up can happen while we
        // yield the processor.
        // SAFETY: NOTE(review): inherits the contracts of
        // `leave_cpu_lock`/`yield_cpu`/`enter_cpu_lock`; confirm against
        // the port documentation.
        unsafe { Traits::leave_cpu_lock() };
        unsafe { Traits::yield_cpu() };
        unsafe { Traits::enter_cpu_lock() };

        // Woken up?
        if *running_task.st.read(&*lock) == TaskSt::Running {
            break;
        }
        // Otherwise the task must still be Waiting.
        assert_eq!(*running_task.st.read(&*lock), TaskSt::Waiting);
    }
}
/// Park the current task: consume a pending park token if there is one,
/// otherwise block until the task is unparked (or the wait is
/// interrupted).
#[inline]
pub(super) fn park_current_task<Traits: KernelTraits>() -> Result<(), ParkError> {
    let mut lock = klock::lock_cpu::<Traits>()?;
    state::expect_waitable_context::<Traits>()?;
    let running_task = Traits::state().running_task(lock.borrow_mut()).unwrap();

    // Atomically take (and clear) the park token. If none was pending,
    // block until `unpark_exact` ends the wait.
    let had_token = running_task.park_token.replace(&mut *lock, false);
    if !had_token {
        wait::wait_no_queue(lock.borrow_mut(), wait::WaitPayload::Park)?;
    }
    Ok(())
}
/// Park the current task with a timeout: consume a pending park token if
/// there is one, otherwise block until unparked, interrupted, or the
/// timeout elapses.
#[inline]
pub(super) fn park_current_task_timeout<Traits: KernelTraits>(
    timeout: Duration,
) -> Result<(), ParkTimeoutError> {
    // Validate the timeout before touching any kernel state.
    let time32 = timeout::time32_from_duration(timeout)?;
    let mut lock = klock::lock_cpu::<Traits>()?;
    state::expect_waitable_context::<Traits>()?;
    let running_task = Traits::state().running_task(lock.borrow_mut()).unwrap();

    // Atomically take (and clear) the park token; block only when no
    // token was pending.
    let had_token = running_task.park_token.replace(&mut *lock, false);
    if !had_token {
        wait::wait_no_queue_timeout(lock.borrow_mut(), wait::WaitPayload::Park, time32)?;
    }
    Ok(())
}
/// Unpark the task `task_cb`.
///
/// If the task is currently blocked in a park operation, the wait is
/// completed successfully. Otherwise a park token is stored for the next
/// park operation to consume; `QueueOverflow` is returned if a token was
/// already pending.
#[inline]
fn unpark_exact<Traits: KernelTraits>(
    mut lock: klock::CpuLockGuard<Traits>,
    task_cb: &'static TaskCb<Traits>,
) -> Result<(), UnparkExactError> {
    // Is the task parked right now (Waiting with a `Park` payload)?
    let is_parked = match task_cb.st.read(&*lock) {
        TaskSt::Dormant => return Err(UnparkExactError::BadObjectState),
        TaskSt::Waiting => wait::with_current_wait_payload(lock.borrow_mut(), task_cb, |payload| {
            matches!(payload, Some(wait::WaitPayload::Park))
        }),
        _ => false,
    };

    if is_parked {
        // Complete the park operation with a success result. The `unwrap`
        // asserts this cannot fail since we just observed the task parked.
        wait::interrupt_task(lock.borrow_mut(), task_cb, Ok(())).unwrap();

        // The unparked task might outrank the current one.
        unlock_cpu_and_check_preemption(lock);
        Ok(())
    } else {
        // Store a park token for the next park operation.
        if task_cb.park_token.replace(&mut *lock, true) {
            // A token was already pending.
            Err(UnparkExactError::QueueOverflow)
        } else {
            Ok(())
        }
    }
}
/// Block the current task for the duration `timeout`.
#[inline]
pub(super) fn put_current_task_on_sleep_timeout<Traits: KernelTraits>(
    timeout: Duration,
) -> Result<(), SleepError> {
    let time32 = timeout::time32_from_duration(timeout)?;
    let mut lock = klock::lock_cpu::<Traits>()?;
    state::expect_waitable_context::<Traits>()?;

    match wait::wait_no_queue_timeout(lock.borrow_mut(), wait::WaitPayload::Sleep, time32) {
        // A sleep wait can only end by timing out or being interrupted, so
        // a successful wait result is impossible.
        Ok(_) => unreachable!(),
        Err(WaitTimeoutError::Interrupted) => Err(SleepError::Interrupted),
        // Timing out is the expected way for a sleep to complete.
        Err(WaitTimeoutError::Timeout) => Ok(()),
    }
}
/// Change a task's base priority and propagate the consequences:
/// re-evaluate the effective priority, reposition the task in the ready
/// queue or its wait queue, and check for preemption.
#[inline]
fn set_task_base_priority<Traits: KernelTraits>(
    mut lock: klock::CpuLockGuard<Traits>,
    task_cb: &'static TaskCb<Traits>,
    base_priority: usize,
) -> Result<(), SetTaskPriorityError> {
    // Validate the priority and convert it to the internal representation.
    let base_priority_internal =
        Traits::to_task_priority(base_priority).ok_or(SetTaskPriorityError::BadParam)?;

    let st = *task_cb.st.read(&*lock);
    if st == TaskSt::Dormant {
        return Err(SetTaskPriorityError::BadObjectState);
    }

    // Nothing to do if the base priority is unchanged.
    let old_base_priority = task_cb.base_priority.read(&*lock).to_usize().unwrap();
    if old_base_priority == base_priority {
        return Ok(());
    }

    if base_priority < old_base_priority {
        // The priority is being raised (numerically lowered). Mutexes can
        // veto this — check the mutex the task is waiting for...
        let waited_mutex = wait::with_current_wait_payload(lock.borrow_mut(), task_cb, |payload| {
            if let Some(&wait::WaitPayload::Mutex(mutex_cb)) = payload {
                Some(mutex_cb)
            } else {
                None
            }
        });

        if let Some(waited_mutex) = waited_mutex {
            if !mutex::does_held_mutex_allow_new_task_base_priority(
                lock.borrow_mut(),
                waited_mutex,
                base_priority_internal,
            ) {
                return Err(SetTaskPriorityError::BadParam);
            }
        }

        // ...and every mutex the task currently holds.
        if !mutex::do_held_mutexes_allow_new_task_base_priority(
            lock.borrow_mut(),
            task_cb,
            base_priority_internal,
        ) {
            return Err(SetTaskPriorityError::BadParam);
        }
    }

    // Recompute the effective priority from the new base priority and the
    // task's held mutexes.
    let effective_priority_internal =
        mutex::evaluate_task_effective_priority(lock.borrow_mut(), task_cb, base_priority_internal);
    let effective_priority = effective_priority_internal.to_usize().unwrap();

    // Commit the new priorities.
    task_cb
        .base_priority
        .replace(&mut *lock, base_priority_internal);
    let old_effective_priority = task_cb
        .effective_priority
        .replace(&mut *lock, effective_priority_internal)
        .to_usize()
        .unwrap();

    // If the effective priority didn't change, no queue repositioning (or
    // preemption check) is needed.
    if old_effective_priority == effective_priority {
        return Ok(());
    }

    match st {
        TaskSt::Ready => unsafe {
            // Move the task between priority levels of the ready queue.
            // SAFETY: NOTE(review): inherits `Queue::reorder_task`'s
            // contract; confirm against the ready queue documentation.
            Traits::state().task_ready_queue.reorder_task(
                lock.borrow_mut().into(),
                task_cb,
                effective_priority,
                old_effective_priority,
            );
        },
        TaskSt::Running => {}
        TaskSt::Waiting => {
            // Reposition the task within the wait queue it's blocked on.
            wait::reorder_wait_of_task(lock.borrow_mut(), task_cb);
        }
        TaskSt::Dormant | TaskSt::PendingActivation => unreachable!(),
    }

    // A priority change of a runnable task can change which task should
    // be running.
    if let TaskSt::Running | TaskSt::Ready = st {
        unlock_cpu_and_check_preemption(lock);
    }
    Ok(())
}