#[cfg(feature = "priority_boost")]
use core::sync::atomic::Ordering;
use core::{convert::TryFrom, fmt, hash, marker::PhantomData, mem};
use num_traits::ToPrimitive;
use super::{
hunk::Hunk, mutex, state, timeout, utils, wait, ActivateTaskError, BadIdError, ExitTaskError,
GetCurrentTaskError, GetTaskPriorityError, Id, InterruptTaskError, Kernel, KernelCfg1,
ParkError, ParkTimeoutError, PortThreading, SetTaskPriorityError, SleepError, UnparkError,
UnparkExactError, WaitTimeoutError,
};
use crate::{time::Duration, utils::Init};
#[doc(hidden)]
pub mod readyqueue;
use self::readyqueue::Queue as _;
/// A lightweight, copyable handle to a task, identified by its [`Id`].
///
/// `#[repr(transparent)]` guarantees the same layout as `Id`; the
/// `PhantomData` ties the handle to a particular kernel `System` at the
/// type level without storing anything.
#[cfg_attr(doc, svgbobdoc::transform)]
#[doc(include = "../common.md")]
#[repr(transparent)]
pub struct Task<System>(Id, PhantomData<System>);
/// `Task` is just an `Id` plus a marker, so cloning is a bitwise copy.
impl<System> Clone for Task<System> {
    fn clone(&self) -> Self {
        // `Task` is `Copy` (see below); dereferencing performs the copy.
        *self
    }
}
impl<System> Copy for Task<System> {}
/// Two `Task` handles are equal iff they wrap the same task `Id`; the
/// `PhantomData` marker carries no data and is ignored.
impl<System> PartialEq for Task<System> {
    fn eq(&self, other: &Self) -> bool {
        self.0.eq(&other.0)
    }
}
impl<System> Eq for Task<System> {}
impl<System> hash::Hash for Task<System> {
    fn hash<H>(&self, hasher: &mut H)
    where
        H: hash::Hasher,
    {
        // Hash only the `Id` so the hash is consistent with `PartialEq`.
        self.0.hash(hasher);
    }
}
impl<System> fmt::Debug for Task<System> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Render as `Task(<id>)`.
        let mut tuple = f.debug_tuple("Task");
        tuple.field(&self.0);
        tuple.finish()
    }
}
impl<System> Task<System> {
    /// Constructs a `Task` from a raw task [`Id`] without any validation.
    ///
    /// # Safety
    ///
    /// Nothing here checks that `id` denotes an existing task.
    /// NOTE(review): callers presumably must supply an `Id` previously
    /// issued by the kernel/configurator — confirm.
    pub const unsafe fn from_id(id: Id) -> Self {
        Self(id, PhantomData)
    }

    /// Returns the raw task [`Id`] backing this handle.
    pub const fn id(self) -> Id {
        self.0
    }
}
impl<System: Kernel> Task<System> {
    /// Returns the currently running task, or `Ok(None)` when no task is
    /// running (e.g., in a non-task context).
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub fn current() -> Result<Option<Self>, GetCurrentTaskError> {
        let mut lock = utils::lock_cpu::<System>()?;
        let task_cb = if let Some(cb) = System::state().running_task(lock.borrow_mut()) {
            cb
        } else {
            return Ok(None);
        };

        // Recover the task's pool index from the control block's byte
        // offset into the task CB pool; `Id` is the one-based index.
        let offset_bytes =
            task_cb as *const TaskCb<_> as usize - System::task_cb_pool().as_ptr() as usize;
        let offset = offset_bytes / mem::size_of::<TaskCb<System>>();

        // SAFETY: `task_cb` came out of the pool, so `offset + 1` is a
        // valid, nonzero task ID.
        let task = unsafe { Self::from_id(Id::new(offset as usize + 1).unwrap()) };

        Ok(Some(task))
    }

    /// Looks up this task's control block; the one-based `Id` is converted
    /// to a zero-based pool index.
    fn task_cb(self) -> Result<&'static TaskCb<System>, BadIdError> {
        System::get_task_cb(self.0.get() - 1).ok_or(BadIdError::BadId)
    }

    /// Activates the task (see [`activate`] for the state transition).
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub fn activate(self) -> Result<(), ActivateTaskError> {
        let lock = utils::lock_cpu::<System>()?;
        let task_cb = self.task_cb()?;
        activate(lock, task_cb)
    }

    /// Interrupts the task's ongoing wait operation; the wait completes
    /// with `WaitTimeoutError::Interrupted`.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub fn interrupt(self) -> Result<(), InterruptTaskError> {
        let mut lock = utils::lock_cpu::<System>()?;
        let task_cb = self.task_cb()?;
        wait::interrupt_task(
            lock.borrow_mut(),
            task_cb,
            Err(WaitTimeoutError::Interrupted),
        )?;

        // The woken task might have a higher priority than the current one.
        unlock_cpu_and_check_preemption(lock);

        Ok(())
    }

    /// Unparks the task, saturating: an already-pending park token
    /// (`QueueOverflow` from [`Self::unpark_exact`]) is treated as success.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub fn unpark(self) -> Result<(), UnparkError> {
        match self.unpark_exact() {
            Ok(()) | Err(UnparkExactError::QueueOverflow) => Ok(()),
            Err(UnparkExactError::BadContext) => Err(UnparkError::BadContext),
            Err(UnparkExactError::BadId) => Err(UnparkError::BadId),
            Err(UnparkExactError::BadObjectState) => Err(UnparkError::BadObjectState),
        }
    }

    /// Unparks the task exactly once; fails with `QueueOverflow` when a
    /// park token is already pending (see [`unpark_exact`]).
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub fn unpark_exact(self) -> Result<(), UnparkExactError> {
        let lock = utils::lock_cpu::<System>()?;
        let task_cb = self.task_cb()?;
        unpark_exact(lock, task_cb)
    }

    /// Sets the task's base priority (see [`set_task_base_priority`]).
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub fn set_priority(self, priority: usize) -> Result<(), SetTaskPriorityError> {
        let lock = utils::lock_cpu::<System>()?;
        let task_cb = self.task_cb()?;
        set_task_base_priority(lock, task_cb, priority)
    }

    /// Returns the task's base priority; `BadObjectState` if it's Dormant.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub fn priority(self) -> Result<usize, GetTaskPriorityError> {
        let lock = utils::lock_cpu::<System>()?;
        let task_cb = self.task_cb()?;

        if *task_cb.st.read(&*lock) == TaskSt::Dormant {
            Err(GetTaskPriorityError::BadObjectState)
        } else {
            Ok(task_cb.base_priority.read(&*lock).to_usize().unwrap())
        }
    }

    /// Returns the task's effective priority — the base priority adjusted
    /// for held mutexes (cf. `mutex::evaluate_task_effective_priority`);
    /// `BadObjectState` if the task is Dormant.
    #[cfg_attr(not(feature = "inline_syscall"), inline(never))]
    pub fn effective_priority(self) -> Result<usize, GetTaskPriorityError> {
        let lock = utils::lock_cpu::<System>()?;
        let task_cb = self.task_cb()?;

        if *task_cb.st.read(&*lock) == TaskSt::Dormant {
            Err(GetTaskPriorityError::BadObjectState)
        } else {
            Ok(task_cb.effective_priority.read(&*lock).to_usize().unwrap())
        }
    }
}
/// A memory region used as a task's stack: a base [`Hunk`] plus a length
/// in bytes.
pub struct StackHunk<System>(Hunk<System>, usize);

// SAFETY: NOTE(review): `StackHunk` is only a (hunk, length) descriptor;
// sharing the descriptor itself across contexts is presumably sound because
// access to the underlying stack memory is mediated by the kernel — confirm.
unsafe impl<System> Sync for StackHunk<System> {}
impl<System: Kernel> fmt::Debug for StackHunk<System> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Show only the base address of the backing hunk.
        let mut tuple = f.debug_tuple("StackHunk");
        tuple.field(&self.0.as_ptr());
        tuple.finish()
    }
}
impl<System> Clone for StackHunk<System> {
    fn clone(&self) -> Self {
        // `StackHunk` is `Copy`, so cloning is a plain bitwise copy.
        *self
    }
}
impl<System> Copy for StackHunk<System> {}
impl<System> Init for StackHunk<System> {
    /// A zero-length stack located at the default (initial) hunk.
    const INIT: Self = Self(Init::INIT, 0);
}
impl<System> StackHunk<System> {
    /// Constructs a `StackHunk` from a hunk and a length in bytes.
    ///
    /// # Safety
    ///
    /// NOTE(review): `hunk` presumably must refer to at least `len` bytes
    /// of memory usable as a stack — confirm against the configurator code
    /// that calls this.
    pub const unsafe fn from_hunk(hunk: Hunk<System>, len: usize) -> Self {
        Self(hunk, len)
    }

    /// Decomposes the stack hunk into `(hunk, len_in_bytes)`.
    pub fn into_inner(self) -> (Hunk<System>, usize) {
        (self.0, self.1)
    }
}
impl<System: Kernel> StackHunk<System> {
    /// Returns a raw fat pointer covering the whole stack region:
    /// `self.1` bytes starting at the hunk's base address.
    pub fn as_ptr(&self) -> *mut [u8] {
        let base = self.0.as_ptr();
        let len = self.1;
        core::ptr::slice_from_raw_parts_mut(base, len)
    }
}
/// A task control block: the kernel-side state of one task.
///
/// The trailing type parameters default to projections of `System`; they
/// exist as separate parameters so the struct can be named with them
/// spelled out explicitly.
#[repr(C)]
pub struct TaskCb<
    System: PortThreading,
    PortTaskState: 'static = <System as PortThreading>::PortTaskState,
    TaskPriority: 'static = <System as KernelCfg1>::TaskPriority,
    TaskReadyQueueData: 'static = <<System as KernelCfg1>::TaskReadyQueue as readyqueue::Queue<
        System,
    >>::PerTaskData,
> {
    // Port-specific task state. NOTE(review): presumably the saved
    // execution context — confirm against the port layer.
    pub port_task_state: PortTaskState,
    // Static, configuration-time attributes (entry point, stack, initial
    // priority).
    pub attr: &'static TaskAttr<System, TaskPriority>,
    // The task's assigned priority before any mutex-related adjustment.
    pub(super) base_priority: utils::CpuLockCell<System, TaskPriority>,
    // The priority used for scheduling, recomputed from `base_priority`
    // and held mutexes (see `mutex::evaluate_task_effective_priority`).
    pub(super) effective_priority: utils::CpuLockCell<System, TaskPriority>,
    // The task's current lifecycle state (`TaskSt`).
    pub(super) st: utils::CpuLockCell<System, TaskSt>,
    // `true` when an unpark token is pending; set by `unpark[_exact]`,
    // consumed by `park_current_task[_timeout]`.
    pub(super) park_token: utils::CpuLockCell<System, bool>,
    // Per-task bookkeeping owned by the ready-queue implementation.
    pub(super) ready_queue_data: TaskReadyQueueData,
    // Bookkeeping for the wait operation this task may be blocked on.
    pub(super) wait: wait::TaskWait<System>,
    // The mutex most recently locked by this task. NOTE(review): presumably
    // the head of the held-mutex list maintained by the `mutex` module —
    // confirm; `exit_current_task` asserts it's `None` after
    // `abandon_held_mutexes`.
    pub(super) last_mutex_held: utils::CpuLockCell<System, Option<&'static mutex::MutexCb<System>>>,
}
impl<
        System: Kernel,
        PortTaskState: fmt::Debug + 'static,
        TaskPriority: fmt::Debug + 'static,
        TaskReadyQueueData: fmt::Debug + 'static,
    > fmt::Debug for TaskCb<System, PortTaskState, TaskPriority, TaskReadyQueueData>
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Build the struct entry-by-entry; the field order below is part of
        // the output and matches the historical format.
        let mut out = f.debug_struct("TaskCb");
        out.field("self", &(self as *const _));
        out.field("port_task_state", &self.port_task_state);
        out.field("attr", self.attr);
        out.field("base_priority", &self.base_priority);
        out.field("effective_priority", &self.effective_priority);
        out.field("st", &self.st);
        out.field("ready_queue_data", &self.ready_queue_data);
        out.field("wait", &self.wait);
        // `last_mutex_held` is shown as a raw pointer (or `None`) because
        // `MutexCb` itself is not printed here.
        out.field(
            "last_mutex_held",
            &self
                .last_mutex_held
                .debug_fmt_with(|held, f| held.map(|m| m as *const _).fmt(f)),
        );
        out.field("park_token", &self.park_token);
        out.finish()
    }
}
/// The static (configuration-time) attributes of a task.
pub struct TaskAttr<System, TaskPriority: 'static = <System as KernelCfg1>::TaskPriority> {
    // The function executed when the task starts running.
    pub entry_point: unsafe fn(usize),
    // The argument passed to `entry_point`.
    pub entry_param: usize,
    // The memory region used as the task's stack.
    pub stack: StackHunk<System>,
    // The task's initial priority; copied into both `base_priority` and
    // `effective_priority` on activation.
    pub priority: TaskPriority,
}
impl<System: Kernel, TaskPriority: fmt::Debug> fmt::Debug for TaskAttr<System, TaskPriority> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Same field set and order as before — only the builder style differs.
        let mut out = f.debug_struct("TaskAttr");
        out.field("entry_point", &self.entry_point);
        out.field("entry_param", &self.entry_param);
        out.field("stack", &self.stack);
        out.field("priority", &self.priority);
        out.finish()
    }
}
/// The lifecycle state of a task.
#[doc(hidden)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskSt {
    /// Not activated; holds no execution state.
    Dormant,
    /// Runnable and sitting in the ready queue.
    Ready,
    /// Currently executing on the processor.
    Running,
    /// Blocked on a wait operation (park, sleep, mutex, …).
    Waiting,
    /// Scheduled to be activated during startup (see `init_task`).
    PendingActivation,
}
impl Init for TaskSt {
    /// Tasks start out Dormant unless configured otherwise.
    const INIT: Self = Self::Dormant;
}
/// Terminates the calling task; on success it never returns (`!`).
///
/// # Safety
///
/// NOTE(review): presumably must run on the exiting task's own context and
/// hand control to the dispatcher via `exit_and_dispatch` — confirm against
/// the port's `exit_and_dispatch` contract.
pub(super) unsafe fn exit_current_task<System: Kernel>() -> Result<!, ExitTaskError> {
    // Only a task context can exit a task.
    if !System::is_task_context() {
        return Err(ExitTaskError::BadContext);
    }

    // Acquire CPU Lock if the caller didn't already hold it, then
    // materialize a guard for the now-definitely-held lock.
    let mut lock = unsafe {
        if !System::is_cpu_lock_active() {
            System::enter_cpu_lock();
        }
        utils::assume_cpu_lock::<System>()
    };

    // Exiting a task deactivates Priority Boost.
    #[cfg(feature = "priority_boost")]
    {
        System::state()
            .priority_boost
            .store(false, Ordering::Release);
    }

    let running_task = System::state().running_task(lock.borrow_mut()).unwrap();

    // Release every mutex the task still holds.
    mutex::abandon_held_mutexes(lock.borrow_mut(), running_task);
    debug_assert!(running_task.last_mutex_held.read(&*lock).is_none());

    // Transition Running → Dormant and clear the `running_task` slot.
    assert_eq!(*running_task.st.read(&*lock), TaskSt::Running);
    running_task.st.replace(&mut *lock, TaskSt::Dormant);
    System::state().running_task.replace(&mut *lock, None);

    // Deliberately leak the guard: CPU Lock must stay active while
    // `exit_and_dispatch` takes over, and it never returns.
    core::mem::forget(lock);
    unsafe {
        System::exit_and_dispatch(running_task);
    }
}
/// Promotes `task_cb` to the Ready state if it was left in the
/// `PendingActivation` state (i.e., it should start out activated).
pub(super) fn init_task<System: Kernel>(
    lock: utils::CpuLockGuardBorrowMut<'_, System>,
    task_cb: &'static TaskCb<System>,
) {
    if matches!(*task_cb.st.read(&*lock), TaskSt::PendingActivation) {
        // Set up the port-level execution state, then hand the task to the
        // scheduler.
        unsafe { System::initialize_task_state(task_cb) };
        unsafe { make_ready(lock, task_cb) };
    }
}
/// Implements [`Task::activate`]: transitions a Dormant task into Ready.
fn activate<System: Kernel>(
    mut lock: utils::CpuLockGuard<System>,
    task_cb: &'static TaskCb<System>,
) -> Result<(), ActivateTaskError> {
    // Only a Dormant task can be activated; otherwise the (capacity-one)
    // activation request "queue" overflows.
    if *task_cb.st.read(&*lock) != TaskSt::Dormant {
        return Err(ActivateTaskError::QueueOverflow);
    }

    // Discard any park token left over from a previous run.
    task_cb.park_token.replace(&mut *lock, false);

    // Initialize the port-specific execution state.
    unsafe { System::initialize_task_state(task_cb) };

    // Reset both priorities to the statically-configured initial priority.
    task_cb
        .base_priority
        .replace(&mut *lock, task_cb.attr.priority);
    task_cb
        .effective_priority
        .replace(&mut *lock, task_cb.attr.priority);

    // SAFETY: the state was just initialized and the task was Dormant, so
    // it can't already be in the ready queue.
    unsafe { make_ready(lock.borrow_mut(), task_cb) };

    // The newly activated task might preempt the current one.
    unlock_cpu_and_check_preemption(lock);

    Ok(())
}
/// Transitions `task_cb` into the Ready state and inserts it into the
/// ready queue.
///
/// # Safety
///
/// NOTE(review): presumably the task's execution state must already be
/// initialized and the task must not currently be in the ready queue —
/// confirm against `push_back_task`'s contract.
pub(super) unsafe fn make_ready<System: Kernel>(
    mut lock: utils::CpuLockGuardBorrowMut<'_, System>,
    task_cb: &'static TaskCb<System>,
) {
    // Mark the task Ready before enqueueing it.
    task_cb.st.replace(&mut *lock, TaskSt::Ready);

    unsafe {
        <System>::state()
            .task_ready_queue
            .push_back_task(lock.into(), task_cb);
    }
}
/// Releases CPU Lock and yields the processor if a higher-priority task is
/// ready to run.
pub(super) fn unlock_cpu_and_check_preemption<System: Kernel>(
    mut lock: utils::CpuLockGuard<System>,
) {
    // While Priority Boost is active, preemption is suppressed; the running
    // task must still be Running in that case.
    if System::is_priority_boost_active() {
        debug_assert_eq!(
            *System::state()
                .running_task(lock.borrow_mut())
                .unwrap()
                .st
                .read(&*lock),
            TaskSt::Running
        );
        return;
    }

    // Effective priority the current task runs at, or `usize::MAX` (the
    // weakest possible value) when there's no runnable current task.
    let prev_task_priority =
        if let Some(running_task) = System::state().running_task(lock.borrow_mut()) {
            if *running_task.st.read(&*lock) == TaskSt::Running {
                running_task
                    .effective_priority
                    .read(&*lock)
                    .to_usize()
                    .unwrap()
            } else {
                usize::MAX
            }
        } else {
            usize::MAX
        };

    // Is there a ready task with a strictly higher priority (numerically
    // lower value)?
    let has_preempting_task = System::state()
        .task_ready_queue
        .has_ready_task_in_priority_range(lock.borrow_mut().into(), ..prev_task_priority);

    // Release CPU Lock before yielding.
    drop(lock);

    if has_preempting_task {
        // SAFETY: NOTE(review): presumably sound because CPU Lock is now
        // inactive — confirm the port's `yield_cpu` preconditions.
        unsafe { System::yield_cpu() };
    }
}
/// Chooses the next task to run and updates `running_task` accordingly,
/// re-queueing the displaced task if it's still runnable.
#[inline]
pub(super) fn choose_next_running_task<System: Kernel>(
    mut lock: utils::CpuLockGuardBorrowMut<System>,
) {
    // While Priority Boost is active, the current task keeps running.
    if System::is_priority_boost_active() {
        debug_assert_eq!(
            *System::state()
                .running_task(lock.borrow_mut())
                .unwrap()
                .st
                .read(&*lock),
            TaskSt::Running
        );
        return;
    }

    let prev_running_task = System::state().running_task(lock.borrow_mut());

    // Priority the current task runs at, or `usize::MAX` when there is no
    // runnable current task.
    let prev_task_priority = if let Some(running_task) = prev_running_task {
        if *running_task.st.read(&*lock) == TaskSt::Running {
            running_task
                .effective_priority
                .read(&*lock)
                .to_usize()
                .unwrap()
        } else {
            usize::MAX
        }
    } else {
        usize::MAX
    };

    // Ask the ready queue whether the current task should be replaced.
    let decision = System::state()
        .task_ready_queue
        .pop_front_task(lock.borrow_mut().into(), prev_task_priority);

    let next_running_task = match decision {
        readyqueue::ScheduleDecision::SwitchTo(task) => task,
        readyqueue::ScheduleDecision::Keep => {
            // Keeping the current task implies there is one.
            debug_assert_ne!(prev_task_priority, usize::MAX);
            return;
        }
    };

    if let Some(task) = next_running_task {
        task.st.replace(&mut *lock, TaskSt::Running);

        // If the very same task was re-selected, nothing else changes.
        if ptr_from_option_ref(prev_running_task) == task {
            return;
        }
    }

    // The previous task (if any) is being displaced.
    if let Some(running_task) = prev_running_task {
        debug_assert_ne!(
            ptr_from_option_ref(prev_running_task),
            ptr_from_option_ref(next_running_task),
        );

        match running_task.st.read(&*lock) {
            TaskSt::Running => {
                // Still runnable — it only lost the processor; put it back
                // into the ready queue.
                unsafe { make_ready(lock.borrow_mut(), running_task) };
            }
            TaskSt::Waiting => {
                // Blocked; it will be re-queued when it's woken up.
            }
            TaskSt::Ready => {
                // Already re-inserted into the ready queue elsewhere.
            }
            _ => unreachable!(),
        }
    }

    System::state()
        .running_task
        .replace(&mut *lock, next_running_task);
}
/// Converts `Option<&T>` to a raw pointer: `None` becomes null, `Some(r)`
/// becomes the address of `r`.
#[inline]
fn ptr_from_option_ref<T>(x: Option<&T>) -> *const T {
    x.map_or(core::ptr::null(), |r| r as *const T)
}
/// Puts the current task into the Waiting state and yields repeatedly
/// until its state is switched back to Running by another party.
pub(super) fn wait_until_woken_up<System: Kernel>(
    mut lock: utils::CpuLockGuardBorrowMut<'_, System>,
) {
    debug_assert_eq!(state::expect_waitable_context::<System>(), Ok(()));

    // Transition the current task Running → Waiting.
    let running_task = System::state().running_task(lock.borrow_mut()).unwrap();
    assert_eq!(*running_task.st.read(&*lock), TaskSt::Running);
    running_task.st.replace(&mut *lock, TaskSt::Waiting);

    loop {
        // CPU Lock must be inactive while yielding so the dispatcher (or
        // an interrupt handler) can run and wake this task up.
        unsafe { System::leave_cpu_lock() };
        unsafe { System::yield_cpu() };
        unsafe { System::enter_cpu_lock() };

        if *running_task.st.read(&*lock) == TaskSt::Running {
            break;
        }

        // Not woken up yet — the task must still be Waiting.
        assert_eq!(*running_task.st.read(&*lock), TaskSt::Waiting);
    }
}
/// Parks the current task until `unpark[_exact]` is called on it.
///
/// A pending park token is consumed instead, returning immediately without
/// blocking.
pub(super) fn park_current_task<System: Kernel>() -> Result<(), ParkError> {
    let mut lock = utils::lock_cpu::<System>()?;
    state::expect_waitable_context::<System>()?;

    let running_task = System::state().running_task(lock.borrow_mut()).unwrap();

    // Consume a pending park token, if any.
    if running_task.park_token.replace(&mut *lock, false) {
        return Ok(());
    }

    // Block until woken by `unpark[_exact]` (or interrupted).
    wait::wait_no_queue(lock.borrow_mut(), wait::WaitPayload::Park)?;

    Ok(())
}
/// Like [`park_current_task`], but gives up after `timeout` elapses.
pub(super) fn park_current_task_timeout<System: Kernel>(
    timeout: Duration,
) -> Result<(), ParkTimeoutError> {
    // Validate/convert the duration before taking the CPU Lock.
    let time32 = timeout::time32_from_duration(timeout)?;
    let mut lock = utils::lock_cpu::<System>()?;
    state::expect_waitable_context::<System>()?;

    let running_task = System::state().running_task(lock.borrow_mut()).unwrap();

    // Consume a pending park token, if any.
    if running_task.park_token.replace(&mut *lock, false) {
        return Ok(());
    }

    // Block until unparked, interrupted, or timed out.
    wait::wait_no_queue_timeout(lock.borrow_mut(), wait::WaitPayload::Park, time32)?;

    Ok(())
}
/// Implements [`Task::unpark_exact`]: wakes a parked task, or records
/// exactly one pending park token.
///
/// Errors with `BadObjectState` for a Dormant task and `QueueOverflow`
/// when a token is already pending.
fn unpark_exact<System: Kernel>(
    mut lock: utils::CpuLockGuard<System>,
    task_cb: &'static TaskCb<System>,
) -> Result<(), UnparkExactError> {
    // Is the task currently blocked in `park[_timeout]` (as opposed to
    // some other wait, or not waiting at all)?
    let is_parked = match task_cb.st.read(&*lock) {
        TaskSt::Dormant => return Err(UnparkExactError::BadObjectState),
        TaskSt::Waiting => wait::with_current_wait_payload(lock.borrow_mut(), task_cb, |payload| {
            matches!(payload, Some(wait::WaitPayload::Park))
        }),
        _ => false,
    };

    if is_parked {
        // Complete the park wait successfully. `interrupt_task` cannot
        // fail here because we just observed a `Park` wait in progress.
        wait::interrupt_task(lock.borrow_mut(), task_cb, Ok(())).unwrap();

        // The woken task might preempt the current one.
        unlock_cpu_and_check_preemption(lock);
        Ok(())
    } else if task_cb.park_token.replace(&mut *lock, true) {
        // A park token was already pending (the old value was `true`);
        // the token "queue" has capacity one.
        Err(UnparkExactError::QueueOverflow)
    } else {
        // Recorded a fresh park token; the next `park` will consume it.
        Ok(())
    }
}
/// Blocks the current task for the specified duration.
pub(super) fn put_current_task_on_sleep_timeout<System: Kernel>(
    timeout: Duration,
) -> Result<(), SleepError> {
    // Validate/convert the duration before taking the CPU Lock.
    let time32 = timeout::time32_from_duration(timeout)?;
    let mut lock = utils::lock_cpu::<System>()?;
    state::expect_waitable_context::<System>()?;

    match wait::wait_no_queue_timeout(lock.borrow_mut(), wait::WaitPayload::Sleep, time32) {
        // Nothing completes a `Sleep` wait successfully; it can only time
        // out or be interrupted.
        Ok(_) => unreachable!(),
        Err(WaitTimeoutError::Interrupted) => Err(SleepError::Interrupted),
        // The timeout elapsing is the expected, successful outcome.
        Err(WaitTimeoutError::Timeout) => Ok(()),
    }
}
/// Implements [`Task::set_priority`]: changes `task_cb`'s base priority,
/// recomputes its effective priority, and repositions it in whatever queue
/// it currently occupies.
fn set_task_base_priority<System: Kernel>(
    mut lock: utils::CpuLockGuard<System>,
    task_cb: &'static TaskCb<System>,
    base_priority: usize,
) -> Result<(), SetTaskPriorityError> {
    // Reject out-of-range priority values.
    if base_priority >= System::NUM_TASK_PRIORITY_LEVELS {
        return Err(SetTaskPriorityError::BadParam);
    }

    // The range check above guarantees the conversion succeeds.
    let base_priority_internal =
        System::TaskPriority::try_from(base_priority).unwrap_or_else(|_| unreachable!());

    let st = *task_cb.st.read(&*lock);
    if st == TaskSt::Dormant {
        return Err(SetTaskPriorityError::BadObjectState);
    }

    let old_base_priority = task_cb.base_priority.read(&*lock).to_usize().unwrap();
    if old_base_priority == base_priority {
        // No change — nothing to do.
        return Ok(());
    }

    if base_priority < old_base_priority {
        // Raising the priority: the mutex locking protocol may forbid it.
        // First check the mutex this task is currently waiting for...
        let waited_mutex = wait::with_current_wait_payload(lock.borrow_mut(), task_cb, |payload| {
            if let Some(&wait::WaitPayload::Mutex(mutex_cb)) = payload {
                Some(mutex_cb)
            } else {
                None
            }
        });

        if let Some(waited_mutex) = waited_mutex {
            if !mutex::does_held_mutex_allow_new_task_base_priority(
                lock.borrow_mut(),
                waited_mutex,
                base_priority_internal,
            ) {
                return Err(SetTaskPriorityError::BadParam);
            }
        }

        // ...then every mutex this task currently holds.
        if !mutex::do_held_mutexes_allow_new_task_base_priority(
            lock.borrow_mut(),
            task_cb,
            base_priority_internal,
        ) {
            return Err(SetTaskPriorityError::BadParam);
        }
    }

    // Recompute the effective priority from the new base priority and the
    // task's held mutexes.
    let effective_priority_internal =
        mutex::evaluate_task_effective_priority(lock.borrow_mut(), task_cb, base_priority_internal);
    let effective_priority = effective_priority_internal.to_usize().unwrap();

    // Commit both priorities.
    task_cb
        .base_priority
        .replace(&mut *lock, base_priority_internal);
    let old_effective_priority = task_cb
        .effective_priority
        .replace(&mut *lock, effective_priority_internal)
        .to_usize()
        .unwrap();

    if old_effective_priority == effective_priority {
        // Scheduling order is unaffected.
        return Ok(());
    }

    // Reposition the task according to its new effective priority.
    match st {
        TaskSt::Ready => unsafe {
            // Move the task between priority levels in the ready queue.
            System::state().task_ready_queue.reorder_task(
                lock.borrow_mut().into(),
                task_cb,
                effective_priority,
                old_effective_priority,
            );
        },
        TaskSt::Running => {}
        TaskSt::Waiting => {
            // Re-sort the wait queue the task is blocked in.
            wait::reorder_wait_of_task(lock.borrow_mut(), task_cb);
        }
        TaskSt::Dormant | TaskSt::PendingActivation => unreachable!(),
    }

    // The change may let another ready task preempt (priority lowered) or
    // require rescheduling (priority raised).
    if let TaskSt::Running | TaskSt::Ready = st {
        unlock_cpu_and_check_preemption(lock);
    }

    Ok(())
}