use core::{
fmt,
marker::PhantomPinned,
pin::Pin,
ptr::NonNull,
sync::atomic::{AtomicU32, AtomicUsize, Ordering},
};
use super::{
state::expect_task_context,
task,
utils::{lock_cpu, CpuLockCell, CpuLockGuard, CpuLockGuardBorrowMut},
AdjustTimeError, BadParamError, Kernel, TimeError, UTicks,
};
use crate::{
time::{Duration, Time},
utils::{
binary_heap::{BinaryHeap, BinaryHeapCtx},
Init,
},
};
// Fix: `cfg(tests)` is not a predicate the compiler ever sets, so the unit
// test module was silently compiled out. The correct predicate is `cfg(test)`.
#[cfg(test)]
mod tests;
/// Kernel-global state of the timeout (timed event) subsystem.
///
/// Every field lives in a [`CpuLockCell`], so all access is serialized by
/// CPU Lock.
pub(super) struct TimeoutGlobals<System, TimeoutHeap: 'static> {
    /// The raw hardware tick count captured by the most recent tick
    /// processing (`mark_tick`).
    last_tick_count: CpuLockCell<System, UTicks>,
    /// The wrapping 32-bit event time as of the most recent tick processing.
    last_tick_time: CpuLockCell<System, Time32>,
    /// The 64-bit system time as of the most recent tick processing.
    /// Only tracked when the `system_time` feature is enabled.
    #[cfg(feature = "system_time")]
    last_tick_sys_time: CpuLockCell<System, Time64>,
    /// Distance between the current event time and the event-time frontier.
    /// It grows when the clock is adjusted backward (and is rejected if it
    /// would exceed `USER_HEADROOM`) and shrinks (saturating at zero) as
    /// time advances or is adjusted forward.
    frontier_gap: CpuLockCell<System, Time32>,
    /// The priority queue of registered timeouts, ordered by expiration time.
    heap: CpuLockCell<System, TimeoutHeap>,
    /// Set while `handle_tick` is draining expired timeouts; suppresses the
    /// redundant `pend_next_tick` calls that `insert_timeout` /
    /// `remove_timeout` would otherwise make during that window.
    handle_tick_in_progress: CpuLockCell<System, bool>,
}
/// Constant initialization for the static kernel state; every field is
/// initialized to its own `Init::INIT` value.
impl<System, TimeoutHeap: Init + 'static> Init for TimeoutGlobals<System, TimeoutHeap> {
    const INIT: Self = Self {
        last_tick_count: Init::INIT,
        last_tick_time: Init::INIT,
        #[cfg(feature = "system_time")]
        last_tick_sys_time: Init::INIT,
        frontier_gap: Init::INIT,
        heap: Init::INIT,
        handle_tick_in_progress: Init::INIT,
    };
}
impl<System: Kernel, TimeoutHeap: fmt::Debug> fmt::Debug for TimeoutGlobals<System, TimeoutHeap> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("TimeoutGlobals")
            .field("last_tick_count", &self.last_tick_count)
            .field("last_tick_time", &self.last_tick_time)
            .field(
                "last_tick_sys_time",
                // `match ()` with `cfg`-gated arms selects exactly one
                // expression at compile time; `()` is printed when system
                // time is not tracked, keeping the field list stable.
                match () {
                    #[cfg(feature = "system_time")]
                    () => &self.last_tick_sys_time,
                    #[cfg(not(feature = "system_time"))]
                    () => &(),
                },
            )
            .field("frontier_gap", &self.frontier_gap)
            .field("heap", &self.heap)
            .field("handle_tick_in_progress", &self.handle_tick_in_progress)
            .finish()
    }
}
/// A `Kernel` extension trait giving shorthand access to the timeout
/// subsystem's globals stored inside the kernel state.
trait KernelTimeoutGlobalsExt: Kernel {
    fn g_timeout() -> &'static TimeoutGlobals<Self, Self::TimeoutHeap>;
}

// Blanket implementation for every kernel type.
impl<T: Kernel> KernelTimeoutGlobalsExt for T {
    #[inline(always)]
    fn g_timeout() -> &'static TimeoutGlobals<Self, Self::TimeoutHeap> {
        &Self::state().timeout
    }
}
/// A 64-bit microsecond counter used for system time. Only needed when the
/// `system_time` feature is enabled.
#[cfg(feature = "system_time")]
type Time64 = u64;

/// A wrapping 32-bit microsecond counter used for event time.
pub(super) type Time32 = u32;

/// The atomic counterpart of [`Time32`].
type AtomicTime32 = AtomicU32;

/// Sentinel `Time32` value representing an invalid duration; rejected by
/// `debug_assert`s in `Timeout`'s expiration setters.
pub(super) const BAD_DURATION32: Time32 = u32::MAX;
/// Extract the raw microsecond representation (`Time64`) of a [`Time`] value.
#[cfg(feature = "system_time")]
#[inline]
fn time64_from_sys_time(sys_time: Time) -> Time64 {
    let micros: Time64 = sys_time.as_micros();
    micros
}
/// Build a [`Time`] value from its raw microsecond representation.
#[cfg(feature = "system_time")]
#[inline]
fn sys_time_from_time64(sys_time: Time64) -> Time {
    let result = Time::from_micros(sys_time);
    result
}
/// Convert `duration` to a [`Time32`].
///
/// Returns `Err(BadParamError::BadParam)` if `duration` is negative.
#[inline]
pub(super) const fn time32_from_duration(duration: Duration) -> Result<Time32, BadParamError> {
    // `try_i32_into_u32` fails exactly when the value is negative.
    match crate::utils::convert::try_i32_into_u32(duration.as_micros()) {
        Some(micros) => Ok(micros),
        None => Err(BadParamError::BadParam),
    }
}
/// Convert a non-positive `duration` to its magnitude as a [`Time32`].
///
/// Returns `Err(BadParamError::BadParam)` if `duration` is positive.
#[inline]
pub(super) fn time32_from_neg_duration(duration: Duration) -> Result<Time32, BadParamError> {
    let micros = duration.as_micros();
    if micros <= 0 {
        // Two's-complement negation: e.g., `-5` becomes `5`, and
        // `i32::MIN` becomes `0x8000_0000`.
        Ok((micros as u32).wrapping_neg())
    } else {
        Err(BadParamError::BadParam)
    }
}
/// Convert `duration` to a [`Time32`], wrapping modulo 2³² (negative values
/// map to their two's-complement bit pattern).
#[inline]
pub(super) fn wrapping_time32_from_duration(duration: Duration) -> Time32 {
    let micros = duration.as_micros();
    micros as Time32
}
/// Convert `duration` to a [`Time64`], wrapping modulo 2⁶⁴.
///
/// The intermediate `i64` step sign-extends, so a negative duration maps to
/// a large `Time64` value (two's-complement wrap-around) rather than a
/// zero-extended 32-bit pattern.
#[cfg(feature = "system_time")]
#[inline]
pub(super) fn wrapping_time64_from_duration(duration: Duration) -> Time64 {
    let micros64 = i64::from(duration.as_micros());
    micros64 as Time64
}
/// The portion of the wrapping 32-bit timeline reserved for user-visible
/// lateness (2²⁹ µs ≈ 9 minutes); clock adjustments are rejected when they
/// would exceed it.
const USER_HEADROOM: Time32 = 1 << 29;
/// An additional reserved portion of the timeline (2³⁰ µs) behind
/// `USER_HEADROOM`; together they define `critical_point`.
const HARD_HEADROOM: Time32 = 1 << 30;
/// [`USER_HEADROOM`] expressed as a `Duration`.
pub const TIME_USER_HEADROOM: Duration = Duration::from_micros(USER_HEADROOM as i32);
/// [`HARD_HEADROOM`] expressed as a `Duration`.
pub const TIME_HARD_HEADROOM: Duration = Duration::from_micros(HARD_HEADROOM as i32);
/// A timed event that can be registered in the kernel's timeout heap.
///
/// The mutable fields are atomics accessed with `Relaxed` ordering; the
/// methods that touch them take a CPU Lock witness parameter, which
/// serializes access.
pub(super) struct Timeout<System: Kernel> {
    /// The expiration time, a point on the wrapping 32-bit event timeline.
    at: AtomicTime32,
    /// The current index of this timeout inside the timeout heap, or
    /// `HEAP_POS_NONE` when it is not registered.
    heap_pos: AtomicUsize,
    /// The function to call when the timeout expires.
    callback: TimeoutFn<System>,
    /// An arbitrary value passed to `callback`.
    callback_param: usize,
    /// The heap stores raw pointers (`TimeoutRef`) to `Timeout`s, so a
    /// registered `Timeout` must not move — hence `!Unpin`.
    _pin: PhantomPinned,
    _phantom: core::marker::PhantomData<System>,
}
/// The callback type of a [`Timeout`]. Receives the registered parameter and
/// ownership of the CPU Lock guard, and must return a guard to continue with.
pub(super) type TimeoutFn<System> = fn(usize, CpuLockGuard<System>) -> CpuLockGuard<System>;

/// Sentinel for `Timeout::heap_pos` meaning "not registered in the heap".
const HEAP_POS_NONE: usize = usize::MAX;
impl<System: Kernel> Init for Timeout<System> {
    // The fields contain interior mutability (atomics); this `const` is only
    // an initialization template, hence the lint allowance.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self {
        at: Init::INIT,
        // NOTE(review): `Init::INIT` for `AtomicUsize` presumably yields `0`,
        // which is *not* `HEAP_POS_NONE` — confirm that a value created from
        // `INIT` is never dropped without its `heap_pos` being set first
        // (the `Drop` impl panics unless it equals `HEAP_POS_NONE`).
        heap_pos: Init::INIT,
        // The default callback is a no-op that just returns the lock guard.
        callback: |_, x| x,
        callback_param: Init::INIT,
        _pin: PhantomPinned,
        _phantom: core::marker::PhantomData,
    };
}
impl<System: Kernel> Drop for Timeout<System> {
    /// Guard against dropping a `Timeout` that is still registered in the
    /// timeout heap (the heap holds a raw pointer to it, which would dangle).
    #[inline]
    fn drop(&mut self) {
        // `get_mut` needs no atomic ordering: `&mut self` proves exclusive
        // access.
        let pos = *self.heap_pos.get_mut();
        assert!(pos == HEAP_POS_NONE, "timeout is still linked");
    }
}
impl<System: Kernel> fmt::Debug for Timeout<System> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Build the struct formatter step by step instead of chaining.
        let mut st = f.debug_struct("Timeout");
        st.field("at", &self.at);
        st.field("heap_pos", &self.heap_pos);
        st.field("callback", &self.callback);
        st.field("callback_param", &self.callback_param);
        st.finish()
    }
}
impl<System: Kernel> Timeout<System> {
    /// Construct an unlinked `Timeout` with the given callback and parameter.
    pub(super) const fn new(callback: TimeoutFn<System>, callback_param: usize) -> Self {
        Self {
            at: AtomicTime32::new(0),
            // Starts unregistered.
            heap_pos: AtomicUsize::new(HEAP_POS_NONE),
            callback,
            callback_param,
            _pin: PhantomPinned,
            _phantom: core::marker::PhantomData,
        }
    }

    /// Return `true` if this timeout is currently registered in the timeout
    /// heap. `Relaxed` suffices; the CPU Lock witness serializes access.
    pub(super) fn is_linked(&self, _lock: CpuLockGuardBorrowMut<'_, System>) -> bool {
        self.heap_pos.load(Ordering::Relaxed) != HEAP_POS_NONE
    }

    /// Set the expiration time to `duration_time32` microseconds after the
    /// current event time. `duration_time32` must not be `BAD_DURATION32`.
    pub(super) fn set_expiration_after(
        &self,
        lock: CpuLockGuardBorrowMut<'_, System>,
        duration_time32: Time32,
    ) {
        debug_assert_ne!(duration_time32, BAD_DURATION32);
        let current_time = current_time(lock);
        // Event time wraps modulo 2³².
        let at = current_time.wrapping_add(duration_time32);
        self.at.store(at, Ordering::Relaxed);
    }

    /// Move the expiration time forward by `duration_time32` microseconds,
    /// relative to its current value (not to the current time).
    pub(super) fn adjust_expiration(
        &self,
        _lock: CpuLockGuardBorrowMut<'_, System>,
        duration_time32: Time32,
    ) {
        debug_assert_ne!(duration_time32, BAD_DURATION32);
        let at = self
            .at
            .load(Ordering::Relaxed)
            .wrapping_add(duration_time32);
        self.at.store(at, Ordering::Relaxed);
    }

    /// Return the time remaining until this timeout expires, saturating at
    /// zero when it is already due.
    #[inline]
    pub(super) fn saturating_duration_until_timeout(
        &self,
        lock: CpuLockGuardBorrowMut<'_, System>,
    ) -> Time32 {
        saturating_duration_until_timeout(self, current_time(lock))
    }

    /// Read the raw expiration time (a point on the wrapping timeline).
    pub(super) fn at_raw(&self, _lock: CpuLockGuardBorrowMut<'_, System>) -> Time32 {
        self.at.load(Ordering::Relaxed)
    }

    /// Overwrite the raw expiration time without validation.
    pub(super) fn set_at_raw(&self, _lock: CpuLockGuardBorrowMut<'_, System>, value: Time32) {
        self.at.store(value, Ordering::Relaxed);
    }

    /// Builder-style counterpart of [`Self::set_at_raw`] usable in `const`
    /// contexts; performs no validation.
    pub(super) const fn with_at_raw(mut self, at: Time32) -> Self {
        self.at = AtomicTime32::new(at);
        self
    }

    /// Builder-style setter for the expiration time; asserts that `at` is
    /// not the `BAD_DURATION32` sentinel.
    pub(super) const fn with_expiration_at(mut self, at: Time32) -> Self {
        assert!(at != BAD_DURATION32, "`at` must be a valid duration");
        self.at = AtomicTime32::new(at);
        self
    }
}
/// An unchecked raw reference to a [`Timeout`], as stored in the timeout
/// heap.
#[doc(hidden)]
pub struct TimeoutRef<System: Kernel>(NonNull<Timeout<System>>);

// SAFETY: NOTE(review) — `NonNull` is neither `Send` nor `Sync` by default.
// These impls rely on (a) the pointee outliving every heap entry
// (`Timeout::drop` panics if dropped while still linked) and (b) all access
// being serialized by CPU Lock; confirm against the heap's usage sites.
unsafe impl<System: Kernel> Send for TimeoutRef<System> {}
unsafe impl<System: Kernel> Sync for TimeoutRef<System> {}
// Manual `Clone`/`Copy` impls: a `#[derive]` would add an unwanted
// `System: Clone`/`Copy` bound on the type parameter.
impl<System: Kernel> Clone for TimeoutRef<System> {
    fn clone(&self) -> Self {
        Self(self.0)
    }
}

impl<System: Kernel> Copy for TimeoutRef<System> {}
impl<System: Kernel> fmt::Debug for TimeoutRef<System> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Print as `TimeoutRef(<pointer>)`.
        let mut tuple = f.debug_tuple("TimeoutRef");
        tuple.field(&self.0);
        tuple.finish()
    }
}
/// The [`BinaryHeapCtx`] used for every timeout-heap operation.
struct TimeoutHeapCtx {
    /// The origin of the wrap-aware ordering; see [`critical_point`].
    critical_point: Time32,
}
/// Orders timeouts on the wrapping 32-bit timeline by measuring each
/// expiration time as an offset from `critical_point`.
impl<System: Kernel> BinaryHeapCtx<TimeoutRef<System>> for TimeoutHeapCtx {
    #[inline]
    fn lt(&mut self, x: &TimeoutRef<System>, y: &TimeoutRef<System>) -> bool {
        // SAFETY: NOTE(review) — assumes every `TimeoutRef` in the heap
        // points to a live, pinned `Timeout`; confirm against
        // `insert_timeout` / `remove_timeout`.
        let (x, y) = unsafe {
            (
                x.0.as_ref().at.load(Ordering::Relaxed),
                y.0.as_ref().at.load(Ordering::Relaxed),
            )
        };
        let critical_point = self.critical_point;
        // Compare distances past `critical_point` so the ordering stays
        // meaningful despite wrap-around of the 32-bit event time.
        x.wrapping_sub(critical_point) < y.wrapping_sub(critical_point)
    }

    /// Keep each timeout's cached heap index (`heap_pos`) up to date as the
    /// heap moves elements around.
    #[inline]
    fn on_move(&mut self, e: &mut TimeoutRef<System>, new_index: usize) {
        unsafe { e.0.as_ref() }
            .heap_pos
            .store(new_index, Ordering::Relaxed);
    }
}
impl<System: Kernel, TimeoutHeap> TimeoutGlobals<System, TimeoutHeap> {
    /// Initialize the timekeeping state during kernel startup: capture the
    /// initial hardware tick count and request the first tick.
    pub(super) fn init(&self, mut lock: CpuLockGuardBorrowMut<'_, System>) {
        // SAFETY: NOTE(review) — CPU Lock is held (witnessed by `lock`);
        // presumably that satisfies the timer driver's contract — confirm.
        self.last_tick_count
            .replace(&mut *lock.borrow_mut(), unsafe { System::tick_count() });
        // No timeouts exist yet; just keep the tick period within bounds.
        unsafe { System::pend_tick_after(System::MAX_TIMEOUT) };
    }
}
/// Get the current system time.
///
/// Must be called from a task context; fails with the corresponding
/// `TimeError` otherwise or when CPU Lock cannot be acquired.
#[cfg(feature = "system_time")]
pub(super) fn system_time<System: Kernel>() -> Result<Time, TimeError> {
    expect_task_context::<System>()?;
    let mut lock = lock_cpu::<System>()?;
    // Current system time = system time at the last tick processing
    // + ticks elapsed since then (64-bit wrapping arithmetic).
    let (duration_since_last_tick, _) = duration_since_last_tick(lock.borrow_mut());
    let last_tick_sys_time = System::g_timeout()
        .last_tick_sys_time
        .get(&*lock.borrow_mut());
    let cur_sys_time = last_tick_sys_time.wrapping_add(duration_since_last_tick as Time64);
    Ok(sys_time_from_time64(cur_sys_time))
}
/// Set the system time to `new_sys_time`.
///
/// Must be called from a task context. When the `system_time` feature is
/// disabled, this only validates the context and acquires CPU Lock so the
/// error behavior matches the enabled configuration.
pub(super) fn set_system_time<System: Kernel>(new_sys_time: Time) -> Result<(), TimeError> {
    expect_task_context::<System>()?;
    // `match ()` with `cfg`-gated arms selects exactly one arm at compile
    // time.
    match () {
        #[cfg(feature = "system_time")]
        () => {
            let mut lock = lock_cpu::<System>()?;
            // Back-date `last_tick_sys_time` so that "time at last tick plus
            // ticks elapsed since then" equals `new_sys_time` right now.
            let (duration_since_last_tick, _) = duration_since_last_tick(lock.borrow_mut());
            let new_last_tick_sys_time =
                time64_from_sys_time(new_sys_time).wrapping_sub(duration_since_last_tick as Time64);
            System::g_timeout()
                .last_tick_sys_time
                .replace(&mut *lock.borrow_mut(), new_last_tick_sys_time);
        }
        #[cfg(not(feature = "system_time"))]
        () => {
            // System time is not tracked; keep the parameter "used" and
            // perform the lock round-trip for consistent error reporting.
            let _ = new_sys_time;
            lock_cpu::<System>()?;
        }
    }
    Ok(())
}
/// Adjust the kernel's event time (and, when tracked, the system time) by
/// `delta` microseconds.
///
/// Fails with `AdjustTimeError::BadObjectState` when the adjustment would
/// exhaust the allowed headroom:
/// - a negative `delta` may not push `frontier_gap` beyond `USER_HEADROOM`;
/// - a positive `delta` may not push the earliest timeout past the point
///   where it has exhausted `USER_HEADROOM`.
pub(super) fn adjust_system_and_event_time<System: Kernel>(
    delta: Duration,
) -> Result<(), AdjustTimeError> {
    let mut lock = lock_cpu::<System>()?;
    let g_timeout = System::g_timeout();

    // Fold the ticks elapsed since the last tick processing into
    // `last_tick_*` so the adjustment below applies to up-to-date state.
    mark_tick(lock.borrow_mut());

    if delta.is_negative() {
        // `delta` is negative here, so this conversion cannot fail.
        let delta_abs = time32_from_neg_duration(delta).unwrap();
        // Moving the clock backward widens the frontier gap, which must stay
        // within `USER_HEADROOM`.
        let new_frontier_gap = g_timeout.frontier_gap.get(&*lock) + delta_abs;
        if new_frontier_gap > USER_HEADROOM {
            return Err(AdjustTimeError::BadObjectState);
        }
        g_timeout.frontier_gap.replace(&mut *lock, new_frontier_gap);
    } else if delta.is_positive() {
        // `delta` is positive here, so this conversion cannot fail.
        let delta_abs = time32_from_duration(delta).unwrap();
        // Moving the clock forward must not carry the earliest timeout past
        // its user-headroom boundary.
        if let Some(&timeout_ref) = g_timeout.heap.read(&*lock).get(0) {
            // SAFETY: NOTE(review) — heap entries are assumed to point to
            // live `Timeout`s; confirm against `insert_timeout`.
            let timeout = unsafe { timeout_ref.0.as_ref() };
            let current_time = g_timeout.last_tick_time.get(&*lock);
            let duration =
                saturating_duration_before_timeout_exhausting_user_headroom(timeout, current_time);
            if duration < delta_abs {
                return Err(AdjustTimeError::BadObjectState);
            }
        }
        // Moving forward narrows the frontier gap (saturating at zero).
        g_timeout
            .frontier_gap
            .replace_with(&mut *lock, |old_value| old_value.saturating_sub(delta_abs));
    } else {
        // `delta == 0` — nothing to adjust.
        return Ok(());
    }

    // Apply `delta` to the event time (wrapping 32-bit arithmetic)…
    let delta32 = wrapping_time32_from_duration(delta);
    g_timeout
        .last_tick_time
        .replace_with(&mut *lock, |old_value| old_value.wrapping_add(delta32));

    // …and, when tracked, to the 64-bit system time.
    #[cfg(feature = "system_time")]
    {
        let delta64 = wrapping_time64_from_duration(delta);
        g_timeout
            .last_tick_sys_time
            .replace_with(&mut *lock, |old_value| old_value.wrapping_add(delta64));
    }

    // The earliest timeout's remaining duration changed; reschedule the
    // next tick accordingly.
    let current_time = g_timeout.last_tick_time.get(&*lock);
    pend_next_tick(lock.borrow_mut(), current_time);

    Ok(())
}
/// Measure the time elapsed since the last tick processing (`mark_tick`),
/// along with the current raw hardware tick count.
///
/// Returns `(elapsed, tick_count)`.
#[inline]
fn duration_since_last_tick<System: Kernel>(
    mut lock: CpuLockGuardBorrowMut<'_, System>,
) -> (Time32, Time32) {
    // SAFETY: NOTE(review) — CPU Lock is held (witnessed by `lock`);
    // presumably that satisfies `tick_count`'s contract — confirm.
    let tick_count = unsafe { System::tick_count() };
    let last_tick_count = System::g_timeout().last_tick_count.get(&*lock.borrow_mut());
    let elapsed = if System::MAX_TICK_COUNT == UTicks::MAX || tick_count >= last_tick_count {
        // No wrap-around occurred, or the counter wraps at the full 32-bit
        // range — in either case plain wrapping subtraction is exact.
        tick_count.wrapping_sub(last_tick_count)
    } else {
        // The counter wrapped at `MAX_TICK_COUNT` (< `UTicks::MAX`).
        // `wrapping_sub` overshoots by the unused portion of the 32-bit
        // range, `UTicks::MAX - MAX_TICK_COUNT`; subtract it back out.
        tick_count.wrapping_sub(last_tick_count) - (UTicks::MAX - System::MAX_TICK_COUNT)
    };
    (elapsed, tick_count)
}
/// Fold the time elapsed since the last call into the timekeeping state:
/// advance `last_tick_count`, `last_tick_time`, and (when tracked)
/// `last_tick_sys_time`, and shrink `frontier_gap` accordingly.
fn mark_tick<System: Kernel>(mut lock: CpuLockGuardBorrowMut<'_, System>) {
    let (duration_since_last_tick, tick_count) =
        duration_since_last_tick::<System>(lock.borrow_mut());
    let g_timeout = System::g_timeout();
    g_timeout.last_tick_count.replace(&mut *lock, tick_count);
    // Event time is a wrapping 32-bit counter.
    g_timeout
        .last_tick_time
        .replace_with(&mut *lock, |old_value| {
            old_value.wrapping_add(duration_since_last_tick)
        });
    #[cfg(feature = "system_time")]
    g_timeout
        .last_tick_sys_time
        .replace_with(&mut *lock, |old_value| {
            old_value.wrapping_add(duration_since_last_tick as Time64)
        });
    // Advancing time narrows the frontier gap (saturating at zero).
    g_timeout
        .frontier_gap
        .replace_with(&mut *lock, |old_value| {
            old_value.saturating_sub(duration_since_last_tick)
        });
}
/// Process a timer tick: bring the timekeeping state up to date, run the
/// callbacks of every expired timeout, and schedule the next tick.
///
/// Panics if CPU Lock cannot be acquired. Ends by releasing CPU Lock and
/// checking for preemption.
#[inline]
pub(super) fn handle_tick<System: Kernel>() {
    let mut lock = lock_cpu::<System>().unwrap();
    mark_tick(lock.borrow_mut());
    let g_timeout = System::g_timeout();
    let current_time = g_timeout.last_tick_time.get(&*lock);
    let critical_point = critical_point(current_time);
    // Suppress the redundant `pend_next_tick` that `insert_timeout` /
    // `remove_timeout` would otherwise issue if a callback re-arms a
    // timeout; we re-pend once at the end instead.
    g_timeout.handle_tick_in_progress.replace(&mut *lock, true);
    // Drain expired timeouts from the top (index 0) of the heap.
    while let Some(&timeout_ref) = g_timeout.heap.read(&*lock).get(0) {
        // SAFETY: NOTE(review) — heap entries are assumed to point to live,
        // pinned `Timeout`s; confirm against `insert_timeout`.
        let timeout = unsafe { &*timeout_ref.0.as_ptr() };
        let remaining = saturating_duration_until_timeout(timeout, current_time);
        if remaining > 0 {
            // The earliest timeout is not due yet; stop draining.
            break;
        }
        // Copy out the callback before unlinking (both fields are `Copy`).
        let Timeout {
            callback,
            callback_param,
            ..
        } = *timeout;
        // Mark the timeout as unlinked *before* removing it so the callback
        // observes it as not registered.
        debug_assert_eq!(timeout.heap_pos.load(Ordering::Relaxed), 0);
        timeout.heap_pos.store(HEAP_POS_NONE, Ordering::Relaxed);
        g_timeout
            .heap
            .write(&mut *lock)
            .heap_remove(0, TimeoutHeapCtx { critical_point });
        // The callback takes ownership of the CPU Lock guard and must
        // return one; continue with whatever guard it hands back.
        lock = callback(callback_param, lock);
    }
    g_timeout.handle_tick_in_progress.replace(&mut *lock, false);
    // Schedule the next tick based on the (possibly new) earliest timeout.
    pend_next_tick(lock.borrow_mut(), current_time);
    task::unlock_cpu_and_check_preemption(lock);
}
/// Compute the current event time: `last_tick_time` plus the time elapsed
/// since the last tick processing, wrapping modulo 2³².
fn current_time<System: Kernel>(mut lock: CpuLockGuardBorrowMut<'_, System>) -> Time32 {
    let (elapsed, _) = duration_since_last_tick::<System>(lock.borrow_mut());
    let base = System::g_timeout().last_tick_time.get(&*lock);
    base.wrapping_add(elapsed)
}
/// Request the next tick from the timer driver: after the earliest timeout's
/// remaining duration, capped at `System::MAX_TIMEOUT`, or immediately if a
/// timeout is already due.
fn pend_next_tick<System: Kernel>(lock: CpuLockGuardBorrowMut<'_, System>, current_time: Time32) {
    let mut delay = System::MAX_TIMEOUT;
    let g_timeout = System::g_timeout();
    if let Some(&timeout_ref) = g_timeout.heap.read(&*lock).get(0) {
        // SAFETY: NOTE(review) — heap entries are assumed to point to live
        // `Timeout`s; confirm against `insert_timeout`.
        let timeout = unsafe { timeout_ref.0.as_ref() };
        delay = delay.min(saturating_duration_until_timeout(timeout, current_time));
    }
    // SAFETY: NOTE(review) — CPU Lock is held (witnessed by `lock`);
    // presumably that satisfies the timer driver's contract — confirm.
    unsafe {
        if delay == 0 {
            // Already due — request an immediate tick.
            System::pend_tick();
        } else {
            System::pend_tick_after(delay);
        }
    }
}
/// The origin used for wrap-aware comparisons on the 32-bit timeline:
/// the point `HARD_HEADROOM + USER_HEADROOM` behind `current_time`.
#[inline]
fn critical_point(current_time: Time32) -> Time32 {
    let total_headroom = HARD_HEADROOM + USER_HEADROOM;
    current_time.wrapping_sub(total_headroom)
}
/// Compute the time remaining from `current_time` until `timeout` expires,
/// saturating at zero.
///
/// Works on the wrapping 32-bit timeline: `at` is measured as an offset
/// from `critical_point` (which trails `current_time` by
/// `HARD_HEADROOM + USER_HEADROOM`), then that trailing distance is removed
/// again. A timeout whose `at` already passed yields zero.
fn saturating_duration_until_timeout<System: Kernel>(
    timeout: &Timeout<System>,
    current_time: Time32,
) -> Time32 {
    let critical_point = critical_point(current_time);
    let duration_until_violating_critical_point = timeout
        .at
        .load(Ordering::Relaxed)
        .wrapping_sub(critical_point);
    duration_until_violating_critical_point.saturating_sub(HARD_HEADROOM + USER_HEADROOM)
}
/// Compute how far `current_time` may advance before `timeout` becomes
/// `USER_HEADROOM` overdue (i.e., exhausts its user headroom), saturating
/// at zero.
///
/// Arithmetically this equals "time until the timeout" plus `USER_HEADROOM`.
/// Used by `adjust_system_and_event_time` to bound forward clock adjustment.
fn saturating_duration_before_timeout_exhausting_user_headroom<System: Kernel>(
    timeout: &Timeout<System>,
    current_time: Time32,
) -> Time32 {
    let critical_point = critical_point(current_time);
    let duration_until_violating_critical_point = timeout
        .at
        .load(Ordering::Relaxed)
        .wrapping_sub(critical_point);
    duration_until_violating_critical_point.saturating_sub(HARD_HEADROOM)
}
/// Register `timeout` in the timeout heap and reschedule the next tick.
///
/// Panics if `timeout` is already registered. The `Pin` receiver guarantees
/// the `Timeout` will not move while the heap holds a raw pointer to it.
pub(super) fn insert_timeout<System: Kernel>(
    mut lock: CpuLockGuardBorrowMut<'_, System>,
    timeout: Pin<&Timeout<System>>,
) {
    assert_eq!(
        timeout.heap_pos.load(Ordering::Relaxed),
        HEAP_POS_NONE,
        "timeout is already registered",
    );
    let current_time = current_time(lock.borrow_mut());
    let critical_point = critical_point(current_time);
    // `heap_push` reports every placement through `on_move`, which records
    // the element's index in `heap_pos` (checked by the assertion below).
    let pos = System::g_timeout().heap.write(&mut *lock).heap_push(
        TimeoutRef((&*timeout).into()),
        TimeoutHeapCtx { critical_point },
    );
    debug_assert_eq!(timeout.heap_pos.load(Ordering::Relaxed), pos);
    // `handle_tick` re-pends the tick once at its end, so skip the
    // redundant request while it is in progress.
    if !System::g_timeout().handle_tick_in_progress.get(&*lock) {
        pend_next_tick(lock, current_time);
    }
}
/// Unregister `timeout` from the timeout heap, if it is registered.
///
/// Safe to call on an unlinked timeout; in that case only the (already
/// correct) `heap_pos` sentinel is rewritten.
#[inline]
pub(super) fn remove_timeout<System: Kernel>(
    lock: CpuLockGuardBorrowMut<'_, System>,
    timeout: &Timeout<System>,
) {
    remove_timeout_inner(lock, timeout);
    // Mark the timeout as unlinked regardless of whether it was in the heap.
    timeout.heap_pos.store(HEAP_POS_NONE, Ordering::Relaxed);
}
/// Remove `timeout` from the timeout heap and reschedule the next tick.
///
/// Returns early (the removal is a no-op) when `timeout` is not linked,
/// i.e., its `heap_pos` is `HEAP_POS_NONE`.
fn remove_timeout_inner<System: Kernel>(
    mut lock: CpuLockGuardBorrowMut<'_, System>,
    timeout: &Timeout<System>,
) {
    let current_time = current_time(lock.borrow_mut());
    let critical_point = critical_point(current_time);
    let heap_pos = timeout.heap_pos.load(Ordering::Relaxed);
    // `heap_remove` returns `None` for an out-of-range position, which here
    // should only happen for the `HEAP_POS_NONE` sentinel (asserted below).
    let timeout_ref = System::g_timeout()
        .heap
        .write(&mut *lock)
        .heap_remove(heap_pos, TimeoutHeapCtx { critical_point });
    if timeout_ref.is_none() {
        debug_assert_eq!(heap_pos, HEAP_POS_NONE);
        return;
    }
    // The removed element must be the very timeout whose index we looked up.
    debug_assert_eq!(
        timeout_ref.unwrap().0.as_ptr() as *const _,
        timeout as *const _
    );
    // `handle_tick` re-pends the tick once at its end, so skip the
    // redundant request while it is in progress.
    if !System::g_timeout().handle_tick_in_progress.get(&*lock) {
        pend_next_tick(lock, current_time);
    }
}
/// An RAII guard that unregisters `timeout` from the timeout heap when
/// dropped, using the borrowed CPU Lock guard.
pub(super) struct TimeoutGuard<'a, 'b, System: Kernel> {
    /// The timeout to unregister on drop.
    pub(super) timeout: Pin<&'a Timeout<System>>,
    /// The CPU Lock witness used for the removal.
    pub(super) lock: CpuLockGuardBorrowMut<'b, System>,
}
impl<'a, 'b, System: Kernel> Drop for TimeoutGuard<'a, 'b, System> {
    /// Unregister the timeout; safe even if it was never (or is no longer)
    /// linked, since `remove_timeout` tolerates unlinked timeouts.
    #[inline]
    fn drop(&mut self) {
        remove_timeout(self.lock.borrow_mut(), &self.timeout);
    }
}