use core::{fmt, marker::PhantomPinned, pin::Pin, ptr::NonNull};
use r3_core::{
kernel::{AdjustTimeError, TimeError},
time::{Duration, Time},
utils::Init,
};
use crate::{
error::BadParamError,
klock::{lock_cpu, CpuLockCell, CpuLockGuard, CpuLockTokenRefMut},
state::expect_task_context,
task,
utils::{
binary_heap::{BinaryHeap, BinaryHeapCtx},
panicking::abort_on_unwind,
},
KernelTraits, UTicks,
};
// The cfg predicate set by `rustc --test` is `test`, not `tests`;
// `#[cfg(tests)]` is never true, so the unit-test module was silently
// excluded from every build (including test builds).
#[cfg(test)]
mod tests;
/// Zero-sized tag distinguishing the timeout-propagation singleton token.
struct TimeoutPropTag;
/// The singleton token that unlocks every [`TimeoutPropCell`]. The sole
/// instance lives in [`TimeoutHeapAndPropToken`], so borrowing that structure
/// (through CPU Lock) grants access to the per-`Timeout` cells.
type TimeoutPropToken = tokenlock::UnsyncSingletonToken<TimeoutPropTag>;
/// A shared borrow of [`TimeoutPropToken`].
type TimeoutPropTokenRef<'a> = tokenlock::UnsyncSingletonTokenRef<'a, TimeoutPropTag>;
/// An exclusive borrow of [`TimeoutPropToken`].
type TimeoutPropTokenRefMut<'a> = tokenlock::UnsyncSingletonTokenRefMut<'a, TimeoutPropTag>;
/// Zero-sized keyhole naming the token a [`TimeoutPropCell`] is locked by.
type TimeoutPropKeyhole = tokenlock::SingletonTokenId<TimeoutPropTag>;
/// A cell whose contents may only be accessed by presenting
/// [`TimeoutPropToken`] (no runtime borrow checking or atomics involved).
type TimeoutPropCell<T> = tokenlock::UnsyncTokenLock<T, TimeoutPropKeyhole>;
/// Kernel-global state of the timeout subsystem. All fields are guarded by
/// CPU Lock ([`CpuLockCell`]).
pub(super) struct TimeoutGlobals<Traits, TimeoutHeap: 'static> {
    /// The hardware tick count captured by the most recent `mark_tick` (or
    /// `init`).
    last_tick_count: CpuLockCell<Traits, UTicks>,
    /// The wrapping 32-bit event time corresponding to `last_tick_count`.
    last_tick_time: CpuLockCell<Traits, Time32>,
    /// The 64-bit system time corresponding to `last_tick_count`.
    #[cfg(feature = "system_time")]
    last_tick_sys_time: CpuLockCell<Traits, Time64>,
    /// Headroom consumed by negative time adjustments. Grown by
    /// `adjust_system_and_event_time` (capped at `USER_HEADROOM`) and decayed
    /// by `mark_tick` as real time elapses.
    frontier_gap: CpuLockCell<Traits, Time32>,
    /// The timeout min-heap bundled with the propagation token — see
    /// [`TimeoutHeapAndPropToken`].
    heap_and_prop_token: CpuLockCell<Traits, TimeoutHeapAndPropToken<TimeoutHeap>>,
    /// `true` while `handle_tick` is draining expired timeouts; suppresses
    /// redundant `pend_next_tick` calls from `insert_timeout`/`remove_timeout`
    /// invoked by timeout callbacks.
    handle_tick_in_progress: CpuLockCell<Traits, bool>,
}
/// Bundles the timeout heap with the singleton token that unlocks the
/// `TimeoutPropCell`s inside the `Timeout` objects the heap points to, so a
/// single CPU-Lock-guarded access yields both coherently.
#[derive(Debug)]
struct TimeoutHeapAndPropToken<TimeoutHeap: 'static> {
    heap: TimeoutHeap,
    /// The sole `TimeoutPropToken` instance — see `Init for TimeoutGlobals`.
    prop_token: TimeoutPropToken,
}
impl<Traits, TimeoutHeap: Init + 'static> Init for TimeoutGlobals<Traits, TimeoutHeap> {
    const INIT: Self = Self {
        last_tick_count: Init::INIT,
        last_tick_time: Init::INIT,
        #[cfg(feature = "system_time")]
        last_tick_sys_time: Init::INIT,
        frontier_gap: Init::INIT,
        heap_and_prop_token: CpuLockCell::new(TimeoutHeapAndPropToken {
            heap: Init::INIT,
            // SAFETY: this is the only place in this file where a
            // `TimeoutPropToken` is created. NOTE(review): singleton-token
            // soundness additionally relies on `TimeoutGlobals::INIT` being
            // instantiated exactly once per kernel instance — confirm at the
            // definition of the kernel state.
            prop_token: unsafe { TimeoutPropToken::new_unchecked() },
        }),
        handle_tick_in_progress: Init::INIT,
    };
}
// Hand-written `Debug` impl: `last_tick_sys_time` only exists under the
// `system_time` feature, so a derive would not compile in both configurations.
impl<Traits: KernelTraits, TimeoutHeap: fmt::Debug> fmt::Debug
    for TimeoutGlobals<Traits, TimeoutHeap>
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("TimeoutGlobals")
            .field("last_tick_count", &self.last_tick_count)
            .field("last_tick_time", &self.last_tick_time)
            .field(
                "last_tick_sys_time",
                // `match ()` with cfg'd arms selects exactly one expression at
                // compile time; `&()` is the placeholder when the field is
                // compiled out.
                match () {
                    #[cfg(feature = "system_time")]
                    () => &self.last_tick_sys_time,
                    #[cfg(not(feature = "system_time"))]
                    () => &(),
                },
            )
            .field("frontier_gap", &self.frontier_gap)
            .field("heap_and_prop_token", &self.heap_and_prop_token)
            .field("handle_tick_in_progress", &self.handle_tick_in_progress)
            .finish()
    }
}
/// Convenience extension so the timeout code can write
/// `Traits::g_timeout()` to reach the subsystem's global state.
trait KernelTimeoutGlobalsExt: KernelTraits {
    /// Shorthand for `Self::state().timeout`.
    fn g_timeout() -> &'static TimeoutGlobals<Self, Self::TimeoutHeap>;
}
impl<T: KernelTraits> KernelTimeoutGlobalsExt for T {
    #[inline(always)]
    fn g_timeout() -> &'static TimeoutGlobals<Self, Self::TimeoutHeap> {
        &Self::state().timeout
    }
}
/// 64-bit system time in microseconds (see `time64_from_sys_time`).
#[cfg(feature = "system_time")]
type Time64 = u64;
/// Wrapping 32-bit event time / duration, in microseconds.
pub(super) type Time32 = u32;
/// Sentinel for an invalid 32-bit duration. `time32_from_duration` can never
/// legitimately produce this value (`i32` microseconds max out below it).
pub(super) const BAD_DURATION32: Time32 = u32::MAX;
/// Convert [`Time`] to the internal 64-bit microsecond representation.
#[inline]
#[cfg(feature = "system_time")]
fn time64_from_sys_time(sys_time: Time) -> Time64 {
    sys_time.as_micros()
}
/// Convert the internal 64-bit microsecond representation back to [`Time`].
#[inline]
#[cfg(feature = "system_time")]
fn sys_time_from_time64(sys_time: Time64) -> Time {
    Time::from_micros(sys_time)
}
/// Convert [`Duration`] to [`Time32`], rejecting negative durations.
///
/// `Duration::as_micros` yields a signed microsecond count (cf. the signed
/// handling in `time32_from_neg_duration`); every non-negative value fits in
/// `Time32`, so conversion only fails for negative inputs. In particular the
/// result can never be [`BAD_DURATION32`] (`u32::MAX`).
///
/// Written with an explicit sign check instead of `try_into` because trait
/// method calls (`TryInto::try_into`) are not permitted inside a stable
/// `const fn`, so the previous form could not compile.
#[inline]
pub(super) const fn time32_from_duration(duration: Duration) -> Result<Time32, BadParamError> {
    let micros = duration.as_micros();
    if micros >= 0 {
        // Non-negative `i32` → `u32` is lossless.
        Ok(micros as Time32)
    } else {
        Err(BadParamError::BadParam)
    }
}
/// Convert a non-positive [`Duration`] to its magnitude as a [`Time32`].
///
/// Positive durations are rejected with [`BadParamError::BadParam`]; zero and
/// negative durations yield their absolute value in microseconds.
#[inline]
pub(super) fn time32_from_neg_duration(duration: Duration) -> Result<Time32, BadParamError> {
    let micros = duration.as_micros();
    if micros <= 0 {
        // Wrapping cast followed by two's-complement negation produces the
        // magnitude; this is also correct for `i32::MIN`.
        Ok((micros as u32).wrapping_neg())
    } else {
        Err(BadParamError::BadParam)
    }
}
/// Convert [`Duration`] to [`Time32`] modulo 2³², preserving the signed
/// offset in wrapping arithmetic (negative durations become large values).
#[inline]
pub(super) fn wrapping_time32_from_duration(duration: Duration) -> Time32 {
    duration.as_micros() as Time32
}
/// Convert [`Duration`] to [`Time64`] modulo 2⁶⁴. The intermediate `as i64`
/// sign-extends first so negative durations wrap correctly in 64 bits.
#[inline]
#[cfg(feature = "system_time")]
pub(super) fn wrapping_time64_from_duration(duration: Duration) -> Time64 {
    duration.as_micros() as i64 as Time64
}
/// Headroom (µs, 2²⁹ ≈ 9 min) reserved for user-initiated clock adjustment;
/// see `adjust_system_and_event_time` and `critical_point`.
const USER_HEADROOM: Time32 = 1 << 29;
/// Hard headroom (µs, 2³⁰ ≈ 18 min) that must always remain between the
/// critical point and any pending timeout; see
/// `saturating_duration_until_timeout`.
const HARD_HEADROOM: Time32 = 1 << 30;
/// [`USER_HEADROOM`] exposed as a `Duration`.
pub const TIME_USER_HEADROOM: Duration = Duration::from_micros(USER_HEADROOM as i32);
/// [`HARD_HEADROOM`] exposed as a `Duration`.
pub const TIME_HARD_HEADROOM: Duration = Duration::from_micros(HARD_HEADROOM as i32);
/// A single schedulable timeout. Registered in the global timeout heap by
/// `insert_timeout`; its `callback` runs from `handle_tick` when it expires.
pub(super) struct Timeout<Traits: KernelTraits> {
    /// Wrapping 32-bit event time at which the timeout fires.
    at: TimeoutPropCell<u32>,
    /// Current index within the timeout heap, or [`HEAP_POS_NONE`] when not
    /// linked. Maintained by `TimeoutHeapCtx::on_move`.
    heap_pos: TimeoutPropCell<usize>,
    /// Invoked on expiration with `callback_param`; receives and returns the
    /// CPU Lock guard.
    callback: TimeoutFn<Traits>,
    callback_param: usize,
    // The heap stores raw `NonNull` pointers to this object (`TimeoutRef`),
    // so it must stay at a stable address while linked.
    _pin: PhantomPinned,
    _phantom: core::marker::PhantomData<Traits>,
}
/// The signature of a timeout expiration callback.
pub(super) type TimeoutFn<Traits> = fn(usize, CpuLockGuard<Traits>) -> CpuLockGuard<Traits>;
/// `heap_pos` value meaning "not currently in the heap".
const HEAP_POS_NONE: usize = usize::MAX;
impl<Traits: KernelTraits> Init for Timeout<Traits> {
    /// An unlinked timeout with a no-op callback.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self {
        at: Init::INIT,
        heap_pos: Init::INIT,
        // No-op callback: just hands the CPU Lock guard back.
        callback: |_, x| x,
        callback_param: Init::INIT,
        _pin: PhantomPinned,
        _phantom: core::marker::PhantomData,
    };
}
impl<Traits: KernelTraits> Drop for Timeout<Traits> {
    /// Panic (escalated to an abort by `abort_on_unwind`) if the timeout is
    /// dropped while still registered in the heap — the heap would otherwise
    /// be left holding a dangling pointer.
    #[inline]
    fn drop(&mut self) {
        abort_on_unwind(|| {
            let pos = *self.heap_pos.get_mut();
            assert!(pos == HEAP_POS_NONE, "timeout is still linked");
        })
    }
}
// Hand-written so the `_pin`/`_phantom` marker fields are omitted from the
// debug output.
impl<Traits: KernelTraits> fmt::Debug for Timeout<Traits> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Timeout")
            .field("at", &self.at)
            .field("heap_pos", &self.heap_pos)
            .field("callback", &self.callback)
            .field("callback_param", &self.callback_param)
            .finish()
    }
}
impl<Traits: KernelTraits> Timeout<Traits> {
    /// Construct an unlinked timeout with the given expiration callback.
    pub(super) const fn new(callback: TimeoutFn<Traits>, callback_param: usize) -> Self {
        Self {
            at: TimeoutPropCell::new(Init::INIT, 0),
            heap_pos: TimeoutPropCell::new(Init::INIT, HEAP_POS_NONE),
            callback,
            callback_param,
            _pin: PhantomPinned,
            _phantom: core::marker::PhantomData,
        }
    }
    /// Return whether the timeout is currently registered in the heap.
    pub(super) fn is_linked(&self, lock: CpuLockTokenRefMut<'_, Traits>) -> bool {
        let prop_token = &Traits::g_timeout()
            .heap_and_prop_token
            .read(&*lock)
            .prop_token;
        *self.heap_pos.read(prop_token) != HEAP_POS_NONE
    }
    /// Set the expiration time to "now + `duration_time32`".
    ///
    /// Does not (re)insert the timeout into the heap; the caller does that
    /// separately via `insert_timeout`.
    pub(super) fn set_expiration_after(
        &self,
        mut lock: CpuLockTokenRefMut<'_, Traits>,
        duration_time32: Time32,
    ) {
        debug_assert_ne!(duration_time32, BAD_DURATION32);
        let current_time = current_time(lock.borrow_mut());
        let at = current_time.wrapping_add(duration_time32);
        let prop_token = &mut Traits::g_timeout()
            .heap_and_prop_token
            .write(&mut *lock)
            .prop_token;
        *self.at.write(prop_token) = at;
    }
    /// Shift the expiration time forward by `duration_time32` (wrapping).
    pub(super) fn adjust_expiration(
        &self,
        mut lock: CpuLockTokenRefMut<'_, Traits>,
        duration_time32: Time32,
    ) {
        debug_assert_ne!(duration_time32, BAD_DURATION32);
        let prop_token = &mut Traits::g_timeout()
            .heap_and_prop_token
            .write(&mut *lock)
            .prop_token;
        self.at
            .replace_with(prop_token, |x| x.wrapping_add(duration_time32));
    }
    /// Time remaining until this timeout fires, saturated to zero when it is
    /// already due (see the free function of the same name).
    #[inline]
    pub(super) fn saturating_duration_until_timeout(
        &self,
        mut lock: CpuLockTokenRefMut<'_, Traits>,
    ) -> Time32 {
        let current_time = current_time(lock.borrow_mut());
        let prop_token = &Traits::g_timeout()
            .heap_and_prop_token
            .read(&*lock)
            .prop_token;
        saturating_duration_until_timeout(self, current_time, prop_token.borrow())
    }
    /// Read the raw (wrapping absolute) expiration time.
    pub(super) fn at_raw(&self, lock: CpuLockTokenRefMut<'_, Traits>) -> Time32 {
        let prop_token = &Traits::g_timeout()
            .heap_and_prop_token
            .read(&*lock)
            .prop_token;
        *self.at.read(prop_token)
    }
    /// Overwrite the raw (wrapping absolute) expiration time.
    pub(super) fn set_at_raw(&self, mut lock: CpuLockTokenRefMut<'_, Traits>, value: Time32) {
        let prop_token = &mut Traits::g_timeout()
            .heap_and_prop_token
            .write(&mut *lock)
            .prop_token;
        *self.at.write(prop_token) = value;
    }
    /// Builder-style setter for the raw expiration time (no validation).
    pub(super) const fn with_at_raw(mut self, at: Time32) -> Self {
        self.at = TimeoutPropCell::new(Init::INIT, at);
        self
    }
    /// Builder-style setter that additionally rejects the sentinel
    /// [`BAD_DURATION32`] at compile/run time.
    pub(super) const fn with_expiration_at(self, at: Time32) -> Self {
        assert!(at != BAD_DURATION32, "`at` must be a valid duration");
        self.with_at_raw(at)
    }
}
/// A raw, copyable pointer to a [`Timeout`], stored as the element type of
/// the timeout heap.
#[doc(hidden)]
pub struct TimeoutRef<Traits: KernelTraits>(NonNull<Timeout<Traits>>);
// SAFETY: `TimeoutRef` is only a pointer; in this module the pointee is only
// dereferenced while CPU Lock is held (see `TimeoutHeapCtx`, `handle_tick`,
// `pend_next_tick`, …), which serializes access. NOTE(review): soundness
// rests on those call-site invariants (pointee alive and pinned while
// linked) — confirm against `insert_timeout`'s callers.
unsafe impl<Traits: KernelTraits> Send for TimeoutRef<Traits> {}
// SAFETY: see the `Send` impl above; the same reasoning applies.
unsafe impl<Traits: KernelTraits> Sync for TimeoutRef<Traits> {}
impl<Traits: KernelTraits> Clone for TimeoutRef<Traits> {
    fn clone(&self) -> Self {
        Self(self.0)
    }
}
impl<Traits: KernelTraits> Copy for TimeoutRef<Traits> {}
impl<Traits: KernelTraits> fmt::Debug for TimeoutRef<Traits> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("TimeoutRef").field(&self.0).finish()
    }
}
/// Per-operation context handed to the binary heap: supplies the ordering
/// origin (`critical_point`) and the token needed to touch `Timeout` cells.
struct TimeoutHeapCtx<'a> {
    /// Origin for the wrapping comparison in `lt`; event times are ordered by
    /// their distance from this point.
    critical_point: Time32,
    prop_token: TimeoutPropTokenRefMut<'a>,
}
impl<Traits: KernelTraits> BinaryHeapCtx<TimeoutRef<Traits>> for TimeoutHeapCtx<'_> {
    #[inline]
    fn lt(&mut self, x: &TimeoutRef<Traits>, y: &TimeoutRef<Traits>) -> bool {
        // SAFETY: `x` and `y` are elements of the timeout heap, which is only
        // populated by `insert_timeout` with pointers to live, pinned
        // `Timeout`s — NOTE(review): confirm that invariant at the insertion
        // sites.
        let (x, y) = unsafe {
            (
                x.0.as_ref().at.read(&*self.prop_token),
                y.0.as_ref().at.read(&*self.prop_token),
            )
        };
        let critical_point = self.critical_point;
        // Wrapping comparison: order by distance past `critical_point` so the
        // 32-bit times may wrap around without corrupting the heap order.
        x.wrapping_sub(critical_point) < y.wrapping_sub(critical_point)
    }
    #[inline]
    fn on_move(&mut self, e: &mut TimeoutRef<Traits>, new_index: usize) {
        // Keep the back-pointer (`heap_pos`) in sync with the element's
        // position inside the heap.
        // SAFETY: same heap-element invariant as in `lt` above.
        unsafe { e.0.as_ref() }
            .heap_pos
            .replace(&mut *self.prop_token, new_index);
    }
}
impl<Traits: KernelTraits, TimeoutHeap> TimeoutGlobals<Traits, TimeoutHeap> {
    /// One-time startup: record the current hardware tick count as the
    /// reference point and request the first timer tick.
    pub(super) fn init(&self, mut lock: CpuLockTokenRefMut<'_, Traits>) {
        // SAFETY: CPU Lock is held (we have the token). NOTE(review): the
        // exact preconditions of `tick_count`/`pend_tick_after` are defined
        // by the port-timer contract — confirm there.
        self.last_tick_count
            .replace(&mut *lock.borrow_mut(), unsafe { Traits::tick_count() });
        unsafe { Traits::pend_tick_after(Traits::MAX_TIMEOUT) };
    }
}
/// Read the current system time.
///
/// Computed as `last_tick_sys_time` plus the hardware time elapsed since the
/// last tick; fails when called outside a task context or when CPU Lock
/// cannot be acquired.
#[cfg(feature = "system_time")]
pub(super) fn system_time<Traits: KernelTraits>() -> Result<Time, TimeError> {
    expect_task_context::<Traits>()?;
    let mut lock = lock_cpu::<Traits>()?;
    let (duration_since_last_tick, _) = duration_since_last_tick(lock.borrow_mut());
    let last_tick_sys_time = Traits::g_timeout()
        .last_tick_sys_time
        .get(&*lock.borrow_mut());
    let cur_sys_time = last_tick_sys_time.wrapping_add(duration_since_last_tick as Time64);
    Ok(sys_time_from_time64(cur_sys_time))
}
/// Set the system time without affecting the event time (and therefore
/// without affecting pending timeouts).
///
/// Stores `new_sys_time` minus the time already elapsed since the last tick,
/// so a subsequent `system_time` read returns `new_sys_time` plus whatever
/// has elapsed since this call. With `system_time` disabled this degenerates
/// to the same context/lock checks with no state change.
pub(super) fn set_system_time<Traits: KernelTraits>(new_sys_time: Time) -> Result<(), TimeError> {
    expect_task_context::<Traits>()?;
    // `match ()` with cfg'd arms: exactly one arm is compiled in.
    match () {
        #[cfg(feature = "system_time")]
        () => {
            let mut lock = lock_cpu::<Traits>()?;
            let (duration_since_last_tick, _) = duration_since_last_tick(lock.borrow_mut());
            let new_last_tick_sys_time =
                time64_from_sys_time(new_sys_time).wrapping_sub(duration_since_last_tick as Time64);
            Traits::g_timeout()
                .last_tick_sys_time
                .replace(&mut *lock.borrow_mut(), new_last_tick_sys_time);
        }
        #[cfg(not(feature = "system_time"))]
        () => {
            // Preserve the error behavior (CPU Lock check) even though there
            // is no system time to update.
            let _ = new_sys_time; lock_cpu::<Traits>()?;
        }
    }
    Ok(())
}
/// Shift both the system time and the event time by `delta`, guarding the
/// headroom invariants.
///
/// - Negative `delta`: grows `frontier_gap` by `|delta|`; rejected with
///   `BadObjectState` if the accumulated gap would exceed `USER_HEADROOM`.
/// - Positive `delta`: rejected with `BadObjectState` if advancing the clock
///   by `delta` would leave the earliest pending timeout with less than the
///   hard headroom (see
///   `saturating_duration_before_timeout_exhausting_user_headroom`);
///   otherwise shrinks `frontier_gap` accordingly.
/// - Zero `delta`: no-op.
pub(super) fn adjust_system_and_event_time<Traits: KernelTraits>(
    delta: Duration,
) -> Result<(), AdjustTimeError> {
    let mut lock = lock_cpu::<Traits>()?;
    let g_timeout = Traits::g_timeout();
    // Fold the time elapsed since the previous tick into the bookkeeping so
    // the checks below are relative to "now".
    mark_tick(lock.borrow_mut());
    if delta.is_negative() {
        let delta_abs = time32_from_neg_duration(delta).unwrap();
        let new_frontier_gap = g_timeout.frontier_gap.get(&*lock) + delta_abs;
        if new_frontier_gap > USER_HEADROOM {
            return Err(AdjustTimeError::BadObjectState);
        }
        g_timeout.frontier_gap.replace(&mut *lock, new_frontier_gap);
    } else if delta.is_positive() {
        let delta_abs = time32_from_duration(delta).unwrap();
        let TimeoutHeapAndPropToken { heap, prop_token } =
            g_timeout.heap_and_prop_token.read(&*lock);
        // Only the heap top (earliest timeout) needs checking.
        if let Some(&timeout_ref) = heap.get(0) {
            let timeout = unsafe { timeout_ref.0.as_ref() };
            let current_time = g_timeout.last_tick_time.get(&*lock);
            let duration = saturating_duration_before_timeout_exhausting_user_headroom(
                timeout,
                current_time,
                prop_token.borrow(),
            );
            if duration < delta_abs {
                return Err(AdjustTimeError::BadObjectState);
            }
        }
        g_timeout
            .frontier_gap
            .replace_with(&mut *lock, |old_value| old_value.saturating_sub(delta_abs));
    } else {
        // `delta == 0` — nothing to adjust.
        return Ok(());
    }
    // Apply the shift to the event time (32-bit) and, if enabled, the system
    // time (64-bit), both in wrapping arithmetic.
    let delta32 = wrapping_time32_from_duration(delta);
    g_timeout
        .last_tick_time
        .replace_with(&mut *lock, |old_value| old_value.wrapping_add(delta32));
    #[cfg(feature = "system_time")]
    {
        let delta64 = wrapping_time64_from_duration(delta);
        g_timeout
            .last_tick_sys_time
            .replace_with(&mut *lock, |old_value| old_value.wrapping_add(delta64));
    }
    // The earliest timeout's distance from "now" changed; reprogram the timer.
    let current_time = g_timeout.last_tick_time.get(&*lock);
    pend_next_tick(lock.borrow_mut(), current_time);
    Ok(())
}
/// Return `(elapsed, tick_count)`: the hardware time elapsed since
/// `last_tick_count` was recorded, and the current raw tick count.
///
/// When the hardware counter's period (`MAX_TICK_COUNT + 1`) is smaller than
/// the full `UTicks` range, a raw `wrapping_sub` across a counter wrap
/// overcounts by `UTicks::MAX - MAX_TICK_COUNT` (the unused part of the
/// 32-bit range); the second branch subtracts that excess. Assumes at most
/// one counter wrap since the last tick.
#[inline]
#[allow(clippy::suspicious_operation_groupings)]
fn duration_since_last_tick<Traits: KernelTraits>(
    mut lock: CpuLockTokenRefMut<'_, Traits>,
) -> (Time32, Time32) {
    // SAFETY: CPU Lock is held. NOTE(review): `tick_count`'s exact
    // precondition is defined by the port-timer contract — confirm there.
    let tick_count = unsafe { Traits::tick_count() };
    let last_tick_count = Traits::g_timeout().last_tick_count.get(&*lock.borrow_mut());
    let elapsed = if Traits::MAX_TICK_COUNT == UTicks::MAX || tick_count >= last_tick_count {
        // No wrap (or the counter spans the full range, where `wrapping_sub`
        // alone is exact).
        tick_count.wrapping_sub(last_tick_count)
    } else {
        // The counter wrapped at `MAX_TICK_COUNT`; correct for the unused
        // portion of the 32-bit range.
        tick_count.wrapping_sub(last_tick_count) - (UTicks::MAX - Traits::MAX_TICK_COUNT)
    };
    (elapsed, tick_count)
}
/// Advance the recorded reference point to "now": fold the time elapsed since
/// the previous tick into `last_tick_time` (and `last_tick_sys_time`), record
/// the new raw tick count, and decay `frontier_gap` by the elapsed time.
fn mark_tick<Traits: KernelTraits>(mut lock: CpuLockTokenRefMut<'_, Traits>) {
    let (duration_since_last_tick, tick_count) =
        duration_since_last_tick::<Traits>(lock.borrow_mut());
    let g_timeout = Traits::g_timeout();
    g_timeout.last_tick_count.replace(&mut *lock, tick_count);
    g_timeout
        .last_tick_time
        .replace_with(&mut *lock, |old_value| {
            old_value.wrapping_add(duration_since_last_tick)
        });
    #[cfg(feature = "system_time")]
    g_timeout
        .last_tick_sys_time
        .replace_with(&mut *lock, |old_value| {
            old_value.wrapping_add(duration_since_last_tick as Time64)
        });
    // The gap opened by negative time adjustments shrinks as real time
    // passes, never below zero.
    g_timeout
        .frontier_gap
        .replace_with(&mut *lock, |old_value| {
            old_value.saturating_sub(duration_since_last_tick)
        });
}
/// Timer-tick handler: advance the clock, fire every timeout that is due,
/// reprogram the timer for the next one, then release CPU Lock (possibly
/// triggering preemption).
#[inline]
pub(super) fn handle_tick<Traits: KernelTraits>() {
    let mut lock = lock_cpu::<Traits>().unwrap();
    mark_tick(lock.borrow_mut());
    let g_timeout = Traits::g_timeout();
    let current_time = g_timeout.last_tick_time.get(&*lock);
    let critical_point = critical_point(current_time);
    // While set, `insert_timeout`/`remove_timeout` (which the callbacks below
    // may invoke) skip their own `pend_next_tick`; we do it once at the end.
    g_timeout.handle_tick_in_progress.replace(&mut *lock, true);
    // Drain the heap top for as long as the earliest timeout is due.
    while let Some(&timeout_ref) = g_timeout.heap_and_prop_token.read(&*lock).heap.get(0) {
        // SAFETY: heap elements point to live, pinned `Timeout`s — see
        // `insert_timeout`. NOTE(review): the raw-pointer form (rather than
        // `as_ref`) presumably decouples the reference's lifetime from the
        // borrow of `lock` — confirm.
        let timeout = unsafe { &*timeout_ref.0.as_ptr() };
        let TimeoutHeapAndPropToken { heap, prop_token } =
            g_timeout.heap_and_prop_token.write(&mut *lock);
        let remaining =
            saturating_duration_until_timeout(timeout, current_time, prop_token.borrow());
        if remaining > 0 {
            break;
        }
        // Copy out the callback before unlinking; `timeout` may be freed by
        // its owner once it is no longer linked.
        let Timeout {
            callback,
            callback_param,
            ..
        } = *timeout;
        // Unlink first (the element being removed is the heap top, index 0)…
        debug_assert_eq!(*timeout.heap_pos.read(prop_token), 0);
        timeout.heap_pos.replace(prop_token, HEAP_POS_NONE);
        heap.heap_remove(
            0,
            TimeoutHeapCtx {
                critical_point,
                prop_token: prop_token.borrow_mut(),
            },
        );
        // …then run the callback, which takes and returns the CPU Lock guard
        // and may insert or remove timeouts.
        lock = callback(callback_param, lock);
    }
    g_timeout.handle_tick_in_progress.replace(&mut *lock, false);
    pend_next_tick(lock.borrow_mut(), current_time);
    task::unlock_cpu_and_check_preemption(lock);
}
/// Compute the current event time: `last_tick_time` plus the hardware time
/// elapsed since the last recorded tick (wrapping 32-bit arithmetic).
fn current_time<Traits: KernelTraits>(mut lock: CpuLockTokenRefMut<'_, Traits>) -> Time32 {
    let (elapsed, _tick_count) = duration_since_last_tick::<Traits>(lock.borrow_mut());
    let base = Traits::g_timeout().last_tick_time.get(&*lock);
    base.wrapping_add(elapsed)
}
/// Program the timer for the next interesting moment: the earliest pending
/// timeout, capped at `Traits::MAX_TIMEOUT` (a tick fires at least that
/// often even with no timeouts pending).
fn pend_next_tick<Traits: KernelTraits>(
    lock: CpuLockTokenRefMut<'_, Traits>,
    current_time: Time32,
) {
    let mut delay = Traits::MAX_TIMEOUT;
    let TimeoutHeapAndPropToken { heap, prop_token } =
        Traits::g_timeout().heap_and_prop_token.read(&*lock);
    if let Some(&timeout_ref) = heap.get(0) {
        // SAFETY: heap elements point to live, pinned `Timeout`s — see
        // `insert_timeout`.
        let timeout = unsafe { timeout_ref.0.as_ref() };
        delay = delay.min(saturating_duration_until_timeout(
            timeout,
            current_time,
            prop_token.borrow(),
        ));
    }
    // SAFETY: CPU Lock is held. NOTE(review): `pend_tick`/`pend_tick_after`
    // preconditions are defined by the port-timer contract — confirm there.
    unsafe {
        if delay == 0 {
            // Already due — request an immediate tick.
            Traits::pend_tick();
        } else {
            Traits::pend_tick_after(delay);
        }
    }
}
/// The "critical point": the event time trailing `current_time` by the total
/// headroom (`HARD_HEADROOM + USER_HEADROOM`). Used as the wrap-safe origin
/// for ordering 32-bit event times.
#[inline]
fn critical_point(current_time: Time32) -> Time32 {
    current_time
        .wrapping_sub(HARD_HEADROOM)
        .wrapping_sub(USER_HEADROOM)
}
/// Time remaining until `timeout` fires, saturated to zero when it is due or
/// overdue.
///
/// Computed via the critical point to stay correct under 32-bit wrap-around:
/// `at - critical_point` equals `at - current_time + (HARD + USER headroom)`,
/// so subtracting the total headroom (saturating) yields
/// `max(at - current_time, 0)` for any `at` within the headroom window.
fn saturating_duration_until_timeout<Traits: KernelTraits>(
    timeout: &Timeout<Traits>,
    current_time: Time32,
    prop_token: TimeoutPropTokenRef<'_>,
) -> Time32 {
    let critical_point = critical_point(current_time);
    let duration_until_violating_critical_point =
        timeout.at.read(&*prop_token).wrapping_sub(critical_point);
    duration_until_violating_critical_point.saturating_sub(HARD_HEADROOM + USER_HEADROOM)
}
/// Like [`saturating_duration_until_timeout`], but subtracts only
/// [`HARD_HEADROOM`]: the amount by which the clock may still be advanced —
/// consuming the user headroom — before `timeout` would violate the hard
/// headroom. Used by `adjust_system_and_event_time` to vet positive deltas.
fn saturating_duration_before_timeout_exhausting_user_headroom<Traits: KernelTraits>(
    timeout: &Timeout<Traits>,
    current_time: Time32,
    prop_token: TimeoutPropTokenRef<'_>,
) -> Time32 {
    let critical_point = critical_point(current_time);
    // Use `read` like the sibling `saturating_duration_until_timeout` does
    // (previously `get`) — same token-checked access, consistent style.
    let duration_until_violating_critical_point =
        timeout.at.read(&*prop_token).wrapping_sub(critical_point);
    duration_until_violating_critical_point.saturating_sub(HARD_HEADROOM)
}
/// Register `timeout` in the global timeout heap.
///
/// Panics if it is already registered. The `Pin` bound reflects that the heap
/// stores a raw pointer to `timeout`, which must stay put until unlinked.
pub(super) fn insert_timeout<Traits: KernelTraits>(
    mut lock: CpuLockTokenRefMut<'_, Traits>,
    timeout: Pin<&Timeout<Traits>>,
) {
    let prop_token = &Traits::g_timeout()
        .heap_and_prop_token
        .read(&*lock)
        .prop_token;
    assert_eq!(
        *timeout.heap_pos.read(prop_token),
        HEAP_POS_NONE,
        "timeout is already registered",
    );
    let current_time = current_time(lock.borrow_mut());
    let critical_point = critical_point(current_time);
    let TimeoutHeapAndPropToken { heap, prop_token } =
        Traits::g_timeout().heap_and_prop_token.write(&mut *lock);
    let pos = heap.heap_push(
        TimeoutRef((&*timeout).into()),
        TimeoutHeapCtx {
            critical_point,
            prop_token: prop_token.borrow_mut(),
        },
    );
    // `TimeoutHeapCtx::on_move` should have kept the back-pointer in sync.
    debug_assert_eq!(*timeout.heap_pos.read(prop_token), pos);
    // During `handle_tick` the tick is re-pended once at the end instead.
    if !Traits::g_timeout().handle_tick_in_progress.get(&*lock) {
        pend_next_tick(lock, current_time);
    }
}
/// Unregister `timeout` from the global timeout heap (a no-op if it is not
/// currently registered), then mark it unlinked.
#[inline]
pub(super) fn remove_timeout<Traits: KernelTraits>(
    mut lock: CpuLockTokenRefMut<'_, Traits>,
    timeout: &Timeout<Traits>,
) {
    remove_timeout_inner(lock.borrow_mut(), timeout);
    let prop_token = &mut Traits::g_timeout()
        .heap_and_prop_token
        .write(&mut *lock)
        .prop_token;
    // Reset the back-pointer so `Drop for Timeout` and `is_linked` see the
    // timeout as unlinked.
    timeout.heap_pos.replace(prop_token, HEAP_POS_NONE);
}
/// Remove `timeout` from the heap at its recorded position. Leaves
/// `heap_pos` untouched — the caller (`remove_timeout`) resets it.
fn remove_timeout_inner<Traits: KernelTraits>(
    mut lock: CpuLockTokenRefMut<'_, Traits>,
    timeout: &Timeout<Traits>,
) {
    let current_time = current_time(lock.borrow_mut());
    let critical_point = critical_point(current_time);
    let TimeoutHeapAndPropToken { heap, prop_token } =
        Traits::g_timeout().heap_and_prop_token.write(&mut *lock);
    let heap_pos = *timeout.heap_pos.read(prop_token);
    // If the timeout is unlinked, `heap_pos` is `HEAP_POS_NONE`
    // (`usize::MAX`), which is out of range and makes `heap_remove` return
    // `None`.
    let timeout_ref = heap.heap_remove(
        heap_pos,
        TimeoutHeapCtx {
            critical_point,
            prop_token: prop_token.borrow_mut(),
        },
    );
    if timeout_ref.is_none() {
        debug_assert_eq!(heap_pos, HEAP_POS_NONE);
        return;
    }
    // Sanity check: the element removed at `heap_pos` was indeed `timeout`.
    debug_assert_eq!(
        timeout_ref.unwrap().0.as_ptr() as *const _,
        timeout as *const _
    );
    // During `handle_tick` the tick is re-pended once at the end instead.
    if !Traits::g_timeout().handle_tick_in_progress.get(&*lock) {
        pend_next_tick(lock, current_time);
    }
}
/// Scope guard that unregisters `timeout` from the heap when dropped, so a
/// temporarily inserted timeout cannot outlive its scope.
pub(super) struct TimeoutGuard<'a, 'b, Traits: KernelTraits> {
    pub(super) timeout: Pin<&'a Timeout<Traits>>,
    pub(super) lock: CpuLockTokenRefMut<'b, Traits>,
}
impl<'a, 'b, Traits: KernelTraits> Drop for TimeoutGuard<'a, 'b, Traits> {
    #[inline]
    fn drop(&mut self) {
        // `remove_timeout` tolerates an already-unlinked timeout.
        remove_timeout(self.lock.borrow_mut(), &self.timeout);
    }
}