use alloc::{boxed::Box, collections::btree_map::BTreeMap, sync::Arc, vec::Vec};
use core::{
any::Any,
hint::{likely, unlikely},
};
use crate::{
irq::irqflags::irqs_disabled,
println,
processor::{nr_cpus, this_processor_id},
sync::spinlock::Spinlock,
time::{NSEC_PER_MSEC, clockevents::nsec_program_event, timekeeping::ktime_get},
};
/// Lifecycle of a timer: `Inactive` -> `Enqueued` -> `Running` -> `Inactive`,
/// with `Enqueued` -> `NeedCancel` for lazy cancellation.
///
/// Derives extended: a fieldless enum deriving `PartialEq`/`PartialOrd`
/// should also derive `Eq`/`Ord` (clippy `derive_partial_eq_without_eq`),
/// and `Debug`/`Clone`/`Copy` cost nothing and aid diagnostics.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum HrtimerState {
    Inactive,
    Enqueued,
    NeedCancel,
    Running,
}
/// How arming times are interpreted: `Abs` = absolute monotonic time,
/// `Rel` = offset from "now" (see `hrtimer_start_range_ns`).
///
/// Derives extended: `Eq`/`Ord` to match the derived partial impls
/// (clippy `derive_partial_eq_without_eq`), plus `Debug`/`Clone`/`Copy`
/// since this is a public fieldless enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum HrtimerMode {
    Abs,
    Rel,
}
/// Callback verdict: `Restart` asks `__run_hrtimer` to re-enqueue the timer,
/// `NoRestart` lets it stay inactive.
///
/// Derives extended: `Eq`/`Ord` to match the derived partial impls
/// (clippy `derive_partial_eq_without_eq`), plus `Debug`/`Clone`/`Copy`
/// since this is a public fieldless enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum HrtimerRestart {
    NoRestart,
    Restart,
}
/// Mutable timer state, guarded by the `inter` spinlock inside `Hrtimer`.
struct HrtimerInter {
    // Lifecycle state; see `HrtimerState`.
    state: HrtimerState,
    // Earliest time (ns) the callback may run; set to `tim` in
    // `hrtimer_start_range_ns`, with `expires = tim + delta`.
    softexpires: u64,
    // Hard deadline (ns). Also used as this timer's key in the per-CPU
    // `active` BTreeMap while enqueued.
    expires: u64,
}
/// Opaque payload handed back to the callback via `Hrtimer::private()`.
pub type HrtimerFuncData = Box<dyn Any + Send + Sync>;
/// Timer callback; returning `HrtimerRestart::Restart` re-enqueues the timer.
pub type HrtimerFunc = fn(&Hrtimer) -> HrtimerRestart;
/// A high-resolution timer bound to the CPU it was created on.
pub struct Hrtimer {
    // Debug identity (creation timestamp from `ktime_get`); only compared in
    // `__run_hrtimer`'s debug assertion — not guaranteed unique.
    badge: u64,
    function: HrtimerFunc,
    private: Option<HrtimerFuncData>,
    // Absolute vs. relative interpretation of arming times.
    mode: HrtimerMode,
    // CPU whose per-CPU base owns this timer (set at creation).
    cpu: usize,
    // Spinlock-protected expiry + lifecycle state.
    inter: Spinlock<HrtimerInter>,
}
impl Hrtimer {
    /// Creates a timer in the `Inactive` state, pinned to the calling CPU.
    ///
    /// `badge` is taken from the current monotonic time and serves only as a
    /// debug identity check in `__run_hrtimer`; it is not guaranteed unique.
    pub fn create(mode: HrtimerMode, f: HrtimerFunc, private: Option<HrtimerFuncData>) -> Self {
        Self {
            badge: ktime_get(),
            function: f,
            private,
            mode,
            cpu: this_processor_id(),
            inter: Spinlock::new(HrtimerInter {
                state: HrtimerState::Inactive,
                // Not armed yet: both expiries parked at "never".
                softexpires: u64::MAX,
                expires: u64::MAX,
            }),
        }
    }
    /// Pushes the expiry forward by one `interval` if the timer has expired.
    ///
    /// Must run with irqs disabled on the owning CPU (asserted in debug).
    /// Returns 0 when the timer had not expired, or when the advanced expiry
    /// is now in the future; otherwise returns how far `now` overshoots the
    /// new expiry, in ns.
    ///
    /// NOTE(review): unlike Linux `hrtimer_forward`, this adds `interval`
    /// exactly once, so after a long stall `expires` can remain in the past
    /// (non-zero return) — confirm callers handle that.
    pub fn forward_now(&self, interval: u64) -> u64 {
        debug_assert!(irqs_disabled());
        debug_assert_eq!(this_processor_id(), self.cpu);
        let now = ktime_get();
        let mut inter = self.inter.lock();
        if now < inter.expires {
            return 0;
        }
        // An expired timer must already have been popped from the queue
        // (it is Running or Inactive here, never still Enqueued).
        assert!(inter.state != HrtimerState::Enqueued);
        inter.expires += interval;
        inter.softexpires += interval;
        if likely(now < inter.expires) { 0 } else { now - inter.expires }
    }
    /// Lazy cancellation: only flags an enqueued timer; the actual dequeue
    /// happens in `__run_hrtimer` when the timer next pops. Cancelling a
    /// timer that is currently `Running` or `Inactive` is a no-op.
    pub fn cancel_hrtimer(&self) {
        let mut inter = self.inter.lock_irq_save();
        if inter.state == HrtimerState::Enqueued {
            inter.state = HrtimerState::NeedCancel;
        }
    }
    /// Borrow of the user payload supplied at creation.
    pub fn private(&self) -> &Option<HrtimerFuncData> {
        &self.private
    }
    /// Re-programs the local clock-event device if this timer's expiry is
    /// earlier than the one currently programmed.
    ///
    /// No-ops when the timer belongs to another CPU, when called from inside
    /// `hrtimer_interrupt` (which recomputes the next event itself), when the
    /// expiry is not earlier than `expires_next`, or — after updating the
    /// cached deadline — while hang mitigation is active.
    fn reprogram(&self) {
        let hcb = unsafe { &mut HRTIMER_CPUBASE[this_processor_id()] };
        if hcb.cpu != self.cpu {
            return;
        }
        if hcb.in_hrtirq {
            return;
        }
        let expires;
        {
            let inter = self.inter.lock_irq_save();
            expires = inter.expires;
        }
        if expires >= hcb.expires_next {
            return;
        }
        hcb.expires_next = expires;
        if hcb.hang_detected {
            // Keep the forced mitigation deadline; don't shorten it.
            return;
        }
        nsec_program_event(expires, true);
    }
}
/// Per-CPU timer base: the queue of armed timers plus clock-event bookkeeping.
struct HrtimerCpubase {
    // Armed timers keyed by hard expiry time (ns).
    // NOTE(review): keys must be unique — `enqueue_hrtimer` rejects a second
    // timer with an identical `expires`; confirm callers tolerate that.
    active: BTreeMap<u64, Arc<Hrtimer>>,
    // Timer whose callback is currently executing, if any.
    running: Option<Arc<Hrtimer>>,
    // Expiry currently programmed into the clock-event device
    // (`u64::MAX` = nothing programmed).
    expires_next: u64,
    // Processor id owning this base (set by `hrtimer_init_cpu`).
    cpu: usize,
    // How many times the hang-mitigation path in `hrtimer_interrupt` fired.
    hang_count: u64,
    // True while hang mitigation's forced deadline is in effect.
    hang_detected: bool,
    // True while `hrtimer_interrupt` runs; makes `Hrtimer::reprogram` a no-op.
    in_hrtirq: bool,
}
impl HrtimerCpubase {
    /// An empty base: no timers queued, nothing programmed, no hang recorded.
    /// `cpu` stays 0 until `hrtimer_init_cpu()` fills in the real id.
    const fn new() -> Self {
        Self {
            cpu: 0,
            active: BTreeMap::new(),
            running: None,
            expires_next: u64::MAX,
            hang_count: 0,
            hang_detected: false,
            in_hrtirq: false,
        }
    }
}
impl HrtimerCpubase {
    /// Expiry time (ns) of the earliest queued timer, or `u64::MAX` when the
    /// queue is empty.
    #[inline(always)]
    fn get_next_event(&self) -> u64 {
        self.active
            .first_key_value()
            .map(|(_, timer)| timer.inter.lock_irq_save().expires)
            .unwrap_or(u64::MAX)
    }
}
/// Attempts to queue `timer` on the local CPU's base.
///
/// Returns `false` without touching the queue when another timer already
/// occupies the same expiry key, or when the timer is not `Inactive`.
fn enqueue_hrtimer(timer: &Arc<Hrtimer>) -> bool {
    let mut inter = timer.inter.lock_irq_save();
    let hcb = unsafe { &mut HRTIMER_CPUBASE[this_processor_id()] };
    let slot_taken = hcb.active.contains_key(&inter.expires);
    let idle = inter.state == HrtimerState::Inactive;
    if slot_taken || !idle {
        return false;
    }
    inter.state = HrtimerState::Enqueued;
    let displaced = hcb.active.insert(inter.expires, timer.clone());
    // The contains_key check above guarantees the slot was free.
    debug_assert!(displaced.is_none());
    true
}
pub fn hrtimer_start_range_ns(timer: Arc<Hrtimer>, mut tim: u64, delta: u64) -> bool {
if timer.mode == HrtimerMode::Rel {
tim += ktime_get();
}
let expires = tim + delta;
{
let mut inter = timer.inter.lock_irq_save();
inter.expires = expires;
inter.softexpires = tim;
}
if !enqueue_hrtimer(&timer) {
return false;
}
Hrtimer::reprogram(&timer);
true
}
/// Arms `timer` to fire exactly at `tim` (zero slack); see
/// `hrtimer_start_range_ns` for the semantics and failure cases.
pub fn hrtimer_start(timer: Arc<Hrtimer>, tim: u64) -> bool {
    hrtimer_start_range_ns(timer, tim, 0)
}
/// Executes one expired timer's callback on the local CPU.
///
/// The timer has already been popped from `hcb.active` by the caller.
fn __run_hrtimer(hcb: &mut HrtimerCpubase, timer: &Arc<Hrtimer>) {
    {
        let mut inter = timer.inter.lock();
        // Lazy cancellation: `cancel_hrtimer` only flags the timer; the
        // actual drop happens here, after it was popped from the queue.
        if unlikely(inter.state == HrtimerState::NeedCancel) {
            inter.state = HrtimerState::Inactive;
            return;
        }
        inter.state = HrtimerState::Running;
    }
    hcb.running = Some(timer.clone());
    // The callback runs without holding the timer's internal lock, so it may
    // call methods like `forward_now` on itself.
    let ret = (timer.function)(timer);
    {
        let mut inter = timer.inter.lock();
        inter.state = HrtimerState::Inactive;
    }
    if ret != HrtimerRestart::NoRestart {
        // NOTE(review): `enqueue_hrtimer` takes a fresh `&mut` to the per-CPU
        // base while `hcb` is still mutably borrowed here — that aliases the
        // `static mut`; worth confirming/refactoring.
        enqueue_hrtimer(timer);
    }
    // Sanity check that nothing replaced the running slot underneath us.
    debug_assert_eq!(hcb.running.as_ref().unwrap().badge, timer.badge);
    hcb.running = None;
}
/// Runs every timer whose soft expiry is at or before `now`, in expiry
/// order, stopping at the first timer that is not yet due (which is put
/// back into the queue).
fn __hrtimer_run_queues(hcb: &mut HrtimerCpubase, now: u64) {
    loop {
        let Some((_, timer)) = hcb.active.pop_first() else {
            return;
        };
        debug_assert_eq!(timer.cpu, hcb.cpu);
        // Inspect under the timer lock, but drop the guard before running
        // the callback.
        let not_due_yet = {
            let inter = timer.inter.lock();
            if now < inter.softexpires {
                // Earliest timer is still in the future: put it back and stop.
                hcb.active.insert(inter.expires, timer.clone());
                true
            } else {
                false
            }
        };
        if not_due_yet {
            return;
        }
        __run_hrtimer(hcb, &timer);
    }
}
/// Clock-event interrupt handler: runs all expired timers on this CPU, then
/// programs the next event, with a retry/hang-mitigation loop modeled on the
/// Linux hrtimer interrupt.
pub(crate) fn hrtimer_interrupt() {
    let hcb = unsafe { &mut HRTIMER_CPUBASE[this_processor_id()] };
    let entry_time = ktime_get();
    let mut now = entry_time;
    let mut retries = 0;
    loop {
        // While `in_hrtirq` is set, `Hrtimer::reprogram` is a no-op — the
        // next event is recomputed below anyway.
        hcb.in_hrtirq = true;
        hcb.expires_next = u64::MAX;
        __hrtimer_run_queues(hcb, now);
        let mut expires_next = hcb.get_next_event();
        hcb.expires_next = expires_next;
        hcb.in_hrtirq = false;
        // Success: the device accepted a future expiry (or there is none).
        if nsec_program_event(expires_next, false) {
            hcb.hang_detected = false;
            return;
        }
        // The next expiry was already in the past: rerun the queues with a
        // fresh timestamp, up to 3 retries.
        now = ktime_get();
        if retries < 3 {
            retries += 1;
            continue;
        }
        // Hang mitigation: we spent too long in the handler. Force-program a
        // delay equal to the time spent so far, capped at 100 ms, so the CPU
        // can make forward progress.
        hcb.hang_detected = true;
        hcb.hang_count += 1;
        let delta = now - entry_time;
        if delta > 100 * NSEC_PER_MSEC {
            expires_next = now + 100 * NSEC_PER_MSEC;
        } else {
            expires_next = now + delta;
        }
        nsec_program_event(expires_next, true);
        // Rate-limited warning: only every 10th hang.
        if hcb.hang_count.is_multiple_of(10) {
            println!(
                "hrtimer: interrupt took {} ns, count {}, cpu {}",
                delta, hcb.hang_count, hcb.cpu
            );
        }
        break;
    }
}
// One `HrtimerCpubase` per CPU, indexed by processor id; populated once by
// `hrtimer_init` during boot. Accessed via raw `&mut` without a lock —
// soundness presumably relies on strictly per-CPU access with irqs managed
// by the callers (NOTE(review): confirm this discipline holds everywhere).
static mut HRTIMER_CPUBASE: Vec<HrtimerCpubase> = Vec::new();
#[allow(static_mut_refs)]
/// Allocates one timer base per CPU and initializes the boot CPU's base.
pub(super) fn hrtimer_init() {
    // SAFETY: runs once during early boot before any other code touches
    // the per-CPU table.
    unsafe {
        HRTIMER_CPUBASE.extend((0..nr_cpus()).map(|_| HrtimerCpubase::new()));
    }
    hrtimer_init_cpu();
}
/// Records the calling CPU's id in its own timer base.
pub(super) fn hrtimer_init_cpu() {
    let cpu = this_processor_id();
    let hcb = unsafe { &mut HRTIMER_CPUBASE[cpu] };
    hcb.cpu = cpu;
}