use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;
// Calibrated ticks -> nanoseconds conversion ratio: ns = ticks * NUMER / DENOM.
// Published by `calibrate()`. NUMER starts at 0 so that conversions before
// calibration yield 0 rather than garbage; DENOM starts at 1 so that an
// uncalibrated `elapsed_ns` never divides by zero.
static NUMER: AtomicU64 = AtomicU64::new(0);
static DENOM: AtomicU64 = AtomicU64::new(1);
#[inline(always)]
// Read a raw monotonic timestamp. The unit is architecture-specific counter
// ticks on x86_64/aarch64 — convert with `elapsed_ns` after `calibrate()` has
// run. On all other targets the fallback below returns nanoseconds since the
// first call, so ticks and nanoseconds coincide (ratio 1/1, see `calibrate`).
pub(crate) fn read() -> u64 {
#[cfg(target_arch = "x86_64")]
// SAFETY: `_rdtsc` has no memory or alignment preconditions; RDTSC is
// executable from user mode on mainstream OSes (it would fault only under
// an OS that restricts TSC reads, which we do not target — NOTE(review):
// confirm no such target is supported).
unsafe {
core::arch::x86_64::_rdtsc()
}
#[cfg(target_arch = "aarch64")]
{
let val: u64;
// SAFETY: reads CNTVCT_EL0 (the EL0-accessible virtual counter) into a
// single output register; the asm touches no memory and has no side effects.
unsafe { core::arch::asm!("mrs {}, cntvct_el0", out(reg) val) };
val
}
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
{
// Portable fallback: lazily latch an epoch on first use, then report
// nanoseconds elapsed since it via the monotonic `Instant` clock.
use std::sync::OnceLock;
static FALLBACK_EPOCH: OnceLock<Instant> = OnceLock::new();
let epoch = FALLBACK_EPOCH.get_or_init(Instant::now);
Instant::now().duration_since(*epoch).as_nanos() as u64
}
}
/// Convert a `(start, end)` pair of raw counter samples (from `read()`) into
/// elapsed nanoseconds, using the ratio published by `calibrate()`.
///
/// `wrapping_sub` keeps the delta correct even if the counter wrapped between
/// the two samples. Before calibration the ratio is 0/1, so this returns 0.
#[inline(always)]
pub(crate) fn elapsed_ns(start: u64, end: u64) -> u64 {
    // Widen everything to u128 so `delta * numer` cannot overflow before
    // the divide.
    let delta = end.wrapping_sub(start) as u128;
    let numer = NUMER.load(Ordering::Relaxed) as u128;
    let denom = DENOM.load(Ordering::Relaxed) as u128;
    (delta * numer / denom) as u64
}
/// Translate an absolute counter sample into nanoseconds since the epoch.
///
/// A raw sample expressed as ns-since-epoch is simply the elapsed time from
/// the epoch's counter sample (`epoch_tsc`) to this sample (`ticks`).
#[inline]
pub(crate) fn ticks_to_epoch_ns(ticks: u64, epoch_tsc: u64) -> u64 {
    elapsed_ns(epoch_tsc, ticks)
}
/// Measure the raw counter's frequency against the wall clock and publish the
/// ticks -> nanoseconds ratio consumed by `elapsed_ns`.
///
/// Busy-waits for ~2 ms, so call it once at startup (before `set_epoch_tsc`
/// and before any timestamps need converting).
pub(crate) fn calibrate() {
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    {
        // The fallback `read()` already returns nanoseconds, so the
        // conversion ratio is exactly 1/1.
        NUMER.store(1, Ordering::Release);
        DENOM.store(1, Ordering::Release);
        return;
    }
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    {
        let wall_start = Instant::now();
        let tsc_start = read();
        let target = std::time::Duration::from_millis(2);
        while wall_start.elapsed() < target {
            // Tell the CPU this is a spin-wait (PAUSE/YIELD).
            std::hint::spin_loop();
        }
        let tsc_end = read();
        // Sample the wall clock immediately after the counter so both
        // measurements cover (almost) exactly the same interval.
        let wall_ns = wall_start.elapsed().as_nanos() as u64;
        let tsc_ticks = tsc_end.wrapping_sub(tsc_start);
        // Guard the degenerate case of a counter (or clock) that did not
        // advance: gcd(wall_ns, 0) == wall_ns would store DENOM = 0 and make
        // every subsequent `elapsed_ns` divide by zero. Fall back to 1/1.
        if tsc_ticks == 0 || wall_ns == 0 {
            NUMER.store(1, Ordering::Release);
            DENOM.store(1, Ordering::Release);
            return;
        }
        // Reduce the fraction so the u128 multiply in `elapsed_ns` keeps as
        // much headroom as possible before overflow.
        let g = gcd(wall_ns, tsc_ticks);
        NUMER.store(wall_ns / g, Ordering::Release);
        DENOM.store(tsc_ticks / g, Ordering::Release);
    }
}
/// Greatest common divisor via Euclid's algorithm; `gcd(x, 0) == x`.
fn gcd(mut a: u64, mut b: u64) -> u64 {
    while b != 0 {
        (a, b) = (b, a % b);
    }
    a
}
// Counter sample recorded at the process's chosen time epoch; pairs with
// `ticks_to_epoch_ns` to turn later samples into ns-since-epoch. 0 until set.
static EPOCH_TSC: AtomicU64 = AtomicU64::new(0);
// Record the counter value corresponding to the time epoch (call once at
// startup, after `calibrate`). Release store pairs with readers of the value;
// the Relaxed load below is sufficient for a standalone u64 that guards no
// other data.
pub(crate) fn set_epoch_tsc(val: u64) {
EPOCH_TSC.store(val, Ordering::Release);
}
// The counter value recorded by `set_epoch_tsc` (0 if it was never called).
pub(crate) fn epoch_tsc() -> u64 {
EPOCH_TSC.load(Ordering::Relaxed)
}