/// Reads a cheap per-CPU tick counter.
///
/// - x86 / x86_64: the time-stamp counter (`rdtsc`).
/// - aarch64: the EL0-readable virtual counter `CNTVCT_EL0`.
/// - riscv64: the `rdcycle` CSR.
/// - any other architecture: always returns 0.
///
/// The read is not serialized against surrounding instructions; callers
/// that need ordering must fence (see `get_tick_with_cpu`).
#[inline(always)]
#[must_use]
pub fn rdtsc() -> u64 {
    // Fix: the old cfg matched `x86` too but called the `x86_64` intrinsic,
    // which does not exist on 32-bit x86 (and left the fallback block active
    // there as well). Each arch now gets its own intrinsic path.
    #[cfg(target_arch = "x86")]
    unsafe {
        core::arch::x86::_rdtsc()
    }
    #[cfg(target_arch = "x86_64")]
    unsafe {
        core::arch::x86_64::_rdtsc()
    }
    #[cfg(target_arch = "aarch64")]
    unsafe {
        let mut cnt: u64;
        core::arch::asm!("mrs {0}, cntvct_el0", out(reg) cnt);
        cnt
    }
    #[cfg(target_arch = "riscv64")]
    unsafe {
        let mut cycles: u64;
        core::arch::asm!("rdcycle {0}", out(reg) cycles);
        cycles
    }
    // No cheap counter available on this target.
    #[cfg(not(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "riscv64"
    )))]
    {
        0
    }
}
/// Returns an identifier for the CPU the calling thread is running on.
///
/// On x86_64 this reads IA32_TSC_AUX — via `rdpid` when the
/// `hw-acceleration` feature is enabled, otherwise via `rdtscp`.
/// NOTE(review): on Linux the kernel loads TSC_AUX with
/// `(numa_node << 12) | cpu`, so this is not necessarily a bare CPU
/// index — confirm callers expect the encoded value or mask the low bits.
/// On non-x86_64 Linux targets it falls back to `sched_getcpu`; on any
/// other platform it returns 0.
#[inline(always)]
#[must_use]
#[allow(clippy::cast_possible_truncation)]
pub fn get_cpu_fast() -> u32 {
    #[cfg(target_arch = "x86_64")]
    unsafe {
        #[allow(unused_assignments)]
        let mut aux: u32 = 0;
        // Fast path: `rdpid` reads TSC_AUX directly, without also reading
        // the TSC as `rdtscp` does.
        #[cfg(feature = "hw-acceleration")]
        {
            let mut out: u64;
            core::arch::asm!(
                "rdpid {}",
                out(reg) out,
                options(nostack, preserves_flags),
            );
            aux = out as u32;
        }
        // Portable x86_64 path: `rdtscp` stores TSC_AUX into `aux` as a
        // side effect of the timestamp read.
        #[cfg(not(feature = "hw-acceleration"))]
        {
            core::arch::x86_64::__rdtscp(&raw mut aux);
        }
        aux
    }
    #[cfg(all(not(target_arch = "x86_64"), target_os = "linux"))]
    unsafe {
        libc::sched_getcpu() as u32
    }
    // Unsupported platform: no cheap CPU id available.
    #[cfg(not(any(target_arch = "x86_64", target_os = "linux")))]
    {
        0
    }
}
/// Returns the current tick counter; alias for [`rdtsc`].
///
/// Units are architecture-specific (TSC cycles on x86, CNTVCT ticks on
/// aarch64, cycle count on riscv64, constant 0 elsewhere), so only
/// differences between readings are meaningful.
#[inline(always)]
#[must_use]
pub fn get_tick() -> u64 {
    rdtsc()
}
/// Reads a tick count together with the current CPU identifier.
///
/// With the `hypervisor` feature on x86_64, an `lfence` is issued before
/// `rdtsc` so the timestamp is not taken ahead of earlier instructions.
/// NOTE(review): the TSC and CPU id are read in two separate steps, so a
/// thread migration between them can pair a timestamp with the wrong
/// CPU — confirm callers tolerate that.
#[inline(always)]
#[must_use]
pub fn get_tick_with_cpu() -> (u64, u32) {
    #[cfg(feature = "hypervisor")]
    {
        #[cfg(target_arch = "x86_64")]
        unsafe {
            // Fence first so the TSC read is ordered after preceding work.
            core::arch::asm!("lfence", options(nostack, preserves_flags));
            let tsc = core::arch::x86_64::_rdtsc();
            let cpu = get_cpu_fast();
            (tsc, cpu)
        }
        #[cfg(not(target_arch = "x86_64"))]
        {
            (rdtsc(), get_cpu_fast())
        }
    }
    // Default build: unfenced read is good enough.
    #[cfg(not(feature = "hypervisor"))]
    {
        (rdtsc(), get_cpu_fast())
    }
}
/// Blocks the calling thread on `addr` as long as `*addr == val`.
///
/// Thin wrapper over `futex(FUTEX_WAIT | FUTEX_PRIVATE_FLAG)` with no
/// timeout. Returns immediately when the kernel reports `EAGAIN` (the
/// value at `addr` no longer equals `val`, i.e. the wait condition has
/// already changed) and retries only on `EINTR` (signal). Futex wakeups
/// can be spurious, so callers must re-check their condition on return.
///
/// # Safety
/// `addr` must point to a valid `AtomicU32` that stays alive and shared
/// with the waker for the duration of the call.
#[cfg(target_os = "linux")]
#[inline(always)]
pub unsafe fn futex_wait(addr: *const core::sync::atomic::AtomicU32, val: u32) {
    unsafe {
        loop {
            let ret = libc::syscall(
                libc::SYS_futex,
                addr,
                libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG,
                val.cast_signed(),
                core::ptr::null::<libc::timespec>(),
            );
            if ret == 0 {
                // Woken (possibly spuriously) — caller re-checks.
                break;
            }
            let err = *libc::__errno_location();
            if err == libc::EINTR {
                // Interrupted by a signal; the condition is unchanged, retry.
                continue;
            }
            // Fix: the old code also retried on EAGAIN, but EAGAIN means
            // `*addr != val` — the wait condition already changed, and
            // retrying busy-spins in syscalls until the value matches `val`
            // again. Return instead; other errors are likewise terminal here.
            break;
        }
    }
}
/// Wakes every thread parked in `futex_wait` on `addr`.
///
/// Issues `futex(FUTEX_WAKE | FUTEX_PRIVATE_FLAG)` with a wake count of
/// `c_int::MAX`; the syscall's result (number of threads woken) is
/// intentionally discarded.
///
/// # Safety
/// `addr` must point to a valid `AtomicU32` shared with the waiter side.
#[cfg(target_os = "linux")]
#[inline(always)]
pub unsafe fn futex_wake(addr: *const core::sync::atomic::AtomicU32) {
    // Process-private wake-all; we do not care how many were woken.
    let op = libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG;
    unsafe {
        let _ = libc::syscall(
            libc::SYS_futex,
            addr,
            op,
            libc::c_int::MAX,
            core::ptr::null::<libc::timespec>(),
            core::ptr::null::<u32>(),
            0,
        );
    }
}
/// Non-Linux fallback for `futex_wait`: there is no futex, so just yield
/// the CPU once and let the caller re-check its condition in a loop.
/// NOTE(review): the Linux variant is `unsafe fn` while this one is safe —
/// consider matching the signatures so cross-platform call sites agree.
#[cfg(not(target_os = "linux"))]
#[inline(always)]
pub fn futex_wait(_addr: *const core::sync::atomic::AtomicU32, _val: u32) {
    std::thread::yield_now();
}
/// Non-Linux fallback for `futex_wake`: waiters spin via `yield_now`, so
/// there is nobody to wake — this is a no-op.
#[cfg(not(target_os = "linux"))]
#[inline(always)]
pub const fn futex_wake(_addr: *const core::sync::atomic::AtomicU32) {}
// Per-thread cache of the OS thread id used by `get_thread_id`.
// 0 is the "not yet fetched" sentinel.
std::thread_local! {
    static CACHED_TID: core::cell::Cell<u64> = const { core::cell::Cell::new(0) };
}
/// Returns the OS thread id of the calling thread, cached per thread.
///
/// The first call performs a syscall (`gettid` on Linux,
/// `GetCurrentThreadId` on Windows) and stores the result in `CACHED_TID`;
/// subsequent calls are a plain thread-local read. On any other platform
/// the id is always 0 (which also re-triggers the cfg'd lookup, a no-op).
#[inline(always)]
#[must_use]
pub fn get_thread_id() -> u64 {
    CACHED_TID.with(|cell| {
        let cached = cell.get();
        // 0 is the "unset" sentinel (see CACHED_TID initializer).
        if cached != 0 {
            return cached;
        }
        #[allow(unused_mut, unused_assignments)]
        let mut fresh: u64 = 0;
        #[cfg(target_os = "linux")]
        unsafe {
            fresh = libc::syscall(libc::SYS_gettid).cast_unsigned();
        }
        #[cfg(target_os = "windows")]
        unsafe {
            fresh = u64::from(windows_sys::Win32::System::Threading::GetCurrentThreadId());
        }
        cell.set(fresh);
        fresh
    })
}
/// A minimal test-and-test-and-set (TTAS) spin lock.
///
/// Acquisition busy-waits with `spin_loop` hints — no parking, no
/// fairness — so it is only suitable for very short critical sections.
pub struct SpinLock {
    // `true` while some thread holds the lock.
    locked: core::sync::atomic::AtomicBool,
}
impl SpinLock {
    /// Creates a new lock in the unlocked state.
    #[must_use]
    #[inline(always)]
    pub const fn new() -> Self {
        SpinLock {
            locked: core::sync::atomic::AtomicBool::new(false),
        }
    }
    /// Acquires the lock, spinning until it becomes available.
    #[inline(always)]
    pub fn lock(&self) {
        loop {
            // `swap` returning `false` means the lock was free and is now ours.
            if !self.locked.swap(true, core::sync::atomic::Ordering::Acquire) {
                return;
            }
            // Contended: spin on a plain load until the holder releases, to
            // avoid bouncing the cache line with repeated atomic writes.
            while self.locked.load(core::sync::atomic::Ordering::Relaxed) {
                core::hint::spin_loop();
            }
        }
    }
    /// Releases the lock by clearing the flag. There is no owner check;
    /// the caller is responsible for only unlocking a lock it holds.
    #[inline(always)]
    pub fn unlock(&self) {
        self.locked
            .store(false, core::sync::atomic::Ordering::Release);
    }
}
impl Default for SpinLock {
#[inline(always)]
fn default() -> Self {
Self::new()
}
}