#![allow(dead_code)]
#[cfg(all(target_os = "linux", feature = "fast-barrier", not(miri)))]
pub use linux::*;
#[cfg(all(target_os = "windows", feature = "fast-barrier", not(miri)))]
pub use windows::*;
#[cfg(any(
not(feature = "fast-barrier"),
not(any(target_os = "windows", target_os = "linux")),
miri
))]
pub use default::*;
#[cfg(any(
not(feature = "fast-barrier"),
not(any(target_os = "windows", target_os = "linux")),
miri
))]
mod default {
    //! Portable fallback strategy: no asymmetric barrier is available,
    //! so both sides of the barrier pair pay for full `SeqCst` ordering.

    use core::sync::atomic::{fence, Ordering};

    /// No runtime probing is needed for the fallback strategy.
    pub fn detect() {}

    /// Ordering for the load half of a light barrier: full `SeqCst`.
    #[inline]
    pub fn light_load() -> Ordering {
        Ordering::SeqCst
    }

    /// Ordering for the store half of a light barrier: full `SeqCst`.
    #[inline]
    pub fn light_store() -> Ordering {
        Ordering::SeqCst
    }

    /// No extra fence is required: the `SeqCst` orderings returned by
    /// `light_store`/`light_load` already carry the synchronization.
    #[inline]
    pub fn light_barrier() {}

    /// Heavy side of the barrier: a full sequentially-consistent fence.
    #[inline]
    pub fn heavy() {
        fence(Ordering::SeqCst);
    }
}
#[cfg(all(target_os = "linux", feature = "fast-barrier", not(miri)))]
mod linux {
    //! Linux implementation of the asymmetric heavy/light barrier.
    //!
    //! At startup, `detect` picks one of three strategies (best first):
    //! the `membarrier(2)` syscall, an `mprotect`-based trick, or a plain
    //! `SeqCst` fence fallback. When an asymmetric heavy barrier is
    //! available, the store side of the light barrier can be `Relaxed`.

    use std::sync::atomic::{self, AtomicU8, Ordering};

    /// Ordering to use for the store half of a light barrier.
    ///
    /// With the fallback strategy the store itself must be `SeqCst`;
    /// with membarrier/mprotect the heavy side supplies the ordering,
    /// so `Relaxed` suffices.
    #[inline]
    pub fn light_store() -> Ordering {
        match STRATEGY.load(Ordering::Relaxed) {
            FALLBACK => Ordering::SeqCst,
            _ => Ordering::Relaxed,
        }
    }

    /// Light-side barrier: only prevents compiler reordering. The
    /// hardware-level ordering is driven by `heavy` on the other side.
    #[inline]
    pub fn light_barrier() {
        atomic::compiler_fence(atomic::Ordering::SeqCst)
    }

    /// Ordering to use for the load half of a light barrier.
    ///
    /// NOTE(review): unlike `light_store`, this is `SeqCst` regardless of
    /// strategy — presumably loads must still participate in the global
    /// SeqCst order; confirm against the algorithm that consumes these.
    #[inline]
    pub fn light_load() -> Ordering {
        Ordering::SeqCst
    }

    /// Heavy side of the barrier, dispatched on the detected strategy.
    #[inline]
    pub fn heavy() {
        match STRATEGY.load(Ordering::Relaxed) {
            MEMBARRIER => membarrier::barrier(),
            MPROTECT => mprotect::barrier(),
            _ => atomic::fence(atomic::Ordering::SeqCst),
        }
    }

    // Strategy tags stored in `STRATEGY`.
    const MEMBARRIER: u8 = 0;
    const MPROTECT: u8 = 1;
    const FALLBACK: u8 = 2;
    // Defaults to the always-correct fallback until `detect` runs.
    static STRATEGY: AtomicU8 = AtomicU8::new(FALLBACK);

    /// Probe the kernel once and record the best available strategy.
    /// Safe to skip: the initial `FALLBACK` value is always correct.
    pub fn detect() {
        if membarrier::is_supported() {
            STRATEGY.store(MEMBARRIER, Ordering::Relaxed);
        } else if mprotect::is_supported() {
            STRATEGY.store(MPROTECT, Ordering::Relaxed);
        }
    }

    /// Abort (not panic) if `$cond` is false. Used around raw libc calls
    /// below where failure would leave locks/page protections in an
    /// inconsistent state; unwinding through that is not an option.
    macro_rules! fatal_assert {
        ($cond:expr) => {
            if !$cond {
                #[allow(unused_unsafe)]
                unsafe {
                    libc::abort();
                }
            }
        };
    }

    mod membarrier {
        /// Commands accepted by the `membarrier(2)` syscall; values mirror
        /// the kernel's `enum membarrier_cmd` bit flags.
        #[repr(i32)]
        #[allow(dead_code, non_camel_case_types)]
        enum membarrier_cmd {
            MEMBARRIER_CMD_QUERY = 0,
            MEMBARRIER_CMD_GLOBAL = (1 << 0),
            MEMBARRIER_CMD_GLOBAL_EXPEDITED = (1 << 1),
            MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2),
            MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
            MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
            MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5),
            MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6),
        }

        /// Thin wrapper over the raw syscall; flags argument is always 0.
        #[inline]
        fn sys_membarrier(cmd: membarrier_cmd) -> libc::c_long {
            unsafe { libc::syscall(libc::SYS_membarrier, cmd as libc::c_int, 0 as libc::c_int) }
        }

        /// Query the kernel for private-expedited membarrier support and,
        /// if present, register this process for it (registration is
        /// required before PRIVATE_EXPEDITED may be issued).
        pub fn is_supported() -> bool {
            // QUERY returns a bitmask of supported commands (or < 0 on error,
            // e.g. ENOSYS on pre-4.3 kernels).
            let ret = sys_membarrier(membarrier_cmd::MEMBARRIER_CMD_QUERY);
            if ret < 0
                || ret & membarrier_cmd::MEMBARRIER_CMD_PRIVATE_EXPEDITED as libc::c_long == 0
                || ret & membarrier_cmd::MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED as libc::c_long
                    == 0
            {
                return false;
            }
            if sys_membarrier(membarrier_cmd::MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED) < 0 {
                return false;
            }
            true
        }

        /// Heavy barrier: have the kernel run a memory barrier on every
        /// thread of this process. Aborts if the syscall fails (it should
        /// not, after successful registration in `is_supported`).
        #[inline]
        pub fn barrier() {
            fatal_assert!(sys_membarrier(membarrier_cmd::MEMBARRIER_CMD_PRIVATE_EXPEDITED) >= 0);
        }
    }

    mod mprotect {
        //! Fallback heavy barrier built from `mprotect`: toggling a page's
        //! protection forces the kernel to synchronize with other cores
        //! (TLB shootdown), which is relied on here as a process-wide
        //! barrier. NOTE(review): this relies on kernel/arch behavior not
        //! visible in this file — see `is_supported`'s arch restriction.

        use std::cell::UnsafeCell;
        use std::mem::MaybeUninit;
        use std::ptr;
        use std::sync::{atomic, OnceLock};

        /// One lazily-created dummy page plus a pthread mutex serializing
        /// protection flips on it.
        struct Barrier {
            // Raw pthread mutex; UnsafeCell because libc needs *mut access.
            lock: UnsafeCell<libc::pthread_mutex_t>,
            // Page address stored as u64 (cast back to a pointer on use).
            page: u64,
            page_size: libc::size_t,
        }

        // Shared across threads; all mutation goes through the pthread
        // mutex and mmap'd page, never through &mut.
        unsafe impl Sync for Barrier {}

        impl Barrier {
            /// Issue one heavy barrier: under the mutex, make the page
            /// writable, dirty it with an atomic RMW, then revoke access
            /// again. Any libc failure aborts the process.
            #[inline]
            fn barrier(&self) {
                let page = self.page as *mut libc::c_void;
                unsafe {
                    fatal_assert!(libc::pthread_mutex_lock(self.lock.get()) == 0);
                    fatal_assert!(
                        libc::mprotect(page, self.page_size, libc::PROT_READ | libc::PROT_WRITE,)
                            == 0
                    );
                    // Touch the page so the protection change is not a no-op.
                    let atomic_usize = &*(page as *const atomic::AtomicUsize);
                    atomic_usize.fetch_add(1, atomic::Ordering::SeqCst);
                    fatal_assert!(libc::mprotect(page, self.page_size, libc::PROT_NONE) == 0);
                    fatal_assert!(libc::pthread_mutex_unlock(self.lock.get()) == 0);
                }
            }
        }

        static BARRIER: OnceLock<Barrier> = OnceLock::new();

        /// Only enabled on x86/x86_64 — presumably the mprotect trick's
        /// barrier guarantee is arch-dependent; confirm before widening.
        pub fn is_supported() -> bool {
            cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64")
        }

        /// Issue a heavy barrier, creating the page/mutex on first use.
        #[inline]
        pub fn barrier() {
            let barrier = BARRIER.get_or_init(|| {
                unsafe {
                    let page_size = libc::sysconf(libc::_SC_PAGESIZE);
                    fatal_assert!(page_size > 0);
                    let page_size = page_size as libc::size_t;
                    // One anonymous, initially inaccessible page.
                    let page = libc::mmap(
                        ptr::null_mut(),
                        page_size,
                        libc::PROT_NONE,
                        libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                        -1 as libc::c_int,
                        0 as libc::off_t,
                    );
                    fatal_assert!(page != libc::MAP_FAILED);
                    fatal_assert!(page as libc::size_t % page_size == 0);
                    // Best effort: pin the page so flips don't hit swap;
                    // the return value is deliberately ignored.
                    libc::mlock(page, page_size as libc::size_t);
                    // Explicitly initialize a NORMAL (non-recursive) mutex.
                    let lock = UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER);
                    let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
                    fatal_assert!(libc::pthread_mutexattr_init(attr.as_mut_ptr()) == 0);
                    let mut attr = attr.assume_init();
                    fatal_assert!(
                        libc::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL) == 0
                    );
                    fatal_assert!(libc::pthread_mutex_init(lock.get(), &attr) == 0);
                    fatal_assert!(libc::pthread_mutexattr_destroy(&mut attr) == 0);
                    let page = page as u64;
                    Barrier {
                        lock,
                        page,
                        page_size,
                    }
                }
            });
            barrier.barrier();
        }
    }
}
#[cfg(all(target_os = "windows", feature = "fast-barrier", not(miri)))]
mod windows {
    //! Windows implementation backed by `FlushProcessWriteBuffers`.
    //!
    //! The OS call in `heavy` supplies the expensive side of the barrier
    //! pair, which is why both light orderings below are `Relaxed`.

    use core::sync::atomic::{compiler_fence, Ordering};
    use windows_sys;

    /// Nothing to probe at runtime for this strategy.
    pub fn detect() {}

    /// Ordering for the store half of a light barrier; the heavy side
    /// provides the cross-thread synchronization.
    #[inline]
    pub fn light_store() -> Ordering {
        Ordering::Relaxed
    }

    /// Ordering for the load half of a light barrier; see `light_store`.
    #[inline]
    pub fn light_load() -> Ordering {
        Ordering::Relaxed
    }

    /// Light-side barrier: only compiler reordering must be prevented.
    #[inline]
    pub fn light_barrier() {
        compiler_fence(Ordering::SeqCst)
    }

    /// Heavy barrier: ask the OS to flush the write buffers of every
    /// thread in this process.
    #[inline]
    pub fn heavy() {
        unsafe { windows_sys::Win32::System::Threading::FlushProcessWriteBuffers() }
    }
}