use core::sync::atomic::{self, Ordering};
use nginx_sys::ngx_sched_yield;
/// Number of busy-wait rounds attempted on SMP machines before yielding the
/// CPU; the retry loops in the `RawRwLock` impl below iterate up to this bound.
const NGX_RWLOCK_SPIN: usize = 2048;
/// Sentinel lock value meaning "held exclusively by a writer" (see
/// `try_lock_exclusive`); any other non-zero value is the count of active
/// readers (see `try_lock_shared`).
const NGX_RWLOCK_WLOCK: usize = usize::MAX;
/// Atomic word backing the lock state.
type NgxAtomic = atomic::AtomicUsize;
/// Spin-based reader/writer lock primitive; the locking protocol lives in the
/// `lock_api::RawRwLock` impl for this type.
pub struct RawSpinlock(NgxAtomic);
/// Reader/writer lock built on [`RawSpinlock`].
pub type RwLock<T> = lock_api::RwLock<RawSpinlock, T>;
/// Guard granting shared (read) access to a [`RwLock`].
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawSpinlock, T>;
/// Guard granting exclusive (write) access to a [`RwLock`].
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawSpinlock, T>;
/// Reader/writer spinlock protocol, modelled after nginx's `ngx_rwlock`:
/// probe the lock, then on SMP machines busy-wait with a linearly growing
/// number of pause hints between retries, and finally yield the time slice
/// before starting over.
unsafe impl lock_api::RawRwLock for RawSpinlock {
    // A lock cell is interior mutability by design; the lint is expected here.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: RawSpinlock = RawSpinlock(NgxAtomic::new(0));

    // Guards may not be sent to another thread.
    type GuardMarker = lock_api::GuardNoSend;

    /// Blocks until shared (read) access is acquired.
    fn lock_shared(&self) {
        loop {
            if self.try_lock_shared() {
                return;
            }

            // Busy-waiting only helps when another CPU can release the lock.
            if unsafe { nginx_sys::ngx_ncpu > 1 } {
                for backoff in 0..NGX_RWLOCK_SPIN {
                    // Progressively longer pause before each retry.
                    (0..backoff).for_each(|_| core::hint::spin_loop());

                    if self.try_lock_shared() {
                        return;
                    }
                }
            }

            // Still contended: give up the time slice and start over.
            ngx_sched_yield()
        }
    }

    /// Attempts to register one more reader; fails if a writer holds the
    /// lock or the reader count moved concurrently.
    fn try_lock_shared(&self) -> bool {
        let readers = self.0.load(Ordering::Acquire);

        // A writer holds the lock; readers cannot join.
        if readers == NGX_RWLOCK_WLOCK {
            return false;
        }

        // Publish one more reader, but only if the count did not change
        // underneath us.
        self.0
            .compare_exchange(readers, readers + 1, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    /// Drops this thread's share of the lock.
    unsafe fn unlock_shared(&self) {
        self.0.fetch_sub(1, Ordering::Release);
    }

    /// Blocks until exclusive (write) access is acquired.
    fn lock_exclusive(&self) {
        loop {
            if self.try_lock_exclusive() {
                return;
            }

            // Same SMP backoff scheme as the shared path.
            if unsafe { nginx_sys::ngx_ncpu > 1 } {
                for backoff in 0..NGX_RWLOCK_SPIN {
                    (0..backoff).for_each(|_| core::hint::spin_loop());

                    if self.try_lock_exclusive() {
                        return;
                    }
                }
            }

            ngx_sched_yield()
        }
    }

    /// Attempts to take the write lock; succeeds only when the lock is
    /// completely free (no readers and no writer).
    fn try_lock_exclusive(&self) -> bool {
        self.0
            .compare_exchange(0, NGX_RWLOCK_WLOCK, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    /// Resets the lock to the unlocked state; the Release store publishes
    /// writes made under the lock.
    unsafe fn unlock_exclusive(&self) {
        self.0.store(0, Ordering::Release)
    }
}