use std::sync::Arc;
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};
use super::spin;
/// RAII guard returned by `SpinLock::lock`.
///
/// Grants access to the protected value through `Deref`/`DerefMut` and
/// releases the lock when dropped. Holds its own `Arc` to the shared lock
/// state, so the guard keeps that state alive even if the `SpinLock` itself
/// is dropped first.
pub struct SpinLockGuard<T> {
// Shared lock state; cloned from `SpinLock::inner` at acquisition time.
guarder: Arc<InnerSpinLock<T>>, }
// Shared access to the protected value while the lock is held.
impl<T> Deref for SpinLockGuard<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe {
// SAFETY: the existence of this guard means this thread acquired the
// lock (status flipped false -> true in `SpinLock::lock`) and has not
// yet released it in `Drop`, so no other thread can be accessing the
// value behind the `UnsafeCell` concurrently.
&*self.guarder.inner.get()
}
}
}
// Exclusive access to the protected value while the lock is held.
impl<T> DerefMut for SpinLockGuard<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe {
// SAFETY: this guard holds the lock, so this thread has exclusive
// access to the cell contents; `&mut self` further guarantees no
// other reference derived from this guard is live.
&mut *self.guarder.inner.get()
}
}
}
// Releases the lock when the guard goes out of scope.
impl<T> Drop for SpinLockGuard<T> {
fn drop(&mut self) {
// The Release store pairs with the Acquire compare-exchange in
// `SpinLock::lock`, publishing every write made while the lock was held
// to the next thread that acquires it.
self.guarder.status.store(false, Ordering::Release);
}
}
/// A mutual-exclusion spin lock protecting a value of type `T`.
///
/// `lock` busy-waits (with backoff via `spin`) until the lock is free and
/// returns a [`SpinLockGuard`] that releases it on drop. The state lives in
/// an `Arc`, so guards remain valid even if the `SpinLock` is dropped while
/// a guard is outstanding.
pub struct SpinLock<T> {
    inner: Arc<InnerSpinLock<T>>,
}

// SAFETY: the lock hands out `&mut T` to whichever thread acquires it, so
// sending or sharing the lock across threads is only sound when `T: Send`
// (the same bounds `std::sync::Mutex` uses). The previous unconstrained
// `impl<T>` versions were unsound: they made e.g. `SpinLock<Rc<_>>` both
// `Send` and `Sync`.
unsafe impl<T: Send> Send for SpinLock<T> {}
unsafe impl<T: Send> Sync for SpinLock<T> {}
impl<T> SpinLock<T> {
    /// Creates a new, unlocked spin lock protecting `v`.
    pub fn new(v: T) -> Self {
        let inner = Arc::new(InnerSpinLock {
            status: AtomicBool::new(false),
            inner: UnsafeCell::new(v),
        });
        SpinLock { inner }
    }

    /// Acquires the lock, busy-waiting with backoff until it is free, and
    /// returns a guard that releases the lock when dropped.
    ///
    /// Generic (non-aarch64) path: uses `compare_exchange_weak`; spurious
    /// failures are absorbed by the retry loop.
    #[cfg(not(target_arch = "aarch64"))]
    pub fn lock(&self) -> SpinLockGuard<T> {
        let mut spin_len = 1;
        loop {
            // Test-and-test-and-set: spin on a cheap relaxed load first so
            // contending threads do not ping-pong the cache line with failed
            // read-modify-write attempts.
            while self.inner.status.load(Ordering::Relaxed) {
                spin_len = spin(spin_len);
            }
            // Acquire on success pairs with the Release store in
            // `SpinLockGuard::drop`. The failure ordering is Relaxed (was
            // Acquire, which is unnecessarily strong): the loaded value is
            // discarded and we simply retry.
            if self
                .inner
                .status
                .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                return SpinLockGuard {
                    guarder: self.inner.clone(),
                };
            }
            spin_len = spin(spin_len);
        }
    }

    /// Acquires the lock (aarch64 path).
    ///
    /// NOTE(review): this variant uses the strong `compare_exchange`, which
    /// is unusual for an LL/SC architecture like aarch64 (the weak form is
    /// normally cheaper there). Preserved as-is — presumably deliberate;
    /// confirm the intent with the original author.
    #[cfg(target_arch = "aarch64")]
    pub fn lock(&self) -> SpinLockGuard<T> {
        let mut spin_len = 1;
        loop {
            // Test-and-test-and-set, as in the generic path.
            while self.inner.status.load(Ordering::Relaxed) {
                spin_len = spin(spin_len);
            }
            if self
                .inner
                .status
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                return SpinLockGuard {
                    guarder: self.inner.clone(),
                };
            }
            spin_len = spin(spin_len);
        }
    }
}
/// Shared state behind a `SpinLock`: the lock flag plus the protected value.
struct InnerSpinLock<T> {
    // `true` while some thread holds the lock.
    status: AtomicBool,
    // The protected value; only dereferenced through a held guard.
    inner: UnsafeCell<T>,
}

// SAFETY: the lock protocol grants exclusive (`&mut T`) access to the
// acquiring thread, so crossing threads is only sound when `T: Send` —
// the same bounds `std::sync::Mutex` uses for `Send`/`Sync`. The previous
// unconstrained `impl<T>` versions were unsound (they allowed sharing
// `InnerSpinLock<Rc<_>>` across threads).
unsafe impl<T: Send> Send for InnerSpinLock<T> {}
unsafe impl<T: Send> Sync for InnerSpinLock<T> {}