#[cfg(feature = "nightly")]
use std::sync::atomic::{AtomicUsize, Ordering};
#[cfg(not(feature = "nightly"))]
use stable::{AtomicUsize, Ordering};
use std::thread;
use std::ptr;
use std::mem;
use std::cell::Cell;
use thread_parker::ThreadParker;
use SPIN_LIMIT;
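
// Per-thread queue node. Each parked thread contributes one of these,
// allocated in thread-local storage, to the lock's intrusive wait queue.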
struct ThreadData {
    parker: ThreadParker,

    // The queue is a singly-linked list threaded through `next_in_queue`.
    // `queue_tail` is only meaningful on the queue head, where it points to
    // the last entry so new threads can be appended in O(1).
    next_in_queue: Cell<*const ThreadData>,
    queue_tail: Cell<*const ThreadData>,
}
impl ThreadData {
    fn new() -> ThreadData {
        ThreadData {
            parker: ThreadParker::new(),
            next_in_queue: Cell::new(ptr::null()),
            queue_tail: Cell::new(ptr::null()),
        }
    }
}
thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
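
// Layout of the state word:
// - LOCKED_BIT: set while the lock is held.
// - QUEUE_LOCKED_BIT: set while a thread is modifying the wait queue.
// - QUEUE_MASK: the remaining bits hold a pointer to the head of the queue.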
const LOCKED_BIT: usize = 1;
const QUEUE_LOCKED_BIT: usize = 2;
const QUEUE_MASK: usize = !3;
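
/// A lock that fits in a single word of memory: the two low bits of `state`
/// are flags and the remaining bits store the head of the queue of parked
/// threads.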
pub struct WordLock {
    state: AtomicUsize,
}
impl WordLock {
    #[inline]
    pub fn new() -> WordLock {
        WordLock { state: AtomicUsize::new(0) }
    }

    #[inline]
    pub unsafe fn lock(&self) {
        // Fast path: uncontended acquire.
        if self.state
            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            return;
        }
        self.lock_slow();
    }

    #[inline]
    pub unsafe fn unlock(&self) {
        // Fast path: release when there are no queued threads.
        if self.state
            .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
            .is_ok()
        {
            return;
        }
        self.unlock_slow();
    }
    #[cold]
    #[inline(never)]
    unsafe fn lock_slow(&self) {
        let mut spin_count = 0;
        let mut state = self.state.load(Ordering::Relaxed);
        loop {
            // Grab the lock if it isn't locked, even if there is a queue on it.
            if state & LOCKED_BIT == 0 {
                match self.state.compare_exchange_weak(
                    state,
                    state | LOCKED_BIT,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return,
                    Err(x) => state = x,
                }
                continue;
            }

            // If there is no queue, try spinning a few times before parking.
            if state & QUEUE_MASK == 0 && spin_count < SPIN_LIMIT {
                spin_count += 1;
                thread::yield_now();
                state = self.state.load(Ordering::Relaxed);
                continue;
            }

            // If another thread is modifying the queue, wait for it to finish.
            if state & QUEUE_LOCKED_BIT != 0 {
                thread::yield_now();
                state = self.state.load(Ordering::Relaxed);
                continue;
            }

            // Get our thread data and prepare it for parking. The state word
            // stores the queue head pointer in its upper bits, so ThreadData
            // must be aligned to more than !QUEUE_MASK (i.e. at least 4 bytes).
            let thread_data = &*THREAD_DATA.with(|x| x as *const ThreadData);
            assert!(mem::align_of_val(thread_data) > !QUEUE_MASK);
            thread_data.next_in_queue.set(ptr::null());
            thread_data.parker.prepare_park();

            // Lock the queue so we can safely append ourselves to it.
            if let Err(x) = self.state.compare_exchange_weak(
                state,
                state | QUEUE_LOCKED_BIT,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                state = x;
                continue;
            }

            // Add our thread to the back of the queue. `queue_tail` is only
            // valid on the queue head.
            let mut queue_head = (state & QUEUE_MASK) as *const ThreadData;
            if !queue_head.is_null() {
                (*(*queue_head).queue_tail.get()).next_in_queue.set(thread_data);
            } else {
                queue_head = thread_data;
            }
            (*queue_head).queue_tail.set(thread_data);

            // Publish the new queue head and release the queue lock. The lock
            // bit stays set: the lock is still held by its current owner.
            self.state.store((queue_head as usize) | LOCKED_BIT, Ordering::Release);

            // Sleep until we are woken up by an unlock, then loop back and try
            // to acquire the lock again.
            thread_data.parker.park();
            spin_count = 0;
            state = self.state.load(Ordering::Relaxed);
        }
    }
    #[cold]
    #[inline(never)]
    unsafe fn unlock_slow(&self) {
        let queue_head;
        let mut state = self.state.load(Ordering::Relaxed);
        loop {
            // If there is no queue, release the lock directly.
            if state == LOCKED_BIT {
                match self.state.compare_exchange_weak(
                    LOCKED_BIT,
                    0,
                    Ordering::Release,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return,
                    Err(x) => state = x,
                }
                continue;
            }

            // If another thread is modifying the queue, wait for it to finish.
            if state & QUEUE_LOCKED_BIT != 0 {
                thread::yield_now();
                state = self.state.load(Ordering::Relaxed);
                continue;
            }

            // Lock the queue so we can safely dequeue a thread to wake up.
            match self.state.compare_exchange_weak(
                state,
                state | QUEUE_LOCKED_BIT,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    queue_head = (state & QUEUE_MASK) as *mut ThreadData;
                    break;
                }
                Err(x) => state = x,
            }
        }

        // Remove the first thread from the queue, transferring the tail
        // pointer to the new head if the queue is non-empty.
        let new_queue_head = (*queue_head).next_in_queue.get();
        if !new_queue_head.is_null() {
            (*new_queue_head).queue_tail.set((*queue_head).queue_tail.get());
        }

        // Release the lock and the queue lock in a single store: the new state
        // holds only the new queue head, with both flag bits clear.
        self.state.store(new_queue_head as usize, Ordering::Release);
        // Finally, wake up the dequeued thread: `unpark_lock` returns a handle
        // whose `unpark` method completes the wakeup.
        let handle = (*queue_head).parker.unpark_lock();
        handle.unpark();
    }
}
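
// A minimal usage sketch, not part of the original module: it assumes only
// the `WordLock` API above and checks that contended lock/unlock pairs from
// several threads complete without deadlocking.
#[cfg(test)]
mod tests {
    use super::WordLock;
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn contended_lock_unlock() {
        let lock = Arc::new(WordLock::new());
        let handles: Vec<_> = (0..4)
            .map(|_| {
                let lock = lock.clone();
                thread::spawn(move || {
                    for _ in 0..1000 {
                        // The calls are unsafe because WordLock does not itself
                        // track which thread holds it.
                        unsafe {
                            lock.lock();
                            lock.unlock();
                        }
                    }
                })
            })
            .collect();
        for handle in handles {
            handle.join().unwrap();
        }
    }
}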