use core::cell::{Cell, UnsafeCell};
use core::ops::Deref;
use core::fmt;
use core::mem;
use core::marker::PhantomData;
use core::sync::atomic::{AtomicUsize, Ordering};
use mutex::{RawMutex, RawMutexFair, RawMutexTimed};
use GuardNoSend;
#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;
/// Helper trait which returns a non-zero thread ID.
///
/// The simplest way to implement this trait is to return the address of a
/// thread-local variable.
///
/// # Safety
///
/// Implementations of this trait must ensure that no two active threads share
/// the same thread ID, and that the returned ID is never zero (zero is used
/// internally to mean "unowned"). The ID of a thread that has exited may be
/// re-used since that thread is no longer active.
pub unsafe trait GetThreadId {
    /// Initial value suitable for static initialization.
    const INIT: Self;

    /// Returns a non-zero ID which identifies the current thread of execution.
    fn nonzero_thread_id(&self) -> usize;
}
// Internal state of a reentrant lock: a non-reentrant raw mutex plus
// book-keeping that records which thread currently owns it and how many
// times that thread has recursively acquired it.
struct RawReentrantMutex<R: RawMutex, G: GetThreadId> {
    // ID of the thread currently holding the lock, or 0 if unowned.
    // Stored with `Relaxed` ordering; see the comments in `lock_internal`.
    owner: AtomicUsize,
    // Recursion depth of the owning thread. Only ever accessed by the thread
    // that holds the lock, hence a plain `Cell` suffices.
    lock_count: Cell<usize>,
    // The underlying mutex that provides the actual mutual exclusion.
    mutex: R,
    // Source of non-zero per-thread IDs.
    get_thread_id: G,
}
impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
    // Common acquisition path. If the current thread already owns the lock,
    // just bump the recursion count; otherwise run `try_lock` to acquire the
    // inner mutex and, on success, record this thread as the owner. Returns
    // false only if `try_lock` returned false.
    #[inline]
    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
        let id = self.get_thread_id.nonzero_thread_id();
        if self.owner.load(Ordering::Relaxed) == id {
            // `owner` can only equal our own (non-zero) ID if we stored it
            // ourselves while holding the lock, so `Relaxed` is sufficient
            // and touching `lock_count` is safe here.
            self.lock_count.set(
                self.lock_count
                    .get()
                    .checked_add(1)
                    // Refuse to silently wrap the recursion depth.
                    .expect("ReentrantMutex lock count overflow"),
            );
        } else {
            if !try_lock() {
                return false;
            }
            // We now hold the inner mutex; publish ourselves as owner and
            // start the recursion count at 1.
            self.owner.store(id, Ordering::Relaxed);
            self.lock_count.set(1);
        }
        true
    }

    // Blocking acquire: the closure always succeeds.
    #[inline]
    fn lock(&self) {
        self.lock_internal(|| {
            self.mutex.lock();
            true
        });
    }

    // Non-blocking acquire; returns whether the lock was obtained.
    #[inline]
    fn try_lock(&self) -> bool {
        self.lock_internal(|| self.mutex.try_lock())
    }

    // Releases one level of recursion; the inner mutex is only unlocked when
    // the outermost acquisition is released.
    #[inline]
    fn unlock(&self) {
        let lock_count = self.lock_count.get() - 1;
        if lock_count == 0 {
            // Clear the owner *before* unlocking so a thread that acquires
            // the mutex next never observes our stale ID.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock();
        } else {
            self.lock_count.set(lock_count);
        }
    }
}
impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
    // Same as `unlock`, but uses the inner mutex's fair unlock protocol when
    // the outermost acquisition is released.
    #[inline]
    fn unlock_fair(&self) {
        let lock_count = self.lock_count.get() - 1;
        if lock_count == 0 {
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock_fair();
        } else {
            self.lock_count.set(lock_count);
        }
    }

    // Temporarily yields the inner mutex to a waiting thread, but only at the
    // outermost level of recursion (`lock_count == 1`) — yielding while
    // nested would leave the recursion count inconsistent.
    #[inline]
    fn bump(&self) {
        if self.lock_count.get() == 1 {
            // Clear `owner` for the duration of the bump so no other thread
            // observes our ID while the inner mutex may be held by someone
            // else; restore it once `bump` returns with the mutex re-held.
            let id = self.owner.load(Ordering::Relaxed);
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.bump();
            self.owner.store(id, Ordering::Relaxed);
        }
    }
}
impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
    // Acquire attempt that gives up once `timeout` duration has elapsed.
    #[inline]
    fn try_lock_for(&self, timeout: R::Duration) -> bool {
        let acquire = || self.mutex.try_lock_for(timeout);
        self.lock_internal(acquire)
    }

    // Acquire attempt that gives up at the `timeout` instant.
    #[inline]
    fn try_lock_until(&self, timeout: R::Instant) -> bool {
        let acquire = || self.mutex.try_lock_until(timeout);
        self.lock_internal(acquire)
    }
}
/// A mutex which can be recursively locked by a single thread.
///
/// This type works like a plain mutex except that locking it again from the
/// thread that already holds it succeeds (incrementing a recursion count)
/// instead of deadlocking. Note that the guard only ever hands out `&T`,
/// never `&mut T`.
pub struct ReentrantMutex<R: RawMutex, G: GetThreadId, T: ?Sized> {
    // Lock state (owner thread, recursion count, inner raw mutex).
    raw: RawReentrantMutex<R, G>,
    // The protected data; `UnsafeCell` because it is accessed through `&self`.
    data: UnsafeCell<T>,
}
// SAFETY: moving the mutex to another thread is sound when the raw mutex,
// the thread-ID source and the protected data are all `Send`.
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
for ReentrantMutex<R, G, T>
{
}
// SAFETY: only `T: Send` (not `T: Sync`) is required here because the guard
// is `!Send` (see the `GuardNoSend` marker on `ReentrantMutexGuard`), so all
// `&T` access obtained through a given guard stays on the locking thread.
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
for ReentrantMutex<R, G, T>
{
}
impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex in an unlocked state ready for use.
    // With the "nightly" feature this is a `const fn`; the `cfg(not(...))`
    // twin below is otherwise byte-for-byte identical.
    #[cfg(feature = "nightly")]
    #[inline]
    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                // 0 = unowned; GetThreadId promises IDs are non-zero.
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Creates a new reentrant mutex in an unlocked state ready for use.
    #[cfg(not(feature = "nightly"))]
    #[inline]
    pub fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Consumes this mutex, returning the underlying data.
    #[inline]
    #[allow(unused_unsafe)]
    pub fn into_inner(self) -> T {
        // `UnsafeCell::into_inner` was an `unsafe fn` on older toolchains;
        // the unsafe block plus `allow(unused_unsafe)` keeps this compiling
        // on both old and new compilers.
        unsafe { self.data.into_inner() }
    }
}
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    // Builds a guard for a lock that has already been acquired.
    #[inline]
    fn guard(&self) -> ReentrantMutexGuard<R, G, T> {
        ReentrantMutexGuard {
            remutex: &self,
            marker: PhantomData,
        }
    }

    /// Acquires the mutex, blocking the current thread until it is able to
    /// do so.
    ///
    /// If the mutex is already held by the current thread then the lock
    /// reference count is incremented and this returns immediately. The
    /// returned RAII guard unlocks the mutex (one level) when dropped.
    #[inline]
    pub fn lock(&self) -> ReentrantMutexGuard<R, G, T> {
        self.raw.lock();
        self.guard()
    }

    /// Attempts to acquire this lock without blocking.
    ///
    /// Returns `None` if the lock could not be acquired at this time,
    /// otherwise an RAII guard.
    #[inline]
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<R, G, T>> {
        if self.raw.try_lock() {
            Some(self.guard())
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// No locking takes place: the `&mut self` borrow statically guarantees
    /// that no guards exist.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data.get() }
    }

    /// Forcibly unlocks the mutex by one level.
    ///
    /// # Safety
    ///
    /// Must only be called when the current thread logically owns a
    /// `ReentrantMutexGuard` that was leaked with `mem::forget`. Unlocking a
    /// mutex that is not locked is undefined behavior.
    #[inline]
    pub unsafe fn force_unlock(&self) {
        self.raw.unlock();
    }

    /// Returns a reference to the underlying raw mutex object.
    ///
    /// # Safety
    ///
    /// Unsafe because it allows unlocking the raw mutex while guards pointing
    /// into this `ReentrantMutex` are still alive.
    #[inline]
    pub unsafe fn raw(&self) -> &R {
        &self.raw.mutex
    }
}
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Forcibly unlocks the mutex by one level using a fair unlock protocol.
    ///
    /// # Safety
    ///
    /// Must only be called when the current thread logically owns a
    /// `ReentrantMutexGuard` that was leaked with `mem::forget`. Unlocking a
    /// mutex that is not locked is undefined behavior.
    #[inline]
    pub unsafe fn force_unlock_fair(&self) {
        self.raw.unlock_fair();
    }
}
impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Attempts to acquire this lock, giving up once the given duration has
    /// elapsed. Returns an RAII guard on success, `None` on timeout.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<R, G, T>> {
        match self.raw.try_lock_for(timeout) {
            true => Some(self.guard()),
            false => None,
        }
    }

    /// Attempts to acquire this lock, giving up once the given instant is
    /// reached. Returns an RAII guard on success, `None` on timeout.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<R, G, T>> {
        match self.raw.try_lock_until(timeout) {
            true => Some(self.guard()),
            false => None,
        }
    }
}
impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
    /// Creates an unlocked mutex wrapping `T`'s default value.
    #[inline]
    fn default() -> ReentrantMutex<R, G, T> {
        let val = T::default();
        ReentrantMutex::new(val)
    }
}
impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
    /// Wraps `t` in a new, unlocked `ReentrantMutex`.
    #[inline]
    fn from(t: T) -> ReentrantMutex<R, G, T> {
        Self::new(t)
    }
}
impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
    /// Shows the protected data when the lock can be taken without blocking,
    /// and a `<locked>` placeholder otherwise.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(guard) = self.try_lock() {
            f.debug_struct("ReentrantMutex")
                .field("data", &&*guard)
                .finish()
        } else {
            f.pad("ReentrantMutex { <locked> }")
        }
    }
}
/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this
/// guard is dropped, one level of the lock is released.
///
/// The protected data can be accessed through this guard via its `Deref`
/// implementation (shared access only).
#[must_use]
pub struct ReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
    remutex: &'a ReentrantMutex<R, G, T>,
    // `GuardNoSend` makes the guard `!Send`: the lock must be released on the
    // thread that acquired it, since `owner`/`lock_count` track that thread.
    marker: PhantomData<(&'a T, GuardNoSend)>,
}
// SAFETY: sharing the guard across threads only exposes `&T`, which is sound
// when `T: Sync`; the guard itself remains `!Send`.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
for ReentrantMutexGuard<'a, R, G, T>
{
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
    /// Returns a reference to the original `ReentrantMutex` object.
    ///
    /// Associated function (called as `ReentrantMutexGuard::remutex(&g)`) so
    /// it cannot collide with methods on the protected data via `Deref`.
    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
        s.remutex
    }

    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked
    /// data.
    ///
    /// This cannot fail, as the guard passed in proves the mutex is already
    /// locked. Associated function for the same `Deref` reason as above.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.remutex.raw;
        let data = f(unsafe { &*s.remutex.data.get() });
        // Ownership of the lock transfers to the mapped guard: skip our own
        // `Drop` so the mutex is not unlocked here.
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// Safe because the `&mut Self` borrow guarantees no other references
    /// derived from this guard are alive while the lock is released.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.remutex.raw.unlock();
        // defer! re-acquires the lock on scope exit, even if `f` panics.
        defer!(s.remutex.raw.lock());
        f()
    }
}
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
ReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex (one level) using a fair unlock protocol, consuming
    /// the guard.
    ///
    /// Associated function: call as `ReentrantMutexGuard::unlock_fair(g)`.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.remutex.raw.unlock_fair();
        // The unlock already happened above; suppress the guard's normal
        // `Drop` which would unlock a second time.
        mem::forget(s);
    }

    /// Temporarily unlocks the mutex, using a fair unlock protocol, to
    /// execute the given function.
    ///
    /// Safe because the `&mut Self` borrow guarantees no other references
    /// derived from this guard are alive while the lock is released.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.remutex.raw.unlock_fair();
        // defer! re-acquires the lock on scope exit, even if `f` panics.
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// Roughly `unlock_fair` followed by `lock`, but can be much cheaper when
    /// no thread is waiting.
    #[inline]
    pub fn bump(s: &mut Self) {
        s.remutex.raw.bump();
    }
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
for ReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    // Shared access to the protected data is sound while the guard (and
    // therefore the lock) is alive. Note: only `&T`, never `&mut T`.
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.remutex.data.get() }
    }
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
for ReentrantMutexGuard<'a, R, G, T>
{
    // Releases one level of the reentrant lock when the guard goes out of
    // scope.
    #[inline]
    fn drop(&mut self) {
        self.remutex.raw.unlock();
    }
}
#[cfg(feature = "owning_ref")]
// SAFETY: the guard dereferences to data stored inside the mutex, whose
// address does not change while the guard is alive.
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
for ReentrantMutexGuard<'a, R, G, T>
{
}
/// An RAII guard returned by `ReentrantMutexGuard::map`, pointing to a
/// component of the locked data.
///
/// Unlike `ReentrantMutexGuard`, this guard has no `unlocked`/`bump` style
/// operations: it only holds the lock and releases it on drop.
#[must_use]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
    // The raw lock to release (one level) when this guard is dropped.
    raw: &'a RawReentrantMutex<R, G>,
    // Pointer to the mapped component; valid for 'a while the lock is held.
    // The raw pointer also makes this guard `!Send` automatically.
    data: *const T,
    marker: PhantomData<&'a T>,
}
// SAFETY: sharing the guard across threads only exposes `&T`, which is sound
// when `T: Sync`.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
for MappedReentrantMutexGuard<'a, R, G, T>
{
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Makes a new `MappedReentrantMutexGuard` for a component of the data
    /// this guard points to.
    ///
    /// This cannot fail, as the guard passed in proves the mutex is already
    /// locked. Associated function (`MappedReentrantMutexGuard::map(...)`) so
    /// it cannot collide with methods on the data via `Deref`.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        let data = f(unsafe { &*s.data });
        // Lock ownership transfers to the new guard; skip our own `Drop`.
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }
}
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex (one level) using a fair unlock protocol, consuming
    /// the guard.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.raw.unlock_fair();
        // Already unlocked above; suppress the guard's normal `Drop`.
        mem::forget(s);
    }
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
for MappedReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    // The pointer was derived from data inside the mutex while locked, and
    // the lock is held for as long as this guard is alive.
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
for MappedReentrantMutexGuard<'a, R, G, T>
{
    // Releases one level of the reentrant lock when the mapped guard goes
    // out of scope.
    #[inline]
    fn drop(&mut self) {
        self.raw.unlock();
    }
}
#[cfg(feature = "owning_ref")]
// SAFETY: the guard dereferences through a fixed pointer into the mutex's
// data, whose address does not change while the guard is alive.
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
for MappedReentrantMutexGuard<'a, R, G, T>
{
}