use crate::{
mutex::{RawMutex, RawMutexFair, RawMutexTimed},
GuardNoSend,
};
use core::{
cell::{Cell, UnsafeCell},
fmt,
marker::PhantomData,
mem,
num::NonZeroUsize,
ops::Deref,
sync::atomic::{AtomicUsize, Ordering},
};
#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
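/// Helper trait which returns a non-zero thread ID.
///
/// The simplest way to implement this trait is to return the address of a
/// thread-local variable.
///
/// # Safety
///
/// Implementations of this trait must ensure that no two active threads share
/// the same thread ID. However the ID of a thread which has exited can be
/// re-used since that thread is no longer active.
///
/// # Example
///
/// A minimal sketch of one possible implementation, modeled on
/// `parking_lot::RawThreadId`: the address of a thread-local variable is
/// never null and is unique among live threads, so it can serve as the ID.
///
/// ```rust,ignore
/// use core::num::NonZeroUsize;
///
/// struct RawThreadId;
///
/// unsafe impl lock_api::GetThreadId for RawThreadId {
///     const INIT: RawThreadId = RawThreadId;
///
///     fn nonzero_thread_id(&self) -> NonZeroUsize {
///         // The address of a thread-local variable is distinct for every
///         // live thread, which is exactly the uniqueness this trait needs.
///         thread_local!(static KEY: u8 = 0);
///         KEY.with(|x| {
///             NonZeroUsize::new(x as *const _ as usize)
///                 .expect("thread-local variable address is null")
///         })
///     }
/// }
/// ```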
pub unsafe trait GetThreadId {
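    /// Initial value.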
#[allow(clippy::declare_interior_mutable_const)]
const INIT: Self;
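    /// Returns a non-zero thread ID which identifies the current thread of
    /// execution.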
fn nonzero_thread_id(&self) -> NonZeroUsize;
}
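/// Internal helper which layers reentrancy on top of a raw mutex: `owner`
/// holds the ID of the thread currently owning `mutex` (0 when unlocked) and
/// `lock_count` counts how many times that thread has recursively locked it.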
struct RawReentrantMutex<R: RawMutex, G: GetThreadId> {
owner: AtomicUsize,
lock_count: Cell<usize>,
mutex: R,
get_thread_id: G,
}
impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
#[inline]
fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
let id = self.get_thread_id.nonzero_thread_id().get();
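        // A `Relaxed` load is sufficient here: `owner` can only equal our own
        // thread ID if this same thread stored it, and cross-thread
        // synchronization is provided by the underlying mutex.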
if self.owner.load(Ordering::Relaxed) == id {
self.lock_count.set(
self.lock_count
.get()
.checked_add(1)
.expect("ReentrantMutex lock count overflow"),
);
} else {
if !try_lock() {
return false;
}
self.owner.store(id, Ordering::Relaxed);
debug_assert_eq!(self.lock_count.get(), 0);
self.lock_count.set(1);
}
true
}
#[inline]
fn lock(&self) {
self.lock_internal(|| {
self.mutex.lock();
true
});
}
#[inline]
fn try_lock(&self) -> bool {
self.lock_internal(|| self.mutex.try_lock())
}
#[inline]
fn unlock(&self) {
let lock_count = self.lock_count.get() - 1;
self.lock_count.set(lock_count);
if lock_count == 0 {
self.owner.store(0, Ordering::Relaxed);
self.mutex.unlock();
}
}
}
impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
#[inline]
fn unlock_fair(&self) {
let lock_count = self.lock_count.get() - 1;
self.lock_count.set(lock_count);
if lock_count == 0 {
self.owner.store(0, Ordering::Relaxed);
self.mutex.unlock_fair();
}
}
#[inline]
fn bump(&self) {
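        // Only yield the raw mutex when this is the outermost lock. The owner
        // is cleared before bumping (another thread may briefly hold the
        // mutex) and restored once the raw mutex has been re-acquired.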
if self.lock_count.get() == 1 {
let id = self.owner.load(Ordering::Relaxed);
self.owner.store(0, Ordering::Relaxed);
self.mutex.bump();
self.owner.store(id, Ordering::Relaxed);
}
}
}
impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
#[inline]
fn try_lock_until(&self, timeout: R::Instant) -> bool {
self.lock_internal(|| self.mutex.try_lock_until(timeout))
}
#[inline]
fn try_lock_for(&self, timeout: R::Duration) -> bool {
self.lock_internal(|| self.mutex.try_lock_for(timeout))
}
}
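/// A mutex which can be recursively locked by a single thread.
///
/// This type is identical to `Mutex` except for the following points:
///
/// - Locking multiple times from the same thread will work correctly instead
///   of deadlocking.
/// - `ReentrantMutexGuard` does not give mutable references to the locked
///   data. Use a `RefCell` if you need this.
///
/// # Example
///
/// A minimal usage sketch. It assumes the `parking_lot` crate's
/// `ReentrantMutex` type alias, which plugs concrete `RawMutex` and
/// `GetThreadId` implementations into this type:
///
/// ```rust,ignore
/// use parking_lot::ReentrantMutex;
///
/// let mutex = ReentrantMutex::new(0);
/// // The same thread may lock recursively without deadlocking; both guards
/// // give shared (`&T`) access to the data.
/// let guard1 = mutex.lock();
/// let guard2 = mutex.lock();
/// assert_eq!(*guard1 + *guard2, 0);
/// ```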
pub struct ReentrantMutex<R: RawMutex, G: GetThreadId, T: ?Sized> {
raw: RawReentrantMutex<R, G>,
data: UnsafeCell<T>,
}
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
for ReentrantMutex<R, G, T>
{
}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
for ReentrantMutex<R, G, T>
{
}
impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
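    /// Creates a new reentrant mutex in an unlocked state ready for use.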
#[cfg(feature = "nightly")]
#[inline]
pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
ReentrantMutex {
data: UnsafeCell::new(val),
raw: RawReentrantMutex {
owner: AtomicUsize::new(0),
lock_count: Cell::new(0),
mutex: R::INIT,
get_thread_id: G::INIT,
},
}
}
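    /// Creates a new reentrant mutex in an unlocked state ready for use.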
#[cfg(not(feature = "nightly"))]
#[inline]
pub fn new(val: T) -> ReentrantMutex<R, G, T> {
ReentrantMutex {
data: UnsafeCell::new(val),
raw: RawReentrantMutex {
owner: AtomicUsize::new(0),
lock_count: Cell::new(0),
mutex: R::INIT,
get_thread_id: G::INIT,
},
}
}
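    /// Consumes this mutex, returning the underlying data.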
#[inline]
pub fn into_inner(self) -> T {
self.data.into_inner()
}
}
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
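    /// # Safety
    ///
    /// The lock must be held when calling this method.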
#[inline]
unsafe fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> {
ReentrantMutexGuard {
            remutex: self,
marker: PhantomData,
}
}
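    /// Acquires a reentrant mutex, blocking the current thread until it can
    /// be acquired.
    ///
    /// If the mutex is held by another thread then this function will block
    /// the local thread until it is available to acquire the mutex. If the
    /// mutex is already held by the current thread then this function will
    /// increment the lock reference count and return immediately. An RAII
    /// guard is returned to allow scoped unlock of the lock. When the guard
    /// goes out of scope, the mutex will be unlocked.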
#[inline]
pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
self.raw.lock();
unsafe { self.guard() }
}
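    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is
    /// returned. Otherwise, an RAII guard is returned. The lock will be
    /// unlocked when the guard is dropped.
    ///
    /// This function does not block.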
#[inline]
pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
if self.raw.try_lock() {
Some(unsafe { self.guard() })
} else {
None
}
}
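    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantMutex` mutably, no actual
    /// locking needs to take place: the mutable borrow statically guarantees
    /// no locks exist.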
#[inline]
pub fn get_mut(&mut self) -> &mut T {
unsafe { &mut *self.data.get() }
}
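    /// Forcibly unlocks the mutex.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example
    /// when dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using
    /// `mem::forget`. Behavior is undefined if a mutex is unlocked when not
    /// locked.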
#[inline]
pub unsafe fn force_unlock(&self) {
self.raw.unlock();
}
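    /// Returns the underlying raw mutex object.
    ///
    /// Note that you will most likely need to import the `RawMutex` trait to
    /// be able to call functions on the raw mutex.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking a mutex while still
    /// holding a reference to a guard.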
#[inline]
pub unsafe fn raw(&self) -> &R {
&self.raw.mutex
}
}
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
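    /// Forcibly unlocks the mutex using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example
    /// when dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using
    /// `mem::forget`. Behavior is undefined if a mutex is unlocked when not
    /// locked.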
#[inline]
pub unsafe fn force_unlock_fair(&self) {
self.raw.unlock_fair();
}
}
impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
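    /// Attempts to acquire this lock until a timeout is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock
    /// will be unlocked when the guard is dropped.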
#[inline]
pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
if self.raw.try_lock_for(timeout) {
Some(unsafe { self.guard() })
} else {
None
}
}
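    /// Attempts to acquire this lock until a timeout instant is reached.
    ///
    /// If the lock could not be acquired before the timeout expired, then
    /// `None` is returned. Otherwise, an RAII guard is returned. The lock
    /// will be unlocked when the guard is dropped.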
#[inline]
pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
if self.raw.try_lock_until(timeout) {
Some(unsafe { self.guard() })
} else {
None
}
}
}
impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
#[inline]
fn default() -> ReentrantMutex<R, G, T> {
ReentrantMutex::new(Default::default())
}
}
impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
#[inline]
fn from(t: T) -> ReentrantMutex<R, G, T> {
ReentrantMutex::new(t)
}
}
impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.try_lock() {
Some(guard) => f
.debug_struct("ReentrantMutex")
.field("data", &&*guard)
.finish(),
None => {
struct LockedPlaceholder;
impl fmt::Debug for LockedPlaceholder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<locked>")
}
}
f.debug_struct("ReentrantMutex")
.field("data", &LockedPlaceholder)
.finish()
}
}
}
}
#[cfg(feature = "serde")]
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
where
R: RawMutex,
G: GetThreadId,
T: Serialize + ?Sized,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.lock().serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
where
R: RawMutex,
G: GetThreadId,
T: Deserialize<'de> + ?Sized,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
}
}
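/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this
/// structure is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.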
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
remutex: &'a ReentrantMutex<R, G, T>,
marker: PhantomData<(&'a T, GuardNoSend)>,
}
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
for ReentrantMutexGuard<'a, R, G, T>
{
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
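    /// Returns a reference to the original `ReentrantMutex` object.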
pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
s.remutex
}
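    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked
    /// data.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed in
    /// already locked the mutex.
    ///
    /// This is an associated function that needs to be used as
    /// `ReentrantMutexGuard::map(...)`. A method would interfere with methods
    /// of the same name on the contents of the locked data.
    ///
    /// # Example
    ///
    /// A hedged sketch; the `point` mutex and its `x` field are hypothetical:
    ///
    /// ```rust,ignore
    /// let guard = point.lock();
    /// // Narrow the guard to a single field; the mutex stays locked until
    /// // the mapped guard is dropped.
    /// let x = ReentrantMutexGuard::map(guard, |p| &p.x);
    /// ```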
#[inline]
pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
where
F: FnOnce(&T) -> &U,
{
let raw = &s.remutex.raw;
let data = f(unsafe { &*s.remutex.data.get() });
mem::forget(s);
MappedReentrantMutexGuard {
raw,
data,
marker: PhantomData,
}
}
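    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of
    /// the locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// The closure receives only a shared reference: a reentrant mutex can
    /// hand out multiple guards on the same thread, so mutable access through
    /// one of them would alias the others.
    ///
    /// This is an associated function that needs to be used as
    /// `ReentrantMutexGuard::try_map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.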
#[inline]
pub fn try_map<U: ?Sized, F>(
s: Self,
f: F,
) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
where
        F: FnOnce(&T) -> Option<&U>,
{
let raw = &s.remutex.raw;
        let data = match f(unsafe { &*s.remutex.data.get() }) {
Some(data) => data,
None => return Err(s),
};
mem::forget(s);
Ok(MappedReentrantMutexGuard {
raw,
data,
marker: PhantomData,
})
}
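    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.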
#[inline]
pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
where
F: FnOnce() -> U,
{
s.remutex.raw.unlock();
defer!(s.remutex.raw.lock());
f()
}
}
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
ReentrantMutexGuard<'a, R, G, T>
{
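    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing
    /// a context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by
    /// forcing the lock to pass on to a waiting thread if there is one. This
    /// is done by using this method instead of dropping the
    /// `ReentrantMutexGuard` normally.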
#[inline]
pub fn unlock_fair(s: Self) {
s.remutex.raw.unlock_fair();
mem::forget(s);
}
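    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// The mutex is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.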
#[inline]
pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
where
F: FnOnce() -> U,
{
s.remutex.raw.unlock_fair();
defer!(s.remutex.raw.lock());
f()
}
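    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair`
    /// followed by `lock`, however it can be much more efficient in the case
    /// where there are no waiting threads.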
#[inline]
pub fn bump(s: &mut Self) {
s.remutex.raw.bump();
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
for ReentrantMutexGuard<'a, R, G, T>
{
type Target = T;
#[inline]
fn deref(&self) -> &T {
unsafe { &*self.remutex.data.get() }
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
for ReentrantMutexGuard<'a, R, G, T>
{
#[inline]
fn drop(&mut self) {
self.remutex.raw.unlock();
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
for ReentrantMutexGuard<'a, R, G, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for ReentrantMutexGuard<'a, R, G, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
for ReentrantMutexGuard<'a, R, G, T>
{
}
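/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can
/// point to a subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and
/// `ReentrantMutexGuard` is that the former doesn't support temporarily
/// unlocking and re-locking, since that could introduce soundness issues if
/// the locked object is modified by another thread.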
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
raw: &'a RawReentrantMutex<R, G>,
data: *const T,
marker: PhantomData<&'a T>,
}
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
for MappedReentrantMutexGuard<'a, R, G, T>
{
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
MappedReentrantMutexGuard<'a, R, G, T>
{
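    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked
    /// data.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedReentrantMutexGuard::map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.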
#[inline]
pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
where
F: FnOnce(&T) -> &U,
{
let raw = s.raw;
let data = f(unsafe { &*s.data });
mem::forget(s);
MappedReentrantMutexGuard {
raw,
data,
marker: PhantomData,
}
}
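    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of
    /// the locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This is an associated function that needs to be used as
    /// `MappedReentrantMutexGuard::try_map(...)`. A method would interfere
    /// with methods of the same name on the contents of the locked data.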
#[inline]
pub fn try_map<U: ?Sized, F>(
s: Self,
f: F,
) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
where
F: FnOnce(&T) -> Option<&U>,
{
let raw = s.raw;
let data = match f(unsafe { &*s.data }) {
Some(data) => data,
None => return Err(s),
};
mem::forget(s);
Ok(MappedReentrantMutexGuard {
raw,
data,
marker: PhantomData,
})
}
}
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
MappedReentrantMutexGuard<'a, R, G, T>
{
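    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock. Using
    /// this method instead of dropping the guard ensures the lock passes on
    /// to a waiting thread if there is one.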
#[inline]
pub fn unlock_fair(s: Self) {
s.raw.unlock_fair();
mem::forget(s);
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
for MappedReentrantMutexGuard<'a, R, G, T>
{
type Target = T;
#[inline]
fn deref(&self) -> &T {
unsafe { &*self.data }
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
for MappedReentrantMutexGuard<'a, R, G, T>
{
#[inline]
fn drop(&mut self) {
self.raw.unlock();
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
for MappedReentrantMutexGuard<'a, R, G, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for MappedReentrantMutexGuard<'a, R, G, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
for MappedReentrantMutexGuard<'a, R, G, T>
{
}