any_intern/common.rs

use parking_lot::{RawMutex, lock_api::RawMutex as _};
use std::{
    borrow::Borrow,
    cell::UnsafeCell,
    fmt,
    hash::{Hash, Hasher},
    ops,
    ptr::{self, NonNull},
    slice,
    sync::Arc,
};

/// Due to [`Prv`], clients cannot construct this type directly, but they can still pattern-match
/// it.
pub struct Interned<'a, T: ?Sized>(pub &'a T, Prv);

impl<'a, T: ?Sized> Interned<'a, T> {
    /// Returns a raw, pointer-only handle to the interned value.
    pub fn raw(&self) -> RawInterned<T> {
        let ptr = NonNull::from_ref(self.0);
        RawInterned(ptr)
    }

    /// Returns a type-erased raw handle to the interned value.
    pub fn erased_raw(&self) -> RawInterned {
        let ptr = NonNull::from_ref(self.0).cast::<Prv>();
        RawInterned(ptr)
    }

    /// The caller should guarantee that the value is unique in an interner.
    pub(crate) fn unique(value: &'a T) -> Self {
        Self(value, Prv)
    }
}

impl<'a, T: ?Sized> Interned<'a, T> {
    /// # Safety
    ///
    /// The value pointed to by the given `raw` must be alive in an interner.
    pub unsafe fn from_raw(raw: RawInterned<T>) -> Self {
        let ref_ = unsafe { raw.0.as_ref() };
        Self(ref_, Prv)
    }
}

impl<'a, T> Interned<'a, T> {
    /// # Safety
    ///
    /// * The value pointed to by the given `raw` must be alive in an interner.
    /// * `T` must be the erased value's actual type.
    pub unsafe fn from_erased_raw(raw: RawInterned) -> Self {
        let ref_ = unsafe { raw.0.cast::<T>().as_ref() };
        Self(ref_, Prv)
    }
}
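
// A round-trip sketch for the raw handles above. `interner` and `Key` are hypothetical
// stand-ins for whatever interner produced the value and for any `Sized` interned type;
// the only requirement is that the interner keeps the value alive.
//
//     let k: Interned<'_, Key> = interner.intern(Key::new(42));
//
//     let raw: RawInterned<Key> = k.raw();
//     // Safety: the interner still holds the value, so the pointer is alive.
//     let k2 = unsafe { Interned::from_raw(raw) };
//     assert_eq!(k, k2); // address equality, see `PartialEq` below
//
//     // The type-erased handle forgets `T`; the caller must supply it again.
//     let erased: RawInterned = k.erased_raw();
//     // Safety: the value is alive and its type really is `Key`.
//     let k3: Interned<'_, Key> = unsafe { Interned::from_erased_raw(erased) };
//     assert_eq!(k, k3);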

/// Compares data addresses only, which is sufficient for interned values.
impl<T: ?Sized> PartialEq for Interned<'_, T> {
    fn eq(&self, other: &Self) -> bool {
        ptr::addr_eq(self.0, other.0)
    }
}

/// Compares data addresses only, which is sufficient for interned values.
impl<T: ?Sized> Eq for Interned<'_, T> {}

impl<T: PartialOrd + ?Sized> PartialOrd for Interned<'_, T> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.0.partial_cmp(other.0)
    }
}

impl<T: Ord + ?Sized> Ord for Interned<'_, T> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.0.cmp(other.0)
    }
}

/// Hashes the underlying value, which stays consistent with the address-based equality above:
/// equal addresses always refer to the same value, hence the same content hash.
impl<T: Hash + ?Sized> Hash for Interned<'_, T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.hash(state)
    }
}

impl<T: ?Sized> Borrow<T> for Interned<'_, T> {
    fn borrow(&self) -> &T {
        self.0
    }
}

impl<'a, T: ?Sized> ops::Deref for Interned<'a, T> {
    type Target = &'a T;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T: ?Sized> Clone for Interned<'_, T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T: ?Sized> Copy for Interned<'_, T> {}

impl<T: fmt::Debug + ?Sized> fmt::Debug for Interned<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.0, f)
    }
}

impl<T: fmt::Display + ?Sized> fmt::Display for Interned<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.0, f)
    }
}
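
// An equality sketch: handles from the same interner compare by address, which coincides
// with content equality because each distinct value is stored there exactly once.
// `interner` is again a hypothetical stand-in.
//
//     let a = interner.intern("hello");
//     let b = interner.intern("hello");
//     let c = interner.intern("world");
//     assert_eq!(a, b);        // same interned allocation
//     assert_ne!(a, c);
//     assert_eq!(*a, "hello"); // `Deref` exposes the value itself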

// Clients cannot construct this type directly.
pub struct RawInterned<T: ?Sized = Prv>(pub(crate) NonNull<T>);

impl<T: ?Sized> RawInterned<T> {
    #[inline]
    pub fn cast<U>(self) -> RawInterned<U> {
        RawInterned(self.0.cast())
    }
}

/// Pointer comparison by address.
impl<T: ?Sized> PartialEq for RawInterned<T> {
    fn eq(&self, other: &Self) -> bool {
        ptr::addr_eq(self.0.as_ptr(), other.0.as_ptr())
    }
}

/// Pointer comparison by address.
impl<T: ?Sized> Eq for RawInterned<T> {}

/// Pointer comparison by address.
impl<T: ?Sized> PartialOrd for RawInterned<T> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

/// Pointer comparison by address.
impl<T: ?Sized> Ord for RawInterned<T> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.0
            .as_ptr()
            .cast::<()>()
            .cmp(&other.0.as_ptr().cast::<()>())
    }
}

impl<T: ?Sized> Hash for RawInterned<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.hash(state)
    }
}

impl<T: ?Sized> Borrow<NonNull<T>> for RawInterned<T> {
    fn borrow(&self) -> &NonNull<T> {
        &self.0
    }
}
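
// A lookup sketch: because `RawInterned` hashes and compares by address and borrows as
// `NonNull<T>`, a set of handles can be queried with a bare pointer. The `some_interned`
// handle below is hypothetical.
//
//     use std::collections::HashSet;
//
//     let mut handles: HashSet<RawInterned<str>> = HashSet::new();
//     handles.insert(some_interned.raw());
//     let ptr: NonNull<str> = NonNull::from_ref(*some_interned);
//     assert!(handles.contains(&ptr)); // lookup via `Borrow<NonNull<T>>`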

impl<T: ?Sized> ops::Deref for RawInterned<T> {
    type Target = NonNull<T>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T: ?Sized> Clone for RawInterned<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T: ?Sized> Copy for RawInterned<T> {}

impl<T: ?Sized> fmt::Debug for RawInterned<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

/// Marker used as a private field to keep clients from constructing [`Interned`] directly, and
/// as the default, type-erased pointee of [`RawInterned`].
#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct Prv;

#[derive(Clone)]
pub struct UnsafeLock<T: ?Sized> {
    inner: Arc<ManualMutex<T>>,
}

/// Unlike [`Mutex`], this lock is `Send` and `Sync` regardless of whether `T` is `Send` or not.
/// This is sound because `T` always stays under the protection of this lock, as long as clients
/// uphold the lock's safety contract.
///
/// # Safety
///
/// There must be no copies of the value inside this lock: clients must not hold copies either
/// before or after the lock is created. The lock is `Send` and `Sync` even when `T` isn't
/// precisely because it assumes the value it guards has no copies.
///
/// For example, suppose you hold several `Rc<T>` handles (which are not `Send`), wrap one of
/// them in an `UnsafeLock<Rc<T>>`, and send the lock to another thread. The `Rc<T>` copies left
/// outside the lock can then cause a data race.
///
/// If instead you have only one `T` and wrap it in `UnsafeLock`, then `T` is guaranteed to be
/// protected by this lock. Making copies of `UnsafeLock<T>`, sending them to other threads, and
/// accessing the lock from those threads does not break the guarantee. You can still make copies
/// of `T` through its pointer, but you must not.
///
/// [`Mutex`]: std::sync::Mutex
unsafe impl<T: ?Sized> Send for UnsafeLock<T> {}
unsafe impl<T: ?Sized> Sync for UnsafeLock<T> {}
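
// A sketch of the hazard described above. The second `Rc` handle lives outside the lock, so
// constructing the lock here violates `new`'s contract and reintroduces the data race:
//
//     let rc = Rc::new(0u32);
//     let outside = Rc::clone(&rc);              // copy kept outside the lock
//     let lock = unsafe { UnsafeLock::new(rc) }; // UNSOUND: `rc` is not the only copy
//     // Sending `lock` to another thread now lets its refcount race with `outside`.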

impl<T> UnsafeLock<T> {
    /// # Safety
    ///
    /// There must be no copies of the value. See [`Send implementation`].
    ///
    /// [`Send implementation`]: UnsafeLock<T>#impl-Send-for-UnsafeLock<T>
    pub unsafe fn new(value: T) -> Self {
        Self {
            inner: Arc::new(ManualMutex {
                mutex: RawMutex::INIT,
                data: UnsafeCell::new(value),
            }),
        }
    }
}

impl<T: ?Sized> UnsafeLock<T> {
    /// # Safety
    ///
    /// * Do not dereference the returned pointer after [`unlock`](Self::unlock).
    /// * Do not make copies of `T` from the returned pointer. See [`Send implementation`].
    ///
    /// [`Send implementation`]: UnsafeLock<T>#impl-Send-for-UnsafeLock<T>
    pub unsafe fn lock(&self) -> NonNull<T> {
        self.inner.mutex.lock();
        unsafe { NonNull::new_unchecked(self.inner.data.get()) }
    }

    /// # Safety
    ///
    /// Must be called only after a matching [`lock`](Self::lock).
    pub unsafe fn unlock(&self) {
        unsafe { self.inner.mutex.unlock() };
    }
}
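
// A minimal usage sketch of the lock/unlock pairing. The pointer is used only between `lock`
// and `unlock`, and no copy of the guarded value is made from it:
//
//     // Safety: the freshly created Vec has no other copies.
//     let lock = unsafe { UnsafeLock::new(vec![1, 2, 3]) };
//     // Safety: the pointer is not used after `unlock`, and no copy of the Vec is made.
//     unsafe {
//         let mut data = lock.lock();
//         data.as_mut().push(4);
//         lock.unlock();
//     }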

impl<T: fmt::Debug> fmt::Debug for UnsafeLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Safety: `lock` and `unlock` are paired, and the reference is not used after `unlock`.
        unsafe {
            let t = self.lock().as_ref();
            let ret = fmt::Debug::fmt(t, f);
            self.unlock();
            ret
        }
    }
}

struct ManualMutex<T: ?Sized> {
    mutex: RawMutex,
    data: UnsafeCell<T>,
}

// Same bounds as `std::sync::Mutex`: the mutex grants exclusive access, so `Sync` only needs
// `T: Send`.
unsafe impl<T: Send + ?Sized> Send for ManualMutex<T> {}
unsafe impl<T: Send + ?Sized> Sync for ManualMutex<T> {}

/// Drops `num_elems` values of type `T` in place, starting at `ptr`. The allocation itself is
/// not freed.
///
/// # Safety
///
/// `ptr` must point to `num_elems` contiguous, initialized values of type `T` that are valid to
/// drop and are not dropped elsewhere.
pub(crate) unsafe fn cast_then_drop_slice<T>(ptr: *mut u8, num_elems: usize) {
    unsafe {
        let slice = slice::from_raw_parts_mut(ptr.cast::<T>(), num_elems);
        ptr::drop_in_place(slice);
    }
}
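
// A sketch of how this helper might be used: dropping the elements of a type-erased buffer
// whose element type is known only at the call site. `ManuallyDrop` keeps the Vec from
// dropping the Strings a second time; its heap buffer is leaked here, whereas a real caller
// would free its own allocation afterwards.
//
//     let mut v = std::mem::ManuallyDrop::new(vec![String::from("a"), String::from("b")]);
//     let ptr = v.as_mut_ptr().cast::<u8>();
//     // Safety: `ptr` points at 2 initialized `String`s that nothing else will drop.
//     unsafe { cast_then_drop_slice::<String>(ptr, 2) };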

#[cfg(test)]
pub(crate) fn assert_group_addr_eq(groups: &[&[RawInterned]]) {
    for i in 0..groups.len() {
        // Handles within a group share the same address.
        for w in groups[i].windows(2) {
            assert_eq!(w[0], w[1]);
        }

        // Different groups have different addresses.
        let a = groups[i][0];
        for j in i + 1..groups.len() {
            let b = groups[j][0];
            assert_ne!(a, b);
        }
    }
}