
any_intern/common.rs

use parking_lot::{RawMutex, lock_api::RawMutex as _};
use std::{
    borrow::Borrow,
    cell::UnsafeCell,
    fmt,
    hash::{Hash, Hasher},
    ops,
    ptr::{self, NonNull},
    slice,
    sync::Arc,
};

/// Due to [`Prv`], clients cannot construct this type directly, but they are still
/// allowed to use pattern matching.
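///
/// # Examples
///
/// A minimal sketch of typical use, assuming a hypothetical `Interner` whose `intern`
/// method returns an `Interned` handle (no such interner is defined in this module):
///
/// ```ignore
/// let interner = Interner::new();
/// let name: Interned<'_, str> = interner.intern("hello");
///
/// // Pattern matching is allowed even though direct construction is not.
/// let Interned(inner, _) = name;
/// assert_eq!(inner, "hello");
///
/// // Field access and `Deref` reach the underlying value as well.
/// assert_eq!(name.0, "hello");
/// assert_eq!(name.len(), 5);
/// ```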
pub struct Interned<'a, T: ?Sized>(pub &'a T, Prv);

impl<'a, T: ?Sized> Interned<'a, T> {
    pub fn raw(&self) -> RawInterned<T> {
        let ptr = NonNull::from_ref(self.0);
        RawInterned(ptr)
    }

    pub fn erased_raw(&self) -> RawInterned {
        let ptr = NonNull::from_ref(self.0).cast::<Prv>();
        RawInterned(ptr)
    }

    /// The caller must guarantee that the value is unique within the interner.
    pub(crate) fn unique(value: &'a T) -> Self {
        Self(value, Prv)
    }
}

impl<'a, T: ?Sized> Interned<'a, T> {
    /// # Safety
    ///
    /// The value pointed to by the given `raw` must be alive in an interner.
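    ///
    /// A sketch of the intended round trip, assuming `interned` is a live handle
    /// obtained from a hypothetical interner (not defined in this module):
    ///
    /// ```ignore
    /// let raw: RawInterned<str> = interned.raw();
    /// // Safety: the interner that produced `interned` is still alive, so the value
    /// // behind `raw` is still alive as well.
    /// let back: Interned<'_, str> = unsafe { Interned::from_raw(raw) };
    /// assert!(back == interned);
    /// ```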
    pub unsafe fn from_raw(raw: RawInterned<T>) -> Self {
        let ref_ = unsafe { raw.0.as_ref() };
        Self(ref_, Prv)
    }
}

impl<'a, T> Interned<'a, T> {
    /// # Safety
    ///
    /// * The value pointed to by the given `raw` must be alive in an interner.
    /// * The erased type must actually be `T`.
    pub unsafe fn from_erased_raw(raw: RawInterned) -> Self {
        let ref_ = unsafe { raw.0.cast::<T>().as_ref() };
        Self(ref_, Prv)
    }
}

/// Compares data addresses only, which is sufficient for interned values.
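///
/// A sketch of the intended behavior, assuming a hypothetical deduplicating `Interner`
/// (not defined in this module): interning equal values yields handles to the same
/// allocation, so the address comparison agrees with value equality.
///
/// ```ignore
/// let a = interner.intern("hello");
/// let b = interner.intern("hello");
/// assert!(a == b); // same interned allocation, so the addresses match
/// ```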
impl<T: ?Sized> PartialEq for Interned<'_, T> {
    fn eq(&self, other: &Self) -> bool {
        ptr::addr_eq(self.0, other.0)
    }
}

/// Compares data addresses only, which is sufficient for interned values.
impl<T: ?Sized> Eq for Interned<'_, T> {}

impl<T: PartialOrd + ?Sized> PartialOrd for Interned<'_, T> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.0.partial_cmp(other.0)
    }
}

impl<T: Ord + ?Sized> Ord for Interned<'_, T> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.0.cmp(other.0)
    }
}

impl<T: Hash + ?Sized> Hash for Interned<'_, T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.hash(state)
    }
}

impl<T: ?Sized> Borrow<T> for Interned<'_, T> {
    fn borrow(&self) -> &T {
        self.0
    }
}

impl<T: ?Sized> AsRef<T> for Interned<'_, T> {
    fn as_ref(&self) -> &T {
        self.0
    }
}

impl<'a, T: ?Sized> ops::Deref for Interned<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.0
    }
}

impl<T: ?Sized> Clone for Interned<'_, T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T: ?Sized> Copy for Interned<'_, T> {}

impl<T: fmt::Debug + ?Sized> fmt::Debug for Interned<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.0, f)
    }
}

impl<T: fmt::Display + ?Sized> fmt::Display for Interned<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.0, f)
    }
}

/// The same type as [`Interned`], but with an erased lifetime.
///
/// Although the lifetime is erased, `RawInterned` still carries its type, so it can be
/// used where the type is required. You can erase the type as well by calling
/// [`erase`](Self::erase).
///
/// # Note
///
/// `RawInterned` is just a pointer type and points to a `T` in memory. If `T` is a ZST,
/// or a DST containing no data such as an empty string, the pointer will be non-null and
/// well-aligned but dangling. Comparing type-erased `RawInterned`s is therefore
/// discouraged, because values of different types can end up with the same dangling
/// pointer.
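///
/// A sketch of the pitfall, assuming hypothetical interners that hand out dangling
/// pointers for empty values (no interner is defined in this module):
///
/// ```ignore
/// let empty_str: Interned<'_, str> = str_interner.intern("");
/// let empty_bytes: Interned<'_, [u8]> = byte_interner.intern(&[]);
///
/// // Typed raw handles keep the types apart at compile time...
/// let a: RawInterned<str> = empty_str.raw();
/// let b: RawInterned<[u8]> = empty_bytes.raw();
///
/// // ...but once the types are erased, both may hold the same dangling,
/// // well-aligned address, so this comparison can report equality across types.
/// let misleading = a.erase() == b.erase();
/// ```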
//
// Clients are not allowed to make this type directly.
pub struct RawInterned<T: ?Sized = Prv>(pub(crate) NonNull<T>);

impl<T: ?Sized> RawInterned<T> {
    #[inline]
    pub fn cast<U>(self) -> RawInterned<U> {
        RawInterned(self.0.cast())
    }

    #[inline]
    pub fn erase(self) -> RawInterned {
        RawInterned(self.0.cast())
    }
}

/// Pointer comparison by address.
impl<T: ?Sized> PartialEq for RawInterned<T> {
    fn eq(&self, other: &Self) -> bool {
        ptr::addr_eq(self.0.as_ptr(), other.0.as_ptr())
    }
}

/// Pointer comparison by address.
impl<T: ?Sized> Eq for RawInterned<T> {}

/// Pointer comparison by address.
impl<T: ?Sized> PartialOrd for RawInterned<T> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

/// Pointer comparison by address.
impl<T: ?Sized> Ord for RawInterned<T> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.0
            .as_ptr()
            .cast::<()>()
            .cmp(&other.0.as_ptr().cast::<()>())
    }
}

impl<T: ?Sized> Hash for RawInterned<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.hash(state)
    }
}

impl<T: ?Sized> Borrow<NonNull<T>> for RawInterned<T> {
    fn borrow(&self) -> &NonNull<T> {
        &self.0
    }
}

impl<T: ?Sized> ops::Deref for RawInterned<T> {
    type Target = NonNull<T>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T: ?Sized> Clone for RawInterned<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T: ?Sized> Copy for RawInterned<T> {}

impl<T: ?Sized> fmt::Debug for RawInterned<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct Prv;

#[derive(Clone)]
pub struct UnsafeLock<T: ?Sized> {
    inner: Arc<ManualMutex<T>>,
}

/// Unlike [`Mutex`], this lock is `Send` and `Sync` regardless of whether `T` is `Send`.
/// That is because `T` stays under the protection of this lock as long as clients uphold
/// the lock's safety contract.
///
/// # Safety
///
/// There must be no copies of the value inside this lock: clients must not hold copies
/// either before or after creating the lock. The lock assumes that the value it wraps
/// has no outside copies, which is why it is `Send` and `Sync` even when `T` isn't.
///
/// For example, imagine you hold several `Rc<T>` handles (`Rc` is not `Send`), wrap one
/// of them in an `UnsafeLock<Rc<T>>`, and send the lock to another thread. That can
/// cause a data race through the `Rc<T>` handles left outside the lock.
///
/// But if you own only one `T` and wrap it in `UnsafeLock`, then `T` is guaranteed to be
/// protected by this lock. Cloning the `UnsafeLock<T>`, sending it to another thread,
/// and accessing it from there does not break the guarantee. You could still make copies
/// of `T` through the raw pointer, but you must not.
///
/// [`Mutex`]: std::sync::Mutex
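///
/// A minimal sketch of the intended pattern, assuming this module's items are reachable
/// from the caller (the exact paths are an assumption):
///
/// ```ignore
/// use std::rc::Rc;
///
/// // Safety: this `Rc` is the only copy of the value; no copy is kept outside the lock.
/// let lock = unsafe { UnsafeLock::new(Rc::new(0_i32)) };
///
/// // `UnsafeLock<Rc<i32>>` is `Send` even though `Rc<i32>` is not.
/// let cloned = lock.clone();
/// std::thread::spawn(move || {
///     // Safety: the pointer is not used after `unlock`, and no copy of the
///     // inner `Rc` is made from it.
///     unsafe {
///         let ptr = cloned.lock();
///         assert_eq!(**ptr.as_ref(), 0);
///         cloned.unlock();
///     }
/// })
/// .join()
/// .unwrap();
/// ```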
unsafe impl<T: ?Sized> Send for UnsafeLock<T> {}
unsafe impl<T: ?Sized> Sync for UnsafeLock<T> {}

impl<T> UnsafeLock<T> {
    /// # Safety
    ///
    /// There must be no copies of the value. See [`Send implementation`].
    ///
    /// [`Send implementation`]: UnsafeLock<T>#impl-Send-for-UnsafeLock<T>
    pub unsafe fn new(value: T) -> Self {
        Self {
            inner: Arc::new(ManualMutex {
                mutex: RawMutex::INIT,
                data: UnsafeCell::new(value),
            }),
        }
    }
}

impl<T: ?Sized> UnsafeLock<T> {
    /// # Safety
    ///
    /// * Do not dereference the returned pointer after [`unlock`](Self::unlock).
    /// * Do not make copies of `T` from the returned pointer. See [`Send implementation`].
    ///
    /// [`Send implementation`]: UnsafeLock<T>#impl-Send-for-UnsafeLock<T>
    pub unsafe fn lock(&self) -> NonNull<T> {
        self.inner.mutex.lock();
        unsafe { NonNull::new_unchecked(self.inner.data.get()) }
    }

    /// # Safety
    ///
    /// Must follow a call to [`lock`](Self::lock).
    pub unsafe fn unlock(&self) {
        unsafe { self.inner.mutex.unlock() };
    }
}

impl<T: fmt::Debug> fmt::Debug for UnsafeLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Safety: Lock & unlock are paired with each other.
        unsafe {
            let t = self.lock().as_ref();
            let ret = fmt::Debug::fmt(t, f);
            self.unlock();
            ret
        }
    }
}

struct ManualMutex<T: ?Sized> {
    mutex: RawMutex,
    data: UnsafeCell<T>,
}

// Like `std::sync::Mutex<T>`, `Send` and `Sync` only require `T: Send`, because `data`
// is only reachable while `mutex` is held (per the contract of `UnsafeLock`).
unsafe impl<T: Send + ?Sized> Send for ManualMutex<T> {}
unsafe impl<T: Send + ?Sized> Sync for ManualMutex<T> {}

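/// Reinterprets a type-erased buffer as a slice of `T` and drops the elements in place.
/// The memory itself is not deallocated; that stays the caller's responsibility.
///
/// # Safety
///
/// `ptr` must point to `num_elems` initialized, properly aligned values of type `T` that
/// have not been dropped yet and are not used afterwards.
///
/// A minimal usage sketch, assuming `buf` is such a type-erased buffer that was filled
/// with `len` `String`s elsewhere (both names are hypothetical):
///
/// ```ignore
/// unsafe { cast_then_drop_slice::<String>(buf, len) };
/// // `buf`'s allocation still has to be freed by its owner.
/// ```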
pub(crate) unsafe fn cast_then_drop_slice<T>(ptr: *mut u8, num_elems: usize) {
    unsafe {
        let slice = slice::from_raw_parts_mut(ptr.cast::<T>(), num_elems);
        ptr::drop_in_place(slice);
    }
}

#[cfg(test)]
pub(crate) fn assert_group_addr_eq(groups: &[&[RawInterned]]) {
    for i in 0..groups.len() {
        // Entries within a group share the same address.
        for w in groups[i].windows(2) {
            assert_eq!(w[0], w[1]);
        }

        // Different groups have different addresses.
        let a = groups[i][0];
        for j in i + 1..groups.len() {
            let b = groups[j][0];
            assert_ne!(a, b);
        }
    }
}