// parc/lib.rs

1//! This crate exposes [`ParentArc<T>`](struct.ParentArc.html) which is comparable to an
2//! [`Arc<T>`](https://doc.rust-lang.org/std/sync/struct.Arc.html) but "strong" references cannot
3//! be cloned which allows the `ParentArc<T>` to lock its weak references and block until all
//! strong references are dropped. Once it is the only reference it can be consumed safely.
5//!
6//! This crate is compatible with
//! [`#![no_std]`](https://rust-embedded.github.io/book/intro/no-std.html) environments that
8//! provide an allocator.
9
10#![no_std]
11#![deny(missing_docs)]
12
// `#![no_std]` build: pull `Box` from the `alloc` crate (an allocator is
// required, as stated in the crate docs).
#[cfg(not(feature = "std"))]
mod imports {
    extern crate alloc;
    pub(super) use alloc::boxed::Box;
}
18
// `std` build: same `Box`, plus `fmt` for the std-gated `Debug` impl.
#[cfg(feature = "std")]
mod imports {
    extern crate std;
    pub(super) use std::boxed::Box;
    pub(super) use std::fmt;
}
25
26use imports::*;
27
28use core::mem;
29use core::ops;
30use core::pin::Pin;
31use core::ptr;
32use core::ptr::NonNull;
33use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
34
/// Result type for [`try_unwrap`]; on failure the `Err` variant hands the
/// `ParentArc` back to the caller so ownership is never lost.
///
/// [`try_unwrap`]: struct.ParentArc.html#method.try_unwrap
pub type TryUnwrapResult<T> = Result<T, TryUnwrapError<T>>;
39
/// Errors for [`TryUnwrapResult`](type.TryUnwrapResult.html); each variant
/// returns ownership of the `ParentArc` to the caller.
pub enum TryUnwrapError<T> {
    /// The [`LockWeak`](struct.LockWeak.html)s were not locked while readers
    /// were still alive; lock first, then retry
    WouldLock(ParentArc<T>),

    /// Would have blocked because there is still a [`ChildArc`](struct.ChildArc.html) reference
    WouldBlock(ParentArc<T>),
}
48
#[cfg(feature = "std")]
impl<T> fmt::Debug for TryUnwrapError<T> {
    /// Prints only the variant name, eliding the `ParentArc` payload.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let label = match self {
            TryUnwrapError::WouldLock(_) => "WouldLock(...)",
            TryUnwrapError::WouldBlock(_) => "WouldBlock(...)",
        };
        f.write_str(label)
    }
}
58
/// Owner of multiple atomically reference counted children.
///
/// The type `ParentArc<T>` allows for shared access of the inner data by multiple threads through LockWeak references.
/// Call downgrade on a `ParentArc` to create a child reference that can be upgraded into a
/// temporary reader of the inner data. This allows for the locking and the consumption of the
/// parent at any time because no strong references are held permanently.
///
/// Unlike [`Arc<T>`](https://doc.rust-lang.org/std/sync/struct.Arc.html) this structure will die
/// along with its readers.
///
/// # Thread Safety
/// The [`LockWeak`](struct.LockWeak) can be passed around through threads safely because they do
/// not guarantee the existence of the data at upgrade time.
/// `ParentArc<T>` makes it thread safe to have multiple owned references of the same data, but it doesn't add thread safety to its data.
pub struct ParentArc<T> {
    // Sole owner of the heap-allocated shared state (`Womb`).
    ptr: NonNull<Womb<T>>,
}
76
77impl<T> ParentArc<T> {
78    /// Build a new [`ParentArc`](struct.ParentArc.html)
79    ///
80    /// # Examples
81    /// ```rust
82    /// use parc::ParentArc;
83    /// use std::sync::Mutex;
84    /// fn main() {
85    ///     let parent = ParentArc::new(Mutex::new(true));
86    /// }
87    /// ```
88    pub fn new(data: T) -> Self {
89        Self {
90            ptr: Womb::as_nnptr(data),
91        }
92    }
93
94    /// Constructs a new `Pin<ParentArc<T>>`. If `T` does not implement `Unpin`, then
95    /// `data` will be pinned in memory and unable to be moved.
96    pub fn pin(data: T) -> Pin<ParentArc<T>> {
97        unsafe { Pin::new_unchecked(ParentArc::new(data)) }
98    }
99
100    /// Locks all [`LockWeak`](struct.LockWeak.html) of this instance, it
101    /// will prevent all further upgrades until [`unlocked`]. It is advised to call this before
102    /// attempting a [`try_into_inner`].
103    ///
104    /// [`unlocked`]: #method.unlock
105    /// [`try_into_inner`]: #method.try_into_inner
106    ///
107    /// # Examples
108    /// ```rust
109    /// use parc::ParentArc;
110    /// use std::sync::Mutex;
111    /// fn main() {
112    ///     let parent = ParentArc::new(Mutex::new(0));
113    ///     parent.lock(); // LockWeaks are no longer able to upgrade successfully
114    ///     assert!(parent.is_locked());
115    /// }
116    /// ```
117    pub fn lock(&self) {
118        let lock = &self.inner().lock;
119        while lock.compare_and_swap(false, true, Ordering::Release) {}
120    }
121
122    /// Check wether the [`LockWeak`](struct.LockWeak.html)s are locked. Since only the Parent can
123    /// unlock it is considered a somewhat trustable result.
124    pub fn is_locked(&self) -> bool {
125        self.inner().lock.load(Ordering::Relaxed)
126    }
127
128    /// Unlocks all [`LockWeak`](struct.LockWeak.html) of this [`ParentArc`](struct.ParentArc.html),
129    /// this allows for their ugrade to start again.
130    ///
131    /// # Examples
132    /// ```rust
133    /// use parc::ParentArc;
134    /// use std::sync::Mutex;
135    /// fn main() {
136    ///     let parent = ParentArc::new(Mutex::new(0));
137    ///     
138    ///     parent.lock(); // LockWeaks are no longer able to upgrade successfully
139    ///     assert!(parent.is_locked());
140    ///     
141    ///     parent.unlock(); // LockWeaks can upgrade successfully again
142    ///     assert!(!parent.is_locked());
143    /// }
144    /// ```
145    pub fn unlock(&self) {
146        let lock = &self.inner().lock;
147        while lock.compare_and_swap(true, false, Ordering::Release) {}
148    }
149
150    /// Downgrade a [`ParentArc`](struct.ParentArc.html) into a [`LockWeak`](struct.LockWeak.html)
151    ///
152    /// # Examples
153    /// ```rust
154    /// use parc::{ParentArc, LockWeak};
155    /// use std::sync::Mutex;
156    /// fn main() {
157    ///     let parent = ParentArc::new(Mutex::new(true));
158    ///     let weak: LockWeak<_> = ParentArc::downgrade(&parent);
159    /// }
160    /// ```
161    pub fn downgrade(other: &Self) -> LockWeak<T> {
162        LockWeak { ptr: other.ptr }
163    }
164
165    /// Tries to downgrade a [`ParentArc`](struct.ParentArc.html) into a [`LockWeak`](struct.LockWeak.html) if the inner state allows the latter to upgrade.
166    ///
167    /// # Examples
168    /// ```rust
169    /// use parc::{ParentArc, LockWeak};
170    /// use std::sync::Mutex;
171    /// fn main() {
172    ///     let parent = ParentArc::new(Mutex::new(true));
173    ///     parent.lock(); // LockWeaks are no longer able to upgrade successfully
174    ///     
175    ///     if let Some(_) = ParentArc::try_downgrade(&parent) {
176    ///         assert!(false);
177    ///     }
178    /// }
179    /// ```
180    pub fn try_downgrade(other: &Self) -> Option<LockWeak<T>> {
181        if other.inner().lock.load(Ordering::Relaxed) {
182            return None;
183        }
184        Some(LockWeak { ptr: other.ptr })
185    }
186
187    /// Blocks the thread until all [`ChildArc`](struct.ChildArc.html) of this instance
188    /// have dropped, returning the underlying data.
189    ///
190    /// # Safety
191    ///
192    /// This call will indefinitly spin if a child has not droped correctly.
193    ///
194    /// # Examples
195    /// ```rust
196    /// use parc::{ParentArc, LockWeak};
197    /// use std::sync::Mutex;
198    /// fn main() {
199    ///     let parent = ParentArc::new(Mutex::new(true));
200    ///     
201    ///     let weak1: LockWeak<_> = ParentArc::downgrade(&parent);
202    ///     let weak2: LockWeak<_> = ParentArc::downgrade(&parent);
203    ///     
204    ///     let child = weak1.upgrade().unwrap();
205    ///     drop(child);
206    ///
207    ///     let _: Mutex<bool> = parent.block_into_inner();
208    /// }
209    /// ```
210    pub fn block_into_inner(self) -> T {
211        let this = self.inner();
212
213        self.lock();
214        while this.strong.load(Ordering::Acquire) != 0 {}
215
216        unsafe {
217            let elem = ptr::read(&this.data);
218            mem::forget(self);
219            elem
220        }
221    }
222
223    /// Non-blocking version of [`block_into_inner`](#method.block_into_inner). It is advised to
224    /// call [`lock`](#method.lock) before calling this one, unless you know for sure there are no
225    /// [`ChildArc`](struct.ChildArc.html) alive at this instance.
226    ///
227    /// # Safety
228    ///
229    /// This will never unwrap `Ok(T)` if a child has not droped correctly.
230    ///
231    /// # Examples
232    /// ```rust
233    /// use parc::{ParentArc, LockWeak, TryUnwrapError::*};
234    /// use std::sync::Mutex;
235    /// fn main() {
236    ///     let mut parent = ParentArc::new(Mutex::new(true));
237    ///     
238    ///     let weak1: LockWeak<_> = ParentArc::downgrade(&parent);
239    ///     let weak2: LockWeak<_> = ParentArc::downgrade(&parent);
240    ///     
241    ///     let child = weak1.upgrade().unwrap();
242    ///     
243    ///     // Unlocked LockWeaks
244    ///     parent = if let Err(WouldLock(parent)) = ParentArc::try_unwrap(parent) {
245    ///         parent
246    ///     } else {
247    ///         unreachable!()
248    ///     };
249    ///
250    ///     // Locked LockWeaks
251    ///     parent.lock();
252    ///     parent = if let Err(WouldBlock(parent)) = ParentArc::try_unwrap(parent) {
253    ///         parent
254    ///     } else {
255    ///         unreachable!()
256    ///     };
257    ///     parent.unlock();
258    ///
259    ///     // Droped children
260    ///     drop(child);
261    ///     let value: Mutex<bool> = ParentArc::try_unwrap(parent).unwrap();
262    /// }
263    /// ```
264    pub fn try_unwrap(other: Self) -> TryUnwrapResult<T> {
265        let this = other.inner();
266
267        if !this.lock.load(Ordering::Relaxed) && this.strong.load(Ordering::Relaxed) > 0 {
268            // Check for non-null count and unlock state
269            return Err(TryUnwrapError::WouldLock(other));
270        }
271        if this.strong.load(Ordering::Relaxed) != 0 {
272            return Err(TryUnwrapError::WouldBlock(other));
273        }
274
275        unsafe {
276            let elem = ptr::read(&this.data);
277            mem::forget(other);
278            Ok(elem)
279        }
280    }
281
282    fn inner(&self) -> &Womb<T> {
283        unsafe { self.ptr.as_ref() } // Ok to do this because we own the data
284    }
285}
286
impl<T> AsRef<T> for ParentArc<T> {
    // Borrow the inner data; valid for as long as the parent is alive.
    fn as_ref(&self) -> &T {
        &self.inner().data
    }
}
292
impl<T> ops::Deref for ParentArc<T> {
    type Target = T;
    // Dereference straight to the inner data (same borrow as `as_ref`).
    fn deref(&self) -> &Self::Target {
        &self.inner().data
    }
}
299
300impl<T> Drop for ParentArc<T> {
301    fn drop(&mut self) {
302        // Wait for all reads to be droped
303        let this = self.inner();
304        while this.strong.load(Ordering::Acquire) != 0 {}
305    }
306}
307
// Inner state shared by all instances: Parent, Weak, Child.
// Heap-allocated once per `ParentArc`; see `block_into_inner` / `try_unwrap`
// for how `data` is moved back out.
struct Womb<T> {
    // The user's data.
    data: T,
    // When `true`, `LockWeak::upgrade` refuses to create new readers.
    lock: AtomicBool,
    // Number of live `ChildArc` readers.
    strong: AtomicUsize,
}
314
315impl<T> Womb<T> {
316    fn as_nnptr(data: T) -> NonNull<Self> {
317        let x = Box::new(Self {
318            data,
319            lock: AtomicBool::new(false),
320            strong: AtomicUsize::new(0),
321        });
322        unsafe { NonNull::new_unchecked(Box::into_raw(x)) }
323    }
324}
325
/// Weak reference to a [`ParentArc`](struct.ParentArc.html).
///
/// This instance can be locked at any moment, you can try to upgrade it into a
/// [`ChildArc`](struct.ChildArc.html) which assures it can be read until the reader is dropped.
///
/// The typical way to obtain a Weak pointer is to call
/// [`ParentArc::downgrade`](struct.ParentArc.html#method.downgrade).
pub struct LockWeak<T> {
    // Raw pointer to the shared state; holding it does not keep the data
    // reachable, so `upgrade` must re-check the lock flag every time.
    ptr: NonNull<Womb<T>>,
}
336
337impl<T> LockWeak<T> {
338    /// Upgrades this Weak reference into a [`ChildArc`](struct.ChildArc.html) if the data is
339    /// unlocked or still owned by the [`ParentArc`](struct.ParentArc.html).
340    ///
341    /// # Examples
342    /// ```rust
343    /// use parc::{ParentArc, LockWeak};
344    /// use std::sync::Mutex;
345    /// fn main() {
346    ///     let parent = ParentArc::new(Mutex::new(true));
347    ///
348    ///     let weak: LockWeak<_> = ParentArc::downgrade(&parent);
349    ///     let child = weak.upgrade().unwrap();
350    /// }
351    /// ```
352    pub fn upgrade(&self) -> Option<ChildArc<T>> {
353        let this = self.inner()?;
354
355        if this.lock.load(Ordering::Relaxed) {
356            return None;
357        }
358
359        let mut n = this.strong.load(Ordering::Relaxed);
360        loop {
361            match this
362                .strong
363                .compare_exchange_weak(n, n + 1, Ordering::SeqCst, Ordering::Relaxed)
364            {
365                Ok(_) => break,
366                Err(old) => n = old,
367            }
368        }
369        Some(ChildArc::from(self.ptr))
370    }
371
372    // Pointer could be voided
373    fn inner(&self) -> Option<&Womb<T>> {
374        let address = self.ptr.as_ptr() as *mut () as usize;
375        if address == core::usize::MAX {
376            None
377        } else {
378            Some(unsafe { self.ptr.as_ref() })
379        }
380    }
381}
382
383unsafe impl<T> Send for LockWeak<T> {}
384
/// Unclonable owned reference to a [`ParentArc`](struct.ParentArc.html).
///
/// This type can be dereferenced into the underlying data.
///
/// # Examples
/// ```rust
/// use parc::{ParentArc, LockWeak, ChildArc};
/// use std::sync::Mutex;
/// fn main() {
///     let parent = ParentArc::new(Mutex::new(true));
///
///     let weak: LockWeak<_> = ParentArc::downgrade(&parent);
///     let child: ChildArc<_> = weak.upgrade().unwrap();
///
///     assert!(*child.lock().unwrap());
/// }
/// ```
pub struct ChildArc<T> {
    // Pointer to the shared state; while this child exists, `strong > 0`
    // (incremented in `LockWeak::upgrade`, decremented in `Drop`).
    ptr: NonNull<Womb<T>>,
}
405
406impl<T> ChildArc<T> {
407    fn from(ptr: NonNull<Womb<T>>) -> Self {
408        Self { ptr }
409    }
410    fn inner(&self) -> &Womb<T> {
411        // safe because strong count is up one
412        unsafe { self.ptr.as_ref() }
413    }
414}
415
impl<T> AsRef<T> for ChildArc<T> {
    // Borrow the inner data; valid while this reader is alive.
    fn as_ref(&self) -> &T {
        &self.inner().data
    }
}
421
impl<T> ops::Deref for ChildArc<T> {
    type Target = T;
    // Dereference straight to the inner data (same borrow as `as_ref`).
    fn deref(&self) -> &Self::Target {
        &self.inner().data
    }
}
428
429impl<T> Drop for ChildArc<T> {
430    fn drop(&mut self) {
431        let strong = &self.inner().strong;
432
433        let mut n = strong.load(Ordering::Relaxed);
434        loop {
435            match strong.compare_exchange_weak(n, n - 1, Ordering::SeqCst, Ordering::Relaxed) {
436                Ok(_) => break,
437                Err(old) => n = old,
438            }
439        }
440    }
441}
442
// Std-backed unit tests exercising the parent/weak/child life-cycle across
// real threads.
// NOTE(review): this gate checks `not(feature = "no_std")` while the rest of
// the file keys off a `std` feature — confirm which feature name is intended.
#[cfg(all(test, not(feature = "no_std")))]
mod tests {
    extern crate std;
    use super::*;
    use std::sync;
    use std::thread;
    use std::vec::Vec;

    // Constructing (and dropping) a parent with no weaks or children.
    #[test]
    fn new() {
        let _ = ParentArc::new(2);
    }

    // One thread upgrades once and mutates; join first, then consume.
    #[test]
    fn one_simple_thread() {
        let m = ParentArc::new(sync::Mutex::new(0));
        let _ = thread::spawn({
            let weak = ParentArc::downgrade(&m);
            move || match weak.upgrade() {
                Some(mutex) => *mutex.lock().unwrap() += 1,
                None => {}
            }
        })
        .join();
        let _: sync::Mutex<usize> = m.block_into_inner();
    }

    // Consume the parent while the thread may still be running; join after.
    #[test]
    fn join_after_thread() {
        let m = ParentArc::new(sync::Mutex::new(0));
        let h = thread::spawn({
            let weak = ParentArc::downgrade(&m);
            move || match weak.upgrade() {
                Some(mutex) => *mutex.lock().unwrap() += 1,
                None => {}
            }
        });
        let _: sync::Mutex<usize> = m.block_into_inner();
        let _ = h.join();
    }

    // Ten sequential single-upgrade threads, each joined before the next.
    #[test]
    fn multiple_threads() {
        let m = ParentArc::new(sync::Mutex::new(0));
        for _ in 0..10 {
            let _ = thread::spawn({
                let weak = ParentArc::downgrade(&m);
                move || match weak.upgrade() {
                    Some(mutex) => *mutex.lock().unwrap() += 1,
                    None => {}
                }
            })
            .join();
        }
        let _: sync::Mutex<usize> = m.block_into_inner();
    }

    // A thread upgrades in a loop until `block_into_inner` locks the weak,
    // at which point `upgrade` returns `None` and the loop breaks.
    #[test]
    fn loop_read_thread() {
        let m = ParentArc::new(sync::Mutex::new(0));
        let h = thread::spawn({
            let weak = ParentArc::downgrade(&m);
            move || loop {
                match weak.upgrade() {
                    Some(mutex) => *mutex.lock().unwrap() += 1,
                    None => break,
                }
            }
        });
        let _: sync::Mutex<usize> = m.block_into_inner();
        let _ = h.join();
    }

    // Same as above with ten concurrent upgrade loops racing the consume.
    #[test]
    fn many_loop_read_threads() {
        let m = ParentArc::new(sync::Mutex::new(0));

        let mut vh = Vec::new();
        for _ in 0..10 {
            let h = thread::spawn({
                let weak = ParentArc::downgrade(&m);
                move || loop {
                    match weak.upgrade() {
                        Some(mutex) => *mutex.lock().unwrap() += 1,
                        None => break,
                    }
                }
            });
            vh.push(h);
        }

        let _: sync::Mutex<usize> = m.block_into_inner();
        for h in vh {
            let _ = h.join();
        }
    }

    // One of the ten reader threads panics; the parent must still be
    // consumable, and the panic surfaces when joining that thread.
    #[test]
    #[should_panic]
    fn one_panic_read_threads() {
        let m = ParentArc::new(sync::atomic::AtomicUsize::new(0));

        let mut vh = Vec::new();
        for i in 0..10 {
            let h = thread::spawn({
                let weak = ParentArc::downgrade(&m);
                move || loop {
                    match weak.upgrade() {
                        Some(at) => {
                            if i != 1 {
                                at.store(1, sync::atomic::Ordering::SeqCst);
                            } else {
                                panic!()
                            }
                        }
                        None => break,
                    }
                }
            });
            vh.push(h);
        }

        // Give the threads a chance to launch.
        // NOTE(review): this sleeps 100 *nanoseconds*, which is unlikely to
        // be long enough for that — confirm whether millis were intended.
        thread::sleep(std::time::Duration::new(0, 100));

        let _: sync::atomic::AtomicUsize = m.block_into_inner();

        for h in vh {
            h.join().unwrap(); // panic occurs here
        }
    }
}