// reference_counted/arc.rs
1// This code is adapted from the rust standard library Arc.
2
3use base::borrow;
4use base::cmp::Ordering;
5use base::convert::{From, AsMut};
6use base::fmt;
7use base::hash::{Hash, Hasher};
8use base::marker::{PhantomData, Unpin};
9use base::mem;
10use base::ops::{Deref, DerefMut};
11use base::ptr::{self, NonNull};
12use base::sync::atomic;
13use base::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
14
15use base::borrow::BorrowMut;
16
17use base::prelude::v1::*;
18
19use smart_pointer::{SmartPointer, IntoMut, SmartPointerMut};
20
21use crate::ReferenceCounted;
22
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Exceeding this limit makes `clone` panic (although, the check being racy,
/// not necessarily at _exactly_ `MAX_REFCOUNT + 1` references). This guards
/// the strong count against overflow and the resulting use-after-free.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
28
// Emits an acquire fence that synchronizes with `Release` decrements of the
// strong count. The `$x` argument is currently unused; it is kept so that
// call sites name the atomic they are ordering against (std's Arc uses it
// for an `Acquire` load when building under ThreadSanitizer, which does not
// understand fences).
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}
34
/// A thread-safe reference-counted pointer.
///
/// `ptr` points at a heap-allocated `ArcInner<T>` holding the atomic strong
/// count and the data; `phantom` tells the drop checker that an `Arc<T>` may
/// own an `ArcInner<T>`.
pub struct Arc<T: ?Sized> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
}
40
// SAFETY: sending or sharing an `Arc<T>` across threads can both hand out
// `&T` on multiple threads (requires `T: Sync`) and end up dropping `T` on a
// different thread than created it (requires `T: Send`), so each impl needs
// both bounds.
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
43
44impl<T: ?Sized> Arc<T> {
45 fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
46 Self { ptr, phantom: PhantomData }
47 }
48}
49
/// The heap allocation shared by every `Arc` handle to the same value: the
/// atomic strong reference count followed by the data itself.
struct ArcInner<T: ?Sized> {
    strong: atomic::AtomicUsize,
    data: T,
}
54
55impl<T: ?Sized> Arc<T> {
56 #[inline]
57 fn inner(&self) -> &ArcInner<T> {
58 // This unsafety is ok because while this arc is alive we're guaranteed
59 // that the inner pointer is valid. Furthermore, we know that the
60 // `ArcInner` structure itself is `Sync` because the inner data is
61 // `Sync` as well, so we're ok loaning out an immutable pointer to these
62 // contents.
63 unsafe { self.ptr.as_ref() }
64 }
65
66 unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
67 // We are careful to *not* create a reference covering the "count" fields, as
68 // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
69 unsafe { &mut (*this.ptr.as_ptr()).data }
70 }
71
72 #[inline(never)]
73 unsafe fn drop_slow(&mut self) {
74 // Destroy the data at this time, even though we may not free the box
75 // allocation itself (there may still be weak pointers lying around).
76 unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
77 }
78
79 fn ptr(&self) -> *mut ArcInner<T> {
80 self.ptr.as_ptr()
81 }
82
83 fn ref_count(&self) -> usize {
84 self.inner().strong.load(SeqCst)
85 }
86}
87
88impl<T: ?Sized> Clone for Arc<T> {
89 /// Makes a clone of the `Arc` pointer.
90 ///
91 /// This creates another pointer to the same allocation, increasing the reference count.
92 #[inline]
93 fn clone(&self) -> Arc<T> {
94 // Using a relaxed ordering is alright here, as knowledge of the
95 // original reference prevents other threads from erroneously deleting
96 // the object.
97 //
98 // As explained in the [Boost documentation][1], Increasing the
99 // reference counter can always be done with memory_order_relaxed: New
100 // references to an object can only be formed from an existing
101 // reference, and passing an existing reference from one thread to
102 // another must already provide any required synchronization.
103 //
104 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
105 let old_size = self.inner().strong.fetch_add(1, Relaxed);
106
107 // However we need to guard against massive refcounts in case someone
108 // is `mem::forget`ing Arcs. If we don't do this the count can overflow
109 // and users will use-after free. We racily saturate to `isize::MAX` on
110 // the assumption that there aren't ~2 billion threads incrementing
111 // the reference count at once. This branch will never be taken in
112 // any realistic program.
113 //
114 // We abort because such a program is incredibly degenerate, and we
115 // don't care to support it.
116 if old_size > MAX_REFCOUNT {
117 panic!();
118 }
119
120 Self::from_inner(self.ptr)
121 }
122}
123
impl<T: ?Sized> Drop for Arc<T> {
    /// Drops the `Arc`.
    ///
    /// This will decrement the reference count; the last reference to go away
    /// also tears down the shared value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let foo2 = Arc::clone(&foo);
    ///
    /// drop(foo); // Doesn't print anything
    /// drop(foo2); // Prints "dropped!"
    /// ```
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object. This
        // same logic applies to the below `fetch_sub` to the `weak` count.
        // (Any decrement that does not hit zero returns early here.)
        if self.inner().strong.fetch_sub(1, Release) != 1 {
            return;
        }

        // This fence is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` fence. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this fence, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // In particular, while the contents of an Arc are usually immutable, it's
        // possible to have interior writes to something like a Mutex<T>. Since a
        // Mutex is not acquired when it is deleted, we can't rely on its
        // synchronization logic to make writes in thread A visible to a destructor
        // running in thread B.
        //
        // Also note that the Acquire fence here could probably be replaced with an
        // Acquire load, which could improve performance in highly-contended
        // situations. See [2].
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: (https://github.com/rust-lang/rust/pull/41714)
        acquire!(self.inner().strong);

        // We were the last strong reference: tear down the shared value.
        unsafe {
            self.drop_slow();
        }
    }
}
192
193impl<T: ?Sized> Deref for Arc<T> {
194 type Target = T;
195
196 #[inline]
197 fn deref(&self) -> &T {
198 &self.inner().data
199 }
200}
201
202impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
203 fn borrow(&self) -> &T {
204 &**self
205 }
206}
207
208impl<T: ?Sized> AsRef<T> for Arc<T> {
209 fn as_ref(&self) -> &T {
210 &**self
211 }
212}
213
214impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
215 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
216 fmt::Display::fmt(&**self, f)
217 }
218}
219
220impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
221 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
222 fmt::Debug::fmt(&**self, f)
223 }
224}
225
226impl<T: ?Sized> fmt::Pointer for Arc<T> {
227 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
228 fmt::Pointer::fmt(&(&**self as *const T), f)
229 }
230}
231
232impl<T: ?Sized> SmartPointer<T> for Arc<T> {
233 fn new(data: T) -> Arc<T> where T: Sized {
234 let x: Box<_> = Box::new(ArcInner {
235 strong: atomic::AtomicUsize::new(1),
236 data,
237 });
238 Self::from_inner(Box::leak(x).into())
239 }
240
241 fn try_unwrap(this: Self) -> Result<T, Self> where T: Sized {
242 if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
243 return Err(this);
244 }
245
246 acquire!(this.inner().strong);
247
248 unsafe {
249 let elem = ptr::read(&this.ptr.as_ref().data);
250 mem::forget(this);
251 Ok(elem)
252 }
253 }
254}
255
/// An `Arc` that is statically known to hold the only reference to its value
/// and can therefore hand out mutable access (`DerefMut`, `AsMut`).
pub struct UniqueArc<T: ?Sized>(Arc<T>);
257
// SAFETY: mirrors the bounds on `Arc` — crossing threads can both expose `&T`
// (needs `T: Sync`) and drop `T` on another thread (needs `T: Send`).
unsafe impl<T: ?Sized + Sync + Send> Send for UniqueArc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for UniqueArc<T> {}
260
261impl<T: ?Sized> Deref for UniqueArc<T> {
262 type Target = T;
263
264 #[inline]
265 fn deref(&self) -> &T {
266 self.0.deref()
267 }
268}
269
270impl<T: ?Sized> borrow::Borrow<T> for UniqueArc<T> {
271 fn borrow(&self) -> &T {
272 self.0.borrow()
273 }
274}
275
276impl<T: ?Sized> AsRef<T> for UniqueArc<T> {
277 fn as_ref(&self) -> &T {
278 self.0.as_ref()
279 }
280}
281
282impl<T: ?Sized + fmt::Display> fmt::Display for UniqueArc<T> {
283 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
284 self.0.fmt(f)
285 }
286}
287
288impl<T: ?Sized + fmt::Debug> fmt::Debug for UniqueArc<T> {
289 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
290 self.0.fmt(f)
291 }
292}
293
294impl<T: ?Sized> fmt::Pointer for UniqueArc<T> {
295 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
296 self.0.fmt(f)
297 }
298}
299
300impl<T: ?Sized> SmartPointer<T> for UniqueArc<T> {
301 fn new(data: T) -> Self where T: Sized {
302 UniqueArc(Arc::new(data))
303 }
304
305 fn try_unwrap(this: Self) -> Result<T, Self> where T: Sized {
306 let this = this.0;
307
308 acquire!(this.inner().strong);
309
310 unsafe {
311 let elem = ptr::read(&this.ptr.as_ref().data);
312 mem::forget(this);
313 Ok(elem)
314 }
315 }
316}
317
318
319impl<T: ?Sized> DerefMut for UniqueArc<T> {
320 fn deref_mut(&mut self) -> &mut T {
321 // We know this to be uniquely owned
322 unsafe { &mut (*self.0.ptr()).data }
323 }
324}
325
326impl<T: ?Sized> BorrowMut<T> for UniqueArc<T> {
327 fn borrow_mut(&mut self) -> &mut T {
328 &mut **self
329 }
330}
331
332impl<T: ?Sized> AsMut<T> for UniqueArc<T> {
333 fn as_mut(&mut self) -> &mut T {
334 &mut **self
335 }
336}
337
// Marker impl: `UniqueArc` hands out mutable access (see `DerefMut`/`AsMut`),
// so it qualifies as a mutable smart pointer.
impl<T: ?Sized> SmartPointerMut<T> for UniqueArc<T> {}
339
340impl<T: ?Sized> Into<Arc<T>> for UniqueArc<T> {
341 fn into(self) -> Arc<T> {
342 self.0
343 }
344}
345
impl<T: ?Sized> IntoMut<T> for Arc<T> {
    type MutablePointer = UniqueArc<T>;

    /// Returns true when this handle is currently the only strong reference,
    /// i.e. when converting to a mutable pointer would be sound.
    ///
    /// NOTE(review): the count can change immediately after this returns if
    /// other threads hold clones — callers must rule out concurrent cloning
    /// before relying on the answer; confirm against the `IntoMut` contract.
    fn can_make_mut(this: &Self) -> bool {
        this.ref_count() == 1
    }

    /// Wraps this `Arc` as a `UniqueArc` without verifying uniqueness.
    ///
    /// Safety: the caller must guarantee this is the sole strong reference.
    unsafe fn into_mut_unchecked(this: Self) -> Self::MutablePointer {
        UniqueArc(this)
    }

    /// Obtain a mutable reference to the wrapped value without performing runtime checks for
    /// upholding any invariants.
    ///
    /// Safety: Calling this is safe if and only if `can_make_mut` returns true.
    unsafe fn get_mut_unchecked(this: &Self) -> &mut T {
        // We are careful to *not* create a reference covering the "count" fields, as
        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
        unsafe { &mut (*this.ptr.as_ptr()).data }
    }
}
367
368impl<T: ?Sized> ReferenceCounted<T> for Arc<T> {
369 fn reference_count(this: &Self) -> usize {
370 this.inner().strong.load(SeqCst)
371 }
372}
373
374impl<T: Default> Default for Arc<T> {
375 /// Creates a new `Arc<T>`, with the `Default` value for `T`.
376 ///
377 /// # Examples
378 ///
379 /// ```
380 /// use std::sync::Arc;
381 ///
382 /// let x: Arc<i32> = Default::default();
383 /// assert_eq!(*x, 0);
384 /// ```
385 fn default() -> Arc<T> {
386 Arc::new(Default::default())
387 }
388}
389
390impl<T: Default> Default for UniqueArc<T> {
391 /// Creates a new `UniqueArc<T>`, with the `Default` value for `T`.
392 fn default() -> UniqueArc<T> {
393 UniqueArc::new(Default::default())
394 }
395}
396
397impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
398 /// Equality for two `Arc`s.
399 ///
400 /// Two `Arc`s are equal if their inner values are equal, even if they are
401 /// stored in different allocation. This implementation does not check for
402 /// pointer equality.
403 #[inline]
404 fn eq(&self, other: &Arc<T>) -> bool {
405 (**self).eq(&**other)
406 }
407
408 /// Inequality for two `Arc`s.
409 ///
410 /// Two `Arc`s are unequal if their inner values are unequal. This implementation does not
411 /// check for pointer equality.
412 #[inline]
413 fn ne(&self, other: &Arc<T>) -> bool {
414 (**self).ne(&**other)
415 }
416}
417
418impl<T: ?Sized + Eq> Eq for Arc<T> {}
419
420impl<T: ?Sized + PartialEq> PartialEq for UniqueArc<T> {
421 /// Equality for two `UniqueArc`s.
422 ///
423 /// Two `UniqueArc`s are equal if their inner values are equal, even if they are
424 /// stored in different allocation. This implementation does not check for
425 /// pointer equality.
426 #[inline]
427 fn eq(&self, other: &UniqueArc<T>) -> bool {
428 (**self).eq(&**other)
429 }
430
431 /// Inequality for two `Arc`s.
432 ///
433 /// Two `Arc`s are unequal if their inner values are unequal. This implementation does not
434 /// check for pointer equality.
435 #[inline]
436 fn ne(&self, other: &UniqueArc<T>) -> bool {
437 (**self).ne(&**other)
438 }
439}
440
441impl<T: ?Sized + Eq> Eq for UniqueArc<T> {}
442
443impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
444 /// Partial comparison for two `Arc`s.
445 ///
446 /// The two are compared by calling `partial_cmp()` on their inner values.
447 fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
448 (**self).partial_cmp(&**other)
449 }
450
451 /// Less-than comparison for two `Arc`s.
452 ///
453 /// The two are compared by calling `<` on their inner values.
454 fn lt(&self, other: &Arc<T>) -> bool {
455 *(*self) < *(*other)
456 }
457
458 /// 'Less than or equal to' comparison for two `Arc`s.
459 ///
460 /// The two are compared by calling `<=` on their inner values.
461 fn le(&self, other: &Arc<T>) -> bool {
462 *(*self) <= *(*other)
463 }
464
465 /// Greater-than comparison for two `Arc`s.
466 ///
467 /// The two are compared by calling `>` on their inner values.
468 fn gt(&self, other: &Arc<T>) -> bool {
469 *(*self) > *(*other)
470 }
471
472 /// 'Greater than or equal to' comparison for two `Arc`s.
473 ///
474 /// The two are compared by calling `>=` on their inner values.
475 fn ge(&self, other: &Arc<T>) -> bool {
476 *(*self) >= *(*other)
477 }
478}
479
480impl<T: ?Sized + PartialOrd> PartialOrd for UniqueArc<T> {
481 /// Partial comparison for two `UniqueArc`s.
482 ///
483 /// The two are compared by calling `partial_cmp()` on their inner values.
484 fn partial_cmp(&self, other: &UniqueArc<T>) -> Option<Ordering> {
485 (**self).partial_cmp(&**other)
486 }
487
488 /// Less-than comparison for two `UniqueArc`s.
489 ///
490 /// The two are compared by calling `<` on their inner values.
491 fn lt(&self, other: &UniqueArc<T>) -> bool {
492 *(*self) < *(*other)
493 }
494
495 /// 'Less than or equal to' comparison for two `UniqueArc`s.
496 ///
497 /// The two are compared by calling `<=` on their inner values.
498 fn le(&self, other: &UniqueArc<T>) -> bool {
499 *(*self) <= *(*other)
500 }
501
502 /// Greater-than comparison for two `UniqueArc`s.
503 ///
504 /// The two are compared by calling `>` on their inner values.
505 fn gt(&self, other: &UniqueArc<T>) -> bool {
506 *(*self) > *(*other)
507 }
508
509 /// 'Greater than or equal to' comparison for two `UniqueArc`s.
510 ///
511 /// The two are compared by calling `>=` on their inner values.
512 fn ge(&self, other: &UniqueArc<T>) -> bool {
513 *(*self) >= *(*other)
514 }
515}
516
517impl<T: ?Sized + Ord> Ord for Arc<T> {
518 /// Comparison for two `Arc`s.
519 ///
520 /// The two are compared by calling `cmp()` on their inner values.
521 fn cmp(&self, other: &Arc<T>) -> Ordering {
522 (**self).cmp(&**other)
523 }
524}
525
526impl<T: ?Sized + Ord> Ord for UniqueArc<T> {
527 /// Comparison for two `UniqueArc`s.
528 ///
529 /// The two are compared by calling `cmp()` on their inner values.
530 fn cmp(&self, other: &UniqueArc<T>) -> Ordering {
531 (**self).cmp(&**other)
532 }
533}
534
535impl<T> From<T> for Arc<T> {
536 fn from(t: T) -> Self {
537 Arc::new(t)
538 }
539}
540
541impl<T> From<T> for UniqueArc<T> {
542 fn from(t: T) -> Self {
543 UniqueArc::new(t)
544 }
545}
546
547impl<T: ?Sized + Hash> Hash for Arc<T> {
548 fn hash<H: Hasher>(&self, state: &mut H) {
549 (**self).hash(state)
550 }
551}
552
553impl<T: ?Sized + Hash> Hash for UniqueArc<T> {
554 fn hash<H: Hasher>(&self, state: &mut H) {
555 (**self).hash(state)
556 }
557}
558
// Moving the handle never moves the heap-allocated `T`, so both pointer
// types are unconditionally `Unpin`, regardless of whether `T: Unpin`.
impl<T: ?Sized> Unpin for Arc<T> {}

impl<T: ?Sized> Unpin for UniqueArc<T> {}