arc_swap/
lib.rs

1#![doc(test(attr(deny(warnings))))]
2#![warn(missing_docs)]
3#![cfg_attr(docsrs, feature(doc_cfg))]
4#![allow(deprecated)]
5#![cfg_attr(feature = "experimental-thread-local", no_std)]
6#![cfg_attr(feature = "experimental-thread-local", feature(thread_local))]
7
8//! Making [`Arc`] itself atomic
9//!
10//! The [`ArcSwap`] type is a container for an `Arc` that can be changed atomically. Semantically,
11//! it is similar to something like `Atomic<Arc<T>>` (if there was such a thing) or
12//! `RwLock<Arc<T>>` (but without the need for the locking). It is optimized for read-mostly
13//! scenarios, with consistent performance characteristics.
14//!
15//! # Motivation
16//!
17//! There are many situations in which one might want to have some data structure that is often
18//! read and seldom updated. Some examples might be a configuration of a service, routing tables,
19//! snapshot of some data that is renewed every few minutes, etc.
20//!
21//! In all these cases one needs:
22//! * Being able to read the current value of the data structure, fast, often and concurrently from
23//!   many threads.
24//! * Using the same version of the data structure over longer period of time ‒ a query should be
25//!   answered by a consistent version of data, a packet should be routed either by an old or by a
26//!   new version of the routing table but not by a combination, etc.
27//! * Perform an update without disrupting the processing.
28//!
29//! The first idea would be to use [`RwLock<T>`][RwLock] and keep a read-lock for the whole time of
30//! processing. Update would, however, pause all processing until done.
31//!
32//! Better option would be to have [`RwLock<Arc<T>>`][RwLock]. Then one would lock, clone the [Arc]
33//! and unlock. This suffers from CPU-level contention (on the lock and on the reference count of
34//! the [Arc]) which makes it relatively slow. Depending on the implementation, an update may be
35//! blocked for arbitrary long time by a steady inflow of readers.
36//!
37//! ```rust
38//! # use std::sync::{Arc, RwLock};
39//! # use once_cell::sync::Lazy;
40//! # struct RoutingTable; struct Packet; impl RoutingTable { fn route(&self, _: Packet) {} }
41//! static ROUTING_TABLE: Lazy<RwLock<Arc<RoutingTable>>> = Lazy::new(|| {
42//!     RwLock::new(Arc::new(RoutingTable))
43//! });
44//!
45//! fn process_packet(packet: Packet) {
46//!     let table = Arc::clone(&ROUTING_TABLE.read().unwrap());
47//!     table.route(packet);
48//! }
49//! # fn main() { process_packet(Packet); }
50//! ```
51//!
52//! The [ArcSwap] can be used instead, which solves the above problems and has better performance
53//! characteristics than the [RwLock], both in contended and non-contended scenarios.
54//!
55//! ```rust
56//! # use arc_swap::ArcSwap;
57//! # use once_cell::sync::Lazy;
58//! # struct RoutingTable; struct Packet; impl RoutingTable { fn route(&self, _: Packet) {} }
59//! static ROUTING_TABLE: Lazy<ArcSwap<RoutingTable>> = Lazy::new(|| {
60//!     ArcSwap::from_pointee(RoutingTable)
61//! });
62//!
63//! fn process_packet(packet: Packet) {
64//!     let table = ROUTING_TABLE.load();
65//!     table.route(packet);
66//! }
67//! # fn main() { process_packet(Packet); }
68//! ```
69//!
70//! # Crate contents
71//!
72//! At the heart of the crate there are [`ArcSwap`] and [`ArcSwapOption`] types, containers for an
73//! [`Arc`] and [`Option<Arc>`][Option].
74//!
75//! Technically, these are type aliases for partial instantiations of the [`ArcSwapAny`] type. The
76//! [`ArcSwapAny`] is more flexible and allows tweaking of many things (can store other things than
77//! [`Arc`]s, can configure the locking [`Strategy`]). For details about the tweaking, see the
78//! documentation of the [`strategy`] module and the [`RefCnt`] trait.
79//!
80//! The [`cache`] module provides means for speeding up read access of the contained data at the
81//! cost of delayed reclamation.
82//!
83//! The [`access`] module can be used to do projections into the contained data to separate parts
84//! of application from each other (eg. giving a component access to only its own part of
85//! configuration while still having it reloaded as a whole).
86//!
87//! # Before using
88//!
89//! The data structure is a bit niche. Before using, please check the
90//! [limitations and common pitfalls][docs::limitations] and the [performance
91//! characteristics][docs::performance], including choosing the right [read
92//! operation][docs::performance#read-operations].
93//!
94//! You can also get an inspiration about what's possible in the [common patterns][docs::patterns]
95//! section.
96//!
97//! # Examples
98//!
99//! ```rust
100//! use std::sync::Arc;
101//!
102//! use arc_swap::ArcSwap;
103//! use crossbeam_utils::thread;
104//!
105//! let config = ArcSwap::from(Arc::new(String::default()));
106//! thread::scope(|scope| {
107//!     scope.spawn(|_| {
108//!         let new_conf = Arc::new("New configuration".to_owned());
109//!         config.store(new_conf);
110//!     });
111//!     for _ in 0..10 {
112//!         scope.spawn(|_| {
113//!             loop {
114//!                 let cfg = config.load();
115//!                 if !cfg.is_empty() {
116//!                     assert_eq!(**cfg, "New configuration");
117//!                     return;
118//!                 }
119//!             }
120//!         });
121//!     }
122//! }).unwrap();
123//! ```
124//!
125//! [RwLock]: https://doc.rust-lang.org/std/sync/struct.RwLock.html
126
127#[rustversion::since(1.36.0)]
128#[allow(unused_imports)]
129#[cfg_attr(feature = "experimental-thread-local", macro_use)]
130extern crate alloc;
131
132pub mod access;
133mod as_raw;
134pub mod cache;
135mod compile_fail_tests;
136mod debt;
137pub mod docs;
138mod ref_cnt;
139#[cfg(feature = "serde")]
140mod serde;
141pub mod strategy;
142#[cfg(feature = "weak")]
143mod weak;
144
145// Hack to not rely on std on newer compilers (where alloc is stabilized) but still fall back to
146// std on old compilers.
147mod imports {
148    #[rustversion::since(1.36.0)]
149    pub use alloc::{boxed::Box, rc::Rc, sync::Arc};
150
151    #[rustversion::before(1.36.0)]
152    pub use std::{boxed::Box, rc::Rc, sync::Arc};
153}
154
155use core::borrow::Borrow;
156use core::fmt::{Debug, Display, Formatter, Result as FmtResult};
157use core::marker::PhantomData;
158use core::mem;
159use core::ops::Deref;
160use core::ptr;
161use core::sync::atomic::{AtomicPtr, Ordering};
162
163use crate::imports::Arc;
164
165use crate::access::{Access, Map};
166pub use crate::as_raw::AsRaw;
167pub use crate::cache::Cache;
168pub use crate::ref_cnt::RefCnt;
169use crate::strategy::hybrid::{DefaultConfig, HybridStrategy};
170use crate::strategy::sealed::Protected;
171use crate::strategy::{CaS, Strategy};
172pub use crate::strategy::{DefaultStrategy, IndependentStrategy};
173
/// A temporary storage of the pointer.
///
/// This guard object is returned from most loading methods (with the notable exception of
/// [`load_full`](struct.ArcSwapAny.html#method.load_full)). It dereferences to the smart pointer
/// loaded, so most operations are to be done using that.
pub struct Guard<T: RefCnt, S: Strategy<T> = DefaultStrategy> {
    // Strategy-specific wrapper that keeps the loaded value protected for the guard's lifetime.
    inner: S::Protected,
}
182
183impl<T: RefCnt, S: Strategy<T>> Guard<T, S> {
184    /// Converts it into the held value.
185    ///
186    /// This, on occasion, may be a tiny bit faster than cloning the Arc or whatever is being held
187    /// inside.
188    // Associated function on purpose, because of deref
189    #[allow(clippy::wrong_self_convention)]
190    #[inline]
191    pub fn into_inner(lease: Self) -> T {
192        lease.inner.into_inner()
193    }
194
195    /// Create a guard for a given value `inner`.
196    ///
197    /// This can be useful on occasion to pass a specific object to code that expects or
198    /// wants to store a Guard.
199    ///
200    /// # Example
201    ///
202    /// ```rust
203    /// # use arc_swap::{ArcSwap, DefaultStrategy, Guard};
204    /// # use std::sync::Arc;
205    /// # let p = ArcSwap::from_pointee(42);
206    /// // Create two guards pointing to the same object
207    /// let g1 = p.load();
208    /// let g2 = Guard::<_, DefaultStrategy>::from_inner(Arc::clone(&*g1));
209    /// # drop(g2);
210    /// ```
211    pub fn from_inner(inner: T) -> Self {
212        Guard {
213            inner: S::Protected::from_inner(inner),
214        }
215    }
216}
217
218impl<T: RefCnt, S: Strategy<T>> Deref for Guard<T, S> {
219    type Target = T;
220    #[inline]
221    fn deref(&self) -> &T {
222        self.inner.borrow()
223    }
224}
225
226impl<T: RefCnt, S: Strategy<T>> From<T> for Guard<T, S> {
227    fn from(inner: T) -> Self {
228        Self::from_inner(inner)
229    }
230}
231
232impl<T: Default + RefCnt, S: Strategy<T>> Default for Guard<T, S> {
233    fn default() -> Self {
234        Self::from(T::default())
235    }
236}
237
238impl<T: Debug + RefCnt, S: Strategy<T>> Debug for Guard<T, S> {
239    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
240        self.deref().fmt(formatter)
241    }
242}
243
244impl<T: Display + RefCnt, S: Strategy<T>> Display for Guard<T, S> {
245    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
246        self.deref().fmt(formatter)
247    }
248}
249
250/// Comparison of two pointer-like things.
251// A and B are likely to *be* references, or thin wrappers around that. Calling that with extra
252// reference is just annoying.
253#[allow(clippy::needless_pass_by_value)]
254fn ptr_eq<Base, A, B>(a: A, b: B) -> bool
255where
256    A: AsRaw<Base>,
257    B: AsRaw<Base>,
258{
259    let a = a.as_raw();
260    let b = b.as_raw();
261    ptr::eq(a, b)
262}
263
/// An atomic storage for a reference counted smart pointer like [`Arc`] or `Option<Arc>`.
///
/// This is a storage where a smart pointer may live. It can be read and written atomically from
/// several threads, but doesn't act like a pointer itself.
///
/// One can be created [`from`] an [`Arc`]. To get the pointer back, use the
/// [`load`](#method.load).
///
/// # Note
///
/// This is the common generic implementation. This allows sharing the same code for storing
/// both `Arc` and `Option<Arc>` (and possibly other similar types).
///
/// In your code, you most probably want to interact with it through the
/// [`ArcSwap`](type.ArcSwap.html) and [`ArcSwapOption`](type.ArcSwapOption.html) aliases. However,
/// the methods they share are described here and are applicable to both of them. That's why the
/// examples here use `ArcSwap` ‒ but they could as well be written with `ArcSwapOption` or
/// `ArcSwapAny`.
///
/// # Type parameters
///
/// * `T`: The smart pointer to be kept inside. This crate provides implementation for `Arc<_>` and
///   `Option<Arc<_>>` (`Rc` too, but that one is not practically useful). But third party could
///   provide implementations of the [`RefCnt`] trait and plug in others.
/// * `S`: Chooses the [strategy] used to protect the data inside. They come with various
///   performance trade offs, the default [`DefaultStrategy`] is good rule of thumb for most use
///   cases.
///
/// # Examples
///
/// ```rust
/// # use std::sync::Arc;
/// # use arc_swap::ArcSwap;
/// let arc = Arc::new(42);
/// let arc_swap = ArcSwap::from(arc);
/// assert_eq!(42, **arc_swap.load());
/// // It can be read multiple times
/// assert_eq!(42, **arc_swap.load());
///
/// // Put a new one in there
/// let new_arc = Arc::new(0);
/// assert_eq!(42, *arc_swap.swap(new_arc));
/// assert_eq!(0, **arc_swap.load());
/// ```
///
/// # Known bugs
///
/// Currently, things like `ArcSwapAny<Option<Option<Arc<_>>>>` (notice the double Option) don't
/// work properly. A proper solution is being looked into
/// ([#81](https://github.com/vorner/arc-swap/issues/81)).
///
/// [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
/// [`from`]: https://doc.rust-lang.org/nightly/std/convert/trait.From.html#tymethod.from
/// [`RefCnt`]: trait.RefCnt.html
pub struct ArcSwapAny<T: RefCnt, S: Strategy<T> = DefaultStrategy> {
    // Notes: AtomicPtr needs Sized
    /// The actual pointer, extracted from the Arc.
    ptr: AtomicPtr<T::Base>,

    /// We are basically an Arc in disguise. Inherit parameters from Arc by pretending to contain
    /// it.
    _phantom_arc: PhantomData<T>,

    /// Strategy to protect the data.
    strategy: S,
}
330
331impl<T: RefCnt, S: Default + Strategy<T>> From<T> for ArcSwapAny<T, S> {
332    fn from(val: T) -> Self {
333        Self::with_strategy(val, S::default())
334    }
335}
336
impl<T: RefCnt, S: Strategy<T>> Drop for ArcSwapAny<T, S> {
    fn drop(&mut self) {
        // `&mut self` means exclusive access ‒ no concurrent loads are possible any more, so a
        // plain (non-atomic) read of the pointer is enough.
        let ptr = *self.ptr.get_mut();
        unsafe {
            // To pay any possible debts
            self.strategy.wait_for_readers(ptr, &self.ptr);
            // We are getting rid of the one stored ref count
            T::dec(ptr);
        }
    }
}
348
349impl<T, S: Strategy<T>> Debug for ArcSwapAny<T, S>
350where
351    T: Debug + RefCnt,
352{
353    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
354        formatter
355            .debug_tuple("ArcSwapAny")
356            .field(&self.load())
357            .finish()
358    }
359}
360
361impl<T, S: Strategy<T>> Display for ArcSwapAny<T, S>
362where
363    T: Display + RefCnt,
364{
365    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
366        self.load().fmt(formatter)
367    }
368}
369
370impl<T: RefCnt + Default, S: Default + Strategy<T>> Default for ArcSwapAny<T, S> {
371    fn default() -> Self {
372        Self::new(T::default())
373    }
374}
375
impl<T: RefCnt, S: Strategy<T>> ArcSwapAny<T, S> {
    /// Constructs a new storage.
    pub fn new(val: T) -> Self
    where
        S: Default,
    {
        Self::from(val)
    }

    /// Constructs a new storage while customizing the protection strategy.
    pub fn with_strategy(val: T, strategy: S) -> Self {
        // The AtomicPtr requires *mut in its interface. We are more like *const, so we cast it.
        // However, we always go back to *const right away when we get the pointer on the other
        // side, so it should be fine.
        let ptr = T::into_ptr(val);
        Self {
            ptr: AtomicPtr::new(ptr),
            _phantom_arc: PhantomData,
            strategy,
        }
    }

    /// Extracts the value inside.
    pub fn into_inner(mut self) -> T {
        // `&mut self` ‒ exclusive access, so a plain non-atomic read is enough.
        let ptr = *self.ptr.get_mut();
        // To pay all the debts
        unsafe { self.strategy.wait_for_readers(ptr, &self.ptr) };
        // Skip our Drop ‒ it would wait for readers again and also decrement the single ref
        // count we are about to hand to the caller.
        mem::forget(self);
        unsafe { T::from_ptr(ptr) }
    }

    /// Loads the value.
    ///
    /// This makes another copy of the held pointer and returns it, atomically (it is
    /// safe even when other thread stores into the same instance at the same time).
    ///
    /// The method is lock-free and wait-free, but usually more expensive than
    /// [`load`](#method.load).
    pub fn load_full(&self) -> T {
        Guard::into_inner(self.load())
    }

    /// Provides a temporary borrow of the object inside.
    ///
    /// This returns a proxy object allowing access to the thing held inside. However, there's
    /// only limited amount of possible cheap proxies in existence for each thread ‒ if more are
    /// created, it falls back to equivalent of [`load_full`](#method.load_full) internally.
    ///
    /// This is therefore a good choice to use for eg. searching a data structure or juggling the
    /// pointers around a bit, but not as something to store in larger amounts. The rule of thumb
    /// is this is suited for local variables on stack, but not in long-living data structures.
    ///
    /// # Consistency
    ///
    /// In case multiple related operations are to be done on the loaded value, it is generally
    /// recommended to call `load` just once and keep the result over calling it multiple times.
    /// First, keeping it is usually faster. But more importantly, the value can change between the
    /// calls to load, returning different objects, which could lead to logical inconsistency.
    /// Keeping the result makes sure the same object is used.
    ///
    /// ```rust
    /// # use arc_swap::ArcSwap;
    /// struct Point {
    ///     x: usize,
    ///     y: usize,
    /// }
    ///
    /// fn print_broken(p: &ArcSwap<Point>) {
    ///     // This is broken, because the x and y may come from different points,
    ///     // combining into an invalid point that never existed.
    ///     println!("X: {}", p.load().x);
    ///     // If someone changes the content now, between these two loads, we
    ///     // have a problem
    ///     println!("Y: {}", p.load().y);
    /// }
    ///
    /// fn print_correct(p: &ArcSwap<Point>) {
    ///     // Here we take a snapshot of one specific point so both x and y come
    ///     // from the same one.
    ///     let point = p.load();
    ///     println!("X: {}", point.x);
    ///     println!("Y: {}", point.y);
    /// }
    /// # let p = ArcSwap::from_pointee(Point { x: 10, y: 20 });
    /// # print_correct(&p);
    /// # print_broken(&p);
    /// ```
    #[inline]
    pub fn load(&self) -> Guard<T, S> {
        // SAFETY: self.ptr always holds a pointer produced by T::into_ptr, which is what the
        // strategy expects.
        let protected = unsafe { self.strategy.load(&self.ptr) };
        Guard { inner: protected }
    }

    /// Replaces the value inside this instance.
    ///
    /// Further loads will yield the new value. Uses [`swap`](#method.swap) internally.
    pub fn store(&self, val: T) {
        drop(self.swap(val));
    }

    /// Exchanges the value inside this instance.
    pub fn swap(&self, new: T) -> T {
        let new = T::into_ptr(new);
        // SeqCst (which includes AcqRel) ‒ publishes the target of the new pointer and acquires
        // the target of the old one.
        //
        // The full SeqCst is used to synchronize the time lines with the group counters.
        let old = self.ptr.swap(new, Ordering::SeqCst);
        unsafe {
            // Make sure no reader still uses the old value before we take over its ref count.
            self.strategy.wait_for_readers(old, &self.ptr);
            T::from_ptr(old)
        }
    }

    /// Swaps the stored Arc if it equals to `current`.
    ///
    /// If the current value of the `ArcSwapAny` equals to `current`, the `new` is stored inside.
    /// If not, nothing happens.
    ///
    /// The previous value (no matter if the swap happened or not) is returned. Therefore, if the
    /// returned value is equal to `current`, the swap happened. You want to do a pointer-based
    /// comparison to determine it.
    ///
    /// In other words, if the caller „guesses“ the value of current correctly, it acts like
    /// [`swap`](#method.swap), otherwise it acts like [`load_full`](#method.load_full) (including
    /// the limitations).
    ///
    /// The `current` can be specified as `&Arc`, [`Guard`](struct.Guard.html),
    /// [`&Guards`](struct.Guards.html) or as a raw pointer (but _not_ owned `Arc`). See the
    /// [`AsRaw`] trait.
    pub fn compare_and_swap<C>(&self, current: C, new: T) -> Guard<T, S>
    where
        C: AsRaw<T::Base>,
        S: CaS<T>,
    {
        // SAFETY: self.ptr always holds a pointer produced by T::into_ptr; the CaS strategy
        // handles both the success and failure cases.
        let protected = unsafe { self.strategy.compare_and_swap(&self.ptr, current, new) };
        Guard { inner: protected }
    }

    /// Read-Copy-Update of the pointer inside.
    ///
    /// This is useful in read-heavy situations with several threads that sometimes update the data
    /// pointed to. The readers can just repeatedly use [`load`](#method.load) without any locking.
    /// The writer uses this method to perform the update.
    ///
    /// In case there's only one thread that does updates or in case the next version is
    /// independent of the previous one, simple [`swap`](#method.swap) or [`store`](#method.store)
    /// is enough. Otherwise, it may be needed to retry the update operation if some other thread
    /// made an update in between. This is what this method does.
    ///
    /// # Examples
    ///
    /// This will *not* work as expected, because between loading and storing, some other thread
    /// might have updated the value.
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// #
    /// # use arc_swap::ArcSwap;
    /// # use crossbeam_utils::thread;
    /// #
    /// let cnt = ArcSwap::from_pointee(0);
    /// thread::scope(|scope| {
    ///     for _ in 0..10 {
    ///         scope.spawn(|_| {
    ///             let inner = cnt.load_full();
    ///             // Another thread might have stored some other number than what we have
    ///             // between the load and store.
    ///             cnt.store(Arc::new(*inner + 1));
    ///         });
    ///     }
    /// }).unwrap();
    /// // This will likely fail:
    /// // assert_eq!(10, *cnt.load_full());
    /// ```
    ///
    /// This will, but it can call the closure multiple times to retry:
    ///
    /// ```rust
    /// # use arc_swap::ArcSwap;
    /// # use crossbeam_utils::thread;
    /// #
    /// let cnt = ArcSwap::from_pointee(0);
    /// thread::scope(|scope| {
    ///     for _ in 0..10 {
    ///         scope.spawn(|_| cnt.rcu(|inner| **inner + 1));
    ///     }
    /// }).unwrap();
    /// assert_eq!(10, *cnt.load_full());
    /// ```
    ///
    /// Due to the retries, you might want to perform all the expensive operations *before* the
    /// rcu. As an example, if there's a cache of some computations as a map, and the map is cheap
    /// to clone but the computations are not, you could do something like this:
    ///
    /// ```rust
    /// # use std::collections::HashMap;
    /// #
    /// # use arc_swap::ArcSwap;
    /// # use once_cell::sync::Lazy;
    /// #
    /// fn expensive_computation(x: usize) -> usize {
    ///     x * 2 // Let's pretend multiplication is *really expensive expensive*
    /// }
    ///
    /// type Cache = HashMap<usize, usize>;
    ///
    /// static CACHE: Lazy<ArcSwap<Cache>> = Lazy::new(|| ArcSwap::default());
    ///
    /// fn cached_computation(x: usize) -> usize {
    ///     let cache = CACHE.load();
    ///     if let Some(result) = cache.get(&x) {
    ///         return *result;
    ///     }
    ///     // Not in cache. Compute and store.
    ///     // The expensive computation goes outside, so it is not retried.
    ///     let result = expensive_computation(x);
    ///     CACHE.rcu(|cache| {
    ///         // The cheaper clone of the cache can be retried if need be.
    ///         let mut cache = HashMap::clone(&cache);
    ///         cache.insert(x, result);
    ///         cache
    ///     });
    ///     result
    /// }
    ///
    /// assert_eq!(42, cached_computation(21));
    /// assert_eq!(42, cached_computation(21));
    /// ```
    ///
    /// # The cost of cloning
    ///
    /// Depending on the size of cache above, the cloning might not be as cheap. You can however
    /// use persistent data structures ‒ each modification creates a new data structure, but it
    /// shares most of the data with the old one (which is usually accomplished by using `Arc`s
    /// inside to share the unchanged values). Something like
    /// [`rpds`](https://crates.io/crates/rpds) or [`im`](https://crates.io/crates/im) might do
    /// what you need.
    pub fn rcu<R, F>(&self, mut f: F) -> T
    where
        F: FnMut(&T) -> R,
        R: Into<T>,
        S: CaS<T>,
    {
        let mut cur = self.load();
        loop {
            let new = f(&cur).into();
            let prev = self.compare_and_swap(&*cur, new);
            // The swap went through exactly if the returned previous value is still the one we
            // based the update on (pointer comparison).
            let swapped = ptr_eq(&*cur, &*prev);
            if swapped {
                return Guard::into_inner(prev);
            } else {
                // Someone else updated in between; retry the closure on the newer value.
                cur = prev;
            }
        }
    }

    /// Provides an access to an up to date projection of the carried data.
    ///
    /// # Motivation
    ///
    /// Sometimes, an application consists of components. Each component has its own configuration
    /// structure. The whole configuration contains all the smaller config parts.
    ///
    /// For the sake of separation and abstraction, it is not desirable to pass the whole
    /// configuration to each of the components. This allows the component to take only access to
    /// its own part.
    ///
    /// # Lifetimes & flexibility
    ///
    /// This method is not the most flexible way, as the returned type borrows into the `ArcSwap`.
    /// To provide access into eg. `Arc<ArcSwap<T>>`, you can create the [`Map`] type directly. See
    /// the [`access`] module.
    ///
    /// # Performance
    ///
    /// As the provided function is called on each load from the shared storage, it should
    /// generally be cheap. It is expected this will usually be just referencing of a field inside
    /// the structure.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::Arc;
    ///
    /// use arc_swap::ArcSwap;
    /// use arc_swap::access::Access;
    ///
    /// struct Cfg {
    ///     value: usize,
    /// }
    ///
    /// fn print_many_times<V: Access<usize>>(value: V) {
    ///     for _ in 0..25 {
    ///         let value = value.load();
    ///         println!("{}", *value);
    ///     }
    /// }
    ///
    /// let shared = ArcSwap::from_pointee(Cfg { value: 0 });
    /// let mapped = shared.map(|c: &Cfg| &c.value);
    /// crossbeam_utils::thread::scope(|s| {
    ///     // Will print some zeroes and some twos
    ///     s.spawn(|_| print_many_times(mapped));
    ///     s.spawn(|_| shared.store(Arc::new(Cfg { value: 2 })));
    /// }).expect("Something panicked in a thread");
    /// ```
    pub fn map<I, R, F>(&self, f: F) -> Map<&Self, I, F>
    where
        F: Fn(&I) -> &R + Clone,
        Self: Access<I>,
    {
        Map::new(self, f)
    }
}
691
/// An atomic storage for `Arc`.
///
/// This is a type alias only (with the [`DefaultStrategy`]). Most of its methods are described on
/// [`ArcSwapAny`](struct.ArcSwapAny.html).
pub type ArcSwap<T> = ArcSwapAny<Arc<T>>;
697
698impl<T, S: Strategy<Arc<T>>> ArcSwapAny<Arc<T>, S> {
699    /// A convenience constructor directly from the pointed-to value.
700    ///
701    /// Direct equivalent for `ArcSwap::new(Arc::new(val))`.
702    pub fn from_pointee(val: T) -> Self
703    where
704        S: Default,
705    {
706        Self::from(Arc::new(val))
707    }
708}
709
/// An atomic storage for `Option<Arc>`.
///
/// This is very similar to [`ArcSwap`](type.ArcSwap.html), but allows storing NULL values, which
/// is useful in some situations.
///
/// This is a type alias only (with the [`DefaultStrategy`]). Most of the methods are described on
/// [`ArcSwapAny`](struct.ArcSwapAny.html). Even though the examples there often use `ArcSwap`,
/// they are applicable to `ArcSwapOption` with appropriate changes.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use arc_swap::ArcSwapOption;
///
/// let shared = ArcSwapOption::from(None);
/// assert!(shared.load_full().is_none());
/// assert!(shared.swap(Some(Arc::new(42))).is_none());
/// assert_eq!(42, **shared.load_full().as_ref().unwrap());
/// ```
pub type ArcSwapOption<T> = ArcSwapAny<Option<Arc<T>>>;
731
732impl<T, S: Strategy<Option<Arc<T>>>> ArcSwapAny<Option<Arc<T>>, S> {
733    /// A convenience constructor directly from a pointed-to value.
734    ///
735    /// This just allocates the `Arc` under the hood.
736    ///
737    /// # Examples
738    ///
739    /// ```rust
740    /// use arc_swap::ArcSwapOption;
741    ///
742    /// let empty: ArcSwapOption<usize> = ArcSwapOption::from_pointee(None);
743    /// assert!(empty.load().is_none());
744    /// let non_empty: ArcSwapOption<usize> = ArcSwapOption::from_pointee(42);
745    /// assert_eq!(42, **non_empty.load().as_ref().unwrap());
746    /// ```
747    pub fn from_pointee<V: Into<Option<T>>>(val: V) -> Self
748    where
749        S: Default,
750    {
751        Self::new(val.into().map(Arc::new))
752    }
753
754    /// A convenience constructor for an empty value.
755    ///
756    /// This is equivalent to `ArcSwapOption::new(None)`.
757    pub fn empty() -> Self
758    where
759        S: Default,
760    {
761        Self::new(None)
762    }
763}
764
impl<T> ArcSwapOption<T> {
    /// A const-fn equivalent of [empty].
    ///
    /// Just like [empty], this creates an `None`-holding `ArcSwapOption`. The [empty] is, however,
    /// more general ‒ this is available only for the default strategy, while [empty] is for any
    /// [Default]-constructible strategy (current or future one).
    ///
    /// [empty]: ArcSwapAny::empty
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::ArcSwapOption;
    /// static GLOBAL_DATA: ArcSwapOption<usize> = ArcSwapOption::const_empty();
    ///
    /// assert!(GLOBAL_DATA.load().is_none());
    /// GLOBAL_DATA.store(Some(Arc::new(42)));
    /// assert_eq!(42, **GLOBAL_DATA.load().as_ref().unwrap());
    /// ```
    pub const fn const_empty() -> Self {
        Self {
            // Null pointer is how `None` is represented.
            ptr: AtomicPtr::new(ptr::null_mut()),
            _phantom_arc: PhantomData,
            // The strategy is built inline, field by field, because trait-based construction
            // (eg. `Default::default()`) can't be used in a `const fn`.
            strategy: HybridStrategy {
                _config: DefaultConfig,
            },
        }
    }
}
795
/// An atomic storage that doesn't share the internal generation locks with others.
///
/// This makes it bigger and it also might suffer contention (on the HW level) if used from many
/// threads at once. On the other hand, it can't block writes in other instances.
///
/// See the [`IndependentStrategy`] for further details.
// Being phased out. Will deprecate once we verify in production that the new strategy works fine.
#[doc(hidden)]
pub type IndependentArcSwap<T> = ArcSwapAny<Arc<T>, IndependentStrategy>;
805
/// Arc swap for the [Weak] pointer.
///
/// This is similar to [ArcSwap], but it doesn't store [Arc], it stores [Weak]. It doesn't keep the
/// data alive when pointed to.
///
/// This is a type alias only. Most of the methods are described on the
/// [`ArcSwapAny`](struct.ArcSwapAny.html).
///
/// Needs the `weak` feature turned on.
///
/// [Weak]: std::sync::Weak
#[cfg(feature = "weak")]
pub type ArcSwapWeak<T> = ArcSwapAny<alloc::sync::Weak<T>>;
819
// Generates a whole test module parameterized by the locking/caching strategy, so the same
// suite can be instantiated for several strategies (see the `t!` invocations below the macro).
macro_rules! t {
    ($name: ident, $strategy: ty) => {
        #[cfg(test)]
        mod $name {
            use alloc::borrow::ToOwned;
            use alloc::string::String;
            use alloc::vec::Vec;
            use core::sync::atomic::{self, AtomicUsize};

            use adaptive_barrier::{Barrier, PanicMode};
            use crossbeam_utils::thread;

            use super::*;

            const ITERATIONS: usize = 10;

            #[allow(deprecated)] // We use "deprecated" testing strategies in here.
            type As<T> = ArcSwapAny<Arc<T>, $strategy>;
            #[allow(deprecated)] // We use "deprecated" testing strategies in here.
            type Aso<T> = ArcSwapAny<Option<Arc<T>>, $strategy>;

            /// Similar to the one in doc tests of the lib, but more times and more intensive (we
            /// want to torture it a bit).
            #[test]
            #[cfg_attr(miri, ignore)] // Takes like 1 or 2 infinities to run under miri
            fn publish() {
                const READERS: usize = 2;
                for _ in 0..ITERATIONS {
                    let config = As::<String>::default();
                    let ended = AtomicUsize::new(0);
                    thread::scope(|scope| {
                        for _ in 0..READERS {
                            scope.spawn(|_| loop {
                                let cfg = config.load_full();
                                if !cfg.is_empty() {
                                    assert_eq!(*cfg, "New configuration");
                                    ended.fetch_add(1, Ordering::Relaxed);
                                    return;
                                }
                                // Deprecated alias of `core::hint::spin_loop`; covered by the
                                // crate-level `#![allow(deprecated)]`.
                                atomic::spin_loop_hint();
                            });
                        }
                        scope.spawn(|_| {
                            let new_conf = Arc::new("New configuration".to_owned());
                            config.store(new_conf);
                        });
                    })
                    .unwrap();
                    assert_eq!(READERS, ended.load(Ordering::Relaxed));
                    let arc = config.load_full();
                    // One reference in `arc`, one still inside `config`; the readers have
                    // dropped theirs by now.
                    assert_eq!(2, Arc::strong_count(&arc));
                    assert_eq!(0, Arc::weak_count(&arc));
                }
            }

            /// Similar to the doc tests of ArcSwap, but happens more times.
            #[test]
            fn swap_load() {
                for _ in 0..100 {
                    let arc = Arc::new(42);
                    let arc_swap = As::from(Arc::clone(&arc));
                    assert_eq!(42, **arc_swap.load());
                    // It can be read multiple times
                    assert_eq!(42, **arc_swap.load());

                    // Put a new one in there
                    let new_arc = Arc::new(0);
                    assert_eq!(42, *arc_swap.swap(Arc::clone(&new_arc)));
                    assert_eq!(0, **arc_swap.load());
                    // One loaded here, one in the arc_swap, one in new_arc
                    let loaded = arc_swap.load_full();
                    assert_eq!(3, Arc::strong_count(&loaded));
                    assert_eq!(0, Arc::weak_count(&loaded));
                    // The original got released from the arc_swap
                    assert_eq!(1, Arc::strong_count(&arc));
                    assert_eq!(0, Arc::weak_count(&arc));
                }
            }

            /// Two different writers publish two series of values. The readers check that it is
            /// always increasing in each series.
            ///
            /// For performance, we try to reuse the threads here.
            #[test]
            fn multi_writers() {
                let first_value = Arc::new((0, 0));
                let shared = As::from(Arc::clone(&first_value));
                const WRITER_CNT: usize = 2;
                const READER_CNT: usize = 3;
                #[cfg(miri)]
                const ITERATIONS: usize = 5;
                #[cfg(not(miri))]
                const ITERATIONS: usize = 100;
                const SEQ: usize = 50;
                let barrier = Barrier::new(PanicMode::Poison);
                thread::scope(|scope| {
                    for w in 0..WRITER_CNT {
                        // We need to move w into the closure. But we want to just reference the
                        // other things.
                        let mut barrier = barrier.clone();
                        let shared = &shared;
                        let first_value = &first_value;
                        scope.spawn(move |_| {
                            for _ in 0..ITERATIONS {
                                barrier.wait();
                                shared.store(Arc::clone(&first_value));
                                barrier.wait();
                                for i in 0..SEQ {
                                    shared.store(Arc::new((w, i + 1)));
                                }
                            }
                        });
                    }
                    for _ in 0..READER_CNT {
                        let mut barrier = barrier.clone();
                        let shared = &shared;
                        let first_value = &first_value;
                        scope.spawn(move |_| {
                            for _ in 0..ITERATIONS {
                                barrier.wait();
                                barrier.wait();
                                let mut previous = [0; WRITER_CNT];
                                let mut last = Arc::clone(&first_value);
                                loop {
                                    let cur = shared.load();
                                    if Arc::ptr_eq(&last, &cur) {
                                        atomic::spin_loop_hint();
                                        continue;
                                    }
                                    let (w, s) = **cur;
                                    assert!(previous[w] < s, "{:?} vs {:?}", previous, cur);
                                    previous[w] = s;
                                    last = Guard::into_inner(cur);
                                    if s == SEQ {
                                        break;
                                    }
                                }
                            }
                        });
                    }

                    // Drop our own clone of the barrier so only the spawned threads take part
                    // in the waits.
                    drop(barrier);
                })
                .unwrap();
            }

            #[test]
            fn load_null() {
                let shared = Aso::<usize>::default();
                let guard = shared.load();
                assert!(guard.is_none());
                shared.store(Some(Arc::new(42)));
                assert_eq!(42, **shared.load().as_ref().unwrap());
            }

            #[test]
            fn from_into() {
                let a = Arc::new(42);
                let shared = As::new(a);
                let guard = shared.load();
                let a = shared.into_inner();
                assert_eq!(42, *a);
                assert_eq!(2, Arc::strong_count(&a));
                drop(guard);
                assert_eq!(1, Arc::strong_count(&a));
            }

            // Note on the Relaxed order here. This should be enough, because there's that
            // barrier.wait in between that should do the synchronization of happens-before for us.
            // And using SeqCst would probably not help either, as there's nothing else with SeqCst
            // here in this test to relate it to.
            #[derive(Default)]
            struct ReportDrop(Arc<AtomicUsize>);
            impl Drop for ReportDrop {
                fn drop(&mut self) {
                    self.0.fetch_add(1, Ordering::Relaxed);
                }
            }

            /// Interaction of two threads about a guard and dropping it.
            ///
            /// We make sure everything works in timely manner (eg. dropping of stuff) even if multiple
            /// threads interact.
            ///
            /// The idea is:
            /// * Thread 1 loads a value.
            /// * Thread 2 replaces the shared value. The original value is not destroyed.
            /// * Thread 1 drops the guard. The value is destroyed and this is observable in both threads.
            #[test]
            fn guard_drop_in_thread() {
                for _ in 0..ITERATIONS {
                    let cnt = Arc::new(AtomicUsize::new(0));

                    let shared = As::from_pointee(ReportDrop(cnt.clone()));
                    assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely");
                    // We need the threads to wait for each other at places.
                    let sync = Barrier::new(PanicMode::Poison);

                    thread::scope(|scope| {
                        scope.spawn({
                            let sync = sync.clone();
                            |_| {
                                let mut sync = sync; // Move into the closure
                                let guard = shared.load();
                                sync.wait();
                                // Thread 2 replaces the shared value. We wait for it to confirm.
                                sync.wait();
                                drop(guard);
                                assert_eq!(cnt.load(Ordering::Relaxed), 1, "Value not dropped");
                                // Let thread 2 know we already dropped it.
                                sync.wait();
                            }
                        });

                        scope.spawn(|_| {
                            let mut sync = sync;
                            // Thread 1 loads, we wait for that
                            sync.wait();
                            shared.store(Default::default());
                            assert_eq!(
                                cnt.load(Ordering::Relaxed),
                                0,
                                "Dropped while still in use"
                            );
                            // Let thread 2 know we replaced it
                            sync.wait();
                            // Thread 1 drops its guard. We wait for it to confirm.
                            sync.wait();
                            assert_eq!(cnt.load(Ordering::Relaxed), 1, "Value not dropped");
                        });
                    })
                    .unwrap();
                }
            }

            /// Check dropping a lease in a different thread than it was created doesn't cause any
            /// problems.
            #[test]
            fn guard_drop_in_another_thread() {
                for _ in 0..ITERATIONS {
                    let cnt = Arc::new(AtomicUsize::new(0));
                    let shared = As::from_pointee(ReportDrop(cnt.clone()));
                    assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely");
                    let guard = shared.load();

                    drop(shared);
                    assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely");

                    thread::scope(|scope| {
                        scope.spawn(|_| {
                            drop(guard);
                        });
                    })
                    .unwrap();

                    assert_eq!(cnt.load(Ordering::Relaxed), 1, "Not dropped");
                }
            }

            #[test]
            fn load_option() {
                let shared = Aso::from_pointee(42);
                // The type annotation is not needed in real code; it is here only to check the
                // returned type matches.
                let opt: Option<_> = Guard::into_inner(shared.load());
                assert_eq!(42, *opt.unwrap());

                shared.store(None);
                assert!(shared.load().is_none());
            }

            // Check stuff can get formatted
            #[test]
            fn debug_impl() {
                let shared = As::from_pointee(42);
                assert_eq!("ArcSwapAny(42)", &format!("{:?}", shared));
                assert_eq!("42", &format!("{:?}", shared.load()));
            }

            #[test]
            fn display_impl() {
                let shared = As::from_pointee(42);
                assert_eq!("42", &format!("{}", shared));
                assert_eq!("42", &format!("{}", shared.load()));
            }

            // The following "tests" are not run, only compiled. They check that things that should be
            // Send/Sync actually are.
            fn _check_stuff_is_send_sync() {
                let shared = As::from_pointee(42);
                let moved = As::from_pointee(42);
                let shared_ref = &shared;
                let lease = shared.load();
                let lease_ref = &lease;
                let lease = shared.load();
                thread::scope(|s| {
                    s.spawn(move |_| {
                        let _ = lease;
                        let _ = lease_ref;
                        let _ = shared_ref;
                        let _ = moved;
                    });
                })
                .unwrap();
            }

            /// We have a callback in RCU. Check what happens if we access the value from within.
            #[test]
            fn recursive() {
                let shared = ArcSwap::from(Arc::new(0));

                shared.rcu(|i| {
                    if **i < 10 {
                        shared.rcu(|i| **i + 1);
                    }
                    **i
                });
                assert_eq!(10, **shared.load());
                assert_eq!(2, Arc::strong_count(&shared.load_full()));
            }

            /// A panic from within the rcu callback should not change anything.
            #[test]
            #[cfg(not(feature = "experimental-thread-local"))]
            fn rcu_panic() {
                use std::panic;
                let shared = ArcSwap::from(Arc::new(0));
                assert!(panic::catch_unwind(|| shared.rcu(|_| -> usize { panic!() })).is_err());
                assert_eq!(1, Arc::strong_count(&shared.swap(Arc::new(42))));
            }

            /// Handling null/none values
            #[test]
            fn nulls() {
                let shared = ArcSwapOption::from(Some(Arc::new(0)));
                let orig = shared.swap(None);
                assert_eq!(1, Arc::strong_count(&orig.unwrap()));
                let null = shared.load();
                assert!(null.is_none());
                let a = Arc::new(42);
                let orig = shared.compare_and_swap(ptr::null(), Some(Arc::clone(&a)));
                assert!(orig.is_none());
                assert_eq!(2, Arc::strong_count(&a));
                let orig = Guard::into_inner(shared.compare_and_swap(&None::<Arc<_>>, None));
                assert_eq!(3, Arc::strong_count(&a));
                assert!(ptr_eq(&a, &orig));
            }

            #[test]
            /// Multiple RCUs interacting.
            fn rcu() {
                const ITERATIONS: usize = 50;
                const THREADS: usize = 10;
                let shared = ArcSwap::from(Arc::new(0));
                thread::scope(|scope| {
                    for _ in 0..THREADS {
                        scope.spawn(|_| {
                            for _ in 0..ITERATIONS {
                                shared.rcu(|old| **old + 1);
                            }
                        });
                    }
                })
                .unwrap();
                assert_eq!(THREADS * ITERATIONS, **shared.load());
            }

            #[test]
            /// Make sure the reference count and compare_and_swap works as expected.
            fn cas_ref_cnt() {
                #[cfg(miri)]
                const ITERATIONS: usize = 10;
                #[cfg(not(miri))]
                const ITERATIONS: usize = 50;
                let shared = ArcSwap::from(Arc::new(0));
                for i in 0..ITERATIONS {
                    let orig = shared.load_full();
                    assert_eq!(i, *orig);
                    if i % 2 == 1 {
                        // One for orig, one for shared
                        assert_eq!(2, Arc::strong_count(&orig));
                    }
                    let n1 = Arc::new(i + 1);
                    // Fill up the slots sometimes
                    let fillup = || {
                        if i % 2 == 0 {
                            Some((0..ITERATIONS).map(|_| shared.load()).collect::<Vec<_>>())
                        } else {
                            None
                        }
                    };
                    let guards = fillup();
                    // Success
                    let prev = shared.compare_and_swap(&orig, Arc::clone(&n1));
                    assert!(ptr_eq(&orig, &prev));
                    drop(guards);
                    // One for orig, one for prev
                    assert_eq!(2, Arc::strong_count(&orig));
                    // One for n1, one for shared
                    assert_eq!(2, Arc::strong_count(&n1));
                    assert_eq!(i + 1, **shared.load());
                    let n2 = Arc::new(i);
                    drop(prev);
                    let guards = fillup();
                    // Failure
                    let prev = Guard::into_inner(shared.compare_and_swap(&orig, Arc::clone(&n2)));
                    drop(guards);
                    assert!(ptr_eq(&n1, &prev));
                    // One for orig
                    assert_eq!(1, Arc::strong_count(&orig));
                    // One for n1, one for shared, one for prev
                    assert_eq!(3, Arc::strong_count(&n1));
                    // n2 didn't get increased
                    assert_eq!(1, Arc::strong_count(&n2));
                    assert_eq!(i + 1, **shared.load());
                }

                let a = shared.load_full();
                // One inside shared, one for a
                assert_eq!(2, Arc::strong_count(&a));
                drop(shared);
                // Only a now
                assert_eq!(1, Arc::strong_count(&a));
            }
        }
    };
}
1246
// Instantiate the shared test suite for the default (production) strategy.
t!(tests_default, DefaultStrategy);
#[cfg(all(feature = "internal-test-strategies", test))]
#[allow(deprecated)]
mod internal_strategies {
    use super::*;
    // The same suite, but with an internal testing strategy.
    t!(
        tests_full_slots,
        crate::strategy::test_strategies::FillFastSlots
    );
}
1257
/// These tests assume details about the used strategy.
#[cfg(test)]
mod tests {
    use super::*;

    use alloc::vec::Vec;

    /// Accessing the value inside ArcSwap with Guards (and checks for the reference
    /// counts).
    #[test]
    fn load_cnt() {
        let value = Arc::new(0);
        let shared = ArcSwap::from(Arc::clone(&value));
        // One reference lives in `shared`, one in `value`.
        assert_eq!(2, Arc::strong_count(&value));
        let first_guard = shared.load();
        assert_eq!(0, **first_guard);
        // A fresh guard borrows a slot instead of bumping the reference count.
        assert_eq!(2, Arc::strong_count(&value));
        let second_guard = shared.load();
        // Unlike a read-lock, storing while guards are alive doesn't deadlock.
        shared.store(Arc::new(1));
        // After the replacement, each outstanding guard carries a full Arc of its own.
        assert_eq!(3, Arc::strong_count(&value));
        // Dropping a guard releases the Arc it carried.
        drop(second_guard);
        assert_eq!(2, Arc::strong_count(&value));
        let _extra = Arc::clone(&first_guard);
        assert_eq!(3, Arc::strong_count(&value));
        // The guard the clone came from can be dropped independently.
        drop(first_guard);
        assert_eq!(2, Arc::strong_count(&value));
        let guard = shared.load();
        assert_eq!(1, **guard);
        drop(shared);
        // A guard stays usable even after the ArcSwap itself is gone.
        assert_eq!(1, **guard);
        let survivor = Arc::clone(&guard);
        // One reference inside the guard, one in `survivor`.
        assert_eq!(2, Arc::strong_count(&survivor));
        drop(guard);
        assert_eq!(1, Arc::strong_count(&survivor));
    }

    /// There can be only limited amount of leases on one thread. Following ones are
    /// created, but contain full Arcs.
    #[test]
    fn lease_overflow() {
        #[cfg(miri)]
        const GUARD_COUNT: usize = 100;
        #[cfg(not(miri))]
        const GUARD_COUNT: usize = 1000;
        let value = Arc::new(0);
        let shared = ArcSwap::from(Arc::clone(&value));
        assert_eq!(2, Arc::strong_count(&value));
        // Take far more guards than there are fast slots on this thread.
        let mut guards = Vec::with_capacity(GUARD_COUNT);
        for _ in 0..GUARD_COUNT {
            guards.push(shared.load());
        }
        let baseline = Arc::strong_count(&value);
        // Once the slots ran out, the remaining guards had to hold full Arcs.
        assert!(baseline > 2);
        let extra_guard = shared.load();
        assert_eq!(baseline + 1, Arc::strong_count(&value));
        drop(extra_guard);
        assert_eq!(baseline, Arc::strong_count(&value));
        // The very first guard occupied a slot (no Arc inside), so removing it
        // leaves the reference count unchanged.
        guards.swap_remove(0);
        assert_eq!(baseline, Arc::strong_count(&value));
        // A new load reuses the vacated slot rather than cloning the Arc.
        let _reused = shared.load();
        assert_eq!(baseline, Arc::strong_count(&value));
    }
}