arc_swap/
lib.rs

1#![doc(test(attr(deny(warnings))))]
2#![warn(missing_docs)]
3#![cfg_attr(docsrs, feature(doc_cfg))]
4#![allow(deprecated)]
5#![cfg_attr(feature = "experimental-thread-local", no_std)]
6#![cfg_attr(feature = "experimental-thread-local", feature(thread_local))]
7
8//! Making [`Arc`] itself atomic
9//!
10//! The [`ArcSwap`] type is a container for an `Arc` that can be changed atomically. Semantically,
11//! it is similar to something like `Atomic<Arc<T>>` (if there was such a thing) or
12//! `RwLock<Arc<T>>` (but without the need for the locking). It is optimized for read-mostly
13//! scenarios, with consistent performance characteristics.
14//!
15//! # Motivation
16//!
17//! There are many situations in which one might want to have some data structure that is often
18//! read and seldom updated. Some examples might be a configuration of a service, routing tables,
19//! snapshot of some data that is renewed every few minutes, etc.
20//!
21//! In all these cases one needs:
22//! * Being able to read the current value of the data structure, fast, often and concurrently from
23//!   many threads.
24//! * Using the same version of the data structure over longer period of time ‒ a query should be
25//!   answered by a consistent version of data, a packet should be routed either by an old or by a
26//!   new version of the routing table but not by a combination, etc.
//! * Performing an update without disrupting the processing.
28//!
29//! The first idea would be to use [`RwLock<T>`][RwLock] and keep a read-lock for the whole time of
30//! processing. Update would, however, pause all processing until done.
31//!
32//! Better option would be to have [`RwLock<Arc<T>>`][RwLock]. Then one would lock, clone the [Arc]
33//! and unlock. This suffers from CPU-level contention (on the lock and on the reference count of
34//! the [Arc]) which makes it relatively slow. Depending on the implementation, an update may be
35//! blocked for arbitrary long time by a steady inflow of readers.
36//!
37//! ```rust
38//! # use std::sync::{Arc, RwLock};
39//! # use once_cell::sync::Lazy;
40//! # struct RoutingTable; struct Packet; impl RoutingTable { fn route(&self, _: Packet) {} }
41//! static ROUTING_TABLE: Lazy<RwLock<Arc<RoutingTable>>> = Lazy::new(|| {
42//!     RwLock::new(Arc::new(RoutingTable))
43//! });
44//!
45//! fn process_packet(packet: Packet) {
46//!     let table = Arc::clone(&ROUTING_TABLE.read().unwrap());
47//!     table.route(packet);
48//! }
49//! # fn main() { process_packet(Packet); }
50//! ```
51//!
52//! The [ArcSwap] can be used instead, which solves the above problems and has better performance
53//! characteristics than the [RwLock], both in contended and non-contended scenarios.
54//!
55//! ```rust
56//! # use arc_swap::ArcSwap;
57//! # use once_cell::sync::Lazy;
58//! # struct RoutingTable; struct Packet; impl RoutingTable { fn route(&self, _: Packet) {} }
59//! static ROUTING_TABLE: Lazy<ArcSwap<RoutingTable>> = Lazy::new(|| {
60//!     ArcSwap::from_pointee(RoutingTable)
61//! });
62//!
63//! fn process_packet(packet: Packet) {
64//!     let table = ROUTING_TABLE.load();
65//!     table.route(packet);
66//! }
67//! # fn main() { process_packet(Packet); }
68//! ```
69//!
70//! # Crate contents
71//!
72//! At the heart of the crate there are [`ArcSwap`] and [`ArcSwapOption`] types, containers for an
73//! [`Arc`] and [`Option<Arc>`][Option].
74//!
75//! Technically, these are type aliases for partial instantiations of the [`ArcSwapAny`] type. The
76//! [`ArcSwapAny`] is more flexible and allows tweaking of many things (can store other things than
77//! [`Arc`]s, can configure the locking [`Strategy`]). For details about the tweaking, see the
78//! documentation of the [`strategy`] module and the [`RefCnt`] trait.
79//!
80//! The [`cache`] module provides means for speeding up read access of the contained data at the
81//! cost of delayed reclamation.
82//!
83//! The [`access`] module can be used to do projections into the contained data to separate parts
84//! of application from each other (eg. giving a component access to only its own part of
85//! configuration while still having it reloaded as a whole).
86//!
87//! # Before using
88//!
89//! The data structure is a bit niche. Before using, please check the
90//! [limitations and common pitfalls][docs::limitations] and the [performance
91//! characteristics][docs::performance], including choosing the right [read
92//! operation][docs::performance#read-operations].
93//!
94//! You can also get an inspiration about what's possible in the [common patterns][docs::patterns]
95//! section.
96//!
97//! # Release 1.9
98//!
99//! Unfortunately, several orderings were too weak in the original code (proofs based on wrong
100//! assumptions / wrong reading of the standard). The 1.9 version should fix them, but probably
101//! introduces some performance degradation.
102//!
//! I hope to re-design and rewrite it from scratch eventually, with fewer SeqCst operations needed.
104//!
105//! # Examples
106//!
107//! ```rust
108//! use std::sync::Arc;
109//!
110//! use arc_swap::ArcSwap;
111//! use crossbeam_utils::thread;
112//!
113//! let config = ArcSwap::from(Arc::new(String::default()));
114//! thread::scope(|scope| {
115//!     scope.spawn(|_| {
116//!         let new_conf = Arc::new("New configuration".to_owned());
117//!         config.store(new_conf);
118//!     });
119//!     for _ in 0..10 {
120//!         scope.spawn(|_| {
121//!             loop {
122//!                 let cfg = config.load();
123//!                 if !cfg.is_empty() {
124//!                     assert_eq!(**cfg, "New configuration");
125//!                     return;
126//!                 }
127//!             }
128//!         });
129//!     }
130//! }).unwrap();
131//! ```
132//!
133//! [RwLock]: https://doc.rust-lang.org/std/sync/struct.RwLock.html
134
135#[rustversion::since(1.36.0)]
136#[allow(unused_imports)]
137#[cfg_attr(feature = "experimental-thread-local", macro_use)]
138extern crate alloc;
139
140pub mod access;
141mod as_raw;
142pub mod cache;
143mod compile_fail_tests;
144mod debt;
145pub mod docs;
146mod ref_cnt;
147#[cfg(feature = "serde")]
148mod serde;
149pub mod strategy;
150#[cfg(feature = "weak")]
151mod weak;
152
153// Hack to not rely on std on newer compilers (where alloc is stabilized) but still fall back to
154// std on old compilers.
155mod imports {
156    #[rustversion::since(1.36.0)]
157    pub use alloc::{boxed::Box, rc::Rc, sync::Arc};
158
159    #[rustversion::before(1.36.0)]
160    pub use std::{boxed::Box, rc::Rc, sync::Arc};
161}
162
163use core::borrow::Borrow;
164use core::fmt::{Debug, Display, Formatter, Result as FmtResult};
165use core::marker::PhantomData;
166use core::mem;
167use core::ops::Deref;
168use core::ptr;
169use core::sync::atomic::{AtomicPtr, Ordering};
170
171use crate::imports::Arc;
172
173use crate::access::{Access, Map};
174pub use crate::as_raw::AsRaw;
175pub use crate::cache::Cache;
176pub use crate::ref_cnt::RefCnt;
177use crate::strategy::hybrid::{DefaultConfig, HybridStrategy};
178use crate::strategy::sealed::Protected;
179use crate::strategy::{CaS, Strategy};
180pub use crate::strategy::{DefaultStrategy, IndependentStrategy};
181
/// A temporary storage of the pointer.
///
/// This guard object is returned from most loading methods (with the notable exception of
/// [`load_full`](struct.ArcSwapAny.html#method.load_full)). It dereferences to the smart pointer
/// loaded, so most operations are to be done using that.
pub struct Guard<T: RefCnt, S: Strategy<T> = DefaultStrategy> {
    // The strategy-specific protected representation of the loaded value (see the `strategy`
    // module). It is what makes the value safe to use while the guard is alive.
    inner: S::Protected,
}
190
191impl<T: RefCnt, S: Strategy<T>> Guard<T, S> {
192    /// Converts it into the held value.
193    ///
194    /// This, on occasion, may be a tiny bit faster than cloning the Arc or whatever is being held
195    /// inside.
196    // Associated function on purpose, because of deref
197    #[allow(clippy::wrong_self_convention)]
198    #[inline]
199    pub fn into_inner(lease: Self) -> T {
200        lease.inner.into_inner()
201    }
202
203    /// Create a guard for a given value `inner`.
204    ///
205    /// This can be useful on occasion to pass a specific object to code that expects or
206    /// wants to store a Guard.
207    ///
208    /// # Example
209    ///
210    /// ```rust
211    /// # use arc_swap::{ArcSwap, DefaultStrategy, Guard};
212    /// # use std::sync::Arc;
213    /// # let p = ArcSwap::from_pointee(42);
214    /// // Create two guards pointing to the same object
215    /// let g1 = p.load();
216    /// let g2 = Guard::<_, DefaultStrategy>::from_inner(Arc::clone(&*g1));
217    /// # drop(g2);
218    /// ```
219    pub fn from_inner(inner: T) -> Self {
220        Guard {
221            inner: S::Protected::from_inner(inner),
222        }
223    }
224}
225
226impl<T: RefCnt, S: Strategy<T>> Deref for Guard<T, S> {
227    type Target = T;
228    #[inline]
229    fn deref(&self) -> &T {
230        self.inner.borrow()
231    }
232}
233
234impl<T: RefCnt, S: Strategy<T>> From<T> for Guard<T, S> {
235    fn from(inner: T) -> Self {
236        Self::from_inner(inner)
237    }
238}
239
240impl<T: Default + RefCnt, S: Strategy<T>> Default for Guard<T, S> {
241    fn default() -> Self {
242        Self::from(T::default())
243    }
244}
245
246impl<T: Debug + RefCnt, S: Strategy<T>> Debug for Guard<T, S> {
247    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
248        self.deref().fmt(formatter)
249    }
250}
251
252impl<T: Display + RefCnt, S: Strategy<T>> Display for Guard<T, S> {
253    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
254        self.deref().fmt(formatter)
255    }
256}
257
258/// Comparison of two pointer-like things.
259// A and B are likely to *be* references, or thin wrappers around that. Calling that with extra
260// reference is just annoying.
261#[allow(clippy::needless_pass_by_value)]
262fn ptr_eq<Base, A, B>(a: A, b: B) -> bool
263where
264    A: AsRaw<Base>,
265    B: AsRaw<Base>,
266{
267    let a = a.as_raw();
268    let b = b.as_raw();
269    ptr::eq(a, b)
270}
271
/// An atomic storage for a reference counted smart pointer like [`Arc`] or `Option<Arc>`.
///
/// This is a storage where a smart pointer may live. It can be read and written atomically from
/// several threads, but doesn't act like a pointer itself.
///
/// One can be created [`from`] an [`Arc`]. To get the pointer back, use the
/// [`load`](#method.load).
///
/// # Note
///
/// This is the common generic implementation. This allows sharing the same code for storing
/// both `Arc` and `Option<Arc>` (and possibly other similar types).
///
/// In your code, you most probably want to interact with it through the
/// [`ArcSwap`](type.ArcSwap.html) and [`ArcSwapOption`](type.ArcSwapOption.html) aliases. However,
/// the methods they share are described here and are applicable to both of them. That's why the
/// examples here use `ArcSwap` ‒ but they could as well be written with `ArcSwapOption` or
/// `ArcSwapAny`.
///
/// # Type parameters
///
/// * `T`: The smart pointer to be kept inside. This crate provides implementation for `Arc<_>` and
///   `Option<Arc<_>>` (`Rc` too, but that one is not practically useful). But third party could
///   provide implementations of the [`RefCnt`] trait and plug in others.
/// * `S`: Chooses the [strategy] used to protect the data inside. They come with various
///   performance trade offs, the default [`DefaultStrategy`] is good rule of thumb for most use
///   cases.
///
/// # Examples
///
/// ```rust
/// # use std::sync::Arc;
/// # use arc_swap::ArcSwap;
/// let arc = Arc::new(42);
/// let arc_swap = ArcSwap::from(arc);
/// assert_eq!(42, **arc_swap.load());
/// // It can be read multiple times
/// assert_eq!(42, **arc_swap.load());
///
/// // Put a new one in there
/// let new_arc = Arc::new(0);
/// assert_eq!(42, *arc_swap.swap(new_arc));
/// assert_eq!(0, **arc_swap.load());
/// ```
///
/// # Known bugs
///
/// Currently, things like `ArcSwapAny<Option<Option<Arc<_>>>>` (notice the double Option) don't
/// work properly. A proper solution is being looked into
/// ([#81](https://github.com/vorner/arc-swap/issues/81)).
///
/// [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
/// [`from`]: https://doc.rust-lang.org/nightly/std/convert/trait.From.html#tymethod.from
/// [`RefCnt`]: trait.RefCnt.html
pub struct ArcSwapAny<T: RefCnt, S: Strategy<T> = DefaultStrategy> {
    // Notes: AtomicPtr needs Sized
    /// The actual pointer, extracted from the Arc.
    ptr: AtomicPtr<T::Base>,

    /// We are basically an Arc in disguise. Inherit parameters from Arc by pretending to contain
    /// it.
    _phantom_arc: PhantomData<T>,

    /// Strategy to protect the data.
    strategy: S,
}
338
339impl<T: RefCnt, S: Default + Strategy<T>> From<T> for ArcSwapAny<T, S> {
340    fn from(val: T) -> Self {
341        Self::with_strategy(val, S::default())
342    }
343}
344
impl<T: RefCnt, S: Strategy<T>> Drop for ArcSwapAny<T, S> {
    fn drop(&mut self) {
        // We have `&mut self`, so no other thread can touch the pointer; a non-atomic read
        // through get_mut is enough.
        let ptr = *self.ptr.get_mut();
        unsafe {
            // To pay any possible debts
            self.strategy.wait_for_readers(ptr, &self.ptr);
            // We are getting rid of the one stored ref count
            T::dec(ptr);
        }
    }
}
356
357impl<T, S: Strategy<T>> Debug for ArcSwapAny<T, S>
358where
359    T: Debug + RefCnt,
360{
361    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
362        formatter
363            .debug_tuple("ArcSwapAny")
364            .field(&self.load())
365            .finish()
366    }
367}
368
369impl<T, S: Strategy<T>> Display for ArcSwapAny<T, S>
370where
371    T: Display + RefCnt,
372{
373    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
374        self.load().fmt(formatter)
375    }
376}
377
378impl<T: RefCnt + Default, S: Default + Strategy<T>> Default for ArcSwapAny<T, S> {
379    fn default() -> Self {
380        Self::new(T::default())
381    }
382}
383
384impl<T: RefCnt, S: Strategy<T>> ArcSwapAny<T, S> {
385    /// Constructs a new storage.
386    pub fn new(val: T) -> Self
387    where
388        S: Default,
389    {
390        Self::from(val)
391    }
392
393    /// Constructs a new storage while customizing the protection strategy.
394    pub fn with_strategy(val: T, strategy: S) -> Self {
395        // The AtomicPtr requires *mut in its interface. We are more like *const, so we cast it.
396        // However, we always go back to *const right away when we get the pointer on the other
397        // side, so it should be fine.
398        let ptr = T::into_ptr(val);
399        Self {
400            ptr: AtomicPtr::new(ptr),
401            _phantom_arc: PhantomData,
402            strategy,
403        }
404    }
405
    /// Extracts the value inside.
    pub fn into_inner(mut self) -> T {
        // Exclusive ownership (`mut self`), so a non-atomic read is fine.
        let ptr = *self.ptr.get_mut();
        // To pay all the debts
        unsafe { self.strategy.wait_for_readers(ptr, &self.ptr) };
        // Skip our own Drop ‒ it would wait again and decrement the reference count we are
        // about to hand over to the caller.
        mem::forget(self);
        unsafe { T::from_ptr(ptr) }
    }
414
415    /// Loads the value.
416    ///
417    /// This makes another copy of the held pointer and returns it, atomically (it is
418    /// safe even when other thread stores into the same instance at the same time).
419    ///
420    /// The method is lock-free and wait-free, but usually more expensive than
421    /// [`load`](#method.load).
422    pub fn load_full(&self) -> T {
423        Guard::into_inner(self.load())
424    }
425
    /// Provides a temporary borrow of the object inside.
    ///
    /// This returns a proxy object allowing access to the thing held inside. However, there's
    /// only limited amount of possible cheap proxies in existence for each thread ‒ if more are
    /// created, it falls back to equivalent of [`load_full`](#method.load_full) internally.
    ///
    /// This is therefore a good choice to use for eg. searching a data structure or juggling the
    /// pointers around a bit, but not as something to store in larger amounts. The rule of thumb
    /// is this is suited for local variables on stack, but not in long-living data structures.
    ///
    /// # Consistency
    ///
    /// In case multiple related operations are to be done on the loaded value, it is generally
    /// recommended to call `load` just once and keep the result over calling it multiple times.
    /// First, keeping it is usually faster. But more importantly, the value can change between the
    /// calls to load, returning different objects, which could lead to logical inconsistency.
    /// Keeping the result makes sure the same object is used.
    ///
    /// ```rust
    /// # use arc_swap::ArcSwap;
    /// struct Point {
    ///     x: usize,
    ///     y: usize,
    /// }
    ///
    /// fn print_broken(p: &ArcSwap<Point>) {
    ///     // This is broken, because the x and y may come from different points,
    ///     // combining into an invalid point that never existed.
    ///     println!("X: {}", p.load().x);
    ///     // If someone changes the content now, between these two loads, we
    ///     // have a problem
    ///     println!("Y: {}", p.load().y);
    /// }
    ///
    /// fn print_correct(p: &ArcSwap<Point>) {
    ///     // Here we take a snapshot of one specific point so both x and y come
    ///     // from the same one.
    ///     let point = p.load();
    ///     println!("X: {}", point.x);
    ///     println!("Y: {}", point.y);
    /// }
    /// # let p = ArcSwap::from_pointee(Point { x: 10, y: 20 });
    /// # print_correct(&p);
    /// # print_broken(&p);
    /// ```
    #[inline]
    pub fn load(&self) -> Guard<T, S> {
        // The strategy produces its Protected wrapper around the loaded value, which keeps the
        // value usable for as long as the returned Guard lives.
        let protected = unsafe { self.strategy.load(&self.ptr) };
        Guard { inner: protected }
    }
476
477    /// Replaces the value inside this instance.
478    ///
479    /// Further loads will yield the new value. Uses [`swap`](#method.swap) internally.
480    pub fn store(&self, val: T) {
481        drop(self.swap(val));
482    }
483
    /// Exchanges the value inside this instance.
    ///
    /// Returns the previously stored value.
    pub fn swap(&self, new: T) -> T {
        let new = T::into_ptr(new);
        // AcqRel needed to publish the target of the new pointer and get the target of the old
        // one.
        //
        // SeqCst to synchronize the time lines with the group counters.
        let old = self.ptr.swap(new, Ordering::SeqCst);
        unsafe {
            // Pay off any debts on the old pointer before taking over its reference count and
            // handing it to the caller.
            self.strategy.wait_for_readers(old, &self.ptr);
            T::from_ptr(old)
        }
    }
497
    /// Swaps the stored Arc if it equals to `current`.
    ///
    /// If the current value of the `ArcSwapAny` equals to `current`, the `new` is stored inside.
    /// If not, nothing happens.
    ///
    /// The previous value (no matter if the swap happened or not) is returned. Therefore, if the
    /// returned value is equal to `current`, the swap happened. You want to do a pointer-based
    /// comparison to determine it.
    ///
    /// In other words, if the caller „guesses“ the value of current correctly, it acts like
    /// [`swap`](#method.swap), otherwise it acts like [`load_full`](#method.load_full) (including
    /// the limitations).
    ///
    /// The `current` can be specified as `&Arc`, [`Guard`](struct.Guard.html),
    /// [`&Guards`](struct.Guards.html) or as a raw pointer (but _not_ owned `Arc`). See the
    /// [`AsRaw`] trait.
    pub fn compare_and_swap<C>(&self, current: C, new: T) -> Guard<T, S>
    where
        C: AsRaw<T::Base>,
        S: CaS<T>,
    {
        // The strategy performs the compare-and-swap and wraps the previous value into its
        // Protected form, so it can be returned to the caller inside a Guard.
        let protected = unsafe { self.strategy.compare_and_swap(&self.ptr, current, new) };
        Guard { inner: protected }
    }
522
    /// Read-Copy-Update of the pointer inside.
    ///
    /// This is useful in read-heavy situations with several threads that sometimes update the data
    /// pointed to. The readers can just repeatedly use [`load`](#method.load) without any locking.
    /// The writer uses this method to perform the update.
    ///
    /// In case there's only one thread that does updates or in case the next version is
    /// independent of the previous one, simple [`swap`](#method.swap) or [`store`](#method.store)
    /// is enough. Otherwise, it may be needed to retry the update operation if some other thread
    /// made an update in between. This is what this method does.
    ///
    /// # Examples
    ///
    /// This will *not* work as expected, because between loading and storing, some other thread
    /// might have updated the value.
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// #
    /// # use arc_swap::ArcSwap;
    /// # use crossbeam_utils::thread;
    /// #
    /// let cnt = ArcSwap::from_pointee(0);
    /// thread::scope(|scope| {
    ///     for _ in 0..10 {
    ///         scope.spawn(|_| {
    ///            let inner = cnt.load_full();
    ///             // Another thread might have stored some other number than what we have
    ///             // between the load and store.
    ///             cnt.store(Arc::new(*inner + 1));
    ///         });
    ///     }
    /// }).unwrap();
    /// // This will likely fail:
    /// // assert_eq!(10, *cnt.load_full());
    /// ```
    ///
    /// This will, but it can call the closure multiple times to retry:
    ///
    /// ```rust
    /// # use arc_swap::ArcSwap;
    /// # use crossbeam_utils::thread;
    /// #
    /// let cnt = ArcSwap::from_pointee(0);
    /// thread::scope(|scope| {
    ///     for _ in 0..10 {
    ///         scope.spawn(|_| cnt.rcu(|inner| **inner + 1));
    ///     }
    /// }).unwrap();
    /// assert_eq!(10, *cnt.load_full());
    /// ```
    ///
    /// Due to the retries, you might want to perform all the expensive operations *before* the
    /// rcu. As an example, if there's a cache of some computations as a map, and the map is cheap
    /// to clone but the computations are not, you could do something like this:
    ///
    /// ```rust
    /// # use std::collections::HashMap;
    /// #
    /// # use arc_swap::ArcSwap;
    /// # use once_cell::sync::Lazy;
    /// #
    /// fn expensive_computation(x: usize) -> usize {
    ///     x * 2 // Let's pretend multiplication is *really expensive expensive*
    /// }
    ///
    /// type Cache = HashMap<usize, usize>;
    ///
    /// static CACHE: Lazy<ArcSwap<Cache>> = Lazy::new(|| ArcSwap::default());
    ///
    /// fn cached_computation(x: usize) -> usize {
    ///     let cache = CACHE.load();
    ///     if let Some(result) = cache.get(&x) {
    ///         return *result;
    ///     }
    ///     // Not in cache. Compute and store.
    ///     // The expensive computation goes outside, so it is not retried.
    ///     let result = expensive_computation(x);
    ///     CACHE.rcu(|cache| {
    ///         // The cheaper clone of the cache can be retried if need be.
    ///         let mut cache = HashMap::clone(&cache);
    ///         cache.insert(x, result);
    ///         cache
    ///     });
    ///     result
    /// }
    ///
    /// assert_eq!(42, cached_computation(21));
    /// assert_eq!(42, cached_computation(21));
    /// ```
    ///
    /// # The cost of cloning
    ///
    /// Depending on the size of cache above, the cloning might not be as cheap. You can however
    /// use persistent data structures ‒ each modification creates a new data structure, but it
    /// shares most of the data with the old one (which is usually accomplished by using `Arc`s
    /// inside to share the unchanged values). Something like
    /// [`rpds`](https://crates.io/crates/rpds) or [`im`](https://crates.io/crates/im) might do
    /// what you need.
    pub fn rcu<R, F>(&self, mut f: F) -> T
    where
        F: FnMut(&T) -> R,
        R: Into<T>,
        S: CaS<T>,
    {
        let mut cur = self.load();
        loop {
            // Compute the candidate replacement from the currently observed value.
            let new = f(&cur).into();
            let prev = self.compare_and_swap(&*cur, new);
            // Pointer comparison tells us whether our compare_and_swap went through.
            let swapped = ptr_eq(&*cur, &*prev);
            if swapped {
                // Success ‒ hand the previous value over to the caller.
                return Guard::into_inner(prev);
            } else {
                // Lost the race ‒ someone else stored in between. Retry on top of their value.
                cur = prev;
            }
        }
    }
640
    /// Provides an access to an up to date projection of the carried data.
    ///
    /// # Motivation
    ///
    /// Sometimes, an application consists of components. Each component has its own configuration
    /// structure. The whole configuration contains all the smaller config parts.
    ///
    /// For the sake of separation and abstraction, it is not desirable to pass the whole
    /// configuration to each of the components. This allows the component to take only access to
    /// its own part.
    ///
    /// # Lifetimes & flexibility
    ///
    /// This method is not the most flexible way, as the returned type borrows into the `ArcSwap`.
    /// To provide access into eg. `Arc<ArcSwap<T>>`, you can create the [`Map`] type directly. See
    /// the [`access`] module.
    ///
    /// # Performance
    ///
    /// As the provided function is called on each load from the shared storage, it should
    /// generally be cheap. It is expected this will usually be just referencing of a field inside
    /// the structure.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::Arc;
    ///
    /// use arc_swap::ArcSwap;
    /// use arc_swap::access::Access;
    ///
    /// struct Cfg {
    ///     value: usize,
    /// }
    ///
    /// fn print_many_times<V: Access<usize>>(value: V) {
    ///     for _ in 0..25 {
    ///         let value = value.load();
    ///         println!("{}", *value);
    ///     }
    /// }
    ///
    /// let shared = ArcSwap::from_pointee(Cfg { value: 0 });
    /// let mapped = shared.map(|c: &Cfg| &c.value);
    /// crossbeam_utils::thread::scope(|s| {
    ///     // Will print some zeroes and some twos
    ///     s.spawn(|_| print_many_times(mapped));
    ///     s.spawn(|_| shared.store(Arc::new(Cfg { value: 2 })));
    /// }).expect("Something panicked in a thread");
    /// ```
    pub fn map<I, R, F>(&self, f: F) -> Map<&Self, I, F>
    where
        F: Fn(&I) -> &R + Clone,
        Self: Access<I>,
    {
        // All the projection logic lives in the access module; this just ties it to &self.
        Map::new(self, f)
    }
698}
699
/// An atomic storage for `Arc`.
///
/// This is a type alias only. Most of its methods are described on
/// [`ArcSwapAny`](struct.ArcSwapAny.html). It uses the [`DefaultStrategy`].
pub type ArcSwap<T> = ArcSwapAny<Arc<T>>;
705
706impl<T, S: Strategy<Arc<T>>> ArcSwapAny<Arc<T>, S> {
707    /// A convenience constructor directly from the pointed-to value.
708    ///
709    /// Direct equivalent for `ArcSwap::new(Arc::new(val))`.
710    pub fn from_pointee(val: T) -> Self
711    where
712        S: Default,
713    {
714        Self::from(Arc::new(val))
715    }
716}
717
/// An atomic storage for `Option<Arc>`.
///
/// This is very similar to [`ArcSwap`](type.ArcSwap.html), but allows storing NULL values, which
/// is useful in some situations.
///
/// This is a type alias only. Most of the methods are described on
/// [`ArcSwapAny`](struct.ArcSwapAny.html). Even though the examples there often use `ArcSwap`,
/// they are applicable to `ArcSwapOption` with appropriate changes.
///
/// Like [`ArcSwap`](type.ArcSwap.html), it uses the [`DefaultStrategy`].
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use arc_swap::ArcSwapOption;
///
/// let shared = ArcSwapOption::from(None);
/// assert!(shared.load_full().is_none());
/// assert!(shared.swap(Some(Arc::new(42))).is_none());
/// assert_eq!(42, **shared.load_full().as_ref().unwrap());
/// ```
pub type ArcSwapOption<T> = ArcSwapAny<Option<Arc<T>>>;
739
740impl<T, S: Strategy<Option<Arc<T>>>> ArcSwapAny<Option<Arc<T>>, S> {
741    /// A convenience constructor directly from a pointed-to value.
742    ///
743    /// This just allocates the `Arc` under the hood.
744    ///
745    /// # Examples
746    ///
747    /// ```rust
748    /// use arc_swap::ArcSwapOption;
749    ///
750    /// let empty: ArcSwapOption<usize> = ArcSwapOption::from_pointee(None);
751    /// assert!(empty.load().is_none());
752    /// let non_empty: ArcSwapOption<usize> = ArcSwapOption::from_pointee(42);
753    /// assert_eq!(42, **non_empty.load().as_ref().unwrap());
754    /// ```
755    pub fn from_pointee<V: Into<Option<T>>>(val: V) -> Self
756    where
757        S: Default,
758    {
759        Self::new(val.into().map(Arc::new))
760    }
761
762    /// A convenience constructor for an empty value.
763    ///
764    /// This is equivalent to `ArcSwapOption::new(None)`.
765    pub fn empty() -> Self
766    where
767        S: Default,
768    {
769        Self::new(None)
770    }
771}
772
impl<T> ArcSwapOption<T> {
    /// A const-fn equivalent of [empty].
    ///
    /// Just like [empty], this creates an `None`-holding `ArcSwapOption`. The [empty] is, however,
    /// more general ‒ this is available only for the default strategy, while [empty] is for any
    /// [Default]-constructible strategy (current or future one).
    ///
    /// [empty]: ArcSwapAny::empty
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::ArcSwapOption;
    /// static GLOBAL_DATA: ArcSwapOption<usize> = ArcSwapOption::const_empty();
    ///
    /// assert!(GLOBAL_DATA.load().is_none());
    /// GLOBAL_DATA.store(Some(Arc::new(42)));
    /// assert_eq!(42, **GLOBAL_DATA.load().as_ref().unwrap());
    /// ```
    pub const fn const_empty() -> Self {
        Self {
            // The null pointer stands for the `None` value, so no allocation is needed here.
            ptr: AtomicPtr::new(ptr::null_mut()),
            _phantom_arc: PhantomData,
            // Built by hand because trait methods (like Default::default) can't be called in a
            // const fn.
            strategy: HybridStrategy {
                _config: DefaultConfig,
            },
        }
    }
}
803
/// An atomic storage that doesn't share the internal generation locks with others.
///
/// This makes it bigger and it also might suffer contention (on the HW level) if used from many
/// threads at once. On the other hand, it can't block writes in other instances.
///
/// In other words, this is [`ArcSwapAny`] with the [`IndependentStrategy`] plugged in.
///
/// See the [`IndependentStrategy`] for further details.
// Being phased out. Will deprecate once we verify in production that the new strategy works fine.
#[doc(hidden)]
pub type IndependentArcSwap<T> = ArcSwapAny<Arc<T>, IndependentStrategy>;
813
/// Arc swap for the [Weak] pointer.
///
/// This is similar to [ArcSwap], but it doesn't store [Arc], it stores [Weak]. It doesn't keep the
/// data alive when pointed to.
///
/// This is a type alias only (for [`ArcSwapAny`] over `Weak<T>` with the default strategy). Most
/// of the methods are described on the [`ArcSwapAny`](struct.ArcSwapAny.html).
///
/// Needs the `weak` feature turned on.
///
/// [Weak]: std::sync::Weak
#[cfg(feature = "weak")]
pub type ArcSwapWeak<T> = ArcSwapAny<alloc::sync::Weak<T>>;
827
// Instantiates the whole test suite once per strategy.
//
// `$name` becomes the name of the generated test module, `$strategy` is the strategy type that
// gets plugged into `ArcSwapAny`. See the `t!` invocations near the bottom of the file.
macro_rules! t {
    ($name: ident, $strategy: ty) => {
        #[cfg(test)]
        mod $name {
            use alloc::borrow::ToOwned;
            use alloc::string::String;
            use alloc::vec::Vec;
            use core::sync::atomic::{self, AtomicUsize};

            use adaptive_barrier::{Barrier, PanicMode};
            use crossbeam_utils::thread;

            use super::*;

            // How many times the torture tests repeat.
            const ITERATIONS: usize = 10;

            #[allow(deprecated)] // We use "deprecated" testing strategies in here.
            type As<T> = ArcSwapAny<Arc<T>, $strategy>;
            #[allow(deprecated)] // We use "deprecated" testing strategies in here.
            type Aso<T> = ArcSwapAny<Option<Arc<T>>, $strategy>;
            /// Similar to the one in doc tests of the lib, but more times and more intensive (we
            /// want to torture it a bit).
            #[test]
            #[cfg_attr(miri, ignore)] // Takes like 1 or 2 infinities to run under miri
            fn publish() {
                const READERS: usize = 2;
                for _ in 0..ITERATIONS {
                    let config = As::<String>::default();
                    let ended = AtomicUsize::new(0);
                    thread::scope(|scope| {
                        for _ in 0..READERS {
                            // Readers spin until they observe the value published by the writer.
                            scope.spawn(|_| loop {
                                let cfg = config.load_full();
                                if !cfg.is_empty() {
                                    assert_eq!(*cfg, "New configuration");
                                    ended.fetch_add(1, Ordering::Relaxed);
                                    return;
                                }
                                // NOTE(review): deprecated in favour of core::hint::spin_loop ‒
                                // presumably kept for MSRV reasons, confirm before changing.
                                atomic::spin_loop_hint();
                            });
                        }
                        scope.spawn(|_| {
                            let new_conf = Arc::new("New configuration".to_owned());
                            config.store(new_conf);
                        });
                    })
                    .unwrap();
                    // Every reader must have seen the published value exactly once.
                    assert_eq!(READERS, ended.load(Ordering::Relaxed));
                    let arc = config.load_full();
                    // One ref in `config`, one in `arc` ‒ nothing leaked by the reader guards.
                    assert_eq!(2, Arc::strong_count(&arc));
                    assert_eq!(0, Arc::weak_count(&arc));
                }
            }
882
883            /// Similar to the doc tests of ArcSwap, but happens more times.
884            #[test]
885            fn swap_load() {
886                for _ in 0..100 {
887                    let arc = Arc::new(42);
888                    let arc_swap = As::from(Arc::clone(&arc));
889                    assert_eq!(42, **arc_swap.load());
890                    // It can be read multiple times
891                    assert_eq!(42, **arc_swap.load());
892
893                    // Put a new one in there
894                    let new_arc = Arc::new(0);
895                    assert_eq!(42, *arc_swap.swap(Arc::clone(&new_arc)));
896                    assert_eq!(0, **arc_swap.load());
897                    // One loaded here, one in the arc_swap, one in new_arc
898                    let loaded = arc_swap.load_full();
899                    assert_eq!(3, Arc::strong_count(&loaded));
900                    assert_eq!(0, Arc::weak_count(&loaded));
901                    // The original got released from the arc_swap
902                    assert_eq!(1, Arc::strong_count(&arc));
903                    assert_eq!(0, Arc::weak_count(&arc));
904                }
905            }
906
            /// Two different writers publish two series of values. The readers check that it is
            /// always increasing in each series.
            ///
            /// For performance, we try to reuse the threads here.
            #[test]
            fn multi_writers() {
                let first_value = Arc::new((0, 0));
                let shared = As::from(Arc::clone(&first_value));
                const WRITER_CNT: usize = 2;
                const READER_CNT: usize = 3;
                #[cfg(miri)]
                const ITERATIONS: usize = 5;
                #[cfg(not(miri))]
                const ITERATIONS: usize = 100;
                const SEQ: usize = 50;
                let barrier = Barrier::new(PanicMode::Poison);
                thread::scope(|scope| {
                    for w in 0..WRITER_CNT {
                        // We need to move w into the closure. But we want to just reference the
                        // other things.
                        let mut barrier = barrier.clone();
                        let shared = &shared;
                        let first_value = &first_value;
                        scope.spawn(move |_| {
                            for _ in 0..ITERATIONS {
                                // First barrier: everyone ready; reset to the sentinel value.
                                barrier.wait();
                                shared.store(Arc::clone(&first_value));
                                // Second barrier: readers saw the reset; start publishing.
                                barrier.wait();
                                for i in 0..SEQ {
                                    shared.store(Arc::new((w, i + 1)));
                                }
                            }
                        });
                    }
                    for _ in 0..READER_CNT {
                        let mut barrier = barrier.clone();
                        let shared = &shared;
                        let first_value = &first_value;
                        scope.spawn(move |_| {
                            for _ in 0..ITERATIONS {
                                barrier.wait();
                                barrier.wait();
                                let mut previous = [0; WRITER_CNT];
                                let mut last = Arc::clone(&first_value);
                                loop {
                                    let cur = shared.load();
                                    // Nothing new published since the last look; retry.
                                    if Arc::ptr_eq(&last, &cur) {
                                        atomic::spin_loop_hint();
                                        continue;
                                    }
                                    let (w, s) = **cur;
                                    // Within one writer's series the sequence must only grow.
                                    assert!(previous[w] < s, "{:?} vs {:?}", previous, cur);
                                    previous[w] = s;
                                    last = Guard::into_inner(cur);
                                    if s == SEQ {
                                        break;
                                    }
                                }
                            }
                        });
                    }

                    // Drop our own handle so poisoning/counting works out for the spawned ones.
                    drop(barrier);
                })
                .unwrap();
            }
973
974            #[test]
975            fn load_null() {
976                let shared = Aso::<usize>::default();
977                let guard = shared.load();
978                assert!(guard.is_none());
979                shared.store(Some(Arc::new(42)));
980                assert_eq!(42, **shared.load().as_ref().unwrap());
981            }
982
983            #[test]
984            fn from_into() {
985                let a = Arc::new(42);
986                let shared = As::new(a);
987                let guard = shared.load();
988                let a = shared.into_inner();
989                assert_eq!(42, *a);
990                assert_eq!(2, Arc::strong_count(&a));
991                drop(guard);
992                assert_eq!(1, Arc::strong_count(&a));
993            }
994
            // Note on the Relaxed order here. This should be enough, because there's that
            // barrier.wait in between that should do the synchronization of happens-before for us.
            // And using SeqCst would probably not help either, as there's nothing else with SeqCst
            // here in this test to relate it to.
            /// Bumps the shared counter on drop, so tests can observe when (and how many times)
            /// a value got destroyed.
            #[derive(Default)]
            struct ReportDrop(Arc<AtomicUsize>);
            impl Drop for ReportDrop {
                fn drop(&mut self) {
                    self.0.fetch_add(1, Ordering::Relaxed);
                }
            }
1006
            /// Interaction of two threads about a guard and dropping it.
            ///
            /// We make sure everything works in timely manner (eg. dropping of stuff) even if multiple
            /// threads interact.
            ///
            /// The idea is:
            /// * Thread 1 loads a value.
            /// * Thread 2 replaces the shared value. The original value is not destroyed.
            /// * Thread 1 drops the guard. The value is destroyed and this is observable in both threads.
            #[test]
            fn guard_drop_in_thread() {
                for _ in 0..ITERATIONS {
                    let cnt = Arc::new(AtomicUsize::new(0));

                    let shared = As::from_pointee(ReportDrop(cnt.clone()));
                    assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely");
                    // We need the threads to wait for each other at places.
                    let sync = Barrier::new(PanicMode::Poison);

                    thread::scope(|scope| {
                        // Thread 1 ‒ holds the guard across the replacement done by thread 2.
                        scope.spawn({
                            let sync = sync.clone();
                            |_| {
                                let mut sync = sync; // Move into the closure
                                let guard = shared.load();
                                sync.wait();
                                // Thread 2 replaces the shared value. We wait for it to confirm.
                                sync.wait();
                                drop(guard);
                                assert_eq!(cnt.load(Ordering::Relaxed), 1, "Value not dropped");
                                // Let thread 2 know we already dropped it.
                                sync.wait();
                            }
                        });

                        // Thread 2 ‒ replaces the value while thread 1 still holds its guard.
                        scope.spawn(|_| {
                            let mut sync = sync;
                            // Thread 1 loads, we wait for that
                            sync.wait();
                            shared.store(Default::default());
                            assert_eq!(
                                cnt.load(Ordering::Relaxed),
                                0,
                                "Dropped while still in use"
                            );
                            // Let thread 2 know we replaced it
                            sync.wait();
                            // Thread 1 drops its guard. We wait for it to confirm.
                            sync.wait();
                            assert_eq!(cnt.load(Ordering::Relaxed), 1, "Value not dropped");
                        });
                    })
                    .unwrap();
                }
            }
1062
            /// Check dropping a lease in a different thread than it was created doesn't cause any
            /// problems.
            #[test]
            fn guard_drop_in_another_thread() {
                for _ in 0..ITERATIONS {
                    let cnt = Arc::new(AtomicUsize::new(0));
                    let shared = As::from_pointee(ReportDrop(cnt.clone()));
                    assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely");
                    let guard = shared.load();

                    // Even with the storage gone, the guard must keep the value alive.
                    drop(shared);
                    assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely");

                    // Dropping in a thread other than the one that created the guard.
                    thread::scope(|scope| {
                        scope.spawn(|_| {
                            drop(guard);
                        });
                    })
                    .unwrap();

                    assert_eq!(cnt.load(Ordering::Relaxed), 1, "Not dropped");
                }
            }
1086
1087            #[test]
1088            fn load_option() {
1089                let shared = Aso::from_pointee(42);
1090                // The type here is not needed in real code, it's just addition test the type matches.
1091                let opt: Option<_> = Guard::into_inner(shared.load());
1092                assert_eq!(42, *opt.unwrap());
1093
1094                shared.store(None);
1095                assert!(shared.load().is_none());
1096            }
1097
1098            // Check stuff can get formatted
1099            #[test]
1100            fn debug_impl() {
1101                let shared = As::from_pointee(42);
1102                assert_eq!("ArcSwapAny(42)", &format!("{:?}", shared));
1103                assert_eq!("42", &format!("{:?}", shared.load()));
1104            }
1105
1106            #[test]
1107            fn display_impl() {
1108                let shared = As::from_pointee(42);
1109                assert_eq!("42", &format!("{}", shared));
1110                assert_eq!("42", &format!("{}", shared.load()));
1111            }
1112
            // The following "tests" are not run, only compiled. They check that things that should be
            // Send/Sync actually are.
            fn _check_stuff_is_send_sync() {
                let shared = As::from_pointee(42);
                let moved = As::from_pointee(42);
                let shared_ref = &shared;
                let lease = shared.load();
                let lease_ref = &lease;
                let lease = shared.load();
                // Spawning requires everything captured by the closure to be Send; the references
                // among them additionally require the pointed-to things to be Sync.
                thread::scope(|s| {
                    s.spawn(move |_| {
                        let _ = lease;
                        let _ = lease_ref;
                        let _ = shared_ref;
                        let _ = moved;
                    });
                })
                .unwrap();
            }
1132
            /// We have a callback in RCU. Check what happens if we access the value from within.
            #[test]
            fn recursive() {
                let shared = ArcSwap::from(Arc::new(0));

                shared.rcu(|i| {
                    // The nested rcu keeps bumping the value while the outer callback runs;
                    // the outer one then republishes whatever it originally saw.
                    if **i < 10 {
                        shared.rcu(|i| **i + 1);
                    }
                    **i
                });
                assert_eq!(10, **shared.load());
                // One ref inside shared, one in the temporary from load_full ‒ nothing leaked.
                assert_eq!(2, Arc::strong_count(&shared.load_full()));
            }
1147
            /// A panic from within the rcu callback should not change anything.
            #[test]
            #[cfg(not(feature = "experimental-thread-local"))]
            fn rcu_panic() {
                use std::panic;
                let shared = ArcSwap::from(Arc::new(0));
                assert!(panic::catch_unwind(|| shared.rcu(|_| -> usize { panic!() })).is_err());
                // The panicked rcu leaked no reference: after swapping the old value out, we hold
                // the only remaining one.
                assert_eq!(1, Arc::strong_count(&shared.swap(Arc::new(42))));
            }
1157
            /// Handling null/none values
            #[test]
            fn nulls() {
                let shared = ArcSwapOption::from(Some(Arc::new(0)));
                let orig = shared.swap(None);
                // We got the only remaining reference to the original value.
                assert_eq!(1, Arc::strong_count(&orig.unwrap()));
                let null = shared.load();
                assert!(null.is_none());
                let a = Arc::new(42);
                // Compare against null ‒ matches, so `a` gets stored (second ref).
                let orig = shared.compare_and_swap(ptr::null(), Some(Arc::clone(&a)));
                assert!(orig.is_none());
                assert_eq!(2, Arc::strong_count(&a));
                // This one fails (shared is no longer None); nothing is stored, but the current
                // value comes back ‒ one more reference to `a`.
                let orig = Guard::into_inner(shared.compare_and_swap(&None::<Arc<_>>, None));
                assert_eq!(3, Arc::strong_count(&a));
                assert!(ptr_eq(&a, &orig));
            }
1174
1175            #[test]
1176            /// Multiple RCUs interacting.
1177            fn rcu() {
1178                const ITERATIONS: usize = 50;
1179                const THREADS: usize = 10;
1180                let shared = ArcSwap::from(Arc::new(0));
1181                thread::scope(|scope| {
1182                    for _ in 0..THREADS {
1183                        scope.spawn(|_| {
1184                            for _ in 0..ITERATIONS {
1185                                shared.rcu(|old| **old + 1);
1186                            }
1187                        });
1188                    }
1189                })
1190                .unwrap();
1191                assert_eq!(THREADS * ITERATIONS, **shared.load());
1192            }
1193
            #[test]
            /// Make sure the reference count and compare_and_swap works as expected.
            fn cas_ref_cnt() {
                #[cfg(miri)]
                const ITERATIONS: usize = 10;
                #[cfg(not(miri))]
                const ITERATIONS: usize = 50;
                let shared = ArcSwap::from(Arc::new(0));
                for i in 0..ITERATIONS {
                    let orig = shared.load_full();
                    assert_eq!(i, *orig);
                    if i % 2 == 1 {
                        // One for orig, one for shared
                        assert_eq!(2, Arc::strong_count(&orig));
                    }
                    let n1 = Arc::new(i + 1);
                    // Fill up the slots sometimes (on even iterations), to exercise both the
                    // fast-slot and the full-Arc code paths.
                    let fillup = || {
                        if i % 2 == 0 {
                            Some((0..ITERATIONS).map(|_| shared.load()).collect::<Vec<_>>())
                        } else {
                            None
                        }
                    };
                    let guards = fillup();
                    // Success ‒ shared still holds `orig`, so `n1` gets stored.
                    let prev = shared.compare_and_swap(&orig, Arc::clone(&n1));
                    assert!(ptr_eq(&orig, &prev));
                    drop(guards);
                    // One for orig, one for prev
                    assert_eq!(2, Arc::strong_count(&orig));
                    // One for n1, one for shared
                    assert_eq!(2, Arc::strong_count(&n1));
                    assert_eq!(i + 1, **shared.load());
                    let n2 = Arc::new(i);
                    drop(prev);
                    let guards = fillup();
                    // Failure ‒ shared holds `n1`, not `orig`; nothing is stored.
                    let prev = Guard::into_inner(shared.compare_and_swap(&orig, Arc::clone(&n2)));
                    drop(guards);
                    assert!(ptr_eq(&n1, &prev));
                    // One for orig
                    assert_eq!(1, Arc::strong_count(&orig));
                    // One for n1, one for shared, one for prev
                    assert_eq!(3, Arc::strong_count(&n1));
                    // n2 didn't get increased
                    assert_eq!(1, Arc::strong_count(&n2));
                    assert_eq!(i + 1, **shared.load());
                }

                let a = shared.load_full();
                // One inside shared, one for a
                assert_eq!(2, Arc::strong_count(&a));
                drop(shared);
                // Only a now
                assert_eq!(1, Arc::strong_count(&a));
            }
        }
    };
}
1254
// Run the whole suite against the default (production) strategy.
t!(tests_default, DefaultStrategy);
// And again against internal testing-only strategies, behind a feature gate.
#[cfg(all(feature = "internal-test-strategies", test))]
#[allow(deprecated)]
mod internal_strategies {
    use super::*;
    t!(
        tests_full_slots,
        crate::strategy::test_strategies::FillFastSlots
    );
}
1265
/// These tests assume details about the used strategy.
///
/// In particular, they rely on the exact reference counts the default strategy produces ‒ when a
/// loaded guard is "free" and when it falls back to holding a full Arc.
#[cfg(test)]
mod tests {
    use super::*;

    use alloc::vec::Vec;
1272
    /// Accessing the value inside ArcSwap with Guards (and checks for the reference
    /// counts).
    #[test]
    fn load_cnt() {
        let a = Arc::new(0);
        let shared = ArcSwap::from(Arc::clone(&a));
        // One in shared, one in a
        assert_eq!(2, Arc::strong_count(&a));
        let guard = shared.load();
        assert_eq!(0, **guard);
        // The guard doesn't have its own ref count now
        assert_eq!(2, Arc::strong_count(&a));
        let guard_2 = shared.load();
        // Unlike with guard, this does not deadlock
        shared.store(Arc::new(1));
        // But now, each guard got a full Arc inside it
        assert_eq!(3, Arc::strong_count(&a));
        // And when we get rid of them, they disappear
        drop(guard_2);
        assert_eq!(2, Arc::strong_count(&a));
        // Cloning an Arc out of a guard takes its own reference.
        let _b = Arc::clone(&guard);
        assert_eq!(3, Arc::strong_count(&a));
        // We can drop the guard it came from
        drop(guard);
        assert_eq!(2, Arc::strong_count(&a));
        let guard = shared.load();
        assert_eq!(1, **guard);
        drop(shared);
        // We can still use the guard after the shared disappears
        assert_eq!(1, **guard);
        let ptr = Arc::clone(&guard);
        // One in ptr, one in guard (shared is gone by now)
        assert_eq!(2, Arc::strong_count(&ptr));
        drop(guard);
        assert_eq!(1, Arc::strong_count(&ptr));
    }
1309
    /// There can be only limited amount of leases on one thread. Following ones are
    /// created, but contain full Arcs.
    #[test]
    fn lease_overflow() {
        #[cfg(miri)]
        const GUARD_COUNT: usize = 100;
        #[cfg(not(miri))]
        const GUARD_COUNT: usize = 1000;
        let a = Arc::new(0);
        let shared = ArcSwap::from(Arc::clone(&a));
        assert_eq!(2, Arc::strong_count(&a));
        // Many guards at once ‒ way more than the thread's fast slots can track.
        let mut guards = (0..GUARD_COUNT).map(|_| shared.load()).collect::<Vec<_>>();
        let count = Arc::strong_count(&a);
        // Some of the guards had to take a full reference.
        assert!(count > 2);
        let guard = shared.load();
        assert_eq!(count + 1, Arc::strong_count(&a));
        drop(guard);
        assert_eq!(count, Arc::strong_count(&a));
        // When we delete the first one, it didn't have an Arc in it, so the ref count
        // doesn't drop
        guards.swap_remove(0);
        assert_eq!(count, Arc::strong_count(&a));
        // But new one reuses now vacant the slot and doesn't create a new Arc
        let _guard = shared.load();
        assert_eq!(count, Arc::strong_count(&a));
    }
1336}