// /rust/registry/src/index.crates.io-6f17d22bba15001f/arc-swap-1.7.1/src/lib.rs
#![doc(test(attr(deny(warnings))))]
#![warn(missing_docs)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![allow(deprecated)]
#![cfg_attr(feature = "experimental-thread-local", no_std)]
#![cfg_attr(feature = "experimental-thread-local", feature(thread_local))]

//! Making [`Arc`][Arc] itself atomic
//!
//! The [`ArcSwap`] type is a container for an `Arc` that can be changed atomically. Semantically,
//! it is similar to something like `Atomic<Arc<T>>` (if there was such a thing) or
//! `RwLock<Arc<T>>` (but without the need for the locking). It is optimized for read-mostly
//! scenarios, with consistent performance characteristics.
//!
//! # Motivation
//!
//! There are many situations in which one might want to have some data structure that is often
//! read and seldom updated. Some examples might be the configuration of a service, routing tables,
//! or a snapshot of some data that is renewed every few minutes.
//!
//! In all these cases one needs:
//! * Being able to read the current value of the data structure, fast, often and concurrently from
//!   many threads.
//! * Using the same version of the data structure over a longer period of time ‒ a query should be
//!   answered by a consistent version of the data, a packet should be routed either by an old or
//!   by a new version of the routing table but not by a combination, etc.
//! * Performing an update without disrupting the processing.
//!
//! The first idea would be to use [`RwLock<T>`][RwLock] and keep a read-lock for the whole time of
//! processing. An update would, however, pause all processing until done.
//!
//! A better option would be to have [`RwLock<Arc<T>>`][RwLock]. Then one would lock, clone the
//! [Arc] and unlock. This suffers from CPU-level contention (on the lock and on the reference
//! count of the [Arc]), which makes it relatively slow. Depending on the implementation, an update
//! may be blocked for an arbitrarily long time by a steady inflow of readers.
//!
//! ```rust
//! # use std::sync::{Arc, RwLock};
//! # use once_cell::sync::Lazy;
//! # struct RoutingTable; struct Packet; impl RoutingTable { fn route(&self, _: Packet) {} }
//! static ROUTING_TABLE: Lazy<RwLock<Arc<RoutingTable>>> = Lazy::new(|| {
//!     RwLock::new(Arc::new(RoutingTable))
//! });
//!
//! fn process_packet(packet: Packet) {
//!     let table = Arc::clone(&ROUTING_TABLE.read().unwrap());
//!     table.route(packet);
//! }
//! # fn main() { process_packet(Packet); }
//! ```
//!
//! [ArcSwap] can be used instead; it solves the above problems and has better performance
//! characteristics than the [RwLock], in both contended and non-contended scenarios.
//!
//! ```rust
//! # use arc_swap::ArcSwap;
//! # use once_cell::sync::Lazy;
//! # struct RoutingTable; struct Packet; impl RoutingTable { fn route(&self, _: Packet) {} }
//! static ROUTING_TABLE: Lazy<ArcSwap<RoutingTable>> = Lazy::new(|| {
//!     ArcSwap::from_pointee(RoutingTable)
//! });
//!
//! fn process_packet(packet: Packet) {
//!     let table = ROUTING_TABLE.load();
//!     table.route(packet);
//! }
//! # fn main() { process_packet(Packet); }
//! ```
//!
//! # Crate contents
//!
//! At the heart of the crate there are [`ArcSwap`] and [`ArcSwapOption`] types, containers for an
//! [`Arc`] and [`Option<Arc>`][Option].
//!
//! Technically, these are type aliases for partial instantiations of the [`ArcSwapAny`] type. The
//! [`ArcSwapAny`] is more flexible and allows tweaking of many things (it can store things other
//! than [`Arc`]s, and the locking [`Strategy`] can be configured). For details about the tweaking,
//! see the documentation of the [`strategy`] module and the [`RefCnt`] trait.
//!
//! The [`cache`] module provides means for speeding up read access of the contained data at the
//! cost of delayed reclamation.
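//!
//! A minimal sketch of the cached access, using the [`Cache`] type re-exported at the crate root:
//!
//! ```rust
//! # use arc_swap::ArcSwap;
//! use arc_swap::cache::Cache;
//!
//! let shared = ArcSwap::from_pointee(42);
//! // The cache keeps a copy of the last seen value, so repeated loads through it
//! // are very cheap, at the cost of holding onto the old value until refreshed.
//! let mut cache = Cache::new(&shared);
//! assert_eq!(42, **cache.load());
//! ```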
//!
//! The [`access`] module can be used to do projections into the contained data to separate parts
//! of an application from each other (eg. giving a component access to only its own part of the
//! configuration while still having it reloaded as a whole).
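//!
//! A small sketch of such a projection (the `Cfg` structure here is a stand-in for a real
//! configuration type):
//!
//! ```rust
//! # use std::sync::Arc;
//! # use arc_swap::ArcSwap;
//! use arc_swap::access::{Access, Map};
//!
//! struct Cfg {
//!     value: usize,
//! }
//!
//! let shared = Arc::new(ArcSwap::from_pointee(Cfg { value: 42 }));
//! // A component can hold this projection without knowing about the whole Cfg.
//! let projected = Map::new(Arc::clone(&shared), |c: &Cfg| &c.value);
//! assert_eq!(42, *projected.load());
//! ```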
//!
//! # Before using
//!
//! The data structure is a bit niche. Before using, please check the
//! [limitations and common pitfalls][docs::limitations] and the [performance
//! characteristics][docs::performance], including choosing the right [read
//! operation][docs::performance#read-operations].
//!
//! You can also get inspiration about what's possible in the [common patterns][docs::patterns]
//! section.
//!
//! # Examples
//!
//! ```rust
//! use std::sync::Arc;
//!
//! use arc_swap::ArcSwap;
//! use crossbeam_utils::thread;
//!
//! let config = ArcSwap::from(Arc::new(String::default()));
//! thread::scope(|scope| {
//!     scope.spawn(|_| {
//!         let new_conf = Arc::new("New configuration".to_owned());
//!         config.store(new_conf);
//!     });
//!     for _ in 0..10 {
//!         scope.spawn(|_| {
//!             loop {
//!                 let cfg = config.load();
//!                 if !cfg.is_empty() {
//!                     assert_eq!(**cfg, "New configuration");
//!                     return;
//!                 }
//!             }
//!         });
//!     }
//! }).unwrap();
//! ```
//!
//! [RwLock]: https://doc.rust-lang.org/std/sync/struct.RwLock.html

#[allow(unused_imports)]
#[macro_use]
extern crate alloc;

pub mod access;
mod as_raw;
pub mod cache;
mod compile_fail_tests;
mod debt;
pub mod docs;
mod ref_cnt;
#[cfg(feature = "serde")]
mod serde;
pub mod strategy;
#[cfg(feature = "weak")]
mod weak;

use core::borrow::Borrow;
use core::fmt::{Debug, Display, Formatter, Result as FmtResult};
use core::marker::PhantomData;
use core::mem;
use core::ops::Deref;
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

use alloc::sync::Arc;

use crate::access::{Access, Map};
pub use crate::as_raw::AsRaw;
pub use crate::cache::Cache;
pub use crate::ref_cnt::RefCnt;
use crate::strategy::hybrid::{DefaultConfig, HybridStrategy};
use crate::strategy::sealed::Protected;
use crate::strategy::{CaS, Strategy};
pub use crate::strategy::{DefaultStrategy, IndependentStrategy};

/// A temporary storage of the pointer.
///
/// This guard object is returned from most loading methods (with the notable exception of
/// [`load_full`](struct.ArcSwapAny.html#method.load_full)). It dereferences to the smart pointer
/// loaded, so most operations are to be done using that.
pub struct Guard<T: RefCnt, S: Strategy<T> = DefaultStrategy> {
    inner: S::Protected,
}

impl<T: RefCnt, S: Strategy<T>> Guard<T, S> {
    /// Converts it into the held value.
    ///
    /// This, on occasion, may be a tiny bit faster than cloning the Arc or whatever is being held
    /// inside.
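    ///
    /// # Example
    ///
    /// A short sketch of pulling the full value out of a guard:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::{ArcSwap, Guard};
    /// let shared = ArcSwap::from_pointee(42);
    /// // The guard is consumed and a full-featured Arc comes out.
    /// let value: Arc<i32> = Guard::into_inner(shared.load());
    /// assert_eq!(42, *value);
    /// ```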
    // Associated function on purpose, because of deref
    #[allow(clippy::wrong_self_convention)]
    #[inline]
    pub fn into_inner(lease: Self) -> T {
        lease.inner.into_inner()
    }

    /// Create a guard for a given value `inner`.
    ///
    /// This can be useful on occasion to pass a specific object to code that expects or
    /// wants to store a Guard.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use arc_swap::{ArcSwap, DefaultStrategy, Guard};
    /// # use std::sync::Arc;
    /// # let p = ArcSwap::from_pointee(42);
    /// // Create two guards pointing to the same object
    /// let g1 = p.load();
    /// let g2 = Guard::<_, DefaultStrategy>::from_inner(Arc::clone(&*g1));
    /// # drop(g2);
    /// ```
    pub fn from_inner(inner: T) -> Self {
        Guard {
            inner: S::Protected::from_inner(inner),
        }
    }
}

impl<T: RefCnt, S: Strategy<T>> Deref for Guard<T, S> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        self.inner.borrow()
    }
}

impl<T: RefCnt, S: Strategy<T>> From<T> for Guard<T, S> {
    fn from(inner: T) -> Self {
        Self::from_inner(inner)
    }
}

impl<T: Default + RefCnt, S: Strategy<T>> Default for Guard<T, S> {
    fn default() -> Self {
        Self::from(T::default())
    }
}

impl<T: Debug + RefCnt, S: Strategy<T>> Debug for Guard<T, S> {
    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
        self.deref().fmt(formatter)
    }
}

impl<T: Display + RefCnt, S: Strategy<T>> Display for Guard<T, S> {
    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
        self.deref().fmt(formatter)
    }
}

/// Comparison of two pointer-like things.
// A and B are likely to *be* references, or thin wrappers around them. Calling this with an extra
// reference is just annoying.
#[allow(clippy::needless_pass_by_value)]
fn ptr_eq<Base, A, B>(a: A, b: B) -> bool
where
    A: AsRaw<Base>,
    B: AsRaw<Base>,
{
    let a = a.as_raw();
    let b = b.as_raw();
    ptr::eq(a, b)
}

/// An atomic storage for a reference counted smart pointer like [`Arc`] or `Option<Arc>`.
///
/// This is a storage where a smart pointer may live. It can be read and written atomically from
/// several threads, but doesn't act like a pointer itself.
///
/// One can be created [`from`] an [`Arc`]. To get the pointer back, use the
/// [`load`](#method.load) method.
///
/// # Note
///
/// This is the common generic implementation. This allows sharing the same code for storing
/// both `Arc` and `Option<Arc>` (and possibly other similar types).
///
/// In your code, you most probably want to interact with it through the
/// [`ArcSwap`](type.ArcSwap.html) and [`ArcSwapOption`](type.ArcSwapOption.html) aliases. However,
/// the methods they share are described here and are applicable to both of them. That's why the
/// examples here use `ArcSwap` ‒ but they could as well be written with `ArcSwapOption` or
/// `ArcSwapAny`.
///
/// # Type parameters
///
/// * `T`: The smart pointer to be kept inside. This crate provides implementations for `Arc<_>`
///   and `Option<Arc<_>>` (`Rc` too, but that one is not practically useful). Third parties could
///   provide implementations of the [`RefCnt`] trait and plug in others.
/// * `S`: Chooses the [strategy] used to protect the data inside. The strategies come with various
///   performance trade-offs; the default [`DefaultStrategy`] is a good rule of thumb for most use
///   cases.
///
/// # Examples
///
/// ```rust
/// # use std::sync::Arc;
/// # use arc_swap::ArcSwap;
/// let arc = Arc::new(42);
/// let arc_swap = ArcSwap::from(arc);
/// assert_eq!(42, **arc_swap.load());
/// // It can be read multiple times
/// assert_eq!(42, **arc_swap.load());
///
/// // Put a new one in there
/// let new_arc = Arc::new(0);
/// assert_eq!(42, *arc_swap.swap(new_arc));
/// assert_eq!(0, **arc_swap.load());
/// ```
///
/// # Known bugs
///
/// Currently, things like `ArcSwapAny<Option<Option<Arc<_>>>>` (notice the double `Option`) don't
/// work properly. A proper solution is being looked into
/// ([#81](https://github.com/vorner/arc-swap/issues/81)).
///
/// [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
/// [`from`]: https://doc.rust-lang.org/nightly/std/convert/trait.From.html#tymethod.from
/// [`RefCnt`]: trait.RefCnt.html
pub struct ArcSwapAny<T: RefCnt, S: Strategy<T> = DefaultStrategy> {
    // Notes: AtomicPtr needs Sized
    /// The actual pointer, extracted from the Arc.
    ptr: AtomicPtr<T::Base>,

    /// We are basically an Arc in disguise. Inherit parameters from Arc by pretending to contain
    /// it.
    _phantom_arc: PhantomData<T>,

    /// Strategy to protect the data.
    strategy: S,
}

impl<T: RefCnt, S: Default + Strategy<T>> From<T> for ArcSwapAny<T, S> {
    fn from(val: T) -> Self {
        Self::with_strategy(val, S::default())
    }
}

impl<T: RefCnt, S: Strategy<T>> Drop for ArcSwapAny<T, S> {
    fn drop(&mut self) {
        let ptr = *self.ptr.get_mut();
        unsafe {
            // To pay any possible debts
            self.strategy.wait_for_readers(ptr, &self.ptr);
            // We are getting rid of the one stored ref count
            T::dec(ptr);
        }
    }
}

impl<T, S: Strategy<T>> Debug for ArcSwapAny<T, S>
where
    T: Debug + RefCnt,
{
    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
        formatter
            .debug_tuple("ArcSwapAny")
            .field(&self.load())
            .finish()
    }
}

impl<T, S: Strategy<T>> Display for ArcSwapAny<T, S>
where
    T: Display + RefCnt,
{
    fn fmt(&self, formatter: &mut Formatter) -> FmtResult {
        self.load().fmt(formatter)
    }
}

impl<T: RefCnt + Default, S: Default + Strategy<T>> Default for ArcSwapAny<T, S> {
    fn default() -> Self {
        Self::new(T::default())
    }
}

impl<T: RefCnt, S: Strategy<T>> ArcSwapAny<T, S> {
    /// Constructs a new storage.
    pub fn new(val: T) -> Self
    where
        S: Default,
    {
        Self::from(val)
    }

    /// Constructs a new storage while customizing the protection strategy.
    pub fn with_strategy(val: T, strategy: S) -> Self {
        // The AtomicPtr requires *mut in its interface. We are more like *const, so we cast it.
        // However, we always go back to *const right away when we get the pointer on the other
        // side, so it should be fine.
        let ptr = T::into_ptr(val);
        Self {
            ptr: AtomicPtr::new(ptr),
            _phantom_arc: PhantomData,
            strategy,
        }
    }

    /// Extracts the value inside.
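    ///
    /// A short sketch; consuming the storage hands back the stored pointer:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::ArcSwap;
    /// let shared = ArcSwap::from_pointee(42);
    /// let value: Arc<i32> = shared.into_inner();
    /// assert_eq!(42, *value);
    /// ```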
    pub fn into_inner(mut self) -> T {
        let ptr = *self.ptr.get_mut();
        // To pay all the debts
        unsafe { self.strategy.wait_for_readers(ptr, &self.ptr) };
        mem::forget(self);
        unsafe { T::from_ptr(ptr) }
    }

    /// Loads the value.
    ///
    /// This makes another copy of the held pointer and returns it, atomically (it is
    /// safe even when another thread stores into the same instance at the same time).
    ///
    /// The method is lock-free and wait-free, but usually more expensive than
    /// [`load`](#method.load).
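    ///
    /// A short sketch; unlike a [`Guard`], the returned value can outlive the storage:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::ArcSwap;
    /// let shared = ArcSwap::from_pointee(42);
    /// let value: Arc<i32> = shared.load_full();
    /// drop(shared);
    /// assert_eq!(42, *value);
    /// ```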
    pub fn load_full(&self) -> T {
        Guard::into_inner(self.load())
    }

    /// Provides a temporary borrow of the object inside.
    ///
    /// This returns a proxy object allowing access to the thing held inside. However, there's
    /// only a limited number of possible cheap proxies in existence for each thread ‒ if more are
    /// created, it falls back to the equivalent of [`load_full`](#method.load_full) internally.
    ///
    /// This is therefore a good choice to use for eg. searching a data structure or juggling the
    /// pointers around a bit, but not as something to store in larger amounts. The rule of thumb
    /// is that this is suited for local variables on the stack, but not for long-living data
    /// structures.
    ///
    /// # Consistency
    ///
    /// In case multiple related operations are to be done on the loaded value, it is generally
    /// recommended to call `load` just once and keep the result, rather than calling it multiple
    /// times. First, keeping it is usually faster. But more importantly, the value can change
    /// between the calls to `load`, returning different objects, which could lead to logical
    /// inconsistency. Keeping the result makes sure the same object is used.
    ///
    /// ```rust
    /// # use arc_swap::ArcSwap;
    /// struct Point {
    ///     x: usize,
    ///     y: usize,
    /// }
    ///
    /// fn print_broken(p: &ArcSwap<Point>) {
    ///     // This is broken, because the x and y may come from different points,
    ///     // combining into an invalid point that never existed.
    ///     println!("X: {}", p.load().x);
    ///     // If someone changes the content now, between these two loads, we
    ///     // have a problem
    ///     println!("Y: {}", p.load().y);
    /// }
    ///
    /// fn print_correct(p: &ArcSwap<Point>) {
    ///     // Here we take a snapshot of one specific point so both x and y come
    ///     // from the same one.
    ///     let point = p.load();
    ///     println!("X: {}", point.x);
    ///     println!("Y: {}", point.y);
    /// }
    /// # let p = ArcSwap::from_pointee(Point { x: 10, y: 20 });
    /// # print_correct(&p);
    /// # print_broken(&p);
    /// ```
    #[inline]
    pub fn load(&self) -> Guard<T, S> {
        let protected = unsafe { self.strategy.load(&self.ptr) };
        Guard { inner: protected }
    }

    /// Replaces the value inside this instance.
    ///
    /// Further loads will yield the new value. Uses [`swap`](#method.swap) internally.
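    ///
    /// A short sketch:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::ArcSwap;
    /// let shared = ArcSwap::from_pointee(1);
    /// shared.store(Arc::new(2));
    /// assert_eq!(2, **shared.load());
    /// ```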
    pub fn store(&self, val: T) {
        drop(self.swap(val));
    }

    /// Exchanges the value inside this instance.
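    ///
    /// A short sketch; the previous value is handed back to the caller:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::ArcSwap;
    /// let shared = ArcSwap::from_pointee(1);
    /// let old = shared.swap(Arc::new(2));
    /// assert_eq!(1, *old);
    /// assert_eq!(2, **shared.load());
    /// ```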
    pub fn swap(&self, new: T) -> T {
        let new = T::into_ptr(new);
        // AcqRel needed to publish the target of the new pointer and get the target of the old
        // one.
        //
        // SeqCst to synchronize the time lines with the group counters.
        let old = self.ptr.swap(new, Ordering::SeqCst);
        unsafe {
            self.strategy.wait_for_readers(old, &self.ptr);
            T::from_ptr(old)
        }
    }

    /// Swaps the stored Arc if it is equal to `current`.
    ///
    /// If the current value of the `ArcSwapAny` is equal to `current`, the `new` is stored inside.
    /// If not, nothing happens.
    ///
    /// The previous value (no matter if the swap happened or not) is returned. Therefore, if the
    /// returned value is equal to `current`, the swap happened. You want to do a pointer-based
    /// comparison to determine that.
    ///
    /// In other words, if the caller „guesses“ the value of current correctly, it acts like
    /// [`swap`](#method.swap), otherwise it acts like [`load_full`](#method.load_full) (including
    /// the limitations).
    ///
    /// The `current` can be specified as `&Arc`, [`Guard`](struct.Guard.html),
    /// [`&Guards`](struct.Guards.html) or as a raw pointer (but _not_ an owned `Arc`). See the
    /// [`AsRaw`] trait.
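    ///
    /// A short sketch of the pointer-based comparison of the result:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::ArcSwap;
    /// let shared = ArcSwap::from_pointee(17);
    /// let cur = shared.load();
    /// let prev = shared.compare_and_swap(&*cur, Arc::new(42));
    /// // The swap happened exactly when the returned (previous) value is the one we
    /// // passed as `current`.
    /// assert!(Arc::ptr_eq(&*cur, &*prev));
    /// assert_eq!(42, **shared.load());
    /// ```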
    pub fn compare_and_swap<C>(&self, current: C, new: T) -> Guard<T, S>
    where
        C: AsRaw<T::Base>,
        S: CaS<T>,
    {
        let protected = unsafe { self.strategy.compare_and_swap(&self.ptr, current, new) };
        Guard { inner: protected }
    }

    /// Read-Copy-Update of the pointer inside.
    ///
    /// This is useful in read-heavy situations with several threads that sometimes update the data
    /// pointed to. The readers can just repeatedly use [`load`](#method.load) without any locking.
    /// The writer uses this method to perform the update.
    ///
    /// In case there's only one thread that does updates or in case the next version is
    /// independent of the previous one, a simple [`swap`](#method.swap) or [`store`](#method.store)
    /// is enough. Otherwise, the update operation may need to be retried if some other thread made
    /// an update in between. This is what this method does.
    ///
    /// # Examples
    ///
    /// This will *not* work as expected, because between loading and storing, some other thread
    /// might have updated the value.
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// #
    /// # use arc_swap::ArcSwap;
    /// # use crossbeam_utils::thread;
    /// #
    /// let cnt = ArcSwap::from_pointee(0);
    /// thread::scope(|scope| {
    ///     for _ in 0..10 {
    ///         scope.spawn(|_| {
    ///             let inner = cnt.load_full();
    ///             // Another thread might have stored some other number than what we have
    ///             // between the load and store.
    ///             cnt.store(Arc::new(*inner + 1));
    ///         });
    ///     }
    /// }).unwrap();
    /// // This will likely fail:
    /// // assert_eq!(10, *cnt.load_full());
    /// ```
    ///
    /// This will, but it can call the closure multiple times to retry:
    ///
    /// ```rust
    /// # use arc_swap::ArcSwap;
    /// # use crossbeam_utils::thread;
    /// #
    /// let cnt = ArcSwap::from_pointee(0);
    /// thread::scope(|scope| {
    ///     for _ in 0..10 {
    ///         scope.spawn(|_| cnt.rcu(|inner| **inner + 1));
    ///     }
    /// }).unwrap();
    /// assert_eq!(10, *cnt.load_full());
    /// ```
    ///
    /// Due to the retries, you might want to perform all the expensive operations *before* the
    /// rcu. As an example, if there's a cache of some computations as a map, and the map is cheap
    /// to clone but the computations are not, you could do something like this:
    ///
    /// ```rust
    /// # use std::collections::HashMap;
    /// #
    /// # use arc_swap::ArcSwap;
    /// # use once_cell::sync::Lazy;
    /// #
    /// fn expensive_computation(x: usize) -> usize {
    ///     x * 2 // Let's pretend multiplication is *really expensive*
    /// }
    ///
    /// type Cache = HashMap<usize, usize>;
    ///
    /// static CACHE: Lazy<ArcSwap<Cache>> = Lazy::new(|| ArcSwap::default());
    ///
    /// fn cached_computation(x: usize) -> usize {
    ///     let cache = CACHE.load();
    ///     if let Some(result) = cache.get(&x) {
    ///         return *result;
    ///     }
    ///     // Not in cache. Compute and store.
    ///     // The expensive computation goes outside, so it is not retried.
    ///     let result = expensive_computation(x);
    ///     CACHE.rcu(|cache| {
    ///         // The cheaper clone of the cache can be retried if need be.
    ///         let mut cache = HashMap::clone(&cache);
    ///         cache.insert(x, result);
    ///         cache
    ///     });
    ///     result
    /// }
    ///
    /// assert_eq!(42, cached_computation(21));
    /// assert_eq!(42, cached_computation(21));
    /// ```
    ///
    /// # The cost of cloning
    ///
    /// Depending on the size of the cache above, the cloning might not be as cheap. You can,
    /// however, use persistent data structures ‒ each modification creates a new data structure,
    /// but it shares most of the data with the old one (which is usually accomplished by using
    /// `Arc`s inside to share the unchanged values). Something like
    /// [`rpds`](https://crates.io/crates/rpds) or [`im`](https://crates.io/crates/im) might do
    /// what you need.
    pub fn rcu<R, F>(&self, mut f: F) -> T
    where
        F: FnMut(&T) -> R,
        R: Into<T>,
        S: CaS<T>,
    {
        let mut cur = self.load();
        loop {
            let new = f(&cur).into();
            let prev = self.compare_and_swap(&*cur, new);
            let swapped = ptr_eq(&*cur, &*prev);
            if swapped {
                return Guard::into_inner(prev);
            } else {
                cur = prev;
            }
        }
    }

    /// Provides access to an up-to-date projection of the carried data.
    ///
    /// # Motivation
    ///
    /// Sometimes, an application consists of components. Each component has its own configuration
    /// structure. The whole configuration contains all the smaller config parts.
    ///
    /// For the sake of separation and abstraction, it is not desirable to pass the whole
    /// configuration to each of the components. This method allows a component to get access only
    /// to its own part.
    ///
    /// # Lifetimes & flexibility
    ///
    /// This method is not the most flexible way, as the returned type borrows into the `ArcSwap`.
    /// To provide access into eg. `Arc<ArcSwap<T>>`, you can create the [`Map`] type directly. See
    /// the [`access`] module.
    ///
    /// # Performance
    ///
    /// As the provided function is called on each load from the shared storage, it should
    /// generally be cheap. It is expected this will usually be just referencing of a field inside
    /// the structure.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::Arc;
    ///
    /// use arc_swap::ArcSwap;
    /// use arc_swap::access::Access;
    ///
    /// struct Cfg {
    ///     value: usize,
    /// }
    ///
    /// fn print_many_times<V: Access<usize>>(value: V) {
    ///     for _ in 0..25 {
    ///         let value = value.load();
    ///         println!("{}", *value);
    ///     }
    /// }
    ///
    /// let shared = ArcSwap::from_pointee(Cfg { value: 0 });
    /// let mapped = shared.map(|c: &Cfg| &c.value);
    /// crossbeam_utils::thread::scope(|s| {
    ///     // Will print some zeroes and some twos
    ///     s.spawn(|_| print_many_times(mapped));
    ///     s.spawn(|_| shared.store(Arc::new(Cfg { value: 2 })));
    /// }).expect("Something panicked in a thread");
    /// ```
    pub fn map<I, R, F>(&self, f: F) -> Map<&Self, I, F>
    where
        F: Fn(&I) -> &R + Clone,
        Self: Access<I>,
    {
        Map::new(self, f)
    }
}

/// An atomic storage for `Arc`.
///
/// This is a type alias only. Most of its methods are described on
/// [`ArcSwapAny`](struct.ArcSwapAny.html).
pub type ArcSwap<T> = ArcSwapAny<Arc<T>>;

impl<T, S: Strategy<Arc<T>>> ArcSwapAny<Arc<T>, S> {
    /// A convenience constructor directly from the pointed-to value.
    ///
    /// Direct equivalent for `ArcSwap::new(Arc::new(val))`.
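    ///
    /// A short sketch:
    ///
    /// ```rust
    /// # use arc_swap::ArcSwap;
    /// let shared = ArcSwap::from_pointee(42);
    /// assert_eq!(42, **shared.load());
    /// ```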
    pub fn from_pointee(val: T) -> Self
    where
        S: Default,
    {
        Self::from(Arc::new(val))
    }
}
698 | | |
699 | | /// An atomic storage for `Option<Arc>`. |
700 | | /// |
701 | | /// This is very similar to [`ArcSwap`](type.ArcSwap.html), but allows storing NULL values, which |
702 | | /// is useful in some situations. |
703 | | /// |
704 | | /// This is a type alias only. Most of the methods are described on |
705 | | /// [`ArcSwapAny`](struct.ArcSwapAny.html). Even though the examples there often use `ArcSwap`, |
706 | | /// they are applicable to `ArcSwapOption` with appropriate changes. |
707 | | /// |
708 | | /// # Examples |
709 | | /// |
710 | | /// ``` |
711 | | /// use std::sync::Arc; |
712 | | /// use arc_swap::ArcSwapOption; |
713 | | /// |
714 | | /// let shared = ArcSwapOption::from(None); |
715 | | /// assert!(shared.load_full().is_none()); |
716 | | /// assert!(shared.swap(Some(Arc::new(42))).is_none()); |
717 | | /// assert_eq!(42, **shared.load_full().as_ref().unwrap()); |
718 | | /// ``` |
719 | | pub type ArcSwapOption<T> = ArcSwapAny<Option<Arc<T>>>; |

impl<T, S: Strategy<Option<Arc<T>>>> ArcSwapAny<Option<Arc<T>>, S> {
    /// A convenience constructor directly from a pointed-to value.
    ///
    /// This just allocates the `Arc` under the hood.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use arc_swap::ArcSwapOption;
    ///
    /// let empty: ArcSwapOption<usize> = ArcSwapOption::from_pointee(None);
    /// assert!(empty.load().is_none());
    /// let non_empty: ArcSwapOption<usize> = ArcSwapOption::from_pointee(42);
    /// assert_eq!(42, **non_empty.load().as_ref().unwrap());
    /// ```
    pub fn from_pointee<V: Into<Option<T>>>(val: V) -> Self
    where
        S: Default,
    {
        Self::new(val.into().map(Arc::new))
    }

    /// A convenience constructor for an empty value.
    ///
    /// This is equivalent to `ArcSwapOption::new(None)`.
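    ///
    /// # Examples
    ///
    /// A short sketch:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::ArcSwapOption;
    /// let shared: ArcSwapOption<usize> = ArcSwapOption::empty();
    /// assert!(shared.load().is_none());
    /// shared.store(Some(Arc::new(42)));
    /// assert_eq!(42, **shared.load().as_ref().unwrap());
    /// ```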
    pub fn empty() -> Self
    where
        S: Default,
    {
        Self::new(None)
    }
}

impl<T> ArcSwapOption<T> {
    /// A const-fn equivalent of [empty].
    ///
    /// Just like [empty], this creates a `None`-holding `ArcSwapOption`. The [empty] is, however,
    /// more general ‒ this is available only for the default strategy, while [empty] is for any
    /// [Default]-constructible strategy (current or future one).
    ///
    /// [empty]: ArcSwapAny::empty
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use arc_swap::ArcSwapOption;
    /// static GLOBAL_DATA: ArcSwapOption<usize> = ArcSwapOption::const_empty();
    ///
    /// assert!(GLOBAL_DATA.load().is_none());
    /// GLOBAL_DATA.store(Some(Arc::new(42)));
    /// assert_eq!(42, **GLOBAL_DATA.load().as_ref().unwrap());
    /// ```
    pub const fn const_empty() -> Self {
        Self {
            ptr: AtomicPtr::new(ptr::null_mut()),
            _phantom_arc: PhantomData,
            strategy: HybridStrategy {
                _config: DefaultConfig,
            },
        }
    }
}

/// An atomic storage that doesn't share the internal generation locks with others.
///
/// This makes it bigger and it also might suffer contention (on the HW level) if used from many
/// threads at once. On the other hand, it can't block writes in other instances.
///
/// See the [`IndependentStrategy`] for further details.
// Being phased out. Will deprecate once we verify in production that the new strategy works fine.
#[doc(hidden)]
pub type IndependentArcSwap<T> = ArcSwapAny<Arc<T>, IndependentStrategy>;

/// Arc swap for the [Weak] pointer.
///
/// This is similar to [ArcSwap], but it stores [Weak] instead of [Arc]. It therefore doesn't keep
/// the pointed-to data alive.
///
/// This is a type alias only. Most of the methods are described on the
/// [`ArcSwapAny`](struct.ArcSwapAny.html).
///
/// Needs the `weak` feature turned on.
///
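/// # Examples
///
/// A minimal sketch (requires the `weak` feature):
///
/// ```rust
/// # use std::sync::Arc;
/// # use arc_swap::ArcSwapWeak;
/// let value = Arc::new(42);
/// let shared = ArcSwapWeak::new(Arc::downgrade(&value));
/// // The weak pointer upgrades while the value is alive...
/// assert_eq!(42, *shared.load().upgrade().unwrap());
/// drop(value);
/// // ...but the storage itself doesn't keep the value alive.
/// assert!(shared.load().upgrade().is_none());
/// ```
///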
/// [Weak]: std::sync::Weak
#[cfg(feature = "weak")]
pub type ArcSwapWeak<T> = ArcSwapAny<alloc::sync::Weak<T>>;

macro_rules! t {
    ($name: ident, $strategy: ty) => {
        #[cfg(test)]
        mod $name {
            use alloc::borrow::ToOwned;
            use alloc::string::String;
            use alloc::vec::Vec;
            use core::sync::atomic::{self, AtomicUsize};

            use adaptive_barrier::{Barrier, PanicMode};
            use crossbeam_utils::thread;

            use super::*;

            const ITERATIONS: usize = 10;

            #[allow(deprecated)] // We use "deprecated" testing strategies in here.
            type As<T> = ArcSwapAny<Arc<T>, $strategy>;
            #[allow(deprecated)] // We use "deprecated" testing strategies in here.
            type Aso<T> = ArcSwapAny<Option<Arc<T>>, $strategy>;

            /// Similar to the one in doc tests of the lib, but more times and more intensive (we
            /// want to torture it a bit).
            #[test]
            #[cfg_attr(miri, ignore)] // Takes like 1 or 2 infinities to run under miri
            fn publish() {
                const READERS: usize = 2;
                for _ in 0..ITERATIONS {
                    let config = As::<String>::default();
                    let ended = AtomicUsize::new(0);
                    thread::scope(|scope| {
                        for _ in 0..READERS {
                            scope.spawn(|_| loop {
                                let cfg = config.load_full();
                                if !cfg.is_empty() {
                                    assert_eq!(*cfg, "New configuration");
                                    ended.fetch_add(1, Ordering::Relaxed);
                                    return;
                                }
                                atomic::spin_loop_hint();
                            });
                        }
                        scope.spawn(|_| {
                            let new_conf = Arc::new("New configuration".to_owned());
                            config.store(new_conf);
                        });
                    })
                    .unwrap();
                    assert_eq!(READERS, ended.load(Ordering::Relaxed));
                    let arc = config.load_full();
                    assert_eq!(2, Arc::strong_count(&arc));
                    assert_eq!(0, Arc::weak_count(&arc));
                }
            }

            /// Similar to the doc tests of ArcSwap, but happens more times.
            #[test]
            fn swap_load() {
                for _ in 0..100 {
                    let arc = Arc::new(42);
                    let arc_swap = As::from(Arc::clone(&arc));
                    assert_eq!(42, **arc_swap.load());
                    // It can be read multiple times
                    assert_eq!(42, **arc_swap.load());

                    // Put a new one in there
                    let new_arc = Arc::new(0);
                    assert_eq!(42, *arc_swap.swap(Arc::clone(&new_arc)));
                    assert_eq!(0, **arc_swap.load());
                    // One loaded here, one in the arc_swap, one in new_arc
                    let loaded = arc_swap.load_full();
                    assert_eq!(3, Arc::strong_count(&loaded));
                    assert_eq!(0, Arc::weak_count(&loaded));
                    // The original got released from the arc_swap
                    assert_eq!(1, Arc::strong_count(&arc));
                    assert_eq!(0, Arc::weak_count(&arc));
                }
            }

            /// Two different writers publish two series of values. The readers check that each
            /// series is always increasing.
            ///
            /// For performance, we try to reuse the threads here.
            #[test]
            fn multi_writers() {
                let first_value = Arc::new((0, 0));
                let shared = As::from(Arc::clone(&first_value));
                const WRITER_CNT: usize = 2;
                const READER_CNT: usize = 3;
                #[cfg(miri)]
                const ITERATIONS: usize = 5;
                #[cfg(not(miri))]
                const ITERATIONS: usize = 100;
                const SEQ: usize = 50;
                let barrier = Barrier::new(PanicMode::Poison);
                thread::scope(|scope| {
                    for w in 0..WRITER_CNT {
                        // We need to move w into the closure. But we want to just reference the
                        // other things.
                        let mut barrier = barrier.clone();
                        let shared = &shared;
                        let first_value = &first_value;
                        scope.spawn(move |_| {
                            for _ in 0..ITERATIONS {
                                barrier.wait();
                                shared.store(Arc::clone(&first_value));
                                barrier.wait();
                                for i in 0..SEQ {
                                    shared.store(Arc::new((w, i + 1)));
                                }
                            }
                        });
                    }
                    for _ in 0..READER_CNT {
                        let mut barrier = barrier.clone();
                        let shared = &shared;
                        let first_value = &first_value;
                        scope.spawn(move |_| {
                            for _ in 0..ITERATIONS {
                                barrier.wait();
                                barrier.wait();
                                let mut previous = [0; WRITER_CNT];
                                let mut last = Arc::clone(&first_value);
                                loop {
                                    let cur = shared.load();
                                    if Arc::ptr_eq(&last, &cur) {
                                        atomic::spin_loop_hint();
                                        continue;
                                    }
                                    let (w, s) = **cur;
                                    assert!(previous[w] < s, "{:?} vs {:?}", previous, cur);
                                    previous[w] = s;
                                    last = Guard::into_inner(cur);
                                    if s == SEQ {
                                        break;
                                    }
                                }
                            }
                        });
                    }

                    drop(barrier);
                })
                .unwrap();
            }

            #[test]
            fn load_null() {
                let shared = Aso::<usize>::default();
                let guard = shared.load();
                assert!(guard.is_none());
                shared.store(Some(Arc::new(42)));
                assert_eq!(42, **shared.load().as_ref().unwrap());
            }

            #[test]
            fn from_into() {
                let a = Arc::new(42);
                let shared = As::new(a);
                let guard = shared.load();
                let a = shared.into_inner();
                assert_eq!(42, *a);
                assert_eq!(2, Arc::strong_count(&a));
                drop(guard);
                assert_eq!(1, Arc::strong_count(&a));
            }

            // Note on the Relaxed order here. This should be enough, because there's that
            // barrier.wait in between that should do the synchronization of happens-before for us.
            // And using SeqCst would probably not help either, as there's nothing else with SeqCst
            // here in this test to relate it to.
            #[derive(Default)]
            struct ReportDrop(Arc<AtomicUsize>);
            impl Drop for ReportDrop {
                fn drop(&mut self) {
                    self.0.fetch_add(1, Ordering::Relaxed);
                }
            }

            /// Interaction of two threads about a guard and dropping it.
            ///
            /// We make sure everything works in a timely manner (eg. dropping of stuff) even if
            /// multiple threads interact.
            ///
            /// The idea is:
            /// * Thread 1 loads a value.
            /// * Thread 2 replaces the shared value. The original value is not destroyed.
            /// * Thread 1 drops the guard. The value is destroyed and this is observable in both
            ///   threads.
            #[test]
            fn guard_drop_in_thread() {
                for _ in 0..ITERATIONS {
                    let cnt = Arc::new(AtomicUsize::new(0));

                    let shared = As::from_pointee(ReportDrop(cnt.clone()));
                    assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely");
                    // We need the threads to wait for each other at places.
                    let sync = Barrier::new(PanicMode::Poison);

                    thread::scope(|scope| {
                        scope.spawn({
                            let sync = sync.clone();
                            |_| {
                                let mut sync = sync; // Move into the closure
                                let guard = shared.load();
                                sync.wait();
                                // Thread 2 replaces the shared value. We wait for it to confirm.
                                sync.wait();
                                drop(guard);
                                assert_eq!(cnt.load(Ordering::Relaxed), 1, "Value not dropped");
                                // Let thread 2 know we already dropped it.
                                sync.wait();
                            }
                        });

                        scope.spawn(|_| {
                            let mut sync = sync;
                            // Thread 1 loads, we wait for that
                            sync.wait();
                            shared.store(Default::default());
                            assert_eq!(
                                cnt.load(Ordering::Relaxed),
                                0,
                                "Dropped while still in use"
                            );
                            // Let thread 1 know we replaced it
                            sync.wait();
                            // Thread 1 drops its guard. We wait for it to confirm.
                            sync.wait();
                            assert_eq!(cnt.load(Ordering::Relaxed), 1, "Value not dropped");
                        });
                    })
                    .unwrap();
                }
            }

            /// Check that dropping a lease in a different thread than the one it was created in
            /// doesn't cause any problems.
1046 | | #[test] |
1047 | | fn guard_drop_in_another_thread() { |
1048 | | for _ in 0..ITERATIONS { |
1049 | | let cnt = Arc::new(AtomicUsize::new(0)); |
1050 | | let shared = As::from_pointee(ReportDrop(cnt.clone())); |
1051 | | assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely"); |
1052 | | let guard = shared.load(); |
1053 | | |
1054 | | drop(shared); |
1055 | | assert_eq!(cnt.load(Ordering::Relaxed), 0, "Dropped prematurely"); |
1056 | | |
1057 | | thread::scope(|scope| { |
1058 | | scope.spawn(|_| { |
1059 | | drop(guard); |
1060 | | }); |
1061 | | }) |
1062 | | .unwrap(); |
1063 | | |
1064 | | assert_eq!(cnt.load(Ordering::Relaxed), 1, "Not dropped"); |
1065 | | } |
1066 | | } |
1067 | | |
1068 | | #[test] |
1069 | | fn load_option() { |
1070 | | let shared = Aso::from_pointee(42); |
1071 | | // The type here is not needed in real code, it's just an additional check that the type matches.
1072 | | let opt: Option<_> = Guard::into_inner(shared.load()); |
1073 | | assert_eq!(42, *opt.unwrap()); |
1074 | | |
1075 | | shared.store(None); |
1076 | | assert!(shared.load().is_none()); |
1077 | | } |
1078 | | |
1079 | | // Check that stuff can get formatted
1080 | | #[test] |
1081 | | fn debug_impl() { |
1082 | | let shared = As::from_pointee(42); |
1083 | | assert_eq!("ArcSwapAny(42)", &format!("{:?}", shared)); |
1084 | | assert_eq!("42", &format!("{:?}", shared.load())); |
1085 | | } |
1086 | | |
1087 | | #[test] |
1088 | | fn display_impl() { |
1089 | | let shared = As::from_pointee(42); |
1090 | | assert_eq!("42", &format!("{}", shared)); |
1091 | | assert_eq!("42", &format!("{}", shared.load())); |
1092 | | } |
1093 | | |
1094 | | // The following "tests" are not run, only compiled. They check that things that should be |
1095 | | // Send/Sync actually are. |
1096 | | fn _check_stuff_is_send_sync() { |
1097 | | let shared = As::from_pointee(42); |
1098 | | let moved = As::from_pointee(42); |
1099 | | let shared_ref = &shared; |
1100 | | let lease = shared.load(); |
1101 | | let lease_ref = &lease; |
1102 | | let lease = shared.load(); |
1103 | | thread::scope(|s| { |
1104 | | s.spawn(move |_| { |
1105 | | let _ = lease; |
1106 | | let _ = lease_ref; |
1107 | | let _ = shared_ref; |
1108 | | let _ = moved; |
1109 | | }); |
1110 | | }) |
1111 | | .unwrap(); |
1112 | | } |
1113 | | |
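 | | // The same bounds can also be asserted without spawning anything, via a
 | | // trait-bound helper (a sketch; the helper names are ours, not crate API):
 | | fn _assert_send_sync<T: Send + Sync>(_: &T) {}
 | | fn _check_bounds_directly(shared: &As) {
 | |     _assert_send_sync(shared); // The container itself is Send + Sync...
 | |     _assert_send_sync(&shared.load()); // ...and so are the guards it hands out.
 | | }
 | | 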
1114 | | /// RCU takes a callback. Check what happens if we access the value from within it.
1115 | | #[test] |
1116 | | fn recursive() { |
1117 | | let shared = ArcSwap::from(Arc::new(0)); |
1118 | | |
1119 | | shared.rcu(|i| { |
1120 | | if **i < 10 { |
1121 | | shared.rcu(|i| **i + 1); |
1122 | | } |
1123 | | **i |
1124 | | }); |
1125 | | assert_eq!(10, **shared.load()); |
1126 | | assert_eq!(2, Arc::strong_count(&shared.load_full())); |
1127 | | } |
1128 | | |
1129 | | /// A panic from within the rcu callback should not change anything. |
1130 | | #[test] |
1131 | | #[cfg(not(feature = "experimental-thread-local"))] |
1132 | | fn rcu_panic() { |
1133 | | use std::panic; |
1134 | | let shared = ArcSwap::from(Arc::new(0)); |
1135 | | assert!(panic::catch_unwind(|| shared.rcu(|_| -> usize { panic!() })).is_err()); |
1136 | | assert_eq!(1, Arc::strong_count(&shared.swap(Arc::new(42)))); |
1137 | | } |
1138 | | |
1139 | | /// Handling null/none values |
1140 | | #[test] |
1141 | | fn nulls() { |
1142 | | let shared = ArcSwapOption::from(Some(Arc::new(0))); |
1143 | | let orig = shared.swap(None); |
1144 | | assert_eq!(1, Arc::strong_count(&orig.unwrap())); |
1145 | | let null = shared.load(); |
1146 | | assert!(null.is_none()); |
1147 | | let a = Arc::new(42); |
1148 | | let orig = shared.compare_and_swap(ptr::null(), Some(Arc::clone(&a))); |
1149 | | assert!(orig.is_none()); |
1150 | | assert_eq!(2, Arc::strong_count(&a)); |
1151 | | let orig = Guard::into_inner(shared.compare_and_swap(&None::<Arc<_>>, None)); |
1152 | | assert_eq!(3, Arc::strong_count(&a)); |
1153 | | assert!(ptr_eq(&a, &orig)); |
1154 | | } |
1155 | | |
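 | | // Because compare_and_swap accepts anything pointer-like as the "current"
 | | // witness (including a raw null, as above), it can express set-only-if-unset
 | | // logic. A small sketch (the helper name is ours):
 | | fn _set_if_unset(shared: &ArcSwapOption<usize>, value: Arc<usize>) -> bool {
 | |     let prev = shared.compare_and_swap(ptr::null(), Some(value));
 | |     // The swap went through exactly when the previous value really was None.
 | |     prev.is_none()
 | | }
 | | 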
1156 | | #[test] |
1157 | | /// Multiple RCUs interacting. |
1158 | | fn rcu() { |
1159 | | const ITERATIONS: usize = 50; |
1160 | | const THREADS: usize = 10; |
1161 | | let shared = ArcSwap::from(Arc::new(0)); |
1162 | | thread::scope(|scope| { |
1163 | | for _ in 0..THREADS { |
1164 | | scope.spawn(|_| { |
1165 | | for _ in 0..ITERATIONS { |
1166 | | shared.rcu(|old| **old + 1); |
1167 | | } |
1168 | | }); |
1169 | | } |
1170 | | }) |
1171 | | .unwrap(); |
1172 | | assert_eq!(THREADS * ITERATIONS, **shared.load()); |
1173 | | } |
1174 | | |
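 | | // Conceptually, rcu is a load + modify + compare_and_swap retry loop. A
 | | // minimal sketch of that idea (illustration only, not the crate's actual
 | | // implementation; the helper name is ours):
 | | fn _rcu_sketch(shared: &ArcSwap<usize>, mut f: impl FnMut(&usize) -> usize) -> Arc<usize> {
 | |     let mut current = shared.load_full();
 | |     loop {
 | |         let new = Arc::new(f(&current));
 | |         let previous = shared.compare_and_swap(&current, new);
 | |         if ptr_eq(&current, &previous) {
 | |             // Nobody stored in between; the old value is returned, like rcu does.
 | |             return current;
 | |         }
 | |         // Somebody else stored in the meantime; retry with the fresh value.
 | |         // Note this is why the callback may run more than once.
 | |         current = Guard::into_inner(previous);
 | |     }
 | | }
 | | 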
1175 | | #[test] |
1176 | | /// Make sure the reference counts and compare_and_swap work as expected.
1177 | | fn cas_ref_cnt() { |
1178 | | #[cfg(miri)] |
1179 | | const ITERATIONS: usize = 10; |
1180 | | #[cfg(not(miri))] |
1181 | | const ITERATIONS: usize = 50; |
1182 | | let shared = ArcSwap::from(Arc::new(0)); |
1183 | | for i in 0..ITERATIONS { |
1184 | | let orig = shared.load_full(); |
1185 | | assert_eq!(i, *orig); |
1186 | | if i % 2 == 1 { |
1187 | | // One for orig, one for shared |
1188 | | assert_eq!(2, Arc::strong_count(&orig)); |
1189 | | } |
1190 | | let n1 = Arc::new(i + 1); |
1191 | | // Sometimes fill up the fast slots, so the operations below also exercise the fallback path
1192 | | let fillup = || { |
1193 | | if i % 2 == 0 { |
1194 | | Some((0..ITERATIONS).map(|_| shared.load()).collect::<Vec<_>>()) |
1195 | | } else { |
1196 | | None |
1197 | | } |
1198 | | }; |
1199 | | let guards = fillup(); |
1200 | | // Success |
1201 | | let prev = shared.compare_and_swap(&orig, Arc::clone(&n1)); |
1202 | | assert!(ptr_eq(&orig, &prev)); |
1203 | | drop(guards); |
1204 | | // One for orig, one for prev |
1205 | | assert_eq!(2, Arc::strong_count(&orig)); |
1206 | | // One for n1, one for shared |
1207 | | assert_eq!(2, Arc::strong_count(&n1)); |
1208 | | assert_eq!(i + 1, **shared.load()); |
1209 | | let n2 = Arc::new(i); |
1210 | | drop(prev); |
1211 | | let guards = fillup(); |
1212 | | // Failure |
1213 | | let prev = Guard::into_inner(shared.compare_and_swap(&orig, Arc::clone(&n2))); |
1214 | | drop(guards); |
1215 | | assert!(ptr_eq(&n1, &prev)); |
1216 | | // One for orig |
1217 | | assert_eq!(1, Arc::strong_count(&orig)); |
1218 | | // One for n1, one for shared, one for prev |
1219 | | assert_eq!(3, Arc::strong_count(&n1)); |
1220 | | // n2's count didn't get increased
1221 | | assert_eq!(1, Arc::strong_count(&n2)); |
1222 | | assert_eq!(i + 1, **shared.load()); |
1223 | | } |
1224 | | |
1225 | | let a = shared.load_full(); |
1226 | | // One inside shared, one for a |
1227 | | assert_eq!(2, Arc::strong_count(&a)); |
1228 | | drop(shared); |
1229 | | // Only a now |
1230 | | assert_eq!(1, Arc::strong_count(&a)); |
1231 | | } |
1232 | | } |
1233 | | }; |
1234 | | } |
1235 | | |
1236 | | t!(tests_default, DefaultStrategy); |
1237 | | #[cfg(all(feature = "internal-test-strategies", test))] |
1238 | | #[allow(deprecated)] |
1239 | | mod internal_strategies { |
1240 | | use super::*; |
1241 | | t!( |
1242 | | tests_full_slots, |
1243 | | crate::strategy::test_strategies::FillFastSlots |
1244 | | ); |
1245 | | } |
1246 | | |
1247 | | /// These tests assume details about the strategy being used.
1248 | | #[cfg(test)] |
1249 | | mod tests { |
1250 | | use super::*; |
1251 | | |
1252 | | use alloc::vec::Vec; |
1253 | | |
1254 | | /// Accessing the value inside ArcSwap with Guards (and checking the reference
1255 | | /// counts).
1256 | | #[test] |
1257 | | fn load_cnt() { |
1258 | | let a = Arc::new(0); |
1259 | | let shared = ArcSwap::from(Arc::clone(&a)); |
1260 | | // One in shared, one in a |
1261 | | assert_eq!(2, Arc::strong_count(&a)); |
1262 | | let guard = shared.load(); |
1263 | | assert_eq!(0, **guard); |
1264 | | // The guard doesn't have its own ref count now |
1265 | | assert_eq!(2, Arc::strong_count(&a)); |
1266 | | let guard_2 = shared.load(); |
1267 | | // Storing a new value while guards are alive does not block or deadlock
1268 | | shared.store(Arc::new(1)); |
1269 | | // But now, each guard got a full Arc inside it |
1270 | | assert_eq!(3, Arc::strong_count(&a)); |
1271 | | // And when we get rid of them, they disappear |
1272 | | drop(guard_2); |
1273 | | assert_eq!(2, Arc::strong_count(&a)); |
1274 | | let _b = Arc::clone(&guard); |
1275 | | assert_eq!(3, Arc::strong_count(&a)); |
1276 | | // We can drop the guard the clone came from
1277 | | drop(guard); |
1278 | | assert_eq!(2, Arc::strong_count(&a)); |
1279 | | let guard = shared.load(); |
1280 | | assert_eq!(1, **guard); |
1281 | | drop(shared); |
1282 | | // We can still use the guard after the shared disappears |
1283 | | assert_eq!(1, **guard); |
1284 | | let ptr = Arc::clone(&guard); |
1285 | | // One in guard, one in ptr
1286 | | assert_eq!(2, Arc::strong_count(&ptr)); |
1287 | | drop(guard); |
1288 | | assert_eq!(1, Arc::strong_count(&ptr)); |
1289 | | } |
1290 | | |
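 | | // The test above exercises the two modes of a guard: right after load() it
 | | // only borrows a thread-local slot (no change in the reference count); once
 | | // the value is replaced, or the owner goes away, the guard is upgraded to
 | | // hold its own full Arc.
 | | 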
1291 | | /// There can be only a limited number of leases on one thread. The following
1292 | | /// ones are still created, but contain full Arcs.
1293 | | #[test] |
1294 | | fn lease_overflow() { |
1295 | | #[cfg(miri)] |
1296 | | const GUARD_COUNT: usize = 100; |
1297 | | #[cfg(not(miri))] |
1298 | | const GUARD_COUNT: usize = 1000; |
1299 | | let a = Arc::new(0); |
1300 | | let shared = ArcSwap::from(Arc::clone(&a)); |
1301 | | assert_eq!(2, Arc::strong_count(&a)); |
1302 | | let mut guards = (0..GUARD_COUNT).map(|_| shared.load()).collect::<Vec<_>>(); |
1303 | | let count = Arc::strong_count(&a); |
1304 | | assert!(count > 2); |
1305 | | let guard = shared.load(); |
1306 | | assert_eq!(count + 1, Arc::strong_count(&a)); |
1307 | | drop(guard); |
1308 | | assert_eq!(count, Arc::strong_count(&a)); |
1309 | | // When we remove the first one, it didn't hold its own Arc (it borrowed a
1310 | | // slot), so the ref count doesn't drop
1311 | | guards.swap_remove(0); |
1312 | | assert_eq!(count, Arc::strong_count(&a)); |
1313 | | // But a new one reuses the now-vacant slot and doesn't create a new Arc
1314 | | let _guard = shared.load(); |
1315 | | assert_eq!(count, Arc::strong_count(&a)); |
1316 | | } |
1317 | | } |