Coverage Report

Created: 2025-12-05 07:37

/rust/registry/src/index.crates.io-1949cf8c6b5b557f/crossbeam-epoch-0.9.18/src/atomic.rs
Line | Count | Source
1
use alloc::boxed::Box;
2
use core::alloc::Layout;
3
use core::borrow::{Borrow, BorrowMut};
4
use core::cmp;
5
use core::fmt;
6
use core::marker::PhantomData;
7
use core::mem::{self, MaybeUninit};
8
use core::ops::{Deref, DerefMut};
9
use core::ptr;
10
use core::slice;
11
12
use crate::guard::Guard;
13
use crate::primitive::sync::atomic::{AtomicUsize, Ordering};
14
use crossbeam_utils::atomic::AtomicConsume;
15
16
/// Given the ordering for the success case in a compare-exchange operation, returns the strongest
17
/// appropriate ordering for the failure case.
18
#[inline]
19
0
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
20
    use self::Ordering::*;
21
0
    match ord {
22
0
        Relaxed | Release => Relaxed,
23
0
        Acquire | AcqRel => Acquire,
24
0
        _ => SeqCst,
25
    }
26
0
}
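The mapping above is private to this module; as a quick reference, here is a standalone sketch of the same success-to-failure ordering rule (the `failure_for` name is ours, not part of the crate):

```rust
use std::sync::atomic::Ordering;
use std::sync::atomic::Ordering::*;

// Standalone copy of the mapping used above: the strongest failure
// ordering that is still legal for a given success ordering.
fn failure_for(success: Ordering) -> Ordering {
    match success {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}

fn main() {
    assert_eq!(failure_for(Release), Relaxed);
    assert_eq!(failure_for(AcqRel), Acquire);
    assert_eq!(failure_for(SeqCst), SeqCst);
}
```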
27
28
/// The error returned on a failed compare-and-set operation.
29
// TODO: remove in the next major version.
30
#[deprecated(note = "Use `CompareExchangeError` instead")]
31
pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>;
32
33
/// The error returned on a failed compare-and-swap operation.
34
pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
35
    /// The value in the atomic pointer at the time of the failed operation.
36
    pub current: Shared<'g, T>,
37
38
    /// The new value, which the operation failed to store.
39
    pub new: P,
40
}
41
42
impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> {
43
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
44
0
        f.debug_struct("CompareExchangeError")
45
0
            .field("current", &self.current)
46
0
            .field("new", &self.new)
47
0
            .finish()
48
0
    }
49
}
50
51
/// Memory orderings for compare-and-set operations.
52
///
53
/// A compare-and-set operation can have different memory orderings depending on whether it
54
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
55
///
56
/// The two ways of specifying orderings for compare-and-set are:
57
///
58
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
59
///    ordering is chosen.
60
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
61
///    for the failure case.
62
// TODO: remove in the next major version.
63
#[deprecated(
64
    note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
65
            use `compare_exchange` or `compare_exchange_weak` instead"
66
)]
67
pub trait CompareAndSetOrdering {
68
    /// The ordering of the operation when it succeeds.
69
    fn success(&self) -> Ordering;
70
71
    /// The ordering of the operation when it fails.
72
    ///
73
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than
74
    /// the success ordering.
75
    fn failure(&self) -> Ordering;
76
}
77
78
#[allow(deprecated)]
79
impl CompareAndSetOrdering for Ordering {
80
    #[inline]
81
0
    fn success(&self) -> Ordering {
82
0
        *self
83
0
    }
84
85
    #[inline]
86
0
    fn failure(&self) -> Ordering {
87
0
        strongest_failure_ordering(*self)
88
0
    }
89
}
90
91
#[allow(deprecated)]
92
impl CompareAndSetOrdering for (Ordering, Ordering) {
93
    #[inline]
94
0
    fn success(&self) -> Ordering {
95
0
        self.0
96
0
    }
97
98
    #[inline]
99
0
    fn failure(&self) -> Ordering {
100
0
        self.1
101
0
    }
102
}
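A small illustration of the two `CompareAndSetOrdering` impls above, assuming the trait is re-exported at the crate root as `crossbeam_epoch::CompareAndSetOrdering` (the trait is deprecated, so the example opts into `allow(deprecated)`):

```rust
#![allow(deprecated)]
use crossbeam_epoch::CompareAndSetOrdering;
use std::sync::atomic::Ordering::{AcqRel, Acquire, SeqCst};

fn main() {
    // A single ordering: the failure ordering is derived automatically.
    assert_eq!(SeqCst.success(), SeqCst);
    assert_eq!(SeqCst.failure(), SeqCst);
    assert_eq!(AcqRel.success(), AcqRel);
    assert_eq!(AcqRel.failure(), Acquire);

    // A pair: success and failure orderings are given explicitly.
    assert_eq!((AcqRel, Acquire).success(), AcqRel);
    assert_eq!((AcqRel, Acquire).failure(), Acquire);
}
```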
103
104
/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
105
#[inline]
106
332M
fn low_bits<T: ?Sized + Pointable>() -> usize {
107
332M
    (1 << T::ALIGN.trailing_zeros()) - 1
108
332M
}
Instantiations of low_bits (lines 106-108, count per line):
  crossbeam_epoch::atomic::low_bits::<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>: 262k
  crossbeam_epoch::atomic::low_bits::<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>: 12.9M
  crossbeam_epoch::atomic::low_bits::<crossbeam_epoch::internal::Local>: 399k
  crossbeam_epoch::atomic::low_bits::<crossbeam_epoch::sync::list::Entry>: 318M
109
110
/// Panics if the pointer is not properly aligned.
111
#[inline]
112
114k
fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
113
114k
    assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
114
114k
}
Instantiations of ensure_aligned (lines 112-114, count per line):
  crossbeam_epoch::atomic::ensure_aligned::<crossbeam_epoch::internal::Local>: 57.0k
  crossbeam_epoch::atomic::ensure_aligned::<crossbeam_epoch::sync::list::Entry>: 57.0k
115
116
/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
117
///
118
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
119
#[inline]
120
70.0k
fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
121
70.0k
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
122
70.0k
}
123
124
/// Decomposes a tagged pointer `data` into the pointer and the tag.
125
#[inline]
126
165M
fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
127
165M
    (data & !low_bits::<T>(), data & low_bits::<T>())
128
165M
}
Instantiations of decompose_tag (lines 126-128, count per line):
  crossbeam_epoch::atomic::decompose_tag::<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>: 131k
  crossbeam_epoch::atomic::decompose_tag::<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>: 6.49M
  crossbeam_epoch::atomic::decompose_tag::<crossbeam_epoch::internal::Local>: 171k
  crossbeam_epoch::atomic::decompose_tag::<crossbeam_epoch::sync::list::Entry>: 159M
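The tag packing used by `compose_tag`/`decompose_tag` is plain bit masking on the unused low bits of an aligned address. A minimal standalone sketch of the same arithmetic, using only the standard library:

```rust
fn main() {
    // Any type with alignment >= 4 leaves at least two low bits free for a tag.
    let align = std::mem::align_of::<u64>();
    let low_bits = (1usize << align.trailing_zeros()) - 1;

    let ptr = 0x1000usize; // some suitably aligned address
    let tagged = (ptr & !low_bits) | (0b11 & low_bits); // compose_tag
    let (raw, tag) = (tagged & !low_bits, tagged & low_bits); // decompose_tag

    assert_eq!(raw, 0x1000);
    assert_eq!(tag, 0b11);
}
```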
129
130
/// Types that are pointed to by a single word.
131
///
132
/// In concurrent programming, it is necessary to represent an object within a word because atomic
133
/// operations (e.g., reads, writes, read-modify-writes) support only single words.  This trait
134
/// qualifies such types that are pointed to by a single word.
135
///
136
/// The trait generalizes `Box<T>` for a sized type `T`.  In a box, an object of type `T` is
137
/// allocated in heap and it is owned by a single-word pointer.  This trait is also implemented for
138
/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
139
/// size and elements.
140
///
141
/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`].  In
142
/// particular, Crossbeam supports dynamically sized slices as follows.
143
///
144
/// ```
145
/// use std::mem::MaybeUninit;
146
/// use crossbeam_epoch::Owned;
147
///
148
/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
149
/// ```
150
pub trait Pointable {
151
    /// The alignment of the pointer.
152
    const ALIGN: usize;
153
154
    /// The type for initializers.
155
    type Init;
156
157
    /// Initializes an object with the given initializer.
158
    ///
159
    /// # Safety
160
    ///
161
    /// The result should be a multiple of `ALIGN`.
162
    unsafe fn init(init: Self::Init) -> usize;
163
164
    /// Dereferences the given pointer.
165
    ///
166
    /// # Safety
167
    ///
168
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
169
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
170
    /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
171
    unsafe fn deref<'a>(ptr: usize) -> &'a Self;
172
173
    /// Mutably dereferences the given pointer.
174
    ///
175
    /// # Safety
176
    ///
177
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
178
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
179
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
180
    ///   concurrently.
181
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;
182
183
    /// Drops the object pointed to by the given pointer.
184
    ///
185
    /// # Safety
186
    ///
187
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
188
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
189
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
190
    ///   concurrently.
191
    unsafe fn drop(ptr: usize);
192
}
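Extending the slice example from the `Pointable` docs above, a minimal sketch of allocating and filling a dynamically sized `[MaybeUninit<i32>]` through `Owned::init` (the element writes are our own illustration, not taken from the crate):

```rust
use crossbeam_epoch::Owned;
use std::mem::MaybeUninit;

fn main() {
    // Allocate space for 10 i32s behind a single-word pointer.
    let mut o = Owned::<[MaybeUninit<i32>]>::init(10);
    for (i, slot) in o.iter_mut().enumerate() {
        slot.write(i as i32); // initialize each element
    }
    assert_eq!(o.len(), 10);
    assert_eq!(unsafe { o[3].assume_init() }, 3);
}
```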
193
194
impl<T> Pointable for T {
195
    const ALIGN: usize = mem::align_of::<T>();
196
197
    type Init = T;
198
199
245k
    unsafe fn init(init: Self::Init) -> usize {
200
245k
        Box::into_raw(Box::new(init)) as usize
201
245k
    }
Instantiations of Pointable::init (lines 199-201, count per line):
  <crossbeam_deque::deque::Buffer<rayon_core::job::JobRef> as crossbeam_epoch::atomic::Pointable>::init: 131k
  <crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag> as crossbeam_epoch::atomic::Pointable>::init: 57.0k
  <crossbeam_epoch::internal::Local as crossbeam_epoch::atomic::Pointable>::init: 57.0k
202
203
82.0M
    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
204
82.0M
        &*(ptr as *const T)
205
82.0M
    }
Instantiations of Pointable::deref (lines 203-205, count per line):
  Unexecuted instantiation: <crossbeam_deque::deque::Buffer<rayon_core::job::JobRef> as crossbeam_epoch::atomic::Pointable>::deref
  <crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag> as crossbeam_epoch::atomic::Pointable>::deref: 3.32M
  <crossbeam_epoch::internal::Local as crossbeam_epoch::atomic::Pointable>::deref: 57.0k
  <crossbeam_epoch::sync::list::Entry as crossbeam_epoch::atomic::Pointable>::deref: 78.6M
206
207
0
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
208
0
        &mut *(ptr as *mut T)
209
0
    }
210
211
113k
    unsafe fn drop(ptr: usize) {
212
113k
        drop(Box::from_raw(ptr as *mut T));
213
113k
    }
Instantiations of Pointable::drop (lines 211-213, count per line):
  Unexecuted instantiation: <crossbeam_deque::deque::Buffer<rayon_core::job::JobRef> as crossbeam_epoch::atomic::Pointable>::drop
  <crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag> as crossbeam_epoch::atomic::Pointable>::drop: 56.9k
  <crossbeam_epoch::internal::Local as crossbeam_epoch::atomic::Pointable>::drop: 56.9k
214
}
215
216
/// Array with size.
217
///
218
/// # Memory layout
219
///
220
/// An array consisting of size and elements:
221
///
222
/// ```text
223
///          elements
224
///          |
225
///          |
226
/// ------------------------------------
227
/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
228
/// ------------------------------------
229
/// ```
230
///
231
/// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not
232
/// along with pointer as in `Box<[T]>`).
233
///
234
/// Elements are not present in the type, but they will be in the allocation.
235
///
236
#[repr(C)]
237
struct Array<T> {
238
    /// The number of elements (not the number of bytes).
239
    len: usize,
240
    elements: [MaybeUninit<T>; 0],
241
}
242
243
impl<T> Array<T> {
244
0
    fn layout(len: usize) -> Layout {
245
0
        Layout::new::<Self>()
246
0
            .extend(Layout::array::<MaybeUninit<T>>(len).unwrap())
247
0
            .unwrap()
248
0
            .0
249
0
            .pad_to_align()
250
0
    }
251
}
252
253
impl<T> Pointable for [MaybeUninit<T>] {
254
    const ALIGN: usize = mem::align_of::<Array<T>>();
255
256
    type Init = usize;
257
258
0
    unsafe fn init(len: Self::Init) -> usize {
259
0
        let layout = Array::<T>::layout(len);
260
0
        let ptr = alloc::alloc::alloc(layout).cast::<Array<T>>();
261
0
        if ptr.is_null() {
262
0
            alloc::alloc::handle_alloc_error(layout);
263
0
        }
264
0
        ptr::addr_of_mut!((*ptr).len).write(len);
265
0
        ptr as usize
266
0
    }
267
268
0
    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
269
0
        let array = &*(ptr as *const Array<T>);
270
0
        slice::from_raw_parts(array.elements.as_ptr() as *const _, array.len)
271
0
    }
272
273
0
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
274
0
        let array = &*(ptr as *mut Array<T>);
275
0
        slice::from_raw_parts_mut(array.elements.as_ptr() as *mut _, array.len)
276
0
    }
277
278
0
    unsafe fn drop(ptr: usize) {
279
0
        let len = (*(ptr as *mut Array<T>)).len;
280
0
        let layout = Array::<T>::layout(len);
281
0
        alloc::alloc::dealloc(ptr as *mut u8, layout);
282
0
    }
283
}
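The `Array<T>` layout described above (a `usize` length header followed by the elements) can be mirrored with the standard `Layout` API. A simplified sketch, assuming `T`'s alignment does not exceed that of `usize` on the target:

```rust
use std::alloc::Layout;
use std::mem;

fn main() {
    // Simplified mirror of the private Array::<T>::layout computation for
    // T = u64: a usize length header, then `len` elements, padded to the
    // combined alignment.
    let len = 7usize;
    let layout = Layout::new::<usize>()
        .extend(Layout::array::<u64>(len).unwrap())
        .unwrap()
        .0
        .pad_to_align();

    assert_eq!(layout.align(), mem::align_of::<usize>().max(mem::align_of::<u64>()));
    assert!(layout.size() >= mem::size_of::<usize>() + len * mem::size_of::<u64>());
}
```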
284
285
/// An atomic pointer that can be safely shared between threads.
286
///
287
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
288
/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
289
/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
290
///
291
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
292
///
293
/// Crossbeam supports dynamically sized types.  See [`Pointable`] for details.
294
pub struct Atomic<T: ?Sized + Pointable> {
295
    data: AtomicUsize,
296
    _marker: PhantomData<*mut T>,
297
}
298
299
unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
300
unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}
301
302
impl<T> Atomic<T> {
303
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
304
    ///
305
    /// # Examples
306
    ///
307
    /// ```
308
    /// use crossbeam_epoch::Atomic;
309
    ///
310
    /// let a = Atomic::new(1234);
311
    /// # unsafe { drop(a.into_owned()); } // avoid leak
312
    /// ```
313
131k
    pub fn new(init: T) -> Atomic<T> {
314
131k
        Self::init(init)
315
131k
    }
Instantiations of Atomic::new (lines 313-315, count per line):
  <crossbeam_epoch::atomic::Atomic<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::new: 131k
  Unexecuted instantiation: <crossbeam_epoch::atomic::Atomic<_>>::new
316
}
317
318
impl<T: ?Sized + Pointable> Atomic<T> {
319
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
320
    ///
321
    /// # Examples
322
    ///
323
    /// ```
324
    /// use crossbeam_epoch::Atomic;
325
    ///
326
    /// let a = Atomic::<i32>::init(1234);
327
    /// # unsafe { drop(a.into_owned()); } // avoid leak
328
    /// ```
329
131k
    pub fn init(init: T::Init) -> Atomic<T> {
330
131k
        Self::from(Owned::init(init))
331
131k
    }
Instantiations of Atomic::init (lines 329-331, count per line):
  <crossbeam_epoch::atomic::Atomic<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::init: 131k
  Unexecuted instantiation: <crossbeam_epoch::atomic::Atomic<_>>::init
332
333
    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
334
131k
    fn from_usize(data: usize) -> Self {
335
131k
        Self {
336
131k
            data: AtomicUsize::new(data),
337
131k
            _marker: PhantomData,
338
131k
        }
339
131k
    }
Instantiations of Atomic::from_usize (lines 334-339, count per line):
  <crossbeam_epoch::atomic::Atomic<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::from_usize: 131k
  Unexecuted instantiation: <crossbeam_epoch::atomic::Atomic<_>>::from_usize
340
341
    /// Returns a new null atomic pointer.
342
    ///
343
    /// # Examples
344
    ///
345
    /// ```
346
    /// use crossbeam_epoch::Atomic;
347
    ///
348
    /// let a = Atomic::<i32>::null();
349
    /// ```
350
    #[cfg(not(crossbeam_loom))]
351
114k
    pub const fn null() -> Atomic<T> {
352
114k
        Self {
353
114k
            data: AtomicUsize::new(0),
354
114k
            _marker: PhantomData,
355
114k
        }
356
114k
    }
Instantiations of Atomic::null (lines 351-356, count per line):
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::null: 57.0k
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::list::Entry>>::null: 57.0k
357
    /// Returns a new null atomic pointer.
358
    #[cfg(crossbeam_loom)]
359
    pub fn null() -> Atomic<T> {
360
        Self {
361
            data: AtomicUsize::new(0),
362
            _marker: PhantomData,
363
        }
364
    }
365
366
    /// Loads a `Shared` from the atomic pointer.
367
    ///
368
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
369
    /// operation.
370
    ///
371
    /// # Examples
372
    ///
373
    /// ```
374
    /// use crossbeam_epoch::{self as epoch, Atomic};
375
    /// use std::sync::atomic::Ordering::SeqCst;
376
    ///
377
    /// let a = Atomic::new(1234);
378
    /// let guard = &epoch::pin();
379
    /// let p = a.load(SeqCst, guard);
380
    /// # unsafe { drop(a.into_owned()); } // avoid leak
381
    /// ```
382
88.3M
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
383
88.3M
        unsafe { Shared::from_usize(self.data.load(ord)) }
384
88.3M
    }
Instantiations of Atomic::load (lines 382-384, count per line):
  <crossbeam_epoch::atomic::Atomic<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::load: 131k
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::load: 6.49M
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::list::Entry>>::load: 81.7M
385
386
    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
387
    ///
388
    /// This is similar to the "acquire" ordering, except that an ordering is
389
    /// only guaranteed with operations that "depend on" the result of the load.
390
    /// However consume loads are usually much faster than acquire loads on
391
    /// architectures with a weak memory model since they don't require memory
392
    /// fence instructions.
393
    ///
394
    /// The exact definition of "depend on" is a bit vague, but it works as you
395
    /// would expect in practice since a lot of software, especially the Linux
396
    /// kernel, relies on this behavior.
397
    ///
398
    /// # Examples
399
    ///
400
    /// ```
401
    /// use crossbeam_epoch::{self as epoch, Atomic};
402
    ///
403
    /// let a = Atomic::new(1234);
404
    /// let guard = &epoch::pin();
405
    /// let p = a.load_consume(guard);
406
    /// # unsafe { drop(a.into_owned()); } // avoid leak
407
    /// ```
408
0
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
409
0
        unsafe { Shared::from_usize(self.data.load_consume()) }
410
0
    }
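A small usage sketch contrasting `load_consume` with an `Acquire` load; both return the same pointer, and the difference is only in the fences that may be emitted on weakly ordered targets:

```rust
use crossbeam_epoch::{self as epoch, Atomic};
use std::sync::atomic::Ordering::Acquire;

fn main() {
    let a = Atomic::new(42);
    let guard = &epoch::pin();

    // Same pointer either way; `load_consume` can be cheaper on weakly
    // ordered CPUs because the dereference "depends on" the loaded value.
    let p1 = a.load(Acquire, guard);
    let p2 = a.load_consume(guard);
    assert_eq!(p1.as_raw(), p2.as_raw());

    unsafe { drop(a.into_owned()); } // avoid leak, as in the doc examples
}
```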
411
412
    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
413
    ///
414
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
415
    /// operation.
416
    ///
417
    /// # Examples
418
    ///
419
    /// ```
420
    /// use crossbeam_epoch::{Atomic, Owned, Shared};
421
    /// use std::sync::atomic::Ordering::SeqCst;
422
    ///
423
    /// let a = Atomic::new(1234);
424
    /// # unsafe { drop(a.load(SeqCst, &crossbeam_epoch::pin()).into_owned()); } // avoid leak
425
    /// a.store(Shared::null(), SeqCst);
426
    /// a.store(Owned::new(1234), SeqCst);
427
    /// # unsafe { drop(a.into_owned()); } // avoid leak
428
    /// ```
429
57.8k
    pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
430
57.8k
        self.data.store(new.into_usize(), ord);
431
57.8k
    }
Instantiations of Atomic::store (lines 429-431, count per line):
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::store::<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>: 4
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::list::Entry>>::store::<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::list::Entry>>: 57.8k
432
433
    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
434
    /// `Shared`.
435
    ///
436
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
437
    /// operation.
438
    ///
439
    /// # Examples
440
    ///
441
    /// ```
442
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
443
    /// use std::sync::atomic::Ordering::SeqCst;
444
    ///
445
    /// let a = Atomic::new(1234);
446
    /// let guard = &epoch::pin();
447
    /// let p = a.swap(Shared::null(), SeqCst, guard);
448
    /// # unsafe { drop(p.into_owned()); } // avoid leak
449
    /// ```
450
0
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
451
0
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
452
0
    }
Unexecuted instantiation: <crossbeam_epoch::atomic::Atomic<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::swap::<crossbeam_epoch::atomic::Shared<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>
Unexecuted instantiation: <crossbeam_epoch::atomic::Atomic<_>>::swap::<_>
453
454
    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
455
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
456
    /// same object, but with different tags, will not be considered equal.
457
    ///
458
    /// The return value is a result indicating whether the new pointer was written. On success the
459
    /// pointer that was written is returned. On failure the actual current value and `new` are
460
    /// returned.
461
    ///
462
    /// This method takes two `Ordering` arguments to describe the memory
463
    /// ordering of this operation. `success` describes the required ordering for the
464
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
465
    /// `failure` describes the required ordering for the load operation that takes place when
466
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
467
    /// of this operation `Relaxed`, and using `Release` makes the successful load
468
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
469
    /// and must be equivalent to or weaker than the success ordering.
470
    ///
471
    /// # Examples
472
    ///
473
    /// ```
474
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
475
    /// use std::sync::atomic::Ordering::SeqCst;
476
    ///
477
    /// let a = Atomic::new(1234);
478
    ///
479
    /// let guard = &epoch::pin();
480
    /// let curr = a.load(SeqCst, guard);
481
    /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
482
    /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
483
    /// # unsafe { drop(curr.into_owned()); } // avoid leak
484
    /// ```
485
295k
    pub fn compare_exchange<'g, P>(
486
295k
        &self,
487
295k
        current: Shared<'_, T>,
488
295k
        new: P,
489
295k
        success: Ordering,
490
295k
        failure: Ordering,
491
295k
        _: &'g Guard,
492
295k
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
493
295k
    where
494
295k
        P: Pointer<T>,
495
    {
496
295k
        let new = new.into_usize();
497
295k
        self.data
498
295k
            .compare_exchange(current.into_usize(), new, success, failure)
499
295k
            .map(|_| unsafe { Shared::from_usize(new) })
Instantiations of the success-path closure at line 499:
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::compare_exchange::<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::{closure#0}: 171k
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::list::Entry>>::compare_exchange::<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::list::Entry>>::{closure#0}: 57.0k
500
295k
            .map_err(|current| unsafe {
501
67.0k
                CompareExchangeError {
502
67.0k
                    current: Shared::from_usize(current),
503
67.0k
                    new: P::from_usize(new),
504
67.0k
                }
505
67.0k
            })
Instantiations of the failure-path closure at lines 501-505 (count per line):
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::compare_exchange::<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::{closure#1}: 54.0k
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::list::Entry>>::compare_exchange::<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::list::Entry>>::{closure#1}: 13.0k
506
295k
    }
Instantiations of compare_exchange (lines 485-500 and 506, count per line; the failure-path lines 501-505 are counted under the {closure#1} instantiations above):
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::compare_exchange::<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>: 225k
  <crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::list::Entry>>::compare_exchange::<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::list::Entry>>: 70.0k
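The canonical retry loop around `compare_exchange` described above: on failure the error hands back both the observed value and ownership of the not-yet-stored pointer. A minimal sketch (the cleanup at the end follows the leak-avoidance pattern used in the doc examples):

```rust
use crossbeam_epoch::{self as epoch, Atomic, Owned};
use std::sync::atomic::Ordering::{Acquire, Relaxed, SeqCst};

fn main() {
    let a = Atomic::new(1u64);
    let guard = &epoch::pin();

    // Classic CAS loop: re-read the current value on failure and retry.
    let mut new = Owned::new(2u64);
    let mut current = a.load(Relaxed, guard);
    let old = loop {
        match a.compare_exchange(current, new, SeqCst, Acquire, guard) {
            Ok(_) => break current,
            Err(e) => {
                current = e.current;
                new = e.new; // ownership of the unstored value comes back
            }
        }
    };

    unsafe {
        drop(old.into_owned()); // the displaced 1
        drop(a.into_owned());   // the stored 2
    }
}
```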
507
508
    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
509
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
510
    /// same object, but with different tags, will not be considered equal.
511
    ///
512
    /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
513
    /// succeeds, which can result in more efficient code on some platforms.  The return value is a
514
    /// result indicating whether the new pointer was written. On success the pointer that was
515
    /// written is returned. On failure the actual current value and `new` are returned.
516
    ///
517
    /// This method takes two `Ordering` arguments to describe the memory
518
    /// ordering of this operation. `success` describes the required ordering for the
519
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
520
    /// `failure` describes the required ordering for the load operation that takes place when
521
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
522
    /// of this operation `Relaxed`, and using `Release` makes the successful load
523
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
524
    /// and must be equivalent to or weaker than the success ordering.
525
    ///
526
    /// [`compare_exchange`]: Atomic::compare_exchange
527
    ///
528
    /// # Examples
529
    ///
530
    /// ```
531
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
532
    /// use std::sync::atomic::Ordering::SeqCst;
533
    ///
534
    /// let a = Atomic::new(1234);
535
    /// let guard = &epoch::pin();
536
    ///
537
    /// let mut new = Owned::new(5678);
538
    /// let mut ptr = a.load(SeqCst, guard);
539
    /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
540
    /// loop {
541
    ///     match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
542
    ///         Ok(p) => {
543
    ///             ptr = p;
544
    ///             break;
545
    ///         }
546
    ///         Err(err) => {
547
    ///             ptr = err.current;
548
    ///             new = err.new;
549
    ///         }
550
    ///     }
551
    /// }
552
    ///
553
    /// let mut curr = a.load(SeqCst, guard);
554
    /// loop {
555
    ///     match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
556
    ///         Ok(_) => break,
557
    ///         Err(err) => curr = err.current,
558
    ///     }
559
    /// }
560
    /// # unsafe { drop(curr.into_owned()); } // avoid leak
561
    /// ```
562
57.8k
    pub fn compare_exchange_weak<'g, P>(
563
57.8k
        &self,
564
57.8k
        current: Shared<'_, T>,
565
57.8k
        new: P,
566
57.8k
        success: Ordering,
567
57.8k
        failure: Ordering,
568
57.8k
        _: &'g Guard,
569
57.8k
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
570
57.8k
    where
571
57.8k
        P: Pointer<T>,
572
    {
573
57.8k
        let new = new.into_usize();
574
57.8k
        self.data
575
57.8k
            .compare_exchange_weak(current.into_usize(), new, success, failure)
576
57.8k
            .map(|_| unsafe { Shared::from_usize(new) })
577
57.8k
            .map_err(|current| unsafe {
578
806
                CompareExchangeError {
579
806
                    current: Shared::from_usize(current),
580
806
                    new: P::from_usize(new),
581
806
                }
582
806
            })
583
57.8k
    }
584
585
    /// Fetches the pointer, and then applies a function to it that returns a new value.
586
    /// Returns a `Result` of `Ok(previous_value)` if the function returned `Some`, else `Err(_)`.
587
    ///
588
    /// Note that the given function may be called multiple times if the value has been changed by
589
    /// other threads in the meantime, as long as the function returns `Some(_)`, but the function
590
    /// will have been applied only once to the stored value.
591
    ///
592
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
593
    /// ordering of this operation. The first describes the required ordering for
594
    /// when the operation finally succeeds while the second describes the
595
    /// required ordering for loads. These correspond to the success and failure
596
    /// orderings of [`Atomic::compare_exchange`] respectively.
597
    ///
598
    /// Using [`Acquire`] as success ordering makes the store part of this
599
    /// operation [`Relaxed`], and using [`Release`] makes the final successful
600
    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
601
    /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
602
    /// success ordering.
603
    ///
604
    /// [`Relaxed`]: Ordering::Relaxed
605
    /// [`Acquire`]: Ordering::Acquire
606
    /// [`Release`]: Ordering::Release
607
    /// [`SeqCst`]: Ordering::SeqCst
608
    ///
609
    /// # Examples
610
    ///
611
    /// ```
612
    /// use crossbeam_epoch::{self as epoch, Atomic};
613
    /// use std::sync::atomic::Ordering::SeqCst;
614
    ///
615
    /// let a = Atomic::new(1234);
616
    /// let guard = &epoch::pin();
617
    ///
618
    /// let res1 = a.fetch_update(SeqCst, SeqCst, guard, |x| Some(x.with_tag(1)));
619
    /// assert!(res1.is_ok());
620
    ///
621
    /// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |x| None);
622
    /// assert!(res2.is_err());
623
    /// # unsafe { drop(a.into_owned()); } // avoid leak
624
    /// ```
625
0
    pub fn fetch_update<'g, F>(
626
0
        &self,
627
0
        set_order: Ordering,
628
0
        fail_order: Ordering,
629
0
        guard: &'g Guard,
630
0
        mut func: F,
631
0
    ) -> Result<Shared<'g, T>, Shared<'g, T>>
632
0
    where
633
0
        F: FnMut(Shared<'g, T>) -> Option<Shared<'g, T>>,
634
    {
635
0
        let mut prev = self.load(fail_order, guard);
636
0
        while let Some(next) = func(prev) {
637
0
            match self.compare_exchange_weak(prev, next, set_order, fail_order, guard) {
638
0
                Ok(shared) => return Ok(shared),
639
0
                Err(next_prev) => prev = next_prev.current,
640
            }
641
        }
642
0
        Err(prev)
643
0
    }
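A short sketch of `fetch_update` used as described above, retrying only while the closure keeps returning `Some`; here it sets a tag bit once and refuses to do it twice:

```rust
use crossbeam_epoch::{self as epoch, Atomic};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let a = Atomic::new(10);
    let guard = &epoch::pin();

    // Set tag bit 1, but only if it is not already set.
    let res = a.fetch_update(SeqCst, SeqCst, guard, |p| {
        if p.tag() & 1 == 0 {
            Some(p.with_tag(p.tag() | 1))
        } else {
            None
        }
    });
    assert!(res.is_ok());
    assert_eq!(a.load(SeqCst, guard).tag() & 1, 1);

    unsafe { drop(a.into_owned()); } // avoid leak
}
```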
644
645
    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
646
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
647
    /// same object, but with different tags, will not be considered equal.
648
    ///
649
    /// The return value is a result indicating whether the new pointer was written. On success the
650
    /// pointer that was written is returned. On failure the actual current value and `new` are
651
    /// returned.
652
    ///
653
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
654
    /// ordering of this operation.
655
    ///
656
    /// # Migrating to `compare_exchange`
657
    ///
658
    /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
659
    /// memory orderings:
660
    ///
661
    /// Original | Success | Failure
662
    /// -------- | ------- | -------
663
    /// Relaxed  | Relaxed | Relaxed
664
    /// Acquire  | Acquire | Acquire
665
    /// Release  | Release | Relaxed
666
    /// AcqRel   | AcqRel  | Acquire
667
    /// SeqCst   | SeqCst  | SeqCst
668
    ///
669
    /// # Examples
670
    ///
671
    /// ```
672
    /// # #![allow(deprecated)]
673
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
674
    /// use std::sync::atomic::Ordering::SeqCst;
675
    ///
676
    /// let a = Atomic::new(1234);
677
    ///
678
    /// let guard = &epoch::pin();
679
    /// let curr = a.load(SeqCst, guard);
680
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
681
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
682
    /// # unsafe { drop(curr.into_owned()); } // avoid leak
683
    /// ```
684
    // TODO: remove in the next major version.
685
    #[allow(deprecated)]
686
    #[deprecated(note = "Use `compare_exchange` instead")]
687
0
    pub fn compare_and_set<'g, O, P>(
688
0
        &self,
689
0
        current: Shared<'_, T>,
690
0
        new: P,
691
0
        ord: O,
692
0
        guard: &'g Guard,
693
0
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
694
0
    where
695
0
        O: CompareAndSetOrdering,
696
0
        P: Pointer<T>,
697
    {
698
0
        self.compare_exchange(current, new, ord.success(), ord.failure(), guard)
699
0
    }
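Applying the migration table above to a single call site; the success ordering stays as written and the failure ordering is read off the table (here `AcqRel` maps to `Acquire`):

```rust
use crossbeam_epoch::{self as epoch, Atomic, Shared};
use std::sync::atomic::Ordering::{AcqRel, Acquire, SeqCst};

fn main() {
    let a = Atomic::new(1234);
    let guard = &epoch::pin();
    let curr = a.load(SeqCst, guard);

    // Before: a.compare_and_set(curr, Shared::null(), AcqRel, guard)
    // After:  spell out the failure ordering from the migration table.
    let _ = a.compare_exchange(curr, Shared::null(), AcqRel, Acquire, guard);

    unsafe { drop(curr.into_owned()); } // avoid leak
}
```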
700
701
    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
702
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
703
    /// same object, but with different tags, will not be considered equal.
704
    ///
705
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
706
    /// succeeds, which can result in more efficient code on some platforms.  The return value is a
707
    /// result indicating whether the new pointer was written. On success the pointer that was
708
    /// written is returned. On failure the actual current value and `new` are returned.
709
    ///
710
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
711
    /// ordering of this operation.
712
    ///
713
    /// [`compare_and_set`]: Atomic::compare_and_set
714
    ///
715
    /// # Migrating to `compare_exchange_weak`
716
    ///
717
    /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for
718
    /// memory orderings:
719
    ///
720
    /// Original | Success | Failure
721
    /// -------- | ------- | -------
722
    /// Relaxed  | Relaxed | Relaxed
723
    /// Acquire  | Acquire | Acquire
724
    /// Release  | Release | Relaxed
725
    /// AcqRel   | AcqRel  | Acquire
726
    /// SeqCst   | SeqCst  | SeqCst
727
    ///
728
    /// # Examples
729
    ///
730
    /// ```
731
    /// # #![allow(deprecated)]
732
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
733
    /// use std::sync::atomic::Ordering::SeqCst;
734
    ///
735
    /// let a = Atomic::new(1234);
736
    /// let guard = &epoch::pin();
737
    ///
738
    /// let mut new = Owned::new(5678);
739
    /// let mut ptr = a.load(SeqCst, guard);
740
    /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
741
    /// loop {
742
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
743
    ///         Ok(p) => {
744
    ///             ptr = p;
745
    ///             break;
746
    ///         }
747
    ///         Err(err) => {
748
    ///             ptr = err.current;
749
    ///             new = err.new;
750
    ///         }
751
    ///     }
752
    /// }
753
    ///
754
    /// let mut curr = a.load(SeqCst, guard);
755
    /// loop {
756
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
757
    ///         Ok(_) => break,
758
    ///         Err(err) => curr = err.current,
759
    ///     }
760
    /// }
761
    /// # unsafe { drop(curr.into_owned()); } // avoid leak
762
    /// ```
763
    // TODO: remove in the next major version.
764
    #[allow(deprecated)]
765
    #[deprecated(note = "Use `compare_exchange_weak` instead")]
766
0
    pub fn compare_and_set_weak<'g, O, P>(
767
0
        &self,
768
0
        current: Shared<'_, T>,
769
0
        new: P,
770
0
        ord: O,
771
0
        guard: &'g Guard,
772
0
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
773
0
    where
774
0
        O: CompareAndSetOrdering,
775
0
        P: Pointer<T>,
776
    {
777
0
        self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard)
778
0
    }
779
780
    /// Bitwise "and" with the current tag.
781
    ///
782
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
783
    /// new tag to the result. Returns the previous pointer.
784
    ///
785
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
786
    /// operation.
787
    ///
788
    /// # Examples
789
    ///
790
    /// ```
791
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
792
    /// use std::sync::atomic::Ordering::SeqCst;
793
    ///
794
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
795
    /// let guard = &epoch::pin();
796
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
797
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
798
    /// ```
799
0
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
800
0
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
801
0
    }
802
803
    /// Bitwise "or" with the current tag.
804
    ///
805
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
806
    /// new tag to the result. Returns the previous pointer.
807
    ///
808
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
809
    /// operation.
810
    ///
811
    /// # Examples
812
    ///
813
    /// ```
814
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
815
    /// use std::sync::atomic::Ordering::SeqCst;
816
    ///
817
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
818
    /// let guard = &epoch::pin();
819
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
820
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
821
    /// ```
822
57.0k
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
823
57.0k
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
824
57.0k
    }
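A sketch of the marking pattern the tag methods above support: `fetch_or` sets a low bit in place without replacing the pointer, which is how logical deletion marks are commonly built on top of this API (our example, not code from the crate):

```rust
use crossbeam_epoch::{self as epoch, Atomic};
use std::sync::atomic::Ordering::{Acquire, Release};

fn main() {
    let node = Atomic::new(5u64);
    let guard = &epoch::pin();

    // Mark the node as logically deleted by setting tag bit 1, leaving the
    // pointer itself untouched.
    let before = node.fetch_or(1, Release, guard);
    assert_eq!(before.tag(), 0);
    assert_eq!(node.load(Acquire, guard).tag(), 1);

    unsafe { drop(node.into_owned()); } // avoid leak
}
```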
825
826
    /// Bitwise "xor" with the current tag.
827
    ///
828
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
829
    /// new tag to the result. Returns the previous pointer.
830
    ///
831
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
832
    /// operation.
833
    ///
834
    /// # Examples
835
    ///
836
    /// ```
837
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
838
    /// use std::sync::atomic::Ordering::SeqCst;
839
    ///
840
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
841
    /// let guard = &epoch::pin();
842
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
843
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
844
    /// ```
845
0
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
846
0
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
847
0
    }
848
849
    /// Takes ownership of the pointee.
850
    ///
851
    /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
852
    /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
853
    /// destructors of data structures.
854
    ///
855
    /// # Panics
856
    ///
857
    /// Panics if this pointer is null, but only in debug mode.
858
    ///
859
    /// # Safety
860
    ///
861
    /// This method may be called only if the pointer is valid and nobody else is holding a
862
    /// reference to the same object.
863
    ///
864
    /// # Examples
865
    ///
866
    /// ```rust
867
    /// # use std::mem;
868
    /// # use crossbeam_epoch::Atomic;
869
    /// struct DataStructure {
870
    ///     ptr: Atomic<usize>,
871
    /// }
872
    ///
873
    /// impl Drop for DataStructure {
874
    ///     fn drop(&mut self) {
875
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
876
    ///         // any Shared or & to it ourselves.
877
    ///         unsafe {
878
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
879
    ///         }
880
    ///     }
881
    /// }
882
    /// ```
883
0
    pub unsafe fn into_owned(self) -> Owned<T> {
884
0
        Owned::from_usize(self.data.into_inner())
885
0
    }
886
887
    /// Takes ownership of the pointee if it is non-null.
888
    ///
889
    /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
890
    /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
891
    /// destructors of data structures.
892
    ///
893
    /// # Safety
894
    ///
895
    /// This method may be called only if the pointer is valid and nobody else is holding a
896
    /// reference to the same object, or the pointer is null.
897
    ///
898
    /// # Examples
899
    ///
900
    /// ```rust
901
    /// # use std::mem;
902
    /// # use crossbeam_epoch::Atomic;
903
    /// struct DataStructure {
904
    ///     ptr: Atomic<usize>,
905
    /// }
906
    ///
907
    /// impl Drop for DataStructure {
908
    ///     fn drop(&mut self) {
909
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
910
    ///         // any Shared or & to it ourselves, but it may be null, so we have to be careful.
911
    ///         let old = mem::replace(&mut self.ptr, Atomic::null());
912
    ///         unsafe {
913
    ///             if let Some(x) = old.try_into_owned() {
914
    ///                 drop(x)
915
    ///             }
916
    ///         }
917
    ///     }
918
    /// }
919
    /// ```
920
0
    pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
921
0
        let data = self.data.into_inner();
922
0
        if decompose_tag::<T>(data).0 == 0 {
923
0
            None
924
        } else {
925
0
            Some(Owned::from_usize(data))
926
        }
927
0
    }
928
}
929
930
impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
931
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
932
0
        let data = self.data.load(Ordering::SeqCst);
933
0
        let (raw, tag) = decompose_tag::<T>(data);
934
935
0
        f.debug_struct("Atomic")
936
0
            .field("raw", &raw)
937
0
            .field("tag", &tag)
938
0
            .finish()
939
0
    }
940
}
941
942
impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
943
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
944
0
        let data = self.data.load(Ordering::SeqCst);
945
0
        let (raw, _) = decompose_tag::<T>(data);
946
0
        fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
947
0
    }
948
}
949
950
impl<T: ?Sized + Pointable> Clone for Atomic<T> {
951
    /// Returns a copy of the atomic value.
952
    ///
953
    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
954
    /// atomics or fences.
955
0
    fn clone(&self) -> Self {
956
0
        let data = self.data.load(Ordering::Relaxed);
957
0
        Atomic::from_usize(data)
958
0
    }
959
}
960
961
impl<T: ?Sized + Pointable> Default for Atomic<T> {
962
0
    fn default() -> Self {
963
0
        Atomic::null()
964
0
    }
965
}
966
967
impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
968
    /// Returns a new atomic pointer pointing to `owned`.
969
    ///
970
    /// # Examples
971
    ///
972
    /// ```
973
    /// use crossbeam_epoch::{Atomic, Owned};
974
    ///
975
    /// let a = Atomic::<i32>::from(Owned::new(1234));
976
    /// # unsafe { drop(a.into_owned()); } // avoid leak
977
    /// ```
978
131k
    fn from(owned: Owned<T>) -> Self {
979
131k
        let data = owned.data;
980
131k
        mem::forget(owned);
981
131k
        Self::from_usize(data)
982
131k
    }
Instantiations of From<Owned<T>> for Atomic<T> (lines 978-982, count per line):
  <crossbeam_epoch::atomic::Atomic<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>> as core::convert::From<crossbeam_epoch::atomic::Owned<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>>::from: 131k
  Unexecuted instantiation: <crossbeam_epoch::atomic::Atomic<_> as core::convert::From<crossbeam_epoch::atomic::Owned<_>>>::from
983
}
984
985
impl<T> From<Box<T>> for Atomic<T> {
986
0
    fn from(b: Box<T>) -> Self {
987
0
        Self::from(Owned::from(b))
988
0
    }
989
}
990
991
impl<T> From<T> for Atomic<T> {
992
0
    fn from(t: T) -> Self {
993
0
        Self::new(t)
994
0
    }
995
}
996
997
impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
998
    /// Returns a new atomic pointer pointing to `ptr`.
999
    ///
1000
    /// # Examples
1001
    ///
1002
    /// ```
1003
    /// use crossbeam_epoch::{Atomic, Shared};
1004
    ///
1005
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
1006
    /// ```
1007
0
    fn from(ptr: Shared<'g, T>) -> Self {
1008
0
        Self::from_usize(ptr.data)
1009
0
    }
1010
}
1011
1012
impl<T> From<*const T> for Atomic<T> {
1013
    /// Returns a new atomic pointer pointing to `raw`.
1014
    ///
1015
    /// # Examples
1016
    ///
1017
    /// ```
1018
    /// use std::ptr;
1019
    /// use crossbeam_epoch::Atomic;
1020
    ///
1021
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
1022
    /// ```
1023
0
    fn from(raw: *const T) -> Self {
1024
0
        Self::from_usize(raw as usize)
1025
0
    }
1026
}
1027
1028
/// A trait for either `Owned` or `Shared` pointers.
1029
pub trait Pointer<T: ?Sized + Pointable> {
1030
    /// Returns the machine representation of the pointer.
1031
    fn into_usize(self) -> usize;
1032
1033
    /// Returns a new pointer pointing to the tagged pointer `data`.
1034
    ///
1035
    /// # Safety
1036
    ///
1037
    /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
1038
    /// not be converted back by `Pointer::from_usize()` multiple times.
1039
    unsafe fn from_usize(data: usize) -> Self;
1040
}
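A round trip through the `Pointer` trait above, assuming the trait is re-exported at the crate root; the word produced by `into_usize` carries both the address and the tag, and must be converted back exactly once:

```rust
use crossbeam_epoch::{Owned, Pointer};

fn main() {
    // Round-trip an Owned through its machine-word representation.
    let o = Owned::new(7i32).with_tag(1);
    let bits = o.into_usize();
    // Safety: `bits` came from `into_usize` and is converted back exactly once.
    let o2 = unsafe { Owned::<i32>::from_usize(bits) };
    assert_eq!(o2.tag(), 1);
    assert_eq!(*o2, 7);
}
```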
1041
1042
/// An owned heap-allocated object.
1043
///
1044
/// This type is very similar to `Box<T>`.
1045
///
1046
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
1047
/// least significant bits of the address.
1048
pub struct Owned<T: ?Sized + Pointable> {
1049
    data: usize,
1050
    _marker: PhantomData<Box<T>>,
1051
}
1052
1053
impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
1054
    #[inline]
1055
114k
    fn into_usize(self) -> usize {
1056
114k
        let data = self.data;
1057
114k
        mem::forget(self);
1058
114k
        data
1059
114k
    }
Instantiations of Pointer::into_usize for Owned (lines 1055-1059, count per line):
  Unexecuted instantiation: <crossbeam_epoch::atomic::Owned<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>> as crossbeam_epoch::atomic::Pointer<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::into_usize
  <crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>> as crossbeam_epoch::atomic::Pointer<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::into_usize: 57.0k
  <crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local> as crossbeam_epoch::atomic::Pointer<crossbeam_epoch::internal::Local>>::into_usize: 57.0k
1060
1061
    /// Returns a new pointer pointing to the tagged pointer `data`.
1062
    ///
1063
    /// # Panics
1064
    ///
1065
    /// Panics if the data is zero in debug mode.
1066
    #[inline]
1067
490k
    unsafe fn from_usize(data: usize) -> Self {
1068
490k
        debug_assert!(data != 0, "converting zero into `Owned`");
1069
490k
        Owned {
1070
490k
            data,
1071
490k
            _marker: PhantomData,
1072
490k
        }
1073
490k
    }
Instantiations of Pointer::from_usize for Owned (lines 1067-1073, count per line):
  <crossbeam_epoch::atomic::Owned<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>> as crossbeam_epoch::atomic::Pointer<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::from_usize: 262k
  <crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>> as crossbeam_epoch::atomic::Pointer<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::from_usize: 114k
  <crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local> as crossbeam_epoch::atomic::Pointer<crossbeam_epoch::internal::Local>>::from_usize: 114k
1074
}
1075
1076
impl<T> Owned<T> {
1077
    /// Returns a new owned pointer pointing to `raw`.
1078
    ///
1079
    /// This function is unsafe because improper use may lead to memory problems: the argument `raw`
1080
    /// must be a valid pointer, and a double free may occur if the function is called twice on
1081
    /// the same raw pointer.
1082
    ///
1083
    /// # Panics
1084
    ///
1085
    /// Panics if `raw` is not properly aligned.
1086
    ///
1087
    /// # Safety
1088
    ///
1089
    /// The given `raw` should have been derived from `Owned`, and the same `raw` must not be converted
1090
    /// back with `Owned::from_raw()` more than once.
1091
    ///
1092
    /// # Examples
1093
    ///
1094
    /// ```
1095
    /// use crossbeam_epoch::Owned;
1096
    ///
1097
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
1098
    /// ```
1099
0
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
1100
0
        let raw = raw as usize;
1101
0
        ensure_aligned::<T>(raw);
1102
0
        Self::from_usize(raw)
1103
0
    }
1104
1105
    /// Converts the owned pointer into a `Box`.
1106
    ///
1107
    /// # Examples
1108
    ///
1109
    /// ```
1110
    /// use crossbeam_epoch::Owned;
1111
    ///
1112
    /// let o = Owned::new(1234);
1113
    /// let b: Box<i32> = o.into_box();
1114
    /// assert_eq!(*b, 1234);
1115
    /// ```
1116
131k
    pub fn into_box(self) -> Box<T> {
1117
131k
        let (raw, _) = decompose_tag::<T>(self.data);
1118
131k
        mem::forget(self);
1119
131k
        unsafe { Box::from_raw(raw as *mut _) }
1120
131k
    }
<crossbeam_epoch::atomic::Owned<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::into_box
Line
Count
Source
1116
131k
    pub fn into_box(self) -> Box<T> {
1117
131k
        let (raw, _) = decompose_tag::<T>(self.data);
1118
131k
        mem::forget(self);
1119
131k
        unsafe { Box::from_raw(raw as *mut _) }
1120
131k
    }
Unexecuted instantiation: <crossbeam_epoch::atomic::Owned<_>>::into_box
1121
1122
    /// Allocates `init` on the heap and returns a new owned pointer pointing to it.
1123
    ///
1124
    /// # Examples
1125
    ///
1126
    /// ```
1127
    /// use crossbeam_epoch::Owned;
1128
    ///
1129
    /// let o = Owned::new(1234);
1130
    /// ```
1131
114k
    pub fn new(init: T) -> Owned<T> {
1132
114k
        Self::init(init)
1133
114k
    }
Unexecuted instantiation: <crossbeam_epoch::atomic::Owned<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::new
<crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::new
Line
Count
Source
1131
57.0k
    pub fn new(init: T) -> Owned<T> {
1132
57.0k
        Self::init(init)
1133
57.0k
    }
<crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local>>::new
Line
Count
Source
1131
57.0k
    pub fn new(init: T) -> Owned<T> {
1132
57.0k
        Self::init(init)
1133
57.0k
    }
1134
}
1135
1136
impl<T: ?Sized + Pointable> Owned<T> {
1137
    /// Allocates `init` on the heap and returns a new owned pointer pointing to it.
1138
    ///
1139
    /// # Examples
1140
    ///
1141
    /// ```
1142
    /// use crossbeam_epoch::Owned;
1143
    ///
1144
    /// let o = Owned::<i32>::init(1234);
1145
    /// ```
1146
245k
    pub fn init(init: T::Init) -> Owned<T> {
1147
245k
        unsafe { Self::from_usize(T::init(init)) }
1148
245k
    }
<crossbeam_epoch::atomic::Owned<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::init
Line
Count
Source
1146
131k
    pub fn init(init: T::Init) -> Owned<T> {
1147
131k
        unsafe { Self::from_usize(T::init(init)) }
1148
131k
    }
<crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::init
Line
Count
Source
1146
57.0k
    pub fn init(init: T::Init) -> Owned<T> {
1147
57.0k
        unsafe { Self::from_usize(T::init(init)) }
1148
57.0k
    }
<crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local>>::init
Line
Count
Source
1146
57.0k
    pub fn init(init: T::Init) -> Owned<T> {
1147
57.0k
        unsafe { Self::from_usize(T::init(init)) }
1148
57.0k
    }
1149
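
One detail worth calling out for `init`: the `Init` associated type is not always `T` itself. For slices of `MaybeUninit`, it is the desired length, which the tests at the end of this file also exercise. A minimal sketch:

use crossbeam_epoch::Owned;
use std::mem::MaybeUninit;

fn main() {
    // For `[MaybeUninit<usize>]`, `T::Init` is the slice length.
    let owned = Owned::<[MaybeUninit<usize>]>::init(10);
    assert_eq!(owned.len(), 10); // Deref reaches the allocated slice
}
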
1150
    /// Converts the owned pointer into a [`Shared`].
1151
    ///
1152
    /// # Examples
1153
    ///
1154
    /// ```
1155
    /// use crossbeam_epoch::{self as epoch, Owned};
1156
    ///
1157
    /// let o = Owned::new(1234);
1158
    /// let guard = &epoch::pin();
1159
    /// let p = o.into_shared(guard);
1160
    /// # unsafe { drop(p.into_owned()); } // avoid leak
1161
    /// ```
1162
    #[allow(clippy::needless_lifetimes)]
1163
114k
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
1164
114k
        unsafe { Shared::from_usize(self.into_usize()) }
1165
114k
    }
Unexecuted instantiation: <crossbeam_epoch::atomic::Owned<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::into_shared
<crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::into_shared
Line
Count
Source
1163
57.0k
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
1164
57.0k
        unsafe { Shared::from_usize(self.into_usize()) }
1165
57.0k
    }
<crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local>>::into_shared
Line
Count
Source
1163
57.0k
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
1164
57.0k
        unsafe { Shared::from_usize(self.into_usize()) }
1165
57.0k
    }
1166
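
A minimal sketch of the `into_shared` round trip shown above, assuming the crossbeam-epoch API in this listing: ownership moves into a guard-bound `Shared`, and `into_owned` reclaims it so the allocation is not leaked:

use crossbeam_epoch::{self as epoch, Owned};

fn main() {
    let o = Owned::new(1234);
    let guard = &epoch::pin();
    let shared = o.into_shared(guard); // `Owned` is consumed; the pointer is now guard-bound
    unsafe {
        assert_eq!(shared.deref(), &1234); // valid for the guard's lifetime
        drop(shared.into_owned()); // take ownership back so the allocation is freed
    }
}
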
1167
    /// Returns the tag stored within the pointer.
1168
    ///
1169
    /// # Examples
1170
    ///
1171
    /// ```
1172
    /// use crossbeam_epoch::Owned;
1173
    ///
1174
    /// assert_eq!(Owned::new(1234).tag(), 0);
1175
    /// ```
1176
0
    pub fn tag(&self) -> usize {
1177
0
        let (_, tag) = decompose_tag::<T>(self.data);
1178
0
        tag
1179
0
    }
1180
1181
    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
1182
    /// unused bits of the pointer to `T`.
1183
    ///
1184
    /// # Examples
1185
    ///
1186
    /// ```
1187
    /// use crossbeam_epoch::Owned;
1188
    ///
1189
    /// let o = Owned::new(0u64);
1190
    /// assert_eq!(o.tag(), 0);
1191
    /// let o = o.with_tag(2);
1192
    /// assert_eq!(o.tag(), 2);
1193
    /// ```
1194
0
    pub fn with_tag(self, tag: usize) -> Owned<T> {
1195
0
        let data = self.into_usize();
1196
0
        unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
1197
0
    }
1198
}
1199
1200
impl<T: ?Sized + Pointable> Drop for Owned<T> {
1201
113k
    fn drop(&mut self) {
1202
113k
        let (raw, _) = decompose_tag::<T>(self.data);
1203
113k
        unsafe {
1204
113k
            T::drop(raw);
1205
113k
        }
1206
113k
    }
Unexecuted instantiation: <crossbeam_epoch::atomic::Owned<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>> as core::ops::drop::Drop>::drop
<crossbeam_epoch::atomic::Owned<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>> as core::ops::drop::Drop>::drop
Line
Count
Source
1201
56.9k
    fn drop(&mut self) {
1202
56.9k
        let (raw, _) = decompose_tag::<T>(self.data);
1203
56.9k
        unsafe {
1204
56.9k
            T::drop(raw);
1205
56.9k
        }
1206
56.9k
    }
<crossbeam_epoch::atomic::Owned<crossbeam_epoch::internal::Local> as core::ops::drop::Drop>::drop
Line
Count
Source
1201
56.9k
    fn drop(&mut self) {
1202
56.9k
        let (raw, _) = decompose_tag::<T>(self.data);
1203
56.9k
        unsafe {
1204
56.9k
            T::drop(raw);
1205
56.9k
        }
1206
56.9k
    }
1207
}
1208
1209
impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
1210
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1211
0
        let (raw, tag) = decompose_tag::<T>(self.data);
1212
1213
0
        f.debug_struct("Owned")
1214
0
            .field("raw", &raw)
1215
0
            .field("tag", &tag)
1216
0
            .finish()
1217
0
    }
1218
}
1219
1220
impl<T: Clone> Clone for Owned<T> {
1221
0
    fn clone(&self) -> Self {
1222
0
        Owned::new((**self).clone()).with_tag(self.tag())
1223
0
    }
1224
}
1225
1226
impl<T: ?Sized + Pointable> Deref for Owned<T> {
1227
    type Target = T;
1228
1229
0
    fn deref(&self) -> &T {
1230
0
        let (raw, _) = decompose_tag::<T>(self.data);
1231
0
        unsafe { T::deref(raw) }
1232
0
    }
1233
}
1234
1235
impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
1236
0
    fn deref_mut(&mut self) -> &mut T {
1237
0
        let (raw, _) = decompose_tag::<T>(self.data);
1238
0
        unsafe { T::deref_mut(raw) }
1239
0
    }
1240
}
1241
1242
impl<T> From<T> for Owned<T> {
1243
0
    fn from(t: T) -> Self {
1244
0
        Owned::new(t)
1245
0
    }
1246
}
1247
1248
impl<T> From<Box<T>> for Owned<T> {
1249
    /// Returns a new owned pointer pointing to `b`.
1250
    ///
1251
    /// # Panics
1252
    ///
1253
    /// Panics if the pointer (the `Box`) is not properly aligned.
1254
    ///
1255
    /// # Examples
1256
    ///
1257
    /// ```
1258
    /// use crossbeam_epoch::Owned;
1259
    ///
1260
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
1261
    /// ```
1262
0
    fn from(b: Box<T>) -> Self {
1263
0
        unsafe { Self::from_raw(Box::into_raw(b)) }
1264
0
    }
1265
}
1266
1267
impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
1268
0
    fn borrow(&self) -> &T {
1269
0
        self.deref()
1270
0
    }
1271
}
1272
1273
impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
1274
0
    fn borrow_mut(&mut self) -> &mut T {
1275
0
        self.deref_mut()
1276
0
    }
1277
}
1278
1279
impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
1280
0
    fn as_ref(&self) -> &T {
1281
0
        self.deref()
1282
0
    }
1283
}
1284
1285
impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
1286
0
    fn as_mut(&mut self) -> &mut T {
1287
0
        self.deref_mut()
1288
0
    }
1289
}
1290
1291
/// A pointer to an object protected by the epoch GC.
1292
///
1293
/// The pointer is valid for use only during the lifetime `'g`.
1294
///
1295
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
1296
/// least significant bits of the address.
1297
pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
1298
    data: usize,
1299
    _marker: PhantomData<(&'g (), *const T)>,
1300
}
1301
1302
impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
1303
0
    fn clone(&self) -> Self {
1304
0
        *self
1305
0
    }
1306
}
1307
1308
impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}
1309
1310
impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
1311
    #[inline]
1312
764k
    fn into_usize(self) -> usize {
1313
764k
        self.data
1314
764k
    }
Unexecuted instantiation: <crossbeam_epoch::atomic::Shared<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>> as crossbeam_epoch::atomic::Pointer<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::into_usize
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>> as crossbeam_epoch::atomic::Pointer<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::into_usize
Line
Count
Source
1312
450k
    fn into_usize(self) -> usize {
1313
450k
        self.data
1314
450k
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::list::Entry> as crossbeam_epoch::atomic::Pointer<crossbeam_epoch::sync::list::Entry>>::into_usize
Line
Count
Source
1312
313k
    fn into_usize(self) -> usize {
1313
313k
        self.data
1314
313k
    }
1315
1316
    #[inline]
1317
89.1M
    unsafe fn from_usize(data: usize) -> Self {
1318
89.1M
        Shared {
1319
89.1M
            data,
1320
89.1M
            _marker: PhantomData,
1321
89.1M
        }
1322
89.1M
    }
<crossbeam_epoch::atomic::Shared<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>> as crossbeam_epoch::atomic::Pointer<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::from_usize
Line
Count
Source
1317
131k
    unsafe fn from_usize(data: usize) -> Self {
1318
131k
        Shared {
1319
131k
            data,
1320
131k
            _marker: PhantomData,
1321
131k
        }
1322
131k
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>> as crossbeam_epoch::atomic::Pointer<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::from_usize
Line
Count
Source
1317
6.82M
    unsafe fn from_usize(data: usize) -> Self {
1318
6.82M
        Shared {
1319
6.82M
            data,
1320
6.82M
            _marker: PhantomData,
1321
6.82M
        }
1322
6.82M
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::internal::Local> as crossbeam_epoch::atomic::Pointer<crossbeam_epoch::internal::Local>>::from_usize
Line
Count
Source
1317
114k
    unsafe fn from_usize(data: usize) -> Self {
1318
114k
        Shared {
1319
114k
            data,
1320
114k
            _marker: PhantomData,
1321
114k
        }
1322
114k
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::list::Entry> as crossbeam_epoch::atomic::Pointer<crossbeam_epoch::sync::list::Entry>>::from_usize
Line
Count
Source
1317
82.0M
    unsafe fn from_usize(data: usize) -> Self {
1318
82.0M
        Shared {
1319
82.0M
            data,
1320
82.0M
            _marker: PhantomData,
1321
82.0M
        }
1322
82.0M
    }
1323
}
1324
1325
impl<'g, T> Shared<'g, T> {
1326
    /// Converts the pointer to a raw pointer (without the tag).
1327
    ///
1328
    /// # Examples
1329
    ///
1330
    /// ```
1331
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1332
    /// use std::sync::atomic::Ordering::SeqCst;
1333
    ///
1334
    /// let o = Owned::new(1234);
1335
    /// let raw = &*o as *const _;
1336
    /// let a = Atomic::from(o);
1337
    ///
1338
    /// let guard = &epoch::pin();
1339
    /// let p = a.load(SeqCst, guard);
1340
    /// assert_eq!(p.as_raw(), raw);
1341
    /// # unsafe { drop(a.into_owned()); } // avoid leak
1342
    /// ```
1343
57.0k
    pub fn as_raw(&self) -> *const T {
1344
57.0k
        let (raw, _) = decompose_tag::<T>(self.data);
1345
57.0k
        raw as *const _
1346
57.0k
    }
1347
}
1348
1349
impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
1350
    /// Returns a new null pointer.
1351
    ///
1352
    /// # Examples
1353
    ///
1354
    /// ```
1355
    /// use crossbeam_epoch::Shared;
1356
    ///
1357
    /// let p = Shared::<i32>::null();
1358
    /// assert!(p.is_null());
1359
    /// ```
1360
70.6k
    pub fn null() -> Shared<'g, T> {
1361
70.6k
        Shared {
1362
70.6k
            data: 0,
1363
70.6k
            _marker: PhantomData,
1364
70.6k
        }
1365
70.6k
    }
1366
1367
    /// Returns `true` if the pointer is null.
1368
    ///
1369
    /// # Examples
1370
    ///
1371
    /// ```
1372
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1373
    /// use std::sync::atomic::Ordering::SeqCst;
1374
    ///
1375
    /// let a = Atomic::null();
1376
    /// let guard = &epoch::pin();
1377
    /// assert!(a.load(SeqCst, guard).is_null());
1378
    /// a.store(Owned::new(1234), SeqCst);
1379
    /// assert!(!a.load(SeqCst, guard).is_null());
1380
    /// # unsafe { drop(a.into_owned()); } // avoid leak
1381
    /// ```
1382
0
    pub fn is_null(&self) -> bool {
1383
0
        let (raw, _) = decompose_tag::<T>(self.data);
1384
0
        raw == 0
1385
0
    }
1386
1387
    /// Dereferences the pointer.
1388
    ///
1389
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
1390
    ///
1391
    /// # Safety
1392
    ///
1393
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
1394
    ///
1395
    /// Another concern is the possibility of data races due to lack of proper synchronization.
1396
    /// For example, consider the following scenario:
1397
    ///
1398
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
1399
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
1400
    ///
1401
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
1402
    /// the read from the second thread. This is a data race. A possible solution would be to use
1403
    /// `Release` and `Acquire` orderings.
1404
    ///
1405
    /// # Examples
1406
    ///
1407
    /// ```
1408
    /// use crossbeam_epoch::{self as epoch, Atomic};
1409
    /// use std::sync::atomic::Ordering::SeqCst;
1410
    ///
1411
    /// let a = Atomic::new(1234);
1412
    /// let guard = &epoch::pin();
1413
    /// let p = a.load(SeqCst, guard);
1414
    /// unsafe {
1415
    ///     assert_eq!(p.deref(), &1234);
1416
    /// }
1417
    /// # unsafe { drop(a.into_owned()); } // avoid leak
1418
    /// ```
1419
3.33M
    pub unsafe fn deref(&self) -> &'g T {
1420
3.33M
        let (raw, _) = decompose_tag::<T>(self.data);
1421
3.33M
        T::deref(raw)
1422
3.33M
    }
Unexecuted instantiation: <crossbeam_epoch::atomic::Shared<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::deref
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::deref
Line
Count
Source
1419
3.21M
    pub unsafe fn deref(&self) -> &'g T {
1420
3.21M
        let (raw, _) = decompose_tag::<T>(self.data);
1421
3.21M
        T::deref(raw)
1422
3.21M
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::internal::Local>>::deref
Line
Count
Source
1419
57.0k
    pub unsafe fn deref(&self) -> &'g T {
1420
57.0k
        let (raw, _) = decompose_tag::<T>(self.data);
1421
57.0k
        T::deref(raw)
1422
57.0k
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::list::Entry>>::deref
Line
Count
Source
1419
57.0k
    pub unsafe fn deref(&self) -> &'g T {
1420
57.0k
        let (raw, _) = decompose_tag::<T>(self.data);
1421
57.0k
        T::deref(raw)
1422
57.0k
    }
1423
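
The safety note above recommends `Release`/`Acquire` instead of `Relaxed` when publishing an object that another thread will dereference. A minimal single-threaded sketch of that pairing, assuming the crossbeam-epoch API in this listing (the orderings only pay off once real concurrency is involved):

use crossbeam_epoch::{self as epoch, Atomic, Owned};
use std::sync::atomic::Ordering::{Acquire, Release};

fn main() {
    let a = Atomic::<i32>::null();
    let guard = &epoch::pin();
    // Writer side: initialize the object, then publish it with Release.
    a.store(Owned::new(10), Release);
    // Reader side: an Acquire load synchronizes with that store,
    // so dereferencing the published object is not a data race.
    let p = a.load(Acquire, guard);
    unsafe {
        assert_eq!(p.as_ref(), Some(&10));
        drop(a.into_owned()); // avoid leaking the allocation
    }
}
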
1424
    /// Dereferences the pointer.
1425
    ///
1426
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
1427
    ///
1428
    /// # Safety
1429
    ///
1430
    /// * There is no guarantee that there are no other threads attempting to read/write from/to the
1431
    ///   actual object at the same time.
1432
    ///
1433
    ///   The user must ensure that there are no concurrent accesses to the object itself.
1434
    ///
1435
    /// * Other than the above, all safety concerns of `deref()` apply here.
1436
    ///
1437
    /// # Examples
1438
    ///
1439
    /// ```
1440
    /// use crossbeam_epoch::{self as epoch, Atomic};
1441
    /// use std::sync::atomic::Ordering::SeqCst;
1442
    ///
1443
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
1444
    /// let guard = &epoch::pin();
1445
    ///
1446
    /// let mut p = a.load(SeqCst, guard);
1447
    /// unsafe {
1448
    ///     assert!(!p.is_null());
1449
    ///     let b = p.deref_mut();
1450
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
1451
    ///     b.push(5);
1452
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
1453
    /// }
1454
    ///
1455
    /// let p = a.load(SeqCst, guard);
1456
    /// unsafe {
1457
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
1458
    /// }
1459
    /// # unsafe { drop(a.into_owned()); } // avoid leak
1460
    /// ```
1461
0
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
1462
0
        let (raw, _) = decompose_tag::<T>(self.data);
1463
0
        T::deref_mut(raw)
1464
0
    }
1465
1466
    /// Converts the pointer to a reference.
1467
    ///
1468
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
1469
    ///
1470
    /// # Safety
1471
    ///
1472
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
1473
    ///
1474
    /// Another concern is the possibility of data races due to lack of proper synchronization.
1475
    /// For example, consider the following scenario:
1476
    ///
1477
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
1478
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
1479
    ///
1480
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
1481
    /// the read from the second thread. This is a data race. A possible solution would be to use
1482
    /// `Release` and `Acquire` orderings.
1483
    ///
1484
    /// # Examples
1485
    ///
1486
    /// ```
1487
    /// use crossbeam_epoch::{self as epoch, Atomic};
1488
    /// use std::sync::atomic::Ordering::SeqCst;
1489
    ///
1490
    /// let a = Atomic::new(1234);
1491
    /// let guard = &epoch::pin();
1492
    /// let p = a.load(SeqCst, guard);
1493
    /// unsafe {
1494
    ///     assert_eq!(p.as_ref(), Some(&1234));
1495
    /// }
1496
    /// # unsafe { drop(a.into_owned()); } // avoid leak
1497
    /// ```
1498
83.6M
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
1499
83.6M
        let (raw, _) = decompose_tag::<T>(self.data);
1500
83.6M
        if raw == 0 {
1501
4.85M
            None
1502
        } else {
1503
78.7M
            Some(T::deref(raw))
1504
        }
1505
83.6M
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::as_ref
Line
Count
Source
1498
3.21M
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
1499
3.21M
        let (raw, _) = decompose_tag::<T>(self.data);
1500
3.21M
        if raw == 0 {
1501
3.10M
            None
1502
        } else {
1503
112k
            Some(T::deref(raw))
1504
        }
1505
3.21M
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::list::Entry>>::as_ref
Line
Count
Source
1498
80.3M
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
1499
80.3M
        let (raw, _) = decompose_tag::<T>(self.data);
1500
80.3M
        if raw == 0 {
1501
1.74M
            None
1502
        } else {
1503
78.6M
            Some(T::deref(raw))
1504
        }
1505
80.3M
    }
1506
1507
    /// Takes ownership of the pointee.
1508
    ///
1509
    /// # Panics
1510
    ///
1511
    /// Panics if this pointer is null, but only in debug mode.
1512
    ///
1513
    /// # Safety
1514
    ///
1515
    /// This method may be called only if the pointer is valid and nobody else is holding a
1516
    /// reference to the same object.
1517
    ///
1518
    /// # Examples
1519
    ///
1520
    /// ```
1521
    /// use crossbeam_epoch::{self as epoch, Atomic};
1522
    /// use std::sync::atomic::Ordering::SeqCst;
1523
    ///
1524
    /// let a = Atomic::new(1234);
1525
    /// unsafe {
1526
    ///     let guard = &epoch::unprotected();
1527
    ///     let p = a.load(SeqCst, guard);
1528
    ///     drop(p.into_owned());
1529
    /// }
1530
    /// ```
1531
244k
    pub unsafe fn into_owned(self) -> Owned<T> {
1532
244k
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
1533
244k
        Owned::from_usize(self.data)
1534
244k
    }
<crossbeam_epoch::atomic::Shared<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>::into_owned
Line
Count
Source
1531
131k
    pub unsafe fn into_owned(self) -> Owned<T> {
1532
131k
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
1533
131k
        Owned::from_usize(self.data)
1534
131k
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>::into_owned
Line
Count
Source
1531
56.9k
    pub unsafe fn into_owned(self) -> Owned<T> {
1532
56.9k
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
1533
56.9k
        Owned::from_usize(self.data)
1534
56.9k
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::internal::Local>>::into_owned
Line
Count
Source
1531
56.9k
    pub unsafe fn into_owned(self) -> Owned<T> {
1532
56.9k
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
1533
56.9k
        Owned::from_usize(self.data)
1534
56.9k
    }
1535
1536
    /// Takes ownership of the pointee if it is not null.
1537
    ///
1538
    /// # Safety
1539
    ///
1540
    /// This method may be called only if the pointer is valid and nobody else is holding a
1541
    /// reference to the same object, or if the pointer is null.
1542
    ///
1543
    /// # Examples
1544
    ///
1545
    /// ```
1546
    /// use crossbeam_epoch::{self as epoch, Atomic};
1547
    /// use std::sync::atomic::Ordering::SeqCst;
1548
    ///
1549
    /// let a = Atomic::new(1234);
1550
    /// unsafe {
1551
    ///     let guard = &epoch::unprotected();
1552
    ///     let p = a.load(SeqCst, guard);
1553
    ///     if let Some(x) = p.try_into_owned() {
1554
    ///         drop(x);
1555
    ///     }
1556
    /// }
1557
    /// ```
1558
0
    pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
1559
0
        if self.is_null() {
1560
0
            None
1561
        } else {
1562
0
            Some(Owned::from_usize(self.data))
1563
        }
1564
0
    }
1565
1566
    /// Returns the tag stored within the pointer.
1567
    ///
1568
    /// # Examples
1569
    ///
1570
    /// ```
1571
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1572
    /// use std::sync::atomic::Ordering::SeqCst;
1573
    ///
1574
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
1575
    /// let guard = &epoch::pin();
1576
    /// let p = a.load(SeqCst, guard);
1577
    /// assert_eq!(p.tag(), 2);
1578
    /// # unsafe { drop(a.into_owned()); } // avoid leak
1579
    /// ```
1580
78.7M
    pub fn tag(&self) -> usize {
1581
78.7M
        let (_, tag) = decompose_tag::<T>(self.data);
1582
78.7M
        tag
1583
78.7M
    }
1584
1585
    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
1586
    /// unused bits of the pointer to `T`.
1587
    ///
1588
    /// # Examples
1589
    ///
1590
    /// ```
1591
    /// use crossbeam_epoch::{self as epoch, Atomic};
1592
    /// use std::sync::atomic::Ordering::SeqCst;
1593
    ///
1594
    /// let a = Atomic::new(0u64);
1595
    /// let guard = &epoch::pin();
1596
    /// let p1 = a.load(SeqCst, guard);
1597
    /// let p2 = p1.with_tag(2);
1598
    ///
1599
    /// assert_eq!(p1.tag(), 0);
1600
    /// assert_eq!(p2.tag(), 2);
1601
    /// assert_eq!(p1.as_raw(), p2.as_raw());
1602
    /// # unsafe { drop(a.into_owned()); } // avoid leak
1603
    /// ```
1604
70.0k
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
1605
70.0k
        unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
1606
70.0k
    }
1607
}
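
As a small sketch of how alignment bounds the usable tag space for `Shared` (consistent with the doc examples above and the tests at the end of this file): `with_tag` silently truncates the tag to the pointer's unused low bits.

use crossbeam_epoch::Shared;

fn main() {
    // u64 is at least 4-byte aligned on common targets, so a 2-bit tag survives.
    assert_eq!(Shared::<u64>::null().with_tag(2).tag(), 2);
    // i8 is 1-byte aligned: no free low bits, so any tag is truncated to 0.
    assert_eq!(Shared::<i8>::null().with_tag(1).tag(), 0);
}
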
1608
1609
impl<T> From<*const T> for Shared<'_, T> {
1610
    /// Returns a new pointer pointing to `raw`.
1611
    ///
1612
    /// # Panics
1613
    ///
1614
    /// Panics if `raw` is not properly aligned.
1615
    ///
1616
    /// # Examples
1617
    ///
1618
    /// ```
1619
    /// use crossbeam_epoch::Shared;
1620
    ///
1621
    /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
1622
    /// assert!(!p.is_null());
1623
    /// # unsafe { drop(p.into_owned()); } // avoid leak
1624
    /// ```
1625
114k
    fn from(raw: *const T) -> Self {
1626
114k
        let raw = raw as usize;
1627
114k
        ensure_aligned::<T>(raw);
1628
114k
        unsafe { Self::from_usize(raw) }
1629
114k
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::internal::Local> as core::convert::From<*const crossbeam_epoch::internal::Local>>::from
Line
Count
Source
1625
57.0k
    fn from(raw: *const T) -> Self {
1626
57.0k
        let raw = raw as usize;
1627
57.0k
        ensure_aligned::<T>(raw);
1628
57.0k
        unsafe { Self::from_usize(raw) }
1629
57.0k
    }
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::list::Entry> as core::convert::From<*const crossbeam_epoch::sync::list::Entry>>::from
Line
Count
Source
1625
57.0k
    fn from(raw: *const T) -> Self {
1626
57.0k
        let raw = raw as usize;
1627
57.0k
        ensure_aligned::<T>(raw);
1628
57.0k
        unsafe { Self::from_usize(raw) }
1629
57.0k
    }
1630
}
1631
1632
impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
1633
57.0k
    fn eq(&self, other: &Self) -> bool {
1634
57.0k
        self.data == other.data
1635
57.0k
    }
Unexecuted instantiation: <crossbeam_epoch::atomic::Shared<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>> as core::cmp::PartialEq>::eq
<crossbeam_epoch::atomic::Shared<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>> as core::cmp::PartialEq>::eq
Line
Count
Source
1633
57.0k
    fn eq(&self, other: &Self) -> bool {
1634
57.0k
        self.data == other.data
1635
57.0k
    }
1636
}
1637
1638
impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}
1639
1640
impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
1641
0
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
1642
0
        self.data.partial_cmp(&other.data)
1643
0
    }
1644
}
1645
1646
impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
1647
0
    fn cmp(&self, other: &Self) -> cmp::Ordering {
1648
0
        self.data.cmp(&other.data)
1649
0
    }
1650
}
1651
1652
impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
1653
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1654
0
        let (raw, tag) = decompose_tag::<T>(self.data);
1655
1656
0
        f.debug_struct("Shared")
1657
0
            .field("raw", &raw)
1658
0
            .field("tag", &tag)
1659
0
            .finish()
1660
0
    }
1661
}
1662
1663
impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
1664
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1665
0
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
1666
0
    }
1667
}
1668
1669
impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
1670
0
    fn default() -> Self {
1671
0
        Shared::null()
1672
0
    }
1673
}
1674
1675
#[cfg(all(test, not(crossbeam_loom)))]
1676
mod tests {
1677
    use super::{Owned, Shared};
1678
    use std::mem::MaybeUninit;
1679
1680
    #[test]
1681
    fn valid_tag_i8() {
1682
        Shared::<i8>::null().with_tag(0);
1683
    }
1684
1685
    #[test]
1686
    fn valid_tag_i64() {
1687
        Shared::<i64>::null().with_tag(7);
1688
    }
1689
1690
    #[test]
1691
    fn const_atomic_null() {
1692
        use super::Atomic;
1693
        static _U: Atomic<u8> = Atomic::<u8>::null();
1694
    }
1695
1696
    #[test]
1697
    fn array_init() {
1698
        let owned = Owned::<[MaybeUninit<usize>]>::init(10);
1699
        let arr: &[MaybeUninit<usize>] = &owned;
1700
        assert_eq!(arr.len(), 10);
1701
    }
1702
}