Coverage Report

Created: 2025-12-28 06:31

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/rust/registry/src/index.crates.io-1949cf8c6b5b557f/seize-0.3.3/src/collector.rs
Line
Count
Source
1
use crate::raw;
2
use crate::tls::Thread;
3
4
use std::cell::UnsafeCell;
5
use std::marker::PhantomData;
6
use std::num::NonZeroU64;
7
use std::sync::atomic::{AtomicPtr, Ordering};
8
use std::{fmt, ptr};
9
10
/// Fast, efficient, and robust memory reclamation.
11
///
12
/// See the [crate documentation](crate) for details.
13
pub struct Collector {
14
    raw: raw::Collector,
15
    unique: *mut u8,
16
}
17
18
unsafe impl Send for Collector {}
19
unsafe impl Sync for Collector {}
20
21
impl Collector {
22
    const DEFAULT_RETIRE_TICK: usize = 120;
23
    const DEFAULT_EPOCH_TICK: NonZeroU64 = unsafe { NonZeroU64::new_unchecked(110) };
24
25
    /// Creates a new collector.
26
0
    pub fn new() -> Self {
27
0
        let cpus = std::thread::available_parallelism()
28
0
            .map(Into::into)
29
0
            .unwrap_or(1);
30
31
0
        Self {
32
0
            raw: raw::Collector::new(cpus, Self::DEFAULT_EPOCH_TICK, Self::DEFAULT_RETIRE_TICK),
33
0
            unique: Box::into_raw(Box::new(0)),
34
0
        }
35
0
    }
36
37
    /// Sets the frequency of epoch advancement.
38
    ///
39
    /// Seize uses epochs to protect against stalled threads.
40
    /// The more frequently the epoch is advanced, the faster
41
    /// stalled threads can be detected. However, it also means
42
    /// that threads will have to do work to catch up to the
43
    /// current epoch more often.
44
    ///
45
    /// The default epoch frequency is `110`, meaning that
46
    /// the epoch will advance after every 110 values are
47
    /// linked to the collector. Benchmarking has shown that
48
    /// this is a good tradeoff between throughput and memory
49
    /// efficiency.
50
    ///
51
    /// If `None` is passed epoch tracking, and protection
52
    /// against stalled threads, will be disabled completely.
53
0
    pub fn epoch_frequency(mut self, n: Option<NonZeroU64>) -> Self {
54
0
        self.raw.epoch_frequency = n;
55
0
        self
56
0
    }
57
58
    /// Sets the number of values that must be in a batch
59
    /// before reclamation is attempted.
60
    ///
61
    /// Retired values are added to thread-local *batches*
62
    /// before starting the reclamation process. After
63
    /// `batch_size` is hit, values are moved to separate
64
    /// *retirement lists*, where reference counting kicks
65
    /// in and batches are eventually reclaimed.
66
    ///
67
    /// A larger batch size means that deallocation is done
68
    /// less frequently, but reclamation also becomes more
69
    /// expensive due to longer retirement lists needing
70
    /// to be traversed and freed.
71
    ///
72
    /// Note that batch sizes should generally be larger
73
    /// than the number of threads accessing objects.
74
    ///
75
    /// The default batch size is `120`. Tests have shown that
76
    /// this makes a good tradeoff between throughput and memory
77
    /// efficiency.
78
0
    pub fn batch_size(mut self, n: usize) -> Self {
79
0
        self.raw.batch_size = n;
80
0
        self
81
0
    }
82
83
    /// Marks the current thread as active, returning a guard
84
    /// that allows protecting loads of atomic pointers. The thread
85
    /// will be marked as inactive when the guard is dropped.
86
    ///
87
    /// See [the guide](crate#starting-operations) for an introduction
88
    /// to using guards.
89
    ///
90
    /// # Examples
91
    ///
92
    /// ```rust
93
    /// # use std::sync::atomic::{AtomicPtr, Ordering};
94
    /// # let collector = seize::Collector::new();
95
    /// use seize::{reclaim, Linked};
96
    ///
97
    /// let ptr = AtomicPtr::new(collector.link_boxed(1_usize));
98
    ///
99
    /// let guard = collector.enter();
100
    /// let value = guard.protect(&ptr, Ordering::Acquire);
101
    /// unsafe { assert_eq!(**value, 1) }
102
    /// # unsafe { guard.defer_retire(value, reclaim::boxed::<Linked<usize>>) };
103
    /// ```
104
    ///
105
    /// Note that `enter` is reentrant, and it is legal to create
106
    /// multiple guards on the same thread. The thread will stay
107
    /// marked as active until the last guard is dropped:
108
    ///
109
    /// ```rust
110
    /// # use std::sync::atomic::{AtomicPtr, Ordering};
111
    /// # let collector = seize::Collector::new();
112
    /// use seize::{reclaim, Linked};
113
    ///
114
    /// let ptr = AtomicPtr::new(collector.link_boxed(1_usize));
115
    ///
116
    /// let guard1 = collector.enter();
117
    /// let guard2 = collector.enter();
118
    ///
119
    /// let value = guard2.protect(&ptr, Ordering::Acquire);
120
    /// drop(guard1);
121
    /// // the first guard is dropped, but `value`
122
    /// // is still safe to access as a guard still
123
    /// // exists
124
    /// unsafe { assert_eq!(**value, 1) }
125
    /// # unsafe { guard2.defer_retire(value, reclaim::boxed::<Linked<usize>>) };
126
    /// drop(guard2) // _now_, the thread is marked as inactive
127
    /// ```
128
0
    pub fn enter(&self) -> Guard<'_> {
129
0
        let thread = Thread::current();
130
0
        unsafe { self.raw.enter(thread) };
131
132
0
        Guard {
133
0
            thread,
134
0
            collector: Some(self),
135
0
            _unsend: PhantomData,
136
0
        }
137
0
    }
138
139
    /// Link a value to the collector.
140
    ///
141
    /// See [the guide](crate#allocating-objects) for details.
142
0
    pub fn link(&self) -> Link {
143
0
        Link {
144
0
            node: UnsafeCell::new(self.raw.node()),
145
0
        }
146
0
    }
147
148
    /// Creates a new `Linked` object with the given value.
149
    ///
150
    /// This is equivalent to:
151
    ///
152
    /// ```ignore
153
    /// Linked {
154
    ///     value,
155
    ///     link: collector.link()
156
    /// }
157
    /// ```
158
0
    pub fn link_value<T>(&self, value: T) -> Linked<T> {
159
0
        Linked {
160
0
            link: self.link(),
161
0
            value,
162
0
        }
163
0
    }
164
165
    /// Links a value to the collector and allocates it with `Box`.
166
    ///
167
    /// This is equivalent to:
168
    ///
169
    /// ```ignore
170
    /// Box::into_raw(Box::new(Linked {
171
    ///     value,
172
    ///     link: collector.link()
173
    /// }))
174
    /// ```
175
0
    pub fn link_boxed<T>(&self, value: T) -> *mut Linked<T> {
176
0
        Box::into_raw(Box::new(Linked {
177
0
            link: self.link(),
178
0
            value,
179
0
        }))
180
0
    }
Unexecuted instantiation: <seize::collector::Collector>::link_boxed::<core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>
Unexecuted instantiation: <seize::collector::Collector>::link_boxed::<flurry::raw::Table<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>
Unexecuted instantiation: <seize::collector::Collector>::link_boxed::<flurry::node::BinEntry<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>
Unexecuted instantiation: <seize::collector::Collector>::link_boxed::<std::thread::Thread>
Unexecuted instantiation: <seize::collector::Collector>::link_boxed::<_>
181
182
    /// Retires a value, running `reclaim` when no threads hold a reference to it.
183
    ///
184
    /// Note that this method is disconnected from any guards on the current thread,
185
    /// so the pointer may be reclaimed immediately. See [`Guard::defer_retire`] if
186
    /// the pointer may still be accessed by the current thread.
187
    ///
188
    /// See [the guide](crate#retiring-objects) for details.
189
    ///
190
    /// # Safety
191
    ///
192
    /// The retired object must no longer be accessible to any thread that enters
193
    /// after it is removed. It also cannot be accessed by the current thread
194
    /// after `retire` is called.
195
    ///
196
    /// Additionally, the reclaimer passed to `retire` must correctly deallocate values of type `T`.
197
    #[allow(clippy::missing_safety_doc)] // in guide
198
0
    pub unsafe fn retire<T: AsLink>(&self, ptr: *mut T, reclaim: unsafe fn(*mut Link)) {
199
0
        debug_assert!(!ptr.is_null(), "attempted to retire null pointer");
200
201
        // note that `add` doesn't actually reclaim the pointer immediately if the
202
        // current thread is active, it instead adds it to it's reclamation list,
203
        // but we don't guarantee that publicly.
204
0
        unsafe { self.raw.add(ptr, reclaim, Thread::current()) }
205
0
    }
206
207
    /// Returns true if both references point to the same collector.
208
0
    pub fn ptr_eq(this: &Collector, other: &Collector) -> bool {
209
0
        ptr::eq(this.unique, other.unique)
210
0
    }
211
}
212
213
impl Drop for Collector {
214
0
    fn drop(&mut self) {
215
0
        unsafe {
216
0
            let _ = Box::from_raw(self.unique);
217
0
        }
218
0
    }
219
}
220
221
impl Clone for Collector {
222
0
    fn clone(&self) -> Self {
223
0
        Collector::new()
224
0
            .batch_size(self.raw.batch_size)
225
0
            .epoch_frequency(self.raw.epoch_frequency)
226
0
    }
227
}
228
229
impl Default for Collector {
230
0
    fn default() -> Self {
231
0
        Self::new()
232
0
    }
233
}
234
235
impl fmt::Debug for Collector {
236
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
237
0
        let mut strukt = f.debug_struct("Collector");
238
239
0
        if self.raw.epoch_frequency.is_some() {
240
0
            strukt.field("epoch", &self.raw.epoch.load(Ordering::Acquire));
241
0
        }
242
243
0
        strukt
244
0
            .field("batch_size", &self.raw.batch_size)
245
0
            .field("epoch_frequency", &self.raw.epoch_frequency)
246
0
            .finish()
247
0
    }
248
}
249
250
/// A guard that keeps the current thread marked as active,
251
/// enabling protected loads of atomic pointers.
252
///
253
/// See [`Collector::enter`] for details.
254
pub struct Guard<'a> {
255
    collector: Option<&'a Collector>,
256
    thread: Thread,
257
    // must not be Send or Sync as we are tied to the current threads state in
258
    // the collector
259
    _unsend: PhantomData<*mut ()>,
260
}
261
262
impl Guard<'_> {
263
    /// Returns a dummy guard.
264
    ///
265
    /// Calling [`protect`](Guard::protect) on an unprotected guard will
266
    /// load the pointer directly, and [`retire`](Guard::defer_retire) will
267
    /// reclaim objects immediately.
268
    ///
269
    /// Unprotected guards are useful when calling guarded functions
270
    /// on a data structure that has just been created or is about
271
    /// to be destroyed, because you know that no other thread holds
272
    /// a reference to it.
273
    ///
274
    /// # Safety
275
    ///
276
    /// You must ensure that code used with this guard is sound with
277
    /// the unprotected behavior described above.
278
0
    pub const unsafe fn unprotected() -> Guard<'static> {
279
0
        Guard {
280
0
            thread: Thread::EMPTY,
281
0
            collector: None,
282
0
            _unsend: PhantomData,
283
0
        }
284
0
    }
285
286
    /// Protects the load of an atomic pointer.
287
    ///
288
    /// See [the guide](crate#protecting-pointers) for details.
289
    #[inline]
290
0
    pub fn protect<T: AsLink>(&self, ptr: &AtomicPtr<T>, ordering: Ordering) -> *mut T {
291
0
        match self.collector {
292
0
            Some(collector) => unsafe { collector.raw.protect(ptr, ordering, self.thread) },
293
            // unprotected guard
294
0
            None => ptr.load(ordering),
295
        }
296
0
    }
Unexecuted instantiation: <seize::collector::Guard>::protect::<seize::collector::Linked<core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>
Unexecuted instantiation: <seize::collector::Guard>::protect::<seize::collector::Linked<flurry::raw::Table<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>>
Unexecuted instantiation: <seize::collector::Guard>::protect::<seize::collector::Linked<flurry::node::BinEntry<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>>
Unexecuted instantiation: <seize::collector::Guard>::protect::<seize::collector::Linked<std::thread::Thread>>
Unexecuted instantiation: <seize::collector::Guard>::protect::<_>
297
298
    /// Retires a value, running `reclaim` when no threads hold a reference to it.
299
    ///
300
    /// This method delays reclamation until the guard is dropped as opposed to
301
    /// [`Collector::retire`], which may reclaim objects immediately.
302
    ///
303
    /// See [the guide](crate#retiring-objects) for details.
304
    #[allow(clippy::missing_safety_doc)] // in guide
305
0
    pub unsafe fn defer_retire<T: AsLink>(&self, ptr: *mut T, reclaim: unsafe fn(*mut Link)) {
306
0
        debug_assert!(!ptr.is_null(), "attempted to retire null pointer");
307
308
0
        match self.collector {
309
0
            Some(collector) => unsafe { collector.raw.add(ptr, reclaim, self.thread) },
310
            // unprotected guard
311
0
            None => unsafe { (reclaim)(ptr.cast::<Link>()) },
312
        }
313
0
    }
Unexecuted instantiation: <seize::collector::Guard>::defer_retire::<seize::collector::Linked<core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>
Unexecuted instantiation: <seize::collector::Guard>::defer_retire::<seize::collector::Linked<flurry::raw::Table<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>>
Unexecuted instantiation: <seize::collector::Guard>::defer_retire::<seize::collector::Linked<flurry::node::BinEntry<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>>
Unexecuted instantiation: <seize::collector::Guard>::defer_retire::<seize::collector::Linked<std::thread::Thread>>
Unexecuted instantiation: <seize::collector::Guard>::defer_retire::<_>
314
315
    /// Get a reference to the collector this guard was created from.
316
    ///
317
    /// This method is useful when you need to ensure that all guards
318
    /// used with a data structure come from the same collector.
319
    ///
320
    /// If this is an [`unprotected`](Guard::unprotected) guard
321
    /// this method will return `None`.
322
0
    pub fn collector(&self) -> Option<&Collector> {
323
0
        self.collector
324
0
    }
325
326
    /// Refreshes the guard.
327
    ///
328
    /// Refreshing a guard is similar to dropping and immediately
329
    /// creating a new guard. The current thread remains active, but any
330
    /// pointers that were previously protected may be reclaimed.
331
    ///
332
    /// # Safety
333
    ///
334
    /// This method is not marked as `unsafe`, but will affect
335
    /// the validity of pointers returned by [`protect`](Guard::protect),
336
    /// similar to dropping a guard. It is intended to be used safely
337
    /// by users of concurrent data structures, as references will
338
    /// be tied to the guard and this method takes `&mut self`.
339
    ///
340
    /// If this is an [`unprotected`](Guard::unprotected) guard
341
    /// this method will be a no-op.
342
0
    pub fn refresh(&mut self) {
343
0
        match self.collector {
344
0
            None => {}
345
0
            Some(collector) => unsafe { collector.raw.refresh(self.thread) },
346
        }
347
0
    }
348
349
    /// Flush any retired values in the local batch.
350
    ///
351
    /// This method flushes any values from the current thread's local
352
    /// batch, starting the reclamation process. Note that no memory
353
    /// can be reclaimed while this guard is active, but calling `flush`
354
    /// may allow memory to be reclaimed more quickly after the guard is
355
    /// dropped.
356
    ///
357
    /// See [`Collector::batch_size`] for details about batching.
358
0
    pub fn flush(&self) {
359
0
        if let Some(collector) = self.collector {
360
0
            unsafe { collector.raw.try_retire_batch(self.thread) }
361
0
        }
362
0
    }
363
364
    /// Returns a numeric identifier for the current thread.
365
    ///
366
    /// Guards rely on thread-local state, including thread IDs. If you already
367
    /// have a guard you can use this method to get a cheap identifier for the
368
    /// current thread, avoiding TLS overhead. Note that thread IDs may be reused,
369
    /// so the value returned is only unique for the lifetime of this thread.
370
0
    pub fn thread_id(&self) -> usize {
371
0
        self.thread.id
372
0
    }
373
}
374
375
impl Drop for Guard<'_> {
376
0
    fn drop(&mut self) {
377
0
        if let Some(collector) = self.collector {
378
0
            unsafe { collector.raw.leave(self.thread) }
379
0
        }
380
0
    }
381
}
382
383
impl fmt::Debug for Guard<'_> {
384
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
385
0
        f.debug_tuple("Guard").finish()
386
0
    }
387
}
388
389
/// A link to the collector.
390
///
391
/// See [the guide](crate#custom-reclaimers) for details.
392
#[repr(transparent)]
393
pub struct Link {
394
    #[allow(dead_code)]
395
    pub(crate) node: UnsafeCell<raw::Node>,
396
}
397
398
impl Link {
399
    /// Cast this `link` to its underlying type.
400
    ///
401
    /// Note that while this function is safe, using the returned
402
    /// pointer is only sound if the link is in fact a type-erased `T`.
403
    /// This means that when casting a link in a reclaimer, the value
404
    /// that was retired must be of type `T`.
405
0
    pub fn cast<T: AsLink>(link: *mut Link) -> *mut T {
406
0
        link.cast()
407
0
    }
Unexecuted instantiation: <seize::collector::Link>::cast::<seize::collector::Linked<core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>
Unexecuted instantiation: <seize::collector::Link>::cast::<seize::collector::Linked<flurry::raw::Table<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>>
Unexecuted instantiation: <seize::collector::Link>::cast::<seize::collector::Linked<flurry::node::BinEntry<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>>>
Unexecuted instantiation: <seize::collector::Link>::cast::<seize::collector::Linked<std::thread::Thread>>
Unexecuted instantiation: <seize::collector::Link>::cast::<_>
408
}
409
410
/// A type that can be pointer-cast to and from a [`Link`].
411
///
412
/// Most reclamation use cases can avoid this trait and work instead
413
/// with the [`Linked`] wrapper type. However, if you want more control
414
/// over the layout of your type (i.e. are working with a DST),
415
/// you may need to implement this trait directly.
416
///
417
/// # Safety
418
///
419
/// Types implementing this trait must be marked `#[repr(C)]`
420
/// and have a [`Link`] as their **first** field.
421
///
422
/// # Examples
423
///
424
/// ```rust
425
/// use seize::{AsLink, Collector, Link};
426
///
427
/// #[repr(C)]
428
/// struct Bytes {
429
///     // safety invariant: Link must be the first field
430
///     link: Link,
431
///     values: [*mut u8; 0],
432
/// }
433
///
434
/// // Safety: Bytes is repr(C) and has Link as its first field
435
/// unsafe impl AsLink for Bytes {}
436
///
437
/// // Deallocate a `Bytes`.
438
/// unsafe fn dealloc(ptr: *mut Bytes, collector: &Collector) {
439
///     collector.retire(ptr, |link| {
440
///         // safety `ptr` is of type *mut Bytes
441
///         let link: *mut Bytes = Link::cast(link);
442
///         // ..
443
///     });
444
/// }
445
/// ```
446
pub unsafe trait AsLink {}
447
448
/// A value [linked](Collector::link) to a collector.
449
///
450
/// This type implements `Deref` and `DerefMut` to the
451
/// inner value, so you can access methods on fields
452
/// on it as normal. An extra `*` may be needed when
453
/// `T` needs to be accessed directly.
454
///
455
/// See [the guide](crate#allocating-objects) for details.
456
#[repr(C)]
457
pub struct Linked<T> {
458
    pub link: Link, // Safety Invariant: this field must come first
459
    pub value: T,
460
}
461
462
unsafe impl<T> AsLink for Linked<T> {}
463
464
impl<T: PartialEq> PartialEq for Linked<T> {
465
0
    fn eq(&self, other: &Self) -> bool {
466
0
        self.value == other.value
467
0
    }
468
}
469
470
impl<T: Eq> Eq for Linked<T> {}
471
472
impl<T: fmt::Debug> fmt::Debug for Linked<T> {
473
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
474
0
        write!(f, "{:?}", self.value)
475
0
    }
476
}
477
478
impl<T: fmt::Display> fmt::Display for Linked<T> {
479
0
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
480
0
        write!(f, "{}", self.value)
481
0
    }
482
}
483
484
impl<T> std::ops::Deref for Linked<T> {
485
    type Target = T;
486
487
0
    fn deref(&self) -> &Self::Target {
488
0
        &self.value
489
0
    }
Unexecuted instantiation: <seize::collector::Linked<core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <seize::collector::Linked<flurry::raw::Table<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <seize::collector::Linked<flurry::node::BinEntry<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <seize::collector::Linked<std::thread::Thread> as core::ops::deref::Deref>::deref
Unexecuted instantiation: <seize::collector::Linked<_> as core::ops::deref::Deref>::deref
490
}
491
492
impl<T> std::ops::DerefMut for Linked<T> {
493
0
    fn deref_mut(&mut self) -> &mut Self::Target {
494
0
        &mut self.value
495
0
    }
Unexecuted instantiation: <seize::collector::Linked<flurry::raw::Table<u64, core::option::Option<alloc::sync::Arc<tokio::sync::mutex::Mutex<()>>>>> as core::ops::deref::DerefMut>::deref_mut
Unexecuted instantiation: <seize::collector::Linked<_> as core::ops::deref::DerefMut>::deref_mut
496
}