Coverage Report

Created: 2025-03-07 06:49

/rust/registry/src/index.crates.io-6f17d22bba15001f/spin-0.9.8/src/once.rs
Line
Count
Source
1
//! Synchronization primitives for one-time evaluation.
2
3
use crate::{
4
    atomic::{AtomicU8, Ordering},
5
    RelaxStrategy, Spin,
6
};
7
use core::{cell::UnsafeCell, fmt, marker::PhantomData, mem::MaybeUninit};
8
9
/// A primitive that provides lazy one-time initialization.
10
///
11
/// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a
12
/// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with
13
/// `Once`).
14
///
15
/// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics.
16
///
17
/// # Examples
18
///
19
/// ```
20
/// use spin;
21
///
22
/// static START: spin::Once = spin::Once::new();
23
///
24
/// START.call_once(|| {
25
///     // run initialization here
26
/// });
27
/// ```
28
pub struct Once<T = (), R = Spin> {
29
    phantom: PhantomData<R>,
30
    status: AtomicStatus,
31
    data: UnsafeCell<MaybeUninit<T>>,
32
}
33
34
impl<T, R> Default for Once<T, R> {
35
0
    fn default() -> Self {
36
0
        Self::new()
37
0
    }
38
}
39
40
impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
41
0
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
42
0
        match self.get() {
43
0
            Some(s) => write!(f, "Once {{ data: ")
44
0
                .and_then(|()| s.fmt(f))
45
0
                .and_then(|()| write!(f, "}}")),
46
0
            None => write!(f, "Once {{ <uninitialized> }}"),
47
        }
48
0
    }
49
}
50
51
// Same unsafe impls as `std::sync::RwLock`, because this also allows for
52
// concurrent reads.
53
unsafe impl<T: Send + Sync, R> Sync for Once<T, R> {}
54
unsafe impl<T: Send, R> Send for Once<T, R> {}
55
56
mod status {
57
    use super::*;
58
59
    // SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have
60
    // a value for which there exists a valid Status. This means that users of this API must only
61
    // be allowed to load and store `Status`es.
62
    #[repr(transparent)]
63
    pub struct AtomicStatus(AtomicU8);
64
65
    // Four states that a Once can be in, encoded into the lower bits of `status` in
66
    // the Once structure.
67
    #[repr(u8)]
68
    #[derive(Clone, Copy, Debug, PartialEq)]
69
    pub enum Status {
70
        Incomplete = 0x00,
71
        Running = 0x01,
72
        Complete = 0x02,
73
        Panicked = 0x03,
74
    }
75
    impl Status {
76
        // Construct a status from an inner u8 integer.
77
        //
78
        // # Safety
79
        //
80
        // For this to be safe, the inner number must have a valid corresponding enum variant.
81
0
        unsafe fn new_unchecked(inner: u8) -> Self {
82
0
            core::mem::transmute(inner)
83
0
        }
84
    }
85
86
    impl AtomicStatus {
87
        #[inline(always)]
88
0
        pub const fn new(status: Status) -> Self {
89
0
            // SAFETY: We got the value directly from status, so transmuting back is fine.
90
0
            Self(AtomicU8::new(status as u8))
91
0
        }
92
        #[inline(always)]
93
0
        pub fn load(&self, ordering: Ordering) -> Status {
94
0
            // SAFETY: We know that the inner integer must have been constructed from a Status in
95
0
            // the first place.
96
0
            unsafe { Status::new_unchecked(self.0.load(ordering)) }
97
0
        }
98
        #[inline(always)]
99
0
        pub fn store(&self, status: Status, ordering: Ordering) {
100
0
            // SAFETY: While not directly unsafe, this is safe because the value was retrieved from
101
0
            // a status, thus making transmutation safe.
102
0
            self.0.store(status as u8, ordering);
103
0
        }
104
        #[inline(always)]
105
0
        pub fn compare_exchange(
106
0
            &self,
107
0
            old: Status,
108
0
            new: Status,
109
0
            success: Ordering,
110
0
            failure: Ordering,
111
0
        ) -> Result<Status, Status> {
112
0
            match self
113
0
                .0
114
0
                .compare_exchange(old as u8, new as u8, success, failure)
115
            {
116
                // SAFETY: A compare exchange will always return a value that was later stored into
117
                // the atomic u8, but due to the invariant that it must be a valid Status, we know
118
                // that both Ok(_) and Err(_) will be safely transmutable.
119
0
                Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
120
0
                Err(err) => Err(unsafe { Status::new_unchecked(err) }),
121
            }
122
0
        }
123
        #[inline(always)]
124
0
        pub fn get_mut(&mut self) -> &mut Status {
125
0
            // SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast
126
0
            // it to a &mut Status.
127
0
            unsafe { &mut *((self.0.get_mut() as *mut u8).cast::<Status>()) }
128
0
        }
129
    }
130
}
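The module above rests on a single invariant: the `AtomicU8` only ever holds bytes that came from a `Status`, which is what makes the unchecked transmute on load sound. As a point of comparison, this is a minimal standalone sketch (not part of this file; names mirror the ones above) of the same idea with the transmute replaced by an explicit `match`:

use core::sync::atomic::{AtomicU8, Ordering};

#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum Status { Incomplete = 0x00, Running = 0x01, Complete = 0x02, Panicked = 0x03 }

struct AtomicStatus(AtomicU8);

impl AtomicStatus {
    const fn new(status: Status) -> Self {
        Self(AtomicU8::new(status as u8))
    }

    fn load(&self, ordering: Ordering) -> Status {
        // Decode the byte with a match instead of a transmute; the `unreachable!`
        // arm spells out the same invariant the SAFETY comments above rely on.
        match self.0.load(ordering) {
            0x00 => Status::Incomplete,
            0x01 => Status::Running,
            0x02 => Status::Complete,
            0x03 => Status::Panicked,
            _ => unreachable!("status byte was not stored from a Status"),
        }
    }

    fn store(&self, status: Status, ordering: Ordering) {
        self.0.store(status as u8, ordering);
    }
}

fn main() {
    let status = AtomicStatus::new(Status::Incomplete);
    status.store(Status::Complete, Ordering::Release);
    assert_eq!(status.load(Ordering::Acquire), Status::Complete);
}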
131
use self::status::{AtomicStatus, Status};
132
133
impl<T, R: RelaxStrategy> Once<T, R> {
134
    /// Performs an initialization routine once and only once. The given closure
135
    /// will be executed if this is the first time `call_once` has been called,
136
    /// and otherwise the routine will *not* be invoked.
137
    ///
138
    /// This method will block the calling thread if another initialization
139
    /// routine is currently running.
140
    ///
141
    /// When this function returns, it is guaranteed that some initialization
142
    /// has run and completed (it may not be the closure specified). The
143
    /// returned pointer will point to the result from the closure that was
144
    /// run.
145
    ///
146
    /// # Panics
147
    ///
148
    /// This function will panic if the [`Once`] previously panicked while attempting
149
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
150
    /// primitives.
151
    ///
152
    /// # Examples
153
    ///
154
    /// ```
155
    /// use spin;
156
    ///
157
    /// static INIT: spin::Once<usize> = spin::Once::new();
158
    ///
159
    /// fn get_cached_val() -> usize {
160
    ///     *INIT.call_once(expensive_computation)
161
    /// }
162
    ///
163
    /// fn expensive_computation() -> usize {
164
    ///     // ...
165
    /// # 2
166
    /// }
167
    /// ```
168
0
    pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
169
0
        match self.try_call_once(|| Ok::<T, core::convert::Infallible>(f())) {
170
0
            Ok(x) => x,
171
0
            Err(void) => match void {},
172
0
        }
173
0
    }
174
175
    /// This method is similar to `call_once`, but allows the given closure to
176
    /// fail, and leaves the `Once` in an uninitialized state if it does.
177
    ///
178
    /// This method will block the calling thread if another initialization
179
    /// routine is currently running.
180
    ///
181
    /// When this function returns without error, it is guaranteed that some
182
    /// initialization has run and completed (it may not be the closure
183
    /// specified). The returned reference will point to the result from the
184
    /// closure that was run.
185
    ///
186
    /// # Panics
187
    ///
188
    /// This function will panic if the [`Once`] previously panicked while attempting
189
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
190
    /// primitives.
191
    ///
192
    /// # Examples
193
    ///
194
    /// ```
195
    /// use spin;
196
    ///
197
    /// static INIT: spin::Once<usize> = spin::Once::new();
198
    ///
199
    /// fn get_cached_val() -> Result<usize, String> {
200
    ///     INIT.try_call_once(expensive_fallible_computation).map(|x| *x)
201
    /// }
202
    ///
203
    /// fn expensive_fallible_computation() -> Result<usize, String> {
204
    ///     // ...
205
    /// # Ok(2)
206
    /// }
207
    /// ```
208
0
    pub fn try_call_once<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
209
0
        if let Some(value) = self.get() {
210
0
            Ok(value)
211
        } else {
212
0
            self.try_call_once_slow(f)
213
        }
214
0
    }
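A minimal sketch of the error path described above (the function name, error strings, and values are illustrative, and the crate's default `Spin` relax strategy is assumed): an `Err` from the closure stores nothing and leaves the `Once` uninitialized, so a later call may try again.

fn try_call_once_retry_demo() {
    let once: spin::Once<u32> = spin::Once::new();

    // The failing closure stores nothing, so the `Once` stays uninitialized.
    let first = once.try_call_once(|| Err::<u32, &str>("backend not ready"));
    assert_eq!(first, Err("backend not ready"));
    assert!(once.get().is_none());

    // A later attempt is free to succeed.
    assert_eq!(once.try_call_once(|| Ok::<u32, &str>(7)), Ok(&7));
    assert_eq!(once.get(), Some(&7));
}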
215
216
    #[cold]
217
0
    fn try_call_once_slow<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
218
        loop {
219
0
            let xchg = self.status.compare_exchange(
220
0
                Status::Incomplete,
221
0
                Status::Running,
222
0
                Ordering::Acquire,
223
0
                Ordering::Acquire,
224
0
            );
225
226
0
            match xchg {
227
0
                Ok(_must_be_state_incomplete) => {
228
0
                    // Impl is defined after the match for readability
229
0
                }
230
0
                Err(Status::Panicked) => panic!("Once panicked"),
231
0
                Err(Status::Running) => match self.poll() {
232
0
                    Some(v) => return Ok(v),
233
0
                    None => continue,
234
                },
235
                Err(Status::Complete) => {
236
0
                    return Ok(unsafe {
237
0
                        // SAFETY: The status is Complete
238
0
                        self.force_get()
239
0
                    });
240
                }
241
                Err(Status::Incomplete) => {
242
                    // The compare_exchange failed, so this shouldn't ever be reached,
243
                    // however if we decide to switch to compare_exchange_weak it will
244
                    // be safer to leave this here than hit an unreachable
245
0
                    continue;
246
                }
247
            }
248
249
            // The compare-exchange succeeded, so we shall initialize it.
250
251
            // We use a guard (Finish) to catch panics caused by builder
252
0
            let finish = Finish {
253
0
                status: &self.status,
254
0
            };
255
0
            let val = match f() {
256
0
                Ok(val) => val,
257
0
                Err(err) => {
258
0
                    // If an error occurs, clean up everything and leave.
259
0
                    core::mem::forget(finish);
260
0
                    self.status.store(Status::Incomplete, Ordering::Release);
261
0
                    return Err(err);
262
                }
263
            };
264
0
            unsafe {
265
0
                // SAFETY:
266
0
                // `UnsafeCell`/deref: currently the only accessor, mutably
267
0
                // and immutably by cas exclusion.
268
0
                // `write`: pointer comes from `MaybeUninit`.
269
0
                (*self.data.get()).as_mut_ptr().write(val);
270
0
            };
271
0
            // If there were to be a panic with unwind enabled, the code would
272
0
            // short-circuit and never reach the point where it writes the inner data.
273
0
            // The destructor for Finish will run, and poison the Once to ensure that other
274
0
            // threads accessing it do not exhibit unwanted behavior, if there were to be
275
0
            // any inconsistency in data structures caused by the panicking thread.
276
0
            //
277
0
            // However, f() is expected in the general case not to panic. In that case, we
278
0
            // simply forget the guard, bypassing its destructor. We could theoretically
279
0
            // clear a flag instead, but this eliminates the call to the destructor at
280
0
            // compile time, and unconditionally poisons during an eventual panic, if
281
0
            // unwinding is enabled.
282
0
            core::mem::forget(finish);
283
0
284
0
            // SAFETY: Release is required here, so that all memory accesses done in the
285
0
            // closure when initializing, become visible to other threads that perform Acquire
286
0
            // loads.
287
0
            //
288
0
            // And, we also know that the changes this thread has done will not magically
289
0
            // disappear from our cache, so it does not need to be AcqRel.
290
0
            self.status.store(Status::Complete, Ordering::Release);
291
0
292
0
            // This next line is mainly an optimization.
293
0
            return unsafe { Ok(self.force_get()) };
294
        }
295
0
    }
296
297
    /// Spins until the [`Once`] contains a value.
298
    ///
299
    /// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`].
300
    ///
301
    /// # Panics
302
    ///
303
    /// This function will panic if the [`Once`] previously panicked while attempting
304
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
305
    /// primitives.
306
0
    pub fn wait(&self) -> &T {
307
        loop {
308
0
            match self.poll() {
309
0
                Some(x) => break x,
310
0
                None => R::relax(),
311
            }
312
        }
313
0
    }
314
315
    /// Like [`Once::get`], but will spin if the [`Once`] is in the process of being
316
    /// initialized. If initialization has not even begun, `None` will be returned.
317
    ///
318
    /// Note that in releases prior to `0.7`, this function was named `wait`.
319
    ///
320
    /// # Panics
321
    ///
322
    /// This function will panic if the [`Once`] previously panicked while attempting
323
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
324
    /// primitives.
325
0
    pub fn poll(&self) -> Option<&T> {
326
        loop {
327
            // SAFETY: Acquire is safe here, because if the status is COMPLETE, then we want to make
328
            // sure that all memory accesses done while initializing that value are visible when
329
            // we return a reference to the inner data after this load.
330
0
            match self.status.load(Ordering::Acquire) {
331
0
                Status::Incomplete => return None,
332
0
                Status::Running => R::relax(), // We spin
333
0
                Status::Complete => return Some(unsafe { self.force_get() }),
334
0
                Status::Panicked => panic!("Once previously poisoned by a panic"),
335
            }
336
        }
337
0
    }
338
}
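A minimal cross-thread sketch of `wait` and `poll` (assuming `std::thread`; the value 42 and the function names are illustrative): `poll` returns `None` until initialization has finished, while `wait` spins until a value is available.

static VALUE: spin::Once<u32> = spin::Once::new();

fn read_value() -> u32 {
    // Non-blocking check first: `None` means initialization has not finished.
    if let Some(v) = VALUE.poll() {
        return *v;
    }
    // Otherwise spin until some thread completes `call_once`.
    *VALUE.wait()
}

fn main() {
    let reader = std::thread::spawn(read_value);
    VALUE.call_once(|| 42);
    assert_eq!(reader.join().unwrap(), 42);
}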
339
340
impl<T, R> Once<T, R> {
341
    /// Initialization constant of [`Once`].
342
    #[allow(clippy::declare_interior_mutable_const)]
343
    pub const INIT: Self = Self {
344
        phantom: PhantomData,
345
        status: AtomicStatus::new(Status::Incomplete),
346
        data: UnsafeCell::new(MaybeUninit::uninit()),
347
    };
348
349
    /// Creates a new [`Once`].
350
0
    pub const fn new() -> Self {
351
0
        Self::INIT
352
0
    }
353
354
    /// Creates a new initialized [`Once`].
355
0
    pub const fn initialized(data: T) -> Self {
356
0
        Self {
357
0
            phantom: PhantomData,
358
0
            status: AtomicStatus::new(Status::Complete),
359
0
            data: UnsafeCell::new(MaybeUninit::new(data)),
360
0
        }
361
0
    }
362
363
    /// Retrieve a pointer to the inner data.
364
    ///
365
    /// While this method itself is safe, accessing the pointer before the [`Once`] has been
366
    /// initialized is UB, unless the data has already been written through a pointer obtained
367
    /// from this method.
368
0
    pub fn as_mut_ptr(&self) -> *mut T {
369
0
        // SAFETY:
370
0
        // * MaybeUninit<T> always has exactly the same layout as T
371
0
        self.data.get().cast::<T>()
372
0
    }
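A hedged sketch of the intended usage (the static name and value are illustrative, not part of this crate): the pointer is only dereferenced after `call_once` has run, as required above.

static CONFIG: spin::Once<u64> = spin::Once::new();

fn demo() -> u64 {
    CONFIG.call_once(|| 7);
    let ptr: *const u64 = CONFIG.as_mut_ptr();
    // SAFETY: `call_once` has completed, so the pointee is initialized.
    unsafe { *ptr }
}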
373
374
    /// Get a reference to the initialized instance. Must only be called once the status is `Complete`.
375
0
    unsafe fn force_get(&self) -> &T {
376
0
        // SAFETY:
377
0
        // * `UnsafeCell`/inner deref: data never changes again
378
0
        // * `MaybeUninit`/outer deref: data was initialized
379
0
        &*(*self.data.get()).as_ptr()
380
0
    }
381
382
    /// Get a reference to the initialized instance. Must only be called once the status is `Complete`.
383
0
    unsafe fn force_get_mut(&mut self) -> &mut T {
384
0
        // SAFETY:
385
0
        // * `UnsafeCell`/inner deref: data never changes again
386
0
        // * `MaybeUninit`/outer deref: data was initialized
387
0
        &mut *(*self.data.get()).as_mut_ptr()
388
0
    }
389
390
    /// Move the initialized value out of this instance. Must only be called once the status is `Complete`.
391
0
    unsafe fn force_into_inner(self) -> T {
392
0
        // SAFETY:
393
0
        // * `UnsafeCell`/inner deref: data never changes again
394
0
        // * `MaybeUninit`/outer deref: data was initialized
395
0
        (*self.data.get()).as_ptr().read()
396
0
    }
397
398
    /// Returns a reference to the inner value if the [`Once`] has been initialized.
399
0
    pub fn get(&self) -> Option<&T> {
400
0
        // SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the
401
0
        // nonatomic stores done when initializing, once we have loaded and checked the status.
402
0
        match self.status.load(Ordering::Acquire) {
403
0
            Status::Complete => Some(unsafe { self.force_get() }),
404
0
            _ => None,
405
        }
406
0
    }
407
408
    /// Returns a reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
409
    ///
410
    /// # Safety
411
    ///
412
    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
413
    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
414
    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
415
    /// checking initialization is unacceptable and the `Once` has already been initialized.
416
0
    pub unsafe fn get_unchecked(&self) -> &T {
417
0
        debug_assert_eq!(
418
0
            self.status.load(Ordering::SeqCst),
419
            Status::Complete,
420
0
            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
421
        );
422
0
        self.force_get()
423
0
    }
424
425
    /// Returns a mutable reference to the inner value if the [`Once`] has been initialized.
426
    ///
427
    /// Because this method requires a mutable reference to the [`Once`], no synchronization
428
    /// overhead is required to access the inner value. In effect, it is zero-cost.
429
0
    pub fn get_mut(&mut self) -> Option<&mut T> {
430
0
        match *self.status.get_mut() {
431
0
            Status::Complete => Some(unsafe { self.force_get_mut() }),
432
0
            _ => None,
433
        }
434
0
    }
435
436
    /// Returns a mutable reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
437
    ///
438
    /// # Safety
439
    ///
440
    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
441
    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
442
    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
443
    /// checking initialization is unacceptable and the `Once` has already been initialized.
444
0
    pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
445
0
        debug_assert_eq!(
446
0
            self.status.load(Ordering::SeqCst),
447
            Status::Complete,
448
0
            "Attempted to access an unintialized Once.  If this was to run without debug checks, this would be undefined behavior.  This is a serious bug and you must fix it.",
449
        );
450
0
        self.force_get_mut()
451
0
    }
452
453
    /// Returns the inner value if the [`Once`] has been initialized.
454
    ///
455
    /// Because this method requires ownership of the [`Once`], no synchronization overhead
456
    /// is required to access the inner value. In effect, it is zero-cost.
457
0
    pub fn try_into_inner(mut self) -> Option<T> {
458
0
        match *self.status.get_mut() {
459
0
            Status::Complete => Some(unsafe { self.force_into_inner() }),
460
0
            _ => None,
461
        }
462
0
    }
463
464
    /// Returns the inner value on the unchecked assumption that the [`Once`] has been initialized.
465
    /// # Safety
466
    ///
467
    /// This is *extremely* unsafe if the `Once` has not already been initialized, because
468
    /// uninitialized memory will be read and returned, immediately triggering undefined behaviour.
469
    /// This can be useful if the `Once` has already been initialized and you want to bypass an
470
    /// `Option` check.
471
0
    pub unsafe fn into_inner_unchecked(self) -> T {
472
0
        debug_assert_eq!(
473
0
            self.status.load(Ordering::SeqCst),
474
            Status::Complete,
475
0
            "Attempted to access an unintialized Once.  If this was to run without debug checks, this would be undefined behavior.  This is a serious bug and you must fix it.",
476
        );
477
0
        self.force_into_inner()
478
0
    }
479
480
    /// Checks whether the value has been initialized.
481
    ///
482
    /// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and
483
    /// therefore it is safe to access the value directly via
484
    /// [`get_unchecked`](Self::get_unchecked) if this returns true.
485
0
    pub fn is_completed(&self) -> bool {
486
0
        // TODO: Add a similar variant for Relaxed?
487
0
        self.status.load(Ordering::Acquire) == Status::Complete
488
0
    }
489
}
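A minimal single-threaded sketch of the non-blocking accessors above (`initialized`, `get`, `get_mut`, `try_into_inner`, `is_completed`); the string contents and the function name are illustrative only.

fn accessors_demo() {
    // `initialized` starts out complete, with no closure involved.
    let mut once: spin::Once<String> = spin::Once::initialized(String::from("ready"));
    assert!(once.is_completed());
    assert_eq!(once.get().map(String::as_str), Some("ready"));

    // `get_mut` takes `&mut self`, so no atomic synchronization is needed.
    if let Some(s) = once.get_mut() {
        s.push('!');
    }

    // `try_into_inner` consumes the `Once` and hands back the value, if any.
    assert_eq!(once.try_into_inner().as_deref(), Some("ready!"));

    // An uninitialized `Once` reports `false`/`None` from all of these.
    let empty: spin::Once<String> = spin::Once::new();
    assert!(!empty.is_completed());
    assert!(empty.get().is_none());
    assert!(empty.try_into_inner().is_none());
}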
490
491
impl<T, R> From<T> for Once<T, R> {
492
0
    fn from(data: T) -> Self {
493
0
        Self::initialized(data)
494
0
    }
495
}
496
497
impl<T, R> Drop for Once<T, R> {
498
0
    fn drop(&mut self) {
499
0
        // No need to do any atomic access here, we have &mut!
500
0
        if *self.status.get_mut() == Status::Complete {
501
0
            unsafe {
502
0
                // TODO: Use `MaybeUninit::assume_init_drop` once stabilised
503
0
                core::ptr::drop_in_place((*self.data.get()).as_mut_ptr());
504
0
            }
505
0
        }
506
0
    }
507
}
508
509
struct Finish<'a> {
510
    status: &'a AtomicStatus,
511
}
512
513
impl<'a> Drop for Finish<'a> {
514
0
    fn drop(&mut self) {
515
0
        // While using Relaxed here would most likely not be an issue, we use SeqCst anyway.
516
0
        // This is mainly because panics are not meant to be fast at all, but also because if
517
0
        // there were to be a compiler bug which reorders accesses within the same thread,
518
0
        // where it should not, we want to be sure that the panic really is handled, and does
519
0
        // not cause additional problems. SeqCst will therefore help guarding against such
520
0
        // bugs.
521
0
        self.status.store(Status::Panicked, Ordering::SeqCst);
522
0
    }
523
}
524
525
#[cfg(test)]
526
mod tests {
527
    use std::prelude::v1::*;
528
529
    use std::sync::atomic::AtomicU32;
530
    use std::sync::mpsc::channel;
531
    use std::sync::Arc;
532
    use std::thread;
533
534
    use super::*;
535
536
    #[test]
537
    fn smoke_once() {
538
        static O: Once = Once::new();
539
        let mut a = 0;
540
        O.call_once(|| a += 1);
541
        assert_eq!(a, 1);
542
        O.call_once(|| a += 1);
543
        assert_eq!(a, 1);
544
    }
545
546
    #[test]
547
    fn smoke_once_value() {
548
        static O: Once<usize> = Once::new();
549
        let a = O.call_once(|| 1);
550
        assert_eq!(*a, 1);
551
        let b = O.call_once(|| 2);
552
        assert_eq!(*b, 1);
553
    }
554
555
    #[test]
556
    fn stampede_once() {
557
        static O: Once = Once::new();
558
        static mut RUN: bool = false;
559
560
        let (tx, rx) = channel();
561
        let mut ts = Vec::new();
562
        for _ in 0..10 {
563
            let tx = tx.clone();
564
            ts.push(thread::spawn(move || {
565
                for _ in 0..4 {
566
                    thread::yield_now()
567
                }
568
                unsafe {
569
                    O.call_once(|| {
570
                        assert!(!RUN);
571
                        RUN = true;
572
                    });
573
                    assert!(RUN);
574
                }
575
                tx.send(()).unwrap();
576
            }));
577
        }
578
579
        unsafe {
580
            O.call_once(|| {
581
                assert!(!RUN);
582
                RUN = true;
583
            });
584
            assert!(RUN);
585
        }
586
587
        for _ in 0..10 {
588
            rx.recv().unwrap();
589
        }
590
591
        for t in ts {
592
            t.join().unwrap();
593
        }
594
    }
595
596
    #[test]
597
    fn get() {
598
        static INIT: Once<usize> = Once::new();
599
600
        assert!(INIT.get().is_none());
601
        INIT.call_once(|| 2);
602
        assert_eq!(INIT.get().map(|r| *r), Some(2));
603
    }
604
605
    #[test]
606
    fn get_no_wait() {
607
        static INIT: Once<usize> = Once::new();
608
609
        assert!(INIT.get().is_none());
610
        let t = thread::spawn(move || {
611
            INIT.call_once(|| {
612
                thread::sleep(std::time::Duration::from_secs(3));
613
                42
614
            });
615
        });
616
        assert!(INIT.get().is_none());
617
618
        t.join().unwrap();
619
    }
620
621
    #[test]
622
    fn poll() {
623
        static INIT: Once<usize> = Once::new();
624
625
        assert!(INIT.poll().is_none());
626
        INIT.call_once(|| 3);
627
        assert_eq!(INIT.poll().map(|r| *r), Some(3));
628
    }
629
630
    #[test]
631
    fn wait() {
632
        static INIT: Once<usize> = Once::new();
633
634
        let t = std::thread::spawn(|| {
635
            assert_eq!(*INIT.wait(), 3);
636
            assert!(INIT.is_completed());
637
        });
638
639
        for _ in 0..4 {
640
            thread::yield_now()
641
        }
642
643
        assert!(INIT.poll().is_none());
644
        INIT.call_once(|| 3);
645
646
        t.join().unwrap();
647
    }
648
649
    #[test]
650
    fn panic() {
651
        use std::panic;
652
653
        static INIT: Once = Once::new();
654
655
        // poison the once
656
        let t = panic::catch_unwind(|| {
657
            INIT.call_once(|| panic!());
658
        });
659
        assert!(t.is_err());
660
661
        // poisoning propagates
662
        let t = panic::catch_unwind(|| {
663
            INIT.call_once(|| {});
664
        });
665
        assert!(t.is_err());
666
    }
667
668
    #[test]
669
    fn init_constant() {
670
        static O: Once = Once::INIT;
671
        let mut a = 0;
672
        O.call_once(|| a += 1);
673
        assert_eq!(a, 1);
674
        O.call_once(|| a += 1);
675
        assert_eq!(a, 1);
676
    }
677
678
    static mut CALLED: bool = false;
679
680
    struct DropTest {}
681
682
    impl Drop for DropTest {
683
        fn drop(&mut self) {
684
            unsafe {
685
                CALLED = true;
686
            }
687
        }
688
    }
689
690
    #[test]
691
    fn try_call_once_err() {
692
        let once = Once::<_, Spin>::new();
693
        let shared = Arc::new((once, AtomicU32::new(0)));
694
695
        let (tx, rx) = channel();
696
697
        let t0 = {
698
            let shared = shared.clone();
699
            thread::spawn(move || {
700
                let (once, called) = &*shared;
701
702
                once.try_call_once(|| {
703
                    called.fetch_add(1, Ordering::AcqRel);
704
                    tx.send(()).unwrap();
705
                    thread::sleep(std::time::Duration::from_millis(50));
706
                    Err(())
707
                })
708
                .ok();
709
            })
710
        };
711
712
        let t1 = {
713
            let shared = shared.clone();
714
            thread::spawn(move || {
715
                rx.recv().unwrap();
716
                let (once, called) = &*shared;
717
                assert_eq!(
718
                    called.load(Ordering::Acquire),
719
                    1,
720
                    "leader thread did not run first"
721
                );
722
723
                once.call_once(|| {
724
                    called.fetch_add(1, Ordering::AcqRel);
725
                });
726
            })
727
        };
728
729
        t0.join().unwrap();
730
        t1.join().unwrap();
731
732
        assert_eq!(shared.1.load(Ordering::Acquire), 2);
733
    }
734
735
    // This is sort of two test cases, but if we write them as separate test methods
736
    // they can be executed concurrently and then fail some small fraction of the
737
    // time.
738
    #[test]
739
    fn drop_occurs_and_skip_uninit_drop() {
740
        unsafe {
741
            CALLED = false;
742
        }
743
744
        {
745
            let once = Once::<_>::new();
746
            once.call_once(|| DropTest {});
747
        }
748
749
        assert!(unsafe { CALLED });
750
        // Now test that we skip drops for the uninitialized case.
751
        unsafe {
752
            CALLED = false;
753
        }
754
755
        let once = Once::<DropTest>::new();
756
        drop(once);
757
758
        assert!(unsafe { !CALLED });
759
    }
760
761
    #[test]
762
    fn call_once_test() {
763
        for _ in 0..20 {
764
            use std::sync::atomic::AtomicUsize;
765
            use std::sync::Arc;
766
            use std::time::Duration;
767
            let share = Arc::new(AtomicUsize::new(0));
768
            let once = Arc::new(Once::<_, Spin>::new());
769
            let mut hs = Vec::new();
770
            for _ in 0..8 {
771
                let h = thread::spawn({
772
                    let share = share.clone();
773
                    let once = once.clone();
774
                    move || {
775
                        thread::sleep(Duration::from_millis(10));
776
                        once.call_once(|| {
777
                            share.fetch_add(1, Ordering::SeqCst);
778
                        });
779
                    }
780
                });
781
                hs.push(h);
782
            }
783
            for h in hs {
784
                h.join().unwrap();
785
            }
786
            assert_eq!(1, share.load(Ordering::SeqCst));
787
        }
788
    }
789
}