Coverage Report

Created: 2025-07-01 06:50

/rust/registry/src/index.crates.io-6f17d22bba15001f/crossbeam-utils-0.8.21/src/cache_padded.rs
Line | Count | Source
   1 |       | use core::fmt;
   2 |       | use core::ops::{Deref, DerefMut};
   3 |       |
   4 |       | /// Pads and aligns a value to the length of a cache line.
   5 |       | ///
   6 |       | /// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of
   7 |       | /// data are not placed into the same cache line. Updating an atomic value invalidates the whole
   8 |       | /// cache line it belongs to, which makes the next access to the same cache line slower for other
   9 |       | /// CPU cores. Use `CachePadded` to ensure updating one piece of data doesn't invalidate other
  10 |       | /// cached data.
  11 |       | ///
  12 |       | /// # Size and alignment
  13 |       | ///
  14 |       | /// Cache lines are assumed to be N bytes long, depending on the architecture:
  15 |       | ///
  16 |       | /// * On x86-64, aarch64, and powerpc64, N = 128.
  17 |       | /// * On arm, mips, mips64, sparc, and hexagon, N = 32.
  18 |       | /// * On m68k, N = 16.
  19 |       | /// * On s390x, N = 256.
  20 |       | /// * On all others, N = 64.
  21 |       | ///
  22 |       | /// Note that N is just a reasonable guess and is not guaranteed to match the actual cache line
  23 |       | /// length of the machine the program is running on. On modern Intel architectures, spatial
  24 |       | /// prefetcher is pulling pairs of 64-byte cache lines at a time, so we pessimistically assume that
  25 |       | /// cache lines are 128 bytes long.
  26 |       | ///
  27 |       | /// The size of `CachePadded<T>` is the smallest multiple of N bytes large enough to accommodate
  28 |       | /// a value of type `T`.
  29 |       | ///
  30 |       | /// The alignment of `CachePadded<T>` is the maximum of N bytes and the alignment of `T`.
  31 |       | ///
  32 |       | /// # Examples
  33 |       | ///
  34 |       | /// Alignment and padding:
  35 |       | ///
  36 |       | /// ```
  37 |       | /// use crossbeam_utils::CachePadded;
  38 |       | ///
  39 |       | /// let array = [CachePadded::new(1i8), CachePadded::new(2i8)];
  40 |       | /// let addr1 = &*array[0] as *const i8 as usize;
  41 |       | /// let addr2 = &*array[1] as *const i8 as usize;
  42 |       | ///
  43 |       | /// assert!(addr2 - addr1 >= 32);
  44 |       | /// assert_eq!(addr1 % 32, 0);
  45 |       | /// assert_eq!(addr2 % 32, 0);
  46 |       | /// ```
  47 |       | ///
  48 |       | /// When building a concurrent queue with a head and a tail index, it is wise to place them in
  49 |       | /// different cache lines so that concurrent threads pushing and popping elements don't invalidate
  50 |       | /// each other's cache lines:
  51 |       | ///
  52 |       | /// ```
  53 |       | /// use crossbeam_utils::CachePadded;
  54 |       | /// use std::sync::atomic::AtomicUsize;
  55 |       | ///
  56 |       | /// struct Queue<T> {
  57 |       | ///     head: CachePadded<AtomicUsize>,
  58 |       | ///     tail: CachePadded<AtomicUsize>,
  59 |       | ///     buffer: *mut T,
  60 |       | /// }
  61 |       | /// ```
  62 |       | #[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
  63 |       | // Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache
  64 |       | // lines at a time, so we have to align to 128 bytes rather than 64.
  65 |       | //
  66 |       | // Sources:
  67 |       | // - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
  68 |       | // - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
  69 |       | //
  70 |       | // aarch64/arm64ec's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size.
  71 |       | //
  72 |       | // Sources:
  73 |       | // - https://www.mono-project.com/news/2016/09/12/arm64-icache/
  74 |       | //
  75 |       | // powerpc64 has 128-byte cache line size.
  76 |       | //
  77 |       | // Sources:
  78 |       | // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
  79 |       | // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/powerpc/include/asm/cache.h#L26
  80 |       | #[cfg_attr(
  81 |       |     any(
  82 |       |         target_arch = "x86_64",
  83 |       |         target_arch = "aarch64",
  84 |       |         target_arch = "arm64ec",
  85 |       |         target_arch = "powerpc64",
  86 |       |     ),
  87 |       |     repr(align(128))
  88 |       | )]
  89 |       | // arm, mips, mips64, sparc, and hexagon have 32-byte cache line size.
  90 |       | //
  91 |       | // Sources:
  92 |       | // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
  93 |       | // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
  94 |       | // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
  95 |       | // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
  96 |       | // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L17
  97 |       | // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/hexagon/include/asm/cache.h#L12
  98 |       | #[cfg_attr(
  99 |       |     any(
 100 |       |         target_arch = "arm",
 101 |       |         target_arch = "mips",
 102 |       |         target_arch = "mips32r6",
 103 |       |         target_arch = "mips64",
 104 |       |         target_arch = "mips64r6",
 105 |       |         target_arch = "sparc",
 106 |       |         target_arch = "hexagon",
 107 |       |     ),
 108 |       |     repr(align(32))
 109 |       | )]
 110 |       | // m68k has 16-byte cache line size.
 111 |       | //
 112 |       | // Sources:
 113 |       | // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/m68k/include/asm/cache.h#L9
 114 |       | #[cfg_attr(target_arch = "m68k", repr(align(16)))]
 115 |       | // s390x has 256-byte cache line size.
 116 |       | //
 117 |       | // Sources:
 118 |       | // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
 119 |       | // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/s390/include/asm/cache.h#L13
 120 |       | #[cfg_attr(target_arch = "s390x", repr(align(256)))]
 121 |       | // x86, wasm, riscv, and sparc64 have 64-byte cache line size.
 122 |       | //
 123 |       | // Sources:
 124 |       | // - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
 125 |       | // - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
 126 |       | // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10
 127 |       | // - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L19
 128 |       | //
 129 |       | // All others are assumed to have 64-byte cache line size.
 130 |       | #[cfg_attr(
 131 |       |     not(any(
 132 |       |         target_arch = "x86_64",
 133 |       |         target_arch = "aarch64",
 134 |       |         target_arch = "arm64ec",
 135 |       |         target_arch = "powerpc64",
 136 |       |         target_arch = "arm",
 137 |       |         target_arch = "mips",
 138 |       |         target_arch = "mips32r6",
 139 |       |         target_arch = "mips64",
 140 |       |         target_arch = "mips64r6",
 141 |       |         target_arch = "sparc",
 142 |       |         target_arch = "hexagon",
 143 |       |         target_arch = "m68k",
 144 |       |         target_arch = "s390x",
 145 |       |     )),
 146 |       |     repr(align(64))
 147 |       | )]
 148 |       | pub struct CachePadded<T> {
 149 |       |     value: T,
 150 |       | }
 151 |       |
 152 |       | unsafe impl<T: Send> Send for CachePadded<T> {}
 153 |       | unsafe impl<T: Sync> Sync for CachePadded<T> {}
 154 |       |
 155 |       | impl<T> CachePadded<T> {
 156 |       |     /// Pads and aligns a value to the length of a cache line.
 157 |       |     ///
 158 |       |     /// # Examples
 159 |       |     ///
 160 |       |     /// ```
 161 |       |     /// use crossbeam_utils::CachePadded;
 162 |       |     ///
 163 |       |     /// let padded_value = CachePadded::new(1);
 164 |       |     /// ```
 165 |     0 |     pub const fn new(t: T) -> CachePadded<T> {
 166 |     0 |         CachePadded::<T> { value: t }
 167 |     0 |     }
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_epoch::atomic::Atomic<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>>>::new
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_deque::deque::Inner<rayon_core::job::JobRef>>>::new
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_deque::deque::Position<rayon_core::job::JobRef>>>::new
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>>>::new
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_epoch::epoch::AtomicEpoch>>::new
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<_>>::new
 168 |       |
 169 |       |     /// Returns the inner value.
 170 |       |     ///
 171 |       |     /// # Examples
 172 |       |     ///
 173 |       |     /// ```
 174 |       |     /// use crossbeam_utils::CachePadded;
 175 |       |     ///
 176 |       |     /// let padded_value = CachePadded::new(7);
 177 |       |     /// let value = padded_value.into_inner();
 178 |       |     /// assert_eq!(value, 7);
 179 |       |     /// ```
 180 |     0 |     pub fn into_inner(self) -> T {
 181 |     0 |         self.value
 182 |     0 |     }
 183 |       | }
 184 |       |
 185 |       | impl<T> Deref for CachePadded<T> {
 186 |       |     type Target = T;
 187 |       |
 188 |     0 |     fn deref(&self) -> &T {
 189 |     0 |         &self.value
 190 |     0 |     }
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_epoch::atomic::Atomic<crossbeam_deque::deque::Buffer<rayon_core::job::JobRef>>> as core::ops::deref::Deref>::deref
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_deque::deque::Inner<rayon_core::job::JobRef>> as core::ops::deref::Deref>::deref
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_deque::deque::Position<rayon_core::job::JobRef>> as core::ops::deref::Deref>::deref
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<rayon_core::sleep::WorkerSleepState> as core::ops::deref::Deref>::deref
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_epoch::atomic::Atomic<crossbeam_epoch::sync::queue::Node<crossbeam_epoch::internal::SealedBag>>> as core::ops::deref::Deref>::deref
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_epoch::epoch::AtomicEpoch> as core::ops::deref::Deref>::deref
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<_> as core::ops::deref::Deref>::deref
 191 |       | }
 192 |       |
 193 |       | impl<T> DerefMut for CachePadded<T> {
 194 |     0 |     fn deref_mut(&mut self) -> &mut T {
 195 |     0 |         &mut self.value
 196 |     0 |     }
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<crossbeam_deque::deque::Position<rayon_core::job::JobRef>> as core::ops::deref::DerefMut>::deref_mut
     |       | Unexecuted instantiation: <crossbeam_utils::cache_padded::CachePadded<_> as core::ops::deref::DerefMut>::deref_mut
 197 |       | }
 198 |       |
 199 |       | impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
 200 |     0 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 201 |     0 |         f.debug_struct("CachePadded")
 202 |     0 |             .field("value", &self.value)
 203 |     0 |             .finish()
 204 |     0 |     }
 205 |       | }
 206 |       |
 207 |       | impl<T> From<T> for CachePadded<T> {
 208 |     0 |     fn from(t: T) -> Self {
 209 |     0 |         CachePadded::new(t)
 210 |     0 |     }
 211 |       | }
 212 |       |
 213 |       | impl<T: fmt::Display> fmt::Display for CachePadded<T> {
 214 |     0 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 215 |     0 |         fmt::Display::fmt(&self.value, f)
 216 |     0 |     }
 217 |       | }
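
Every function body in this file reports a count of 0. A minimal smoke test along the following lines would exercise the uncovered items (new, into_inner, Deref, DerefMut, From, Debug, Display); this is a sketch, assuming crossbeam-utils 0.8 is available to the test crate, and the test name and placement are illustrative rather than part of crossbeam-utils itself. It would not, however, cover the "Unexecuted instantiation" entries above, which come from monomorphizations in downstream crates (crossbeam-deque, crossbeam-epoch, rayon-core) and only count when those code paths run.

// Illustrative smoke test; relies only on APIs shown in the listing above.
use crossbeam_utils::CachePadded;

#[test]
fn cache_padded_smoke() {
    // Size is a multiple of the alignment, and alignment is at least that of T,
    // as documented in the listing above.
    assert_eq!(
        core::mem::size_of::<CachePadded<u8>>() % core::mem::align_of::<CachePadded<u8>>(),
        0
    );
    assert!(core::mem::align_of::<CachePadded<u8>>() >= core::mem::align_of::<u8>());

    // `new` plus `Deref`/`DerefMut` access to the inner value.
    let mut padded = CachePadded::new(1u64);
    *padded += 6;
    assert_eq!(*padded, 7);

    // `From`, `Display`, and `Debug` all forward to the inner value.
    let from: CachePadded<u64> = CachePadded::from(7u64);
    assert_eq!(format!("{}", from), "7");
    assert_eq!(format!("{:?}", from), "CachePadded { value: 7 }");

    // `into_inner` unwraps the padded value.
    assert_eq!(padded.into_inner(), 7);
}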