Coverage Report

Created: 2025-08-26 07:49

/rust/registry/src/index.crates.io-6f17d22bba15001f/crc32fast-1.5.0/src/specialized/pclmulqdq.rs
  Count  Source
         //! Specialized checksum code for the x86 CPU architecture, based on the efficient algorithm described
         //! in the following whitepaper:
         //!
         //! Gopal, V., Ozturk, E., Guilford, J., Wolrich, G., Feghali, W., Dixon, M., & Karakoyunlu, D. (2009).
         //! _Fast CRC computation for generic polynomials using PCLMULQDQ instruction_. Intel.
         //! (Mirror link: <https://fossies.org/linux/zlib-ng/doc/crc-pclmulqdq.pdf>, accessed 2024-05-20)
         //!
         //! Throughout the code, this work is referred to as "the paper".

         #[cfg(target_arch = "x86")]
         use core::arch::x86 as arch;
         #[cfg(target_arch = "x86_64")]
         use core::arch::x86_64 as arch;

         #[derive(Clone)]
         pub struct State {
             state: u32,
         }

         impl State {
             #[cfg(not(feature = "std"))]
             pub fn new(state: u32) -> Option<Self> {
                 if cfg!(target_feature = "pclmulqdq")
                     && cfg!(target_feature = "sse2")
                     && cfg!(target_feature = "sse4.1")
                 {
                     // SAFETY: The conditions above ensure that all
                     //         required instructions are supported by the CPU.
                     Some(Self { state })
                 } else {
                     None
                 }
             }

             #[cfg(feature = "std")]
  29.0k      pub fn new(state: u32) -> Option<Self> {
  29.0k          if is_x86_feature_detected!("pclmulqdq")
  29.0k              && is_x86_feature_detected!("sse2")
  29.0k              && is_x86_feature_detected!("sse4.1")
                 {
                     // SAFETY: The conditions above ensure that all
                     //         required instructions are supported by the CPU.
  29.0k              Some(Self { state })
                 } else {
      0              None
                 }
  29.0k      }

  24.9M      pub fn update(&mut self, buf: &[u8]) {
  24.9M          // SAFETY: The `State::new` constructor ensures that all
  24.9M          //         required instructions are supported by the CPU.
  24.9M          self.state = unsafe { calculate(self.state, buf) }
  24.9M      }

   262k      pub fn finalize(self) -> u32 {
   262k          self.state
   262k      }

   281k      pub fn reset(&mut self) {
   281k          self.state = 0;
   281k      }

      0      pub fn combine(&mut self, other: u32, amount: u64) {
      0          self.state = crate::combine::combine(self.state, other, amount);
      0      }
         }
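
For context, a minimal sketch of how a caller might drive this State (hypothetical driver code, not part of this file; in the real crate a dispatch layer chooses between this implementation and the baseline one):

fn checksum(data: &[u8]) -> u32 {
    // `new` returns None when the CPU lacks pclmulqdq/sse2/sse4.1 support.
    match State::new(0) {
        Some(mut state) => {
            state.update(data);
            state.finalize()
        }
        // A real caller would fall back to the baseline implementation here.
        None => unimplemented!("dispatch to crate::baseline"),
    }
}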

         const K1: i64 = 0x154442bd4;
         const K2: i64 = 0x1c6e41596;
         const K3: i64 = 0x1751997d0;
         const K4: i64 = 0x0ccaa009e;
         const K5: i64 = 0x163cd6124;

         const P_X: i64 = 0x1DB710641;
         const U_PRIME: i64 = 0x1F7011641;
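
Everything below is built on carry-less (polynomial) multiplication over GF(2). As a reference for the sketches that follow, this is what a single 64-bit lane of PCLMULQDQ computes, written in scalar Rust (illustrative only; the real code uses arch::_mm_clmulepi64_si128):

// Carry-less multiply: like long multiplication, but partial products are
// combined with XOR instead of addition.
fn clmul64(a: u64, b: u64) -> u128 {
    let mut acc: u128 = 0;
    for i in 0..64 {
        if (b >> i) & 1 == 1 {
            acc ^= (a as u128) << i;
        }
    }
    acc
}

As for the constants: P_X is the bit-reflected CRC-32 polynomial (including its x^32 term), U_PRIME is the matching Barrett constant μ, and K1-K5 are precomputed remainders of powers of x modulo the polynomial, one per fold distance; the exact derivations are given in the paper.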
         #[target_feature(enable = "pclmulqdq", enable = "sse2", enable = "sse4.1")]
  24.9M  unsafe fn calculate(crc: u32, mut data: &[u8]) -> u32 {
  24.9M      // In theory we could accelerate smaller chunks too, but for now we rely
  24.9M      // on the fallback implementation: the extra complexity doesn't seem
  24.9M      // worth the marginal benefit.
  24.9M      if data.len() < 128 {
  24.9M          return crate::baseline::update_fast_16(crc, data);
  17.1k      }
  17.1k
  17.1k      // Step 1: fold by 4 loop
  17.1k      let mut x3 = get(&mut data);
  17.1k      let mut x2 = get(&mut data);
  17.1k      let mut x1 = get(&mut data);
  17.1k      let mut x0 = get(&mut data);
  17.1k
  17.1k      // Fold in our initial value, part of the incremental CRC checksum.
  17.1k      x3 = arch::_mm_xor_si128(x3, arch::_mm_cvtsi32_si128(!crc as i32));
  17.1k
  17.1k      let k1k2 = arch::_mm_set_epi64x(K2, K1);
   756k      while data.len() >= 64 {
   739k          x3 = reduce128(x3, get(&mut data), k1k2);
   739k          x2 = reduce128(x2, get(&mut data), k1k2);
   739k          x1 = reduce128(x1, get(&mut data), k1k2);
   739k          x0 = reduce128(x0, get(&mut data), k1k2);
   739k      }

  17.1k      let k3k4 = arch::_mm_set_epi64x(K4, K3);
  17.1k      let mut x = reduce128(x3, x2, k3k4);
  17.1k      x = reduce128(x, x1, k3k4);
  17.1k      x = reduce128(x, x0, k3k4);

             // Step 2: fold by 1 loop
  32.2k      while data.len() >= 16 {
  15.0k          x = reduce128(x, get(&mut data), k3k4);
  15.0k      }
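
In scalar terms, each reduce128 call (the helper is defined near the end of this file) folds a 128-bit accumulator into the next chunk. A sketch using the clmul64 helper above, viewing each __m128i as a little-endian u128:

// Scalar model of reduce128: fold accumulator `a` into chunk `b`.
// `k.0` models the low key lane (imm8 0x00), `k.1` the high lane (0x11).
fn reduce128_model(a: u128, b: u128, k: (u64, u64)) -> u128 {
    let t1 = clmul64(a as u64, k.0);         // a[0:63]   · K_lo
    let t2 = clmul64((a >> 64) as u64, k.1); // a[64:127] · K_hi
    b ^ t1 ^ t2
}

Running four independent accumulators (x3..x0) through the 64-byte loop keeps four PCLMULQDQ dependency chains in flight at once, which is the usual motivation for the fold-by-4 structure.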

             // Perform step 3, the reduction from 128 bits to 64 bits. This differs
             // significantly from the paper: implementations of this algorithm in
             // Chrome and Linux diverge from it in the same way, though the reason
             // for the divergence is unclear (possibly errata in the paper).
             //
             // Algebraically, what happens is:
             //
             // x = (x[0:63] • K4) ^ x[64:127]           // 96-bit result
             // x = ((x[0:31] as u64) • K5) ^ x[32:95]   // 64-bit result
             //
             // The paper is vague on this step and uses different constants; it is
             // not obvious from the paper where the XORs and shifts come from. This
             // implementation, however, checks out against the baseline (see the
             // quickcheck test below).
  17.1k      let x = arch::_mm_xor_si128(
  17.1k          arch::_mm_clmulepi64_si128(x, k3k4, 0x10),
  17.1k          arch::_mm_srli_si128(x, 8),
  17.1k      );
  17.1k      let x = arch::_mm_xor_si128(
  17.1k          arch::_mm_clmulepi64_si128(
  17.1k              arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
  17.1k              arch::_mm_set_epi64x(0, K5),
  17.1k              0x00,
  17.1k          ),
  17.1k          arch::_mm_srli_si128(x, 4),
  17.1k      );
  17.1k
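
Despite the uncertainty voiced in the comment above, these two statements do implement the stated algebra. A scalar sketch (same clmul64 helper and u128 view as before):

// Scalar model of the 128 -> 64 bit reduction performed above.
fn reduce_to_64_model(x: u128) -> u64 {
    const K4: u64 = 0x0ccaa009e;
    const K5: u64 = 0x163cd6124;
    // 128 -> 96 bits: x = (x[0:63] · K4) ^ x[64:127]
    let x = clmul64(x as u64, K4) ^ (x >> 64);
    // 96 -> 64 bits: x = ((x[0:31] as u64) · K5) ^ x[32:95]
    let x = clmul64((x as u32) as u64, K5) ^ (x >> 32);
    x as u64
}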
  17.1k      // Perform a Barrett reduction from our now 64 bits to 32 bits. The
  17.1k      // algorithm for this is described at the end of the paper, and note
  17.1k      // that this also implements the "bit-reflected input" variant.
  17.1k      let pu = arch::_mm_set_epi64x(U_PRIME, P_X);
  17.1k
  17.1k      // T1(x) = ⌊(R(x) % x^32)⌋ • μ
  17.1k      let t1 = arch::_mm_clmulepi64_si128(
  17.1k          arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
  17.1k          pu,
  17.1k          0x10,
  17.1k      );
  17.1k      // T2(x) = ⌊(T1(x) % x^32)⌋ • P(x)
  17.1k      let t2 = arch::_mm_clmulepi64_si128(
  17.1k          arch::_mm_and_si128(t1, arch::_mm_set_epi32(0, 0, 0, !0)),
  17.1k          pu,
  17.1k          0x00,
  17.1k      );
  17.1k      // We're doing the bit-reflected variant, so take the upper 32 bits of
  17.1k      // the 64-bit result instead of the lower 32 bits.
  17.1k      //
  17.1k      // C(x) = (R(x) ^ T2(x)) / x^32
  17.1k      let c = arch::_mm_extract_epi32(arch::_mm_xor_si128(x, t2), 1) as u32;
  17.1k
  17.1k      if !data.is_empty() {
  10.9k          crate::baseline::update_fast_16(!c, data)
             } else {
  6.11k          !c
             }
  24.9M  }
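
The Barrett step also has a compact scalar reading; the following sketch mirrors the intrinsic sequence above (same constants, clmul64 helper as before):

// Scalar model of the final Barrett reduction from 64 bits to 32 bits.
fn barrett_model(x: u64) -> u32 {
    const P_X: u64 = 0x1DB710641;
    const U_PRIME: u64 = 0x1F7011641;
    // T1(x) = (R(x) mod x^32) · μ
    let t1 = clmul64(x & 0xFFFF_FFFF, U_PRIME);
    // T2(x) = (T1(x) mod x^32) · P(x)
    let t2 = clmul64(t1 as u64 & 0xFFFF_FFFF, P_X);
    // C(x) = (R(x) ^ T2(x)) / x^32: take the upper 32 bits of the XOR.
    ((x as u128 ^ t2) >> 32) as u32
}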

  3.02M  unsafe fn reduce128(a: arch::__m128i, b: arch::__m128i, keys: arch::__m128i) -> arch::__m128i {
  3.02M      let t1 = arch::_mm_clmulepi64_si128(a, keys, 0x00);
  3.02M      let t2 = arch::_mm_clmulepi64_si128(a, keys, 0x11);
  3.02M      arch::_mm_xor_si128(arch::_mm_xor_si128(b, t1), t2)
  3.02M  }

  3.04M  unsafe fn get(a: &mut &[u8]) -> arch::__m128i {
  3.04M      debug_assert!(a.len() >= 16);
  3.04M      let r = arch::_mm_loadu_si128(a.as_ptr() as *const arch::__m128i);
  3.04M      *a = &a[16..];
  3.04M      r
  3.04M  }
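
The little-endian u128 view used in the sketches corresponds directly to this load: _mm_loadu_si128 reads 16 bytes with no alignment requirement, and on x86 (little-endian) the equivalent scalar read would be (hypothetical helper, mirroring `get`):

// Scalar analogue of `get`: consume 16 bytes as a little-endian u128.
fn get_model(a: &mut &[u8]) -> u128 {
    let (chunk, rest) = a.split_at(16);
    *a = rest;
    u128::from_le_bytes(chunk.try_into().unwrap())
}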

         #[cfg(test)]
         mod test {
             quickcheck::quickcheck! {
                 fn check_against_baseline(init: u32, chunks: Vec<(Vec<u8>, usize)>) -> bool {
                     let mut baseline = super::super::super::baseline::State::new(init);
                     let mut pclmulqdq = super::State::new(init).expect("not supported");
                     for (chunk, mut offset) in chunks {
                         // simulate random alignments by offsetting the slice by up to 15 bytes
                         offset &= 0xF;
                         if chunk.len() <= offset {
                             baseline.update(&chunk);
                             pclmulqdq.update(&chunk);
                         } else {
                             baseline.update(&chunk[offset..]);
                             pclmulqdq.update(&chunk[offset..]);
                         }
                     }
                     pclmulqdq.finalize() == baseline.finalize()
                 }
             }
         }
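
For completeness: downstream users never call into this module directly. The crate's public crc32fast::Hasher detects CPU support at runtime and routes to this implementation when available. Typical usage of the public API:

fn main() {
    // The pclmulqdq path is selected automatically when the CPU supports it.
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(b"foo bar baz");
    let checksum = hasher.finalize();
    println!("crc32: {checksum:#010x}");
}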