Coverage Report

Created: 2025-07-23 07:29

/rust/registry/src/index.crates.io-6f17d22bba15001f/crc32fast-1.3.2/src/specialized/pclmulqdq.rs
Line |  Count | Source
   1 |        | #[cfg(target_arch = "x86")]
   2 |        | use core::arch::x86 as arch;
   3 |        | #[cfg(target_arch = "x86_64")]
   4 |        | use core::arch::x86_64 as arch;
   5 |        |
   6 |        | #[derive(Clone)]
   7 |        | pub struct State {
   8 |        |     state: u32,
   9 |        | }
  10 |        |
  11 |        | impl State {
  12 |        |     #[cfg(not(feature = "std"))]
  13 |        |     pub fn new(state: u32) -> Option<Self> {
  14 |        |         if cfg!(target_feature = "pclmulqdq")
  15 |        |             && cfg!(target_feature = "sse2")
  16 |        |             && cfg!(target_feature = "sse4.1")
  17 |        |         {
  18 |        |             // SAFETY: The conditions above ensure that all
  19 |        |             //         required instructions are supported by the CPU.
  20 |        |             Some(Self { state })
  21 |        |         } else {
  22 |        |             None
  23 |        |         }
  24 |        |     }
  25 |        |
  26 |        |     #[cfg(feature = "std")]
  27 |  14.2k |     pub fn new(state: u32) -> Option<Self> {
  28 |  14.2k |         if is_x86_feature_detected!("pclmulqdq")
  29 |  14.2k |             && is_x86_feature_detected!("sse2")
  30 |  14.2k |             && is_x86_feature_detected!("sse4.1")
  31 |        |         {
  32 |        |             // SAFETY: The conditions above ensure that all
  33 |        |             //         required instructions are supported by the CPU.
  34 |  14.2k |             Some(Self { state })
  35 |        |         } else {
  36 |      0 |             None
  37 |        |         }
  38 |  14.2k |     }
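
The runtime-detected constructor above returns None when the CPU lacks any of the three required extensions, and the no_std variant before it returns None unless those features were enabled at compile time, so every caller needs a scalar fallback path. The sketch below shows that caller-side pattern, assuming a scalar baseline::State with the same update shape exists alongside this module (the enum and paths are illustrative, not the crate's actual dispatch code):

    // Hypothetical dispatch between the SIMD state and a scalar fallback.
    enum Hasher {
        Accelerated(pclmulqdq::State),
        Scalar(baseline::State),
    }

    impl Hasher {
        fn new() -> Hasher {
            // State::new only succeeds when pclmulqdq, sse2 and sse4.1
            // are all available; fall back to the scalar state otherwise.
            match pclmulqdq::State::new(0) {
                Some(state) => Hasher::Accelerated(state),
                None => Hasher::Scalar(baseline::State::new(0)),
            }
        }
    }
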
  39 |        |
  40 |   189k |     pub fn update(&mut self, buf: &[u8]) {
  41 |   189k |         // SAFETY: The `State::new` constructor ensures that all
  42 |   189k |         //         required instructions are supported by the CPU.
  43 |   189k |         self.state = unsafe { calculate(self.state, buf) }
  44 |   189k |     }
  45 |        |
  46 |  3.30k |     pub fn finalize(self) -> u32 {
  47 |  3.30k |         self.state
  48 |  3.30k |     }
  49 |        |
  50 |      0 |     pub fn reset(&mut self) {
  51 |      0 |         self.state = 0;
  52 |      0 |     }
  53 |        |
  54 |      0 |     pub fn combine(&mut self, other: u32, amount: u64) {
  55 |      0 |         self.state = ::combine::combine(self.state, other, amount);
  56 |      0 |     }
  57 |        | }
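
Together these methods form a small streaming-checksum API: construct once, feed arbitrary chunks, then finalize. A minimal usage sketch (the surrounding module path is assumed; note that finalize consumes the state, while combine merges in a checksum computed separately over amount trailing bytes):

    // Checksum a stream of chunks with the State API listed above.
    fn checksum(chunks: &[&[u8]]) -> Option<u32> {
        // Returns None if the CPU lacks pclmulqdq/sse2/sse4.1.
        let mut state = State::new(0)?;
        for chunk in chunks {
            // Chunks may have any length or alignment; inputs shorter than
            // 128 bytes fall through to the scalar baseline in calculate.
            state.update(chunk);
        }
        Some(state.finalize())
    }
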
  58 |        |
  59 |        | const K1: i64 = 0x154442bd4;
  60 |        | const K2: i64 = 0x1c6e41596;
  61 |        | const K3: i64 = 0x1751997d0;
  62 |        | const K4: i64 = 0x0ccaa009e;
  63 |        | const K5: i64 = 0x163cd6124;
  64 |        | const K6: i64 = 0x1db710640;
  65 |        |
  66 |        | const P_X: i64 = 0x1DB710641;
  67 |        | const U_PRIME: i64 = 0x1F7011641;
  68 |        |
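These are the folding and reduction constants from Intel's paper on CRC computation with PCLMULQDQ: each K is a remainder of the form x^n mod P(x), where the exponent reflects how far a lane is folded forward; P_X is the full 33-bit bit-reflected CRC-32 polynomial (0x1DB710641 is 0x104C11DB7 with its 33 bits reversed), and U_PRIME is the Barrett constant μ. Below is a sketch of how remainders of this shape can be derived over GF(2); the concrete exponents depend on the folding distances and the bit-reflection convention, so none are asserted here:

    /// Compute x^n mod p over GF(2) for a degree-32 polynomial p,
    /// by repeated multiplication by x with reduction on overflow.
    fn xpow_mod(n: u64, p: u64) -> u64 {
        let mut r: u64 = 1; // the polynomial "1", i.e. x^0
        for _ in 0..n {
            r <<= 1; // multiply by x
            if r & (1 << 32) != 0 {
                r ^= p; // degree reached 32: subtract (xor) the modulus
            }
        }
        r
    }

Because the listing works in the bit-reflected domain, the constants above will not fall directly out of this helper without also reflecting its inputs and outputs.
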
  69 |        | #[cfg(feature = "std")]
  70 |   317k | unsafe fn debug(s: &str, a: arch::__m128i) -> arch::__m128i {
  71 |   317k |     if false {
  72 |      0 |         union A {
  73 |        |             a: arch::__m128i,
  74 |        |             b: [u8; 16],
  75 |        |         }
  76 |      0 |         let x = A { a }.b;
  77 |      0 |         print!(" {:20} | ", s);
  78 |      0 |         for x in x.iter() {
  79 |      0 |             print!("{:02x} ", x);
  80 |      0 |         }
  81 |      0 |         println!();
  82 |   317k |     }
  83 |   317k |     return a;
  84 |   317k | }
  85 |        |
  86 |        | #[cfg(not(feature = "std"))]
  87 |        | unsafe fn debug(_s: &str, a: arch::__m128i) -> arch::__m128i {
  88 |        |     a
  89 |        | }
  90 |        |
  91 |        | #[target_feature(enable = "pclmulqdq", enable = "sse2", enable = "sse4.1")]
  92 |   189k | unsafe fn calculate(crc: u32, mut data: &[u8]) -> u32 {
  93 |   189k |     // In theory we could accelerate smaller chunks too, but for now just rely
  94 |   189k |     // on the fallback implementation, as doing so is a hassle and doesn't seem
  95 |   189k |     // very beneficial.
  96 |   189k |     if data.len() < 128 {
  97 |  30.4k |         return ::baseline::update_fast_16(crc, data);
  98 |   158k |     }
  99 |   158k |
 100 |   158k |     // Step 1: fold by 4 loop
 101 |   158k |     let mut x3 = get(&mut data);
 102 |   158k |     let mut x2 = get(&mut data);
 103 |   158k |     let mut x1 = get(&mut data);
 104 |   158k |     let mut x0 = get(&mut data);
 105 |   158k |
 106 |   158k |     // fold in our initial value, part of the incremental crc checksum
 107 |   158k |     x3 = arch::_mm_xor_si128(x3, arch::_mm_cvtsi32_si128(!crc as i32));
 108 |   158k |
 109 |   158k |     let k1k2 = arch::_mm_set_epi64x(K2, K1);
 110 |  9.00M |     while data.len() >= 64 {
 111 |  8.85M |         x3 = reduce128(x3, get(&mut data), k1k2);
 112 |  8.85M |         x2 = reduce128(x2, get(&mut data), k1k2);
 113 |  8.85M |         x1 = reduce128(x1, get(&mut data), k1k2);
 114 |  8.85M |         x0 = reduce128(x0, get(&mut data), k1k2);
 115 |  8.85M |     }
 116 |        |
 117 |   158k |     let k3k4 = arch::_mm_set_epi64x(K4, K3);
 118 |   158k |     let mut x = reduce128(x3, x2, k3k4);
 119 |   158k |     x = reduce128(x, x1, k3k4);
 120 |   158k |     x = reduce128(x, x0, k3k4);
 121 |        |
 122 |        |     // Step 2: fold by 1 loop
 123 |   223k |     while data.len() >= 16 {
 124 |  64.4k |         x = reduce128(x, get(&mut data), k3k4);
 125 |  64.4k |     }
 126 |        |
 127 |   158k |     debug("128 > 64 init", x);
 128 |   158k |
 129 |   158k |     // Step 3: reduce from 128 bits to 64 bits. This diverges significantly
 130 |   158k |     // from the paper and essentially doesn't follow it. The reason isn't
 131 |   158k |     // obvious, but the Chrome and Linux implementations of this algorithm
 132 |   158k |     // diverge from the paper in exactly the same way, so the paper may
 133 |   158k |     // simply have errata for this step.
 134 |   158k |     //
 135 |   158k |     // Algebraically, the two multiply/xor pairs below compute:
 136 |   158k |     //
 137 |   158k |     // x = (x[0:63] • K4) ^ x[64:127]           // 96-bit result
 138 |   158k |     // x = ((x[0:31] as u64) • K5) ^ x[32:95]   // 64-bit result
 139 |   158k |     //
 140 |   158k |     // The paper is vague on this step and definitely uses different
 141 |   158k |     // constants, and it doesn't spell out where the xor operations land
 142 |   158k |     // or why the values shift around. This implementation nevertheless
 143 |   158k |     // matches the other production implementations and passes the
 144 |   158k |     // differential test against the baseline implementation below. (A
 145 |   158k |     // scalar transcription of the final Barrett step follows this listing.)
 146 |   158k |     //
 147 |   158k |     drop(K6); // K6 is otherwise unused; this keeps dead_code warnings quiet.
 148 |   158k |     let x = arch::_mm_xor_si128(
 149 |   158k |         arch::_mm_clmulepi64_si128(x, k3k4, 0x10),
 150 |   158k |         arch::_mm_srli_si128(x, 8),
 151 |   158k |     );
 152 |   158k |     let x = arch::_mm_xor_si128(
 153 |   158k |         arch::_mm_clmulepi64_si128(
 154 |   158k |             arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
 155 |   158k |             arch::_mm_set_epi64x(0, K5),
 156 |   158k |             0x00,
 157 |   158k |         ),
 158 |   158k |         arch::_mm_srli_si128(x, 4),
 159 |   158k |     );
 160 |   158k |     debug("128 > 64 xx", x);
 161 |   158k |
 162 |   158k |     // Perform a Barrett reduction from our now 64 bits down to 32 bits. The
 163 |   158k |     // algorithm is described at the end of the paper; note that this also
 164 |   158k |     // implements the "bit reflected input" variant.
 165 |   158k |     let pu = arch::_mm_set_epi64x(U_PRIME, P_X);
 166 |   158k |
 167 |   158k |     // T1(x) = ⌊(R(x) % x^32)⌋ • μ
 168 |   158k |     let t1 = arch::_mm_clmulepi64_si128(
 169 |   158k |         arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
 170 |   158k |         pu,
 171 |   158k |         0x10,
 172 |   158k |     );
 173 |   158k |     // T2(x) = ⌊(T1(x) % x^32)⌋ • P(x)
 174 |   158k |     let t2 = arch::_mm_clmulepi64_si128(
 175 |   158k |         arch::_mm_and_si128(t1, arch::_mm_set_epi32(0, 0, 0, !0)),
 176 |   158k |         pu,
 177 |   158k |         0x00,
 178 |   158k |     );
 179 |   158k |     // We're doing the bit-reflected variant, so take the upper 32 bits of
 180 |   158k |     // the 64-bit result instead of the lower 32 bits.
 181 |   158k |     //
 182 |   158k |     // C(x) = R(x) ^ T2(x) / x^32
 183 |   158k |     let c = arch::_mm_extract_epi32(arch::_mm_xor_si128(x, t2), 1) as u32;
 184 |   158k |
 185 |   158k |     if !data.is_empty() {
 186 |  36.4k |         ::baseline::update_fast_16(!c, data)
 187 |        |     } else {
 188 |   122k |         !c
 189 |        |     }
 190 |   189k | }
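
The tail of calculate is easier to follow in scalar form. Below is a direct transcription of the final Barrett step (T1, T2, and the extract) into plain integer code, with a software carry-less multiply standing in for _mm_clmulepi64_si128; this is purely illustrative, since the real code keeps everything in XMM registers:

    /// Software carry-less multiply: polynomial product over GF(2).
    fn clmul(a: u64, b: u64) -> u128 {
        let mut acc: u128 = 0;
        for i in 0..64 {
            if (b >> i) & 1 == 1 {
                acc ^= (a as u128) << i;
            }
        }
        acc
    }

    /// Scalar version of the Barrett reduction above: fold the 64-bit
    /// remainder x down to 32 bits using P_X and U_PRIME.
    fn barrett_64_to_32(x: u64) -> u32 {
        const P_X: u64 = 0x1DB710641;
        const U_PRIME: u64 = 0x1F7011641;
        // T1(x) = (R(x) mod x^32) * mu  -- selector 0x10 picks U_PRIME
        let t1 = clmul(x & 0xFFFF_FFFF, U_PRIME);
        // T2(x) = (T1(x) mod x^32) * P(x)  -- selector 0x00 picks P_X
        let t2 = clmul((t1 as u64) & 0xFFFF_FFFF, P_X);
        // Bit-reflected variant: the CRC is the upper half of the 64-bit
        // result, i.e. bits 32..63, matching _mm_extract_epi32(..., 1).
        (((x as u128) ^ t2) >> 32) as u32
    }

As in the listing, the caller still complements the result (the !c), because the whole routine operates on the complemented CRC state.
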
 191 |        |
 192 |  35.9M | unsafe fn reduce128(a: arch::__m128i, b: arch::__m128i, keys: arch::__m128i) -> arch::__m128i {
 193 |  35.9M |     let t1 = arch::_mm_clmulepi64_si128(a, keys, 0x00);
 194 |  35.9M |     let t2 = arch::_mm_clmulepi64_si128(a, keys, 0x11);
 195 |  35.9M |     arch::_mm_xor_si128(arch::_mm_xor_si128(b, t1), t2)
 196 |  35.9M | }
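
reduce128 is the folding primitive used throughout calculate: it multiplies the two 64-bit halves of the accumulated lane a by a pair of x^n folding constants and xors both products into the next 128 bits of input b, advancing the accumulated polynomial without ever growing past 128 bits. A scalar transcription, reusing the software clmul sketched after calculate (illustrative only):

    /// Scalar version of reduce128: fold lane `a` into the next 128 input
    /// bits `b` using the two folding constants packed into `keys`.
    fn reduce128_scalar(a: u128, b: u128, k_lo: u64, k_hi: u64) -> u128 {
        let t1 = clmul(a as u64, k_lo);         // selector 0x00: low half of a
        let t2 = clmul((a >> 64) as u64, k_hi); // selector 0x11: high half of a
        b ^ t1 ^ t2
    }
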
 197 |        |
 198 |  36.1M | unsafe fn get(a: &mut &[u8]) -> arch::__m128i {
 199 |  36.1M |     debug_assert!(a.len() >= 16);
 200 |  36.1M |     let r = arch::_mm_loadu_si128(a.as_ptr() as *const arch::__m128i);
 201 |  36.1M |     *a = &a[16..];
 202 |  36.1M |     return r;
 203 |  36.1M | }
 204 |        |
 205 |        | #[cfg(test)]
 206 |        | mod test {
 207 |        |     quickcheck! {
 208 |        |         fn check_against_baseline(init: u32, chunks: Vec<(Vec<u8>, usize)>) -> bool {
 209 |        |             let mut baseline = super::super::super::baseline::State::new(init);
 210 |        |             let mut pclmulqdq = super::State::new(init).expect("not supported");
 211 |        |             for (chunk, mut offset) in chunks {
 212 |        |                 // simulate random alignments by offsetting the slice by up to 15 bytes
 213 |        |                 offset &= 0xF;
 214 |        |                 if chunk.len() <= offset {
 215 |        |                     baseline.update(&chunk);
 216 |        |                     pclmulqdq.update(&chunk);
 217 |        |                 } else {
 218 |        |                     baseline.update(&chunk[offset..]);
 219 |        |                     pclmulqdq.update(&chunk[offset..]);
 220 |        |                 }
 221 |        |             }
 222 |        |             pclmulqdq.finalize() == baseline.finalize()
 223 |        |         }
 224 |        |     }
 225 |        | }