/src/skia/third_party/externals/zlib/crc_folding.c
/*
 * Compute the CRC32 using a parallelized folding approach with the PCLMULQDQ
 * instruction.
 *
 * A white paper describing this algorithm can be found at:
 * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
 *
 * Copyright (C) 2013 Intel Corporation. All rights reserved.
 * Authors:
 *  Wajdi Feghali   <wajdi.k.feghali@intel.com>
 *  Jim Guilford    <james.guilford@intel.com>
 *  Vinodh Gopal    <vinodh.gopal@intel.com>
 *  Erdinc Ozturk   <erdinc.ozturk@intel.com>
 *  Jim Kukunas     <james.t.kukunas@linux.intel.com>
 *
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "deflate.h"

#ifdef CRC32_SIMD_SSE42_PCLMUL

#include <inttypes.h>
#include <emmintrin.h>
#include <immintrin.h>
#include <wmmintrin.h>

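/*
 * Rough usage sketch (not part of this file; the call sites are expected to
 * live in deflate.c):
 *
 *     crc_fold_init(s);                   // reset the folded CRC state
 *     crc_fold_copy(s, dst, src, len);    // repeat for each copied chunk
 *     crc = crc_fold_512to32(s);          // reduce to the final CRC-32
 *
 * The state itself is five 128-bit values kept in s->crc0: four fold
 * accumulators (xmm_crc0..xmm_crc3) plus a scratch register for partial
 * (sub-16-byte) blocks.  CRC_LOAD opens a scope and pulls them into local
 * __m128i variables; CRC_SAVE writes them back and closes that scope.
 */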
#define CRC_LOAD(s) \
    do { \
        __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0);\
        __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)s->crc0 + 1);\
        __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)s->crc0 + 2);\
        __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)s->crc0 + 3);\
        __m128i xmm_crc_part = _mm_loadu_si128((__m128i *)s->crc0 + 4);

#define CRC_SAVE(s) \
        _mm_storeu_si128((__m128i *)s->crc0 + 0, xmm_crc0);\
        _mm_storeu_si128((__m128i *)s->crc0 + 1, xmm_crc1);\
        _mm_storeu_si128((__m128i *)s->crc0 + 2, xmm_crc2);\
        _mm_storeu_si128((__m128i *)s->crc0 + 3, xmm_crc3);\
        _mm_storeu_si128((__m128i *)s->crc0 + 4, xmm_crc_part);\
    } while (0);

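/*
 * Reset the folded CRC state.  xmm_crc0 is seeded with a pre-computed
 * constant that plays the role of the usual 0xFFFFFFFF CRC-32 initial
 * value in folded form, and the remaining accumulators are cleared.
 * strm->adler is the field zlib uses for the running checksum (CRC-32
 * for gzip-wrapped streams), so it is zeroed here as well.
 */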
ZLIB_INTERNAL void crc_fold_init(deflate_state *const s)
{
    CRC_LOAD(s)

    xmm_crc0 = _mm_cvtsi32_si128(0x9db42487);
    xmm_crc1 = _mm_setzero_si128();
    xmm_crc2 = _mm_setzero_si128();
    xmm_crc3 = _mm_setzero_si128();

    CRC_SAVE(s)

    s->strm->adler = 0;
}

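/*
 * fold_1/2/3/4 advance the four-lane state by 1..4 lanes of 16 bytes.
 * Each departing lane is carry-less multiplied by both 64-bit halves of
 * xmm_fold4 (the 512-bit, i.e. 4 x 16-byte, fold constants) and XORed
 * back in, while the surviving lanes slide down; the caller then XORs
 * the next block of input into the freed lanes.  fold_1 retires a
 * single lane.
 */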
local void fold_1(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp3;
    __m128 ps_crc0, ps_crc3, ps_res;

    x_tmp3 = *xmm_crc3;

    *xmm_crc3 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_res = _mm_xor_ps(ps_crc0, ps_crc3);

    *xmm_crc0 = *xmm_crc1;
    *xmm_crc1 = *xmm_crc2;
    *xmm_crc2 = x_tmp3;
    *xmm_crc3 = _mm_castps_si128(ps_res);
}

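/* As fold_1, but two lanes leave the window per call. */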
local void fold_2(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp3, x_tmp2;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res31, ps_res20;

    x_tmp3 = *xmm_crc3;
    x_tmp2 = *xmm_crc2;

    *xmm_crc3 = *xmm_crc1;
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_res31 = _mm_xor_ps(ps_crc3, ps_crc1);

    *xmm_crc2 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_res20 = _mm_xor_ps(ps_crc0, ps_crc2);

    *xmm_crc0 = x_tmp2;
    *xmm_crc1 = x_tmp3;
    *xmm_crc2 = _mm_castps_si128(ps_res20);
    *xmm_crc3 = _mm_castps_si128(ps_res31);
}

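/* As fold_1, but three lanes leave the window per call. */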
local void fold_3(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp3;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res32, ps_res21, ps_res10;

    x_tmp3 = *xmm_crc3;

    *xmm_crc3 = *xmm_crc2;
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_res32 = _mm_xor_ps(ps_crc2, ps_crc3);

    *xmm_crc2 = *xmm_crc1;
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_res21 = _mm_xor_ps(ps_crc1, ps_crc2);

    *xmm_crc1 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_res10 = _mm_xor_ps(ps_crc0, ps_crc1);

    *xmm_crc0 = x_tmp3;
    *xmm_crc1 = _mm_castps_si128(ps_res10);
    *xmm_crc2 = _mm_castps_si128(ps_res21);
    *xmm_crc3 = _mm_castps_si128(ps_res32);
}

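/*
 * Fold all four lanes forward by 64 bytes.  This is the steady-state
 * operation used once per 64-byte block in the main crc_fold_copy loop.
 */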
local void fold_4(deflate_state *const s,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3)
{
    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);

    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
    __m128 ps_t0, ps_t1, ps_t2, ps_t3;
    __m128 ps_res0, ps_res1, ps_res2, ps_res3;

    x_tmp0 = *xmm_crc0;
    x_tmp1 = *xmm_crc1;
    x_tmp2 = *xmm_crc2;
    x_tmp3 = *xmm_crc3;

    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_t0 = _mm_castsi128_ps(x_tmp0);
    ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);

    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold4, 0x10);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_t1 = _mm_castsi128_ps(x_tmp1);
    ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);

    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
    x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold4, 0x10);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_t2 = _mm_castsi128_ps(x_tmp2);
    ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);

    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x01);
    x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold4, 0x10);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_t3 = _mm_castsi128_ps(x_tmp3);
    ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);

    *xmm_crc0 = _mm_castps_si128(ps_res0);
    *xmm_crc1 = _mm_castps_si128(ps_res1);
    *xmm_crc2 = _mm_castps_si128(ps_res2);
    *xmm_crc3 = _mm_castps_si128(ps_res3);
}

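/*
 * PSHUFB control masks used by partial_fold.  Row (len - 1), used as-is,
 * behaves as a byte shift left by (16 - len) bytes (the "shl" comments);
 * XORing the row with 0x80808080 flips which control bytes have their
 * high bit set, and PSHUFB zeroes those lanes, so the same row then acts
 * as a byte shift right by len bytes (the "shr" comments).
 */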
local const unsigned zalign(32) pshufb_shf_table[60] = {
    0x84838281,0x88878685,0x8c8b8a89,0x008f8e8d, /* shl 15 (16 - 1)/shr1 */
    0x85848382,0x89888786,0x8d8c8b8a,0x01008f8e, /* shl 14 (16 - 2)/shr2 */
    0x86858483,0x8a898887,0x8e8d8c8b,0x0201008f, /* shl 13 (16 - 3)/shr3 */
    0x87868584,0x8b8a8988,0x8f8e8d8c,0x03020100, /* shl 12 (16 - 4)/shr4 */
    0x88878685,0x8c8b8a89,0x008f8e8d,0x04030201, /* shl 11 (16 - 5)/shr5 */
    0x89888786,0x8d8c8b8a,0x01008f8e,0x05040302, /* shl 10 (16 - 6)/shr6 */
    0x8a898887,0x8e8d8c8b,0x0201008f,0x06050403, /* shl  9 (16 - 7)/shr7 */
    0x8b8a8988,0x8f8e8d8c,0x03020100,0x07060504, /* shl  8 (16 - 8)/shr8 */
    0x8c8b8a89,0x008f8e8d,0x04030201,0x08070605, /* shl  7 (16 - 9)/shr9 */
    0x8d8c8b8a,0x01008f8e,0x05040302,0x09080706, /* shl  6 (16 -10)/shr10*/
    0x8e8d8c8b,0x0201008f,0x06050403,0x0a090807, /* shl  5 (16 -11)/shr11*/
    0x8f8e8d8c,0x03020100,0x07060504,0x0b0a0908, /* shl  4 (16 -12)/shr12*/
    0x008f8e8d,0x04030201,0x08070605,0x0c0b0a09, /* shl  3 (16 -13)/shr13*/
    0x01008f8e,0x05040302,0x09080706,0x0d0c0b0a, /* shl  2 (16 -14)/shr14*/
    0x0201008f,0x06050403,0x0a090807,0x0e0d0c0b  /* shl  1 (16 -15)/shr15*/
};

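/*
 * Fold a partial block of 1..15 bytes (already loaded into *xmm_crc_part)
 * into the state: shift the whole four-lane pipeline by len bytes using
 * the pshufb table above, merge the new bytes into xmm_crc3, and fold the
 * len bytes pushed out of xmm_crc0 back into xmm_crc3 with the 512-bit
 * fold multiply.
 */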
local void partial_fold(deflate_state *const s, const size_t len,
        __m128i *xmm_crc0, __m128i *xmm_crc1,
        __m128i *xmm_crc2, __m128i *xmm_crc3,
        __m128i *xmm_crc_part)
{

    const __m128i xmm_fold4 = _mm_set_epi32(
            0x00000001, 0x54442bd4,
            0x00000001, 0xc6e41596);
    const __m128i xmm_mask3 = _mm_set1_epi32(0x80808080);

    __m128i xmm_shl, xmm_shr, xmm_tmp1, xmm_tmp2, xmm_tmp3;
    __m128i xmm_a0_0, xmm_a0_1;
    __m128 ps_crc3, psa0_0, psa0_1, ps_res;

    xmm_shl = _mm_load_si128((__m128i *)pshufb_shf_table + (len - 1));
    xmm_shr = xmm_shl;
    xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3);

    xmm_a0_0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shl);

    *xmm_crc0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shr);
    xmm_tmp1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shl);
    *xmm_crc0 = _mm_or_si128(*xmm_crc0, xmm_tmp1);

    *xmm_crc1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shr);
    xmm_tmp2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shl);
    *xmm_crc1 = _mm_or_si128(*xmm_crc1, xmm_tmp2);

    *xmm_crc2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shr);
    xmm_tmp3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shl);
    *xmm_crc2 = _mm_or_si128(*xmm_crc2, xmm_tmp3);

    *xmm_crc3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shr);
    *xmm_crc_part = _mm_shuffle_epi8(*xmm_crc_part, xmm_shl);
    *xmm_crc3 = _mm_or_si128(*xmm_crc3, *xmm_crc_part);

    xmm_a0_1 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x10);
    xmm_a0_0 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x01);

    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    psa0_0 = _mm_castsi128_ps(xmm_a0_0);
    psa0_1 = _mm_castsi128_ps(xmm_a0_1);

    ps_res = _mm_xor_ps(ps_crc3, psa0_0);
    ps_res = _mm_xor_ps(ps_res, psa0_1);

    *xmm_crc3 = _mm_castps_si128(ps_res);
}

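/*
 * Copy len bytes from src to dst while folding them into the running CRC
 * state.  The work is laid out as: a partial_fold to bring src up to
 * 16-byte alignment (the bulk loads below use the aligned _mm_load_si128),
 * a main loop that folds 64 bytes at a time with fold_4, fold_3/fold_2/
 * fold_1 for a 48/32/16-byte tail, and finally a sub-16-byte partial
 * block.  dst is written with unaligned stores; note that the 16-byte
 * stores used for alignment and for the final partial block can write a
 * few bytes past dst + len, so the destination buffer is assumed to have
 * that much slack.
 */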
ZLIB_INTERNAL void crc_fold_copy(deflate_state *const s,
        unsigned char *dst, const unsigned char *src, long len)
{
    unsigned long algn_diff;
    __m128i xmm_t0, xmm_t1, xmm_t2, xmm_t3;

    CRC_LOAD(s)

    if (len < 16) {
        if (len == 0)
            return;
        goto partial;
    }

    algn_diff = (0 - (uintptr_t)src) & 0xF;
    if (algn_diff) {
        xmm_crc_part = _mm_loadu_si128((__m128i *)src);
        _mm_storeu_si128((__m128i *)dst, xmm_crc_part);

        dst += algn_diff;
        src += algn_diff;
        len -= algn_diff;

        partial_fold(s, algn_diff, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3,
                &xmm_crc_part);
    }

    while ((len -= 64) >= 0) {
        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
        xmm_t2 = _mm_load_si128((__m128i *)src + 2);
        xmm_t3 = _mm_load_si128((__m128i *)src + 3);

        fold_4(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);
        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);
        _mm_storeu_si128((__m128i *)dst + 3, xmm_t3);

        xmm_crc0 = _mm_xor_si128(xmm_crc0, xmm_t0);
        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t1);
        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t2);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t3);

        src += 64;
        dst += 64;
    }

    /*
     * len = num bytes left - 64
     */
    if (len + 16 >= 0) {
        len += 16;

        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);
        xmm_t2 = _mm_load_si128((__m128i *)src + 2);

        fold_3(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);
        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);
        _mm_storeu_si128((__m128i *)dst + 2, xmm_t2);

        xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_t0);
        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t1);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t2);

        if (len == 0)
            goto done;

        dst += 48;
        src += 48;
    } else if (len + 32 >= 0) {
        len += 32;

        xmm_t0 = _mm_load_si128((__m128i *)src);
        xmm_t1 = _mm_load_si128((__m128i *)src + 1);

        fold_2(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);
        _mm_storeu_si128((__m128i *)dst + 1, xmm_t1);

        xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_t0);
        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t1);

        if (len == 0)
            goto done;

        dst += 32;
        src += 32;
    } else if (len + 48 >= 0) {
        len += 48;

        xmm_t0 = _mm_load_si128((__m128i *)src);

        fold_1(s, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

        _mm_storeu_si128((__m128i *)dst, xmm_t0);

        xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_t0);

        if (len == 0)
            goto done;

        dst += 16;
        src += 16;
    } else {
        len += 64;
        if (len == 0)
            goto done;
    }

partial:

#if defined(_MSC_VER)
    /* VS does not permit the use of _mm_set_epi64x in 32-bit builds */
    {
        int32_t parts[4] = {0, 0, 0, 0};
        memcpy(&parts, src, len);
        xmm_crc_part = _mm_set_epi32(parts[3], parts[2], parts[1], parts[0]);
    }
#else
    {
        int64_t parts[2] = {0, 0};
        memcpy(&parts, src, len);
        xmm_crc_part = _mm_set_epi64x(parts[1], parts[0]);
    }
#endif

    _mm_storeu_si128((__m128i *)dst, xmm_crc_part);
    partial_fold(s, len, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3,
            &xmm_crc_part);
done:
    CRC_SAVE(s)
}

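/*
 * Reduction constants (rk1..rk8 from the Intel white paper) and bit masks
 * used by crc_fold_512to32 to collapse the 512-bit folded state down to
 * the final 32-bit CRC.
 */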
local const unsigned zalign(16) crc_k[] = {
    0xccaa009e, 0x00000000, /* rk1 */
    0x751997d0, 0x00000001, /* rk2 */
    0xccaa009e, 0x00000000, /* rk5 */
    0x63cd6124, 0x00000001, /* rk6 */
    0xf7011640, 0x00000001, /* rk7 */
    0xdb710640, 0x00000001  /* rk8 */
};

local const unsigned zalign(16) crc_mask[4] = {
    0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000
};

local const unsigned zalign(16) crc_mask2[4] = {
    0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
};

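/*
 * Reduce the four 128-bit accumulators to the final CRC-32: fold
 * everything into xmm_crc3 with rk1/rk2, reduce 128 -> 64 -> 32 bits
 * with rk5/rk6, then apply the Barrett-style reduction with rk7/rk8.
 * The result is bit-inverted, matching the usual CRC-32 convention.
 */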
unsigned ZLIB_INTERNAL crc_fold_512to32(deflate_state *const s)
{
    const __m128i xmm_mask = _mm_load_si128((__m128i *)crc_mask);
    const __m128i xmm_mask2 = _mm_load_si128((__m128i *)crc_mask2);

    unsigned crc;
    __m128i x_tmp0, x_tmp1, x_tmp2, crc_fold;

    __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0);
    __m128i xmm_crc1 = _mm_loadu_si128((__m128i *)s->crc0 + 1);
    __m128i xmm_crc2 = _mm_loadu_si128((__m128i *)s->crc0 + 2);
    __m128i xmm_crc3 = _mm_loadu_si128((__m128i *)s->crc0 + 3);

    /*
     * k1
     */
    crc_fold = _mm_load_si128((__m128i *)crc_k);

    x_tmp0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x10);
    xmm_crc0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x01);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, x_tmp0);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_crc0);

    x_tmp1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x10);
    xmm_crc1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x01);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, x_tmp1);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_crc1);

    x_tmp2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x10);
    xmm_crc2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x01);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, x_tmp2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);

    /*
     * k5
     */
    crc_fold = _mm_load_si128((__m128i *)crc_k + 1);

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc0 = _mm_srli_si128(xmm_crc0, 8);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_slli_si128(xmm_crc3, 4);
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask2);

    /*
     * k7
     */
    xmm_crc1 = xmm_crc3;
    xmm_crc2 = xmm_crc3;
    crc_fold = _mm_load_si128((__m128i *)crc_k + 2);

    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask);

    xmm_crc2 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1);

    crc = _mm_extract_epi32(xmm_crc3, 2);
    return ~crc;
}

#endif /* CRC32_SIMD_SSE42_PCLMUL */