Coverage Report

Created: 2025-07-23 06:33

/src/php-src/ext/standard/crc32_x86.c
 Count | Source   (blank count = line not executable; "k" = thousands of executions)
       | /*
       |   +----------------------------------------------------------------------+
       |   | Copyright (c) The PHP Group                                          |
       |   +----------------------------------------------------------------------+
       |   | This source file is subject to version 3.01 of the PHP license,      |
       |   | that is bundled with this package in the file LICENSE, and is        |
       |   | available through the world-wide-web at the following url:           |
       |   | https://www.php.net/license/3_01.txt                                 |
       |   | If you did not receive a copy of the PHP license and are unable to   |
       |   | obtain it through the world-wide-web, please send a note to          |
       |   | license@php.net so we can mail you a copy immediately.               |
       |   +----------------------------------------------------------------------+
       |   | Author: Frank Du <frank.du@intel.com>                                |
       |   +----------------------------------------------------------------------+
       |   | Compute the crc32 of the buffer. Based on:                           |
       |   | "Fast CRC Computation for Generic Polynomials Using PCLMULQDQ"       |
       |   |  V. Gopal, E. Ozturk, et al., 2009, http://intel.ly/2ySEwL0          |
       | */
       |
       | #include "crc32_x86.h"
       |
       | #if defined(ZEND_INTRIN_SSE4_2_PCLMUL_NATIVE) || defined(ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER)
       | # include <nmmintrin.h>
       | # include <wmmintrin.h>
       | #endif
       |
       | #ifdef ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER
       | # include "Zend/zend_cpuinfo.h"
       | #endif
       |
       | #if defined(ZEND_INTRIN_SSE4_2_PCLMUL_NATIVE) || defined(ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER)
       |
       | typedef struct _crc32_pclmul_bit_consts {
       |   uint64_t k1k2[2];
       |   uint64_t k3k4[2];
       |   uint64_t k5k6[2];
       |   uint64_t uPx[2];
       | } crc32_pclmul_consts;
       |
       | static const crc32_pclmul_consts crc32_pclmul_consts_maps[X86_CRC32_MAX] = {
       |   { /* X86_CRC32, polynomial: 0x04C11DB7 */
       |     {0x00e6228b11, 0x008833794c}, /* endianness swap */
       |     {0x00e8a45605, 0x00c5b9cd4c}, /* endianness swap */
       |     {0x00490d678d, 0x00f200aa66}, /* endianness swap */
       |     {0x0104d101df, 0x0104c11db7}
       |   },
       |   { /* X86_CRC32B, polynomial: 0x04C11DB7 with reversed ordering */
       |     {0x0154442bd4, 0x01c6e41596},
       |     {0x01751997d0, 0x00ccaa009e},
       |     {0x0163cd6124, 0x01db710640},
       |     {0x01f7011641, 0x01db710641},
       |   },
       |   { /* X86_CRC32C, polynomial: 0x1EDC6F41 with reversed ordering */
       |     {0x00740eef02, 0x009e4addf8},
       |     {0x00f20c0dfe, 0x014cd00bd6},
       |     {0x00dd45aab8, 0x0000000000},
       |     {0x00dea713f1, 0x0105ec76f0}
       |   }
       | };
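
  [editor's note] Every constant in this table is "x^n mod P" for the row's
  polynomial P, with the exponent n set by how far a fold step advances the
  data; uPx holds the Barrett pair {floor(x^64 / P), P} (in the X86_CRC32
  row, uPx[1] = 0x0104c11db7 is literally 0x04C11DB7 with its implicit x^32
  bit). A minimal sketch of the underlying computation for the non-reflected
  bit order (the reflected rows use a bit-reversed variant of the same idea):

      #include <stdint.h>

      /* x^n mod P over GF(2); P = 0x04C11DB7 with an implicit x^32 term.
       * Slow but obviously correct; fine for generating constants offline. */
      static uint32_t xn_mod_p(unsigned n)
      {
          uint32_t r = 1;                  /* the polynomial "1" (x^0) */
          while (n--) {
              uint32_t msb = r & 0x80000000u;
              r <<= 1;                     /* multiply by x */
              if (msb)
                  r ^= 0x04C11DB7u;        /* reduce: x^32 == 0x04C11DB7 (mod P) */
          }
          return r;
      }

  The exact exponent behind each entry depends on the lane alignment the
  PCLMULQDQ immediates select, so treat the exponents as indicative: multiples
  of 128 bits for the fold steps, then 64 and 32 for the final reduction.
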
       |
       | static uint8_t pclmul_shuf_mask_table[16] = {
       |   0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
       |   0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
       | };
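       | /* (editor's note) control vector for _mm_shuffle_epi8: indices 15..0
       |    reverse the byte order of a 128-bit lane, so the non-reflected
       |    routine below can treat little-endian memory as a most-significant-
       |    bit-first polynomial. */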
       |
       | /* Folding of 128-bit data chunks */
 49.6k | #define CRC32_FOLDING_BLOCK_SIZE (16)
       |
       | /* PCLMUL version of non-reflected crc32 */
       | ZEND_INTRIN_SSE4_2_PCLMUL_FUNC_DECL(size_t crc32_pclmul_batch(uint32_t *crc, const unsigned char *p, size_t nr, const crc32_pclmul_consts *consts));
       | size_t crc32_pclmul_batch(uint32_t *crc, const unsigned char *p, size_t nr, const crc32_pclmul_consts *consts)
    37 | {
    37 |   size_t nr_in = nr;
    37 |   __m128i x0, x1, x2, k, shuf_mask;
       |
    37 |   if (nr < CRC32_FOLDING_BLOCK_SIZE) {
     3 |     return 0;
     3 |   }
       |
    34 |   shuf_mask = _mm_loadu_si128((__m128i *)(pclmul_shuf_mask_table));
    34 |   x0 = _mm_cvtsi32_si128(*crc);
    34 |   x1 = _mm_loadu_si128((__m128i *)(p + 0x00));
    34 |   x0 = _mm_slli_si128(x0, 12);
    34 |   x1 = _mm_shuffle_epi8(x1, shuf_mask); /* endianness swap */
    34 |   x0 = _mm_xor_si128(x1, x0);
    34 |   p += CRC32_FOLDING_BLOCK_SIZE;
    34 |   nr -= CRC32_FOLDING_BLOCK_SIZE;
       |
    34 |   if (nr >= (CRC32_FOLDING_BLOCK_SIZE * 3)) {
    19 |     __m128i x3, x4;
       |
    19 |     x1 = _mm_loadu_si128((__m128i *)(p + 0x00));
    19 |     x1 = _mm_shuffle_epi8(x1, shuf_mask); /* endianness swap */
    19 |     x2 = _mm_loadu_si128((__m128i *)(p + 0x10));
    19 |     x2 = _mm_shuffle_epi8(x2, shuf_mask); /* endianness swap */
    19 |     x3 = _mm_loadu_si128((__m128i *)(p + 0x20));
    19 |     x3 = _mm_shuffle_epi8(x3, shuf_mask); /* endianness swap */
    19 |     p += CRC32_FOLDING_BLOCK_SIZE * 3;
    19 |     nr -= CRC32_FOLDING_BLOCK_SIZE * 3;
       |
    19 |     k = _mm_loadu_si128((__m128i *)consts->k1k2);
       |     /* parallel folding by 4 */
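       |     /* (editor's note) one fold-by-4 step multiplies each 128-bit
       |        accumulator by x^512 mod P (each accumulator advances 64 bytes
       |        per iteration): _mm_clmulepi64_si128 with immediates 0x00 and
       |        0x11 forms the two 64x64 carry-less products lo64(x)*k1 and
       |        hi64(x)*k2, and xoring those with the next data chunk keeps
       |        each accumulator congruent (mod P) to its slice of the input. */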
 6.77k |     while (nr >= (CRC32_FOLDING_BLOCK_SIZE * 4)) {
 6.75k |       __m128i x5, x6, x7, x8, x9, x10, x11;
 6.75k |       x4 = _mm_clmulepi64_si128(x0, k, 0x00);
 6.75k |       x5 = _mm_clmulepi64_si128(x1, k, 0x00);
 6.75k |       x6 = _mm_clmulepi64_si128(x2, k, 0x00);
 6.75k |       x7 = _mm_clmulepi64_si128(x3, k, 0x00);
 6.75k |       x0 = _mm_clmulepi64_si128(x0, k, 0x11);
 6.75k |       x1 = _mm_clmulepi64_si128(x1, k, 0x11);
 6.75k |       x2 = _mm_clmulepi64_si128(x2, k, 0x11);
 6.75k |       x3 = _mm_clmulepi64_si128(x3, k, 0x11);
 6.75k |       x8 = _mm_loadu_si128((__m128i *)(p + 0x00));
 6.75k |       x8 = _mm_shuffle_epi8(x8, shuf_mask); /* endianness swap */
 6.75k |       x9 = _mm_loadu_si128((__m128i *)(p + 0x10));
 6.75k |       x9 = _mm_shuffle_epi8(x9, shuf_mask); /* endianness swap */
 6.75k |       x10 = _mm_loadu_si128((__m128i *)(p + 0x20));
 6.75k |       x10 = _mm_shuffle_epi8(x10, shuf_mask); /* endianness swap */
 6.75k |       x11 = _mm_loadu_si128((__m128i *)(p + 0x30));
 6.75k |       x11 = _mm_shuffle_epi8(x11, shuf_mask); /* endianness swap */
 6.75k |       x0 = _mm_xor_si128(x0, x4);
 6.75k |       x1 = _mm_xor_si128(x1, x5);
 6.75k |       x2 = _mm_xor_si128(x2, x6);
 6.75k |       x3 = _mm_xor_si128(x3, x7);
 6.75k |       x0 = _mm_xor_si128(x0, x8);
 6.75k |       x1 = _mm_xor_si128(x1, x9);
 6.75k |       x2 = _mm_xor_si128(x2, x10);
 6.75k |       x3 = _mm_xor_si128(x3, x11);
       |
 6.75k |       p += CRC32_FOLDING_BLOCK_SIZE * 4;
 6.75k |       nr -= CRC32_FOLDING_BLOCK_SIZE * 4;
 6.75k |     }
       |
    19 |     k = _mm_loadu_si128((__m128i *)consts->k3k4);
       |     /* fold 4 to 1, [x1, x2, x3] -> x0 */
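       |     /* (editor's note) three fold-by-128-bit steps with the k3k4 pair:
       |        each step folds x0 forward 128 bits and xors in the next
       |        accumulator, leaving a single 128-bit residue in x0. */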
    19 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
    19 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
    19 |     x0 = _mm_xor_si128(x0, x1);
    19 |     x0 = _mm_xor_si128(x0, x4);
    19 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
    19 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
    19 |     x0 = _mm_xor_si128(x0, x2);
    19 |     x0 = _mm_xor_si128(x0, x4);
    19 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
    19 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
    19 |     x0 = _mm_xor_si128(x0, x3);
    19 |     x0 = _mm_xor_si128(x0, x4);
    19 |   }
       |
    34 |   k = _mm_loadu_si128((__m128i *)consts->k3k4);
       |   /* folding by 1 */
    46 |   while (nr >= CRC32_FOLDING_BLOCK_SIZE) {
       |     /* load next to x2, fold to x0, x1 */
    12 |     x2 = _mm_loadu_si128((__m128i *)(p + 0x00));
    12 |     x2 = _mm_shuffle_epi8(x2, shuf_mask); /* endianness swap */
    12 |     x1 = _mm_clmulepi64_si128(x0, k, 0x00);
    12 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
    12 |     x0 = _mm_xor_si128(x0, x2);
    12 |     x0 = _mm_xor_si128(x0, x1);
    12 |     p += CRC32_FOLDING_BLOCK_SIZE;
    12 |     nr -= CRC32_FOLDING_BLOCK_SIZE;
    12 |   }
       |
       |   /* reduce 128-bits(final fold) to 96-bits */
    34 |   k = _mm_loadu_si128((__m128i*)consts->k5k6);
    34 |   x1 = _mm_clmulepi64_si128(x0, k, 0x11);
    34 |   x0 = _mm_slli_si128(x0, 8);
    34 |   x0 = _mm_srli_si128(x0, 4);
    34 |   x0 = _mm_xor_si128(x0, x1);
       |   /* reduce 96-bits to 64-bits */
    34 |   x1 = _mm_clmulepi64_si128(x0, k, 0x01);
    34 |   x0 = _mm_xor_si128(x0, x1);
       |
       |   /* barrett reduction */
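       |   /* (editor's note) Barrett reduction of the 64-bit residue T down to
       |      the 32-bit CRC without a division:
       |        q   = floor(T / x^32) clmul mu
       |        crc = lo32( T xor (floor(q / x^32) clmul P) )
       |      with mu = floor(x^64 / P); uPx stores {mu, P} as
       |      {0x0104d101df, 0x0104c11db7}, and the two _mm_srli_si128(_, 4)
       |      shifts implement the two floor-divisions by x^32. */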
    34 |   k = _mm_loadu_si128((__m128i*)consts->uPx);
    34 |   x1 = _mm_move_epi64(x0);
    34 |   x1 = _mm_srli_si128(x1, 4);
    34 |   x1 = _mm_clmulepi64_si128(x1, k, 0x00);
    34 |   x1 = _mm_srli_si128(x1, 4);
    34 |   x1 = _mm_clmulepi64_si128(x1, k, 0x10);
    34 |   x0 = _mm_xor_si128(x1, x0);
    34 |   *crc = _mm_extract_epi32(x0, 0);
    34 |   return (nr_in - nr); /* the nr processed */
    37 | }
       |
       | /* PCLMUL version of reflected crc32 */
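       | /* (editor's note) mirrors crc32_pclmul_batch above, minus the byte
       |    shuffles: for reflected polynomials (X86_CRC32B/X86_CRC32C) the CRC
       |    bit order already matches little-endian memory order, and the final
       |    reduction is laid out so the CRC lands in dword 2 of x0, hence the
       |    _mm_extract_epi32(x0, 2) at the end. */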
       | ZEND_INTRIN_SSE4_2_PCLMUL_FUNC_DECL(size_t crc32_pclmul_reflected_batch(uint32_t *crc, const unsigned char *p, size_t nr, const crc32_pclmul_consts *consts));
       | size_t crc32_pclmul_reflected_batch(uint32_t *crc, const unsigned char *p, size_t nr, const crc32_pclmul_consts *consts)
   106 | {
   106 |   size_t nr_in = nr;
   106 |   __m128i x0, x1, x2, k;
       |
   106 |   if (nr < CRC32_FOLDING_BLOCK_SIZE) {
    40 |     return 0;
    40 |   }
       |
    66 |   x0 = _mm_loadu_si128((__m128i *)(p + 0x00));
    66 |   x0 = _mm_xor_si128(x0, _mm_cvtsi32_si128(*crc));
    66 |   p += CRC32_FOLDING_BLOCK_SIZE;
    66 |   nr -= CRC32_FOLDING_BLOCK_SIZE;
    66 |   if (nr >= (CRC32_FOLDING_BLOCK_SIZE * 3)) {
    43 |     __m128i x3, x4;
       |
    43 |     x1 = _mm_loadu_si128((__m128i *)(p + 0x00));
    43 |     x2 = _mm_loadu_si128((__m128i *)(p + 0x10));
    43 |     x3 = _mm_loadu_si128((__m128i *)(p + 0x20));
    43 |     p += CRC32_FOLDING_BLOCK_SIZE * 3;
    43 |     nr -= CRC32_FOLDING_BLOCK_SIZE * 3;
       |
    43 |     k = _mm_loadu_si128((__m128i *)consts->k1k2);
       |     /* parallel folding by 4 */
 9.52k |     while (nr >= (CRC32_FOLDING_BLOCK_SIZE * 4)) {
 9.47k |       __m128i x5, x6, x7, x8, x9, x10, x11;
 9.47k |       x4 = _mm_clmulepi64_si128(x0, k, 0x00);
 9.47k |       x5 = _mm_clmulepi64_si128(x1, k, 0x00);
 9.47k |       x6 = _mm_clmulepi64_si128(x2, k, 0x00);
 9.47k |       x7 = _mm_clmulepi64_si128(x3, k, 0x00);
 9.47k |       x0 = _mm_clmulepi64_si128(x0, k, 0x11);
 9.47k |       x1 = _mm_clmulepi64_si128(x1, k, 0x11);
 9.47k |       x2 = _mm_clmulepi64_si128(x2, k, 0x11);
 9.47k |       x3 = _mm_clmulepi64_si128(x3, k, 0x11);
 9.47k |       x8 = _mm_loadu_si128((__m128i *)(p + 0x00));
 9.47k |       x9 = _mm_loadu_si128((__m128i *)(p + 0x10));
 9.47k |       x10 = _mm_loadu_si128((__m128i *)(p + 0x20));
 9.47k |       x11 = _mm_loadu_si128((__m128i *)(p + 0x30));
 9.47k |       x0 = _mm_xor_si128(x0, x4);
 9.47k |       x1 = _mm_xor_si128(x1, x5);
 9.47k |       x2 = _mm_xor_si128(x2, x6);
 9.47k |       x3 = _mm_xor_si128(x3, x7);
 9.47k |       x0 = _mm_xor_si128(x0, x8);
 9.47k |       x1 = _mm_xor_si128(x1, x9);
 9.47k |       x2 = _mm_xor_si128(x2, x10);
 9.47k |       x3 = _mm_xor_si128(x3, x11);
       |
 9.47k |       p += CRC32_FOLDING_BLOCK_SIZE * 4;
 9.47k |       nr -= CRC32_FOLDING_BLOCK_SIZE * 4;
 9.47k |     }
       |
    43 |     k = _mm_loadu_si128((__m128i *)consts->k3k4);
       |     /* fold 4 to 1, [x1, x2, x3] -> x0 */
    43 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
    43 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
    43 |     x0 = _mm_xor_si128(x0, x1);
    43 |     x0 = _mm_xor_si128(x0, x4);
    43 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
    43 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
    43 |     x0 = _mm_xor_si128(x0, x2);
    43 |     x0 = _mm_xor_si128(x0, x4);
    43 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
    43 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
    43 |     x0 = _mm_xor_si128(x0, x3);
    43 |     x0 = _mm_xor_si128(x0, x4);
    43 |   }
       |
    66 |   k = _mm_loadu_si128((__m128i *)consts->k3k4);
       |   /* folding by 1 */
   132 |   while (nr >= CRC32_FOLDING_BLOCK_SIZE) {
       |     /* load next to x2, fold to x0, x1 */
    66 |     x2 = _mm_loadu_si128((__m128i *)(p + 0x00));
    66 |     x1 = _mm_clmulepi64_si128(x0, k, 0x00);
    66 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
    66 |     x0 = _mm_xor_si128(x0, x2);
    66 |     x0 = _mm_xor_si128(x0, x1);
    66 |     p += CRC32_FOLDING_BLOCK_SIZE;
    66 |     nr -= CRC32_FOLDING_BLOCK_SIZE;
    66 |   }
       |
       |   /* reduce 128-bits(final fold) to 96-bits */
    66 |   x1 = _mm_clmulepi64_si128(x0, k, 0x10);
    66 |   x0 = _mm_srli_si128(x0, 8);
    66 |   x0 = _mm_xor_si128(x0, x1);
       |   /* reduce 96-bits to 64-bits */
    66 |   x1 = _mm_shuffle_epi32(x0, 0xfc);
    66 |   x0 = _mm_shuffle_epi32(x0, 0xf9);
    66 |   k = _mm_loadu_si128((__m128i*)consts->k5k6);
    66 |   x1 = _mm_clmulepi64_si128(x1, k, 0x00);
    66 |   x0 = _mm_xor_si128(x0, x1);
       |
       |   /* barrett reduction */
    66 |   x1 = _mm_shuffle_epi32(x0, 0xf3);
    66 |   x0 = _mm_slli_si128(x0, 4);
    66 |   k = _mm_loadu_si128((__m128i*)consts->uPx);
    66 |   x1 = _mm_clmulepi64_si128(x1, k, 0x00);
    66 |   x1 = _mm_clmulepi64_si128(x1, k, 0x10);
    66 |   x0 = _mm_xor_si128(x1, x0);
    66 |   *crc = _mm_extract_epi32(x0, 2);
    66 |   return (nr_in - nr); /* the nr processed */
   106 | }
       |
       | # if defined(ZEND_INTRIN_SSE4_2_PCLMUL_NATIVE)
       | size_t crc32_x86_simd_update(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr)
       | # else /* ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER */
       | size_t crc32_sse42_pclmul_update(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr)
       | # endif
   143 | {
   143 |   if (type > X86_CRC32_MAX) {
     0 |     return 0;
     0 |   }
   143 |   const crc32_pclmul_consts *consts = &crc32_pclmul_consts_maps[type];
       |
   143 |   switch (type) {
    37 |   case X86_CRC32:
    37 |     return crc32_pclmul_batch(crc, p, nr, consts);
    67 |   case X86_CRC32B:
   106 |   case X86_CRC32C:
   106 |     return crc32_pclmul_reflected_batch(crc, p, nr, consts);
     0 |   default:
     0 |     return 0;
   143 |   }
   143 | }
       | #endif
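
  [editor's note] Both batch routines consume only whole 16-byte blocks and
  return the number of bytes they handled, so the caller finishes the tail
  (fewer than 16 bytes, or the whole buffer if SIMD is unavailable) in scalar
  code; php-src's ext/standard/crc32.c does this with its table-driven loop.
  A minimal sketch of such a caller, assuming the usual CRC-32B init and
  final-xor convention; the name crc32b_buf and the bitwise tail loop are
  illustrative, not from this file:

      #include <stddef.h>
      #include <stdint.h>

      uint32_t crc32b_buf(const unsigned char *p, size_t nr)
      {
          uint32_t crc = ~0u;  /* standard CRC-32B initial value */
          size_t done = crc32_x86_simd_update(X86_CRC32B, &crc, p, nr);
          p += done;
          nr -= done;
          while (nr--) {       /* bitwise tail over the reflected polynomial */
              crc ^= *p++;
              for (int k = 0; k < 8; k++)
                  crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
          }
          return ~crc;         /* standard final xor */
      }
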
       |
       | #ifdef ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER
       | static size_t crc32_x86_simd_update_default(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr)
     0 | {
     0 |   return 0;
     0 | }
       |
       | # ifdef ZEND_INTRIN_SSE4_2_PCLMUL_FUNC_PROTO
       | size_t crc32_x86_simd_update(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr) __attribute__((ifunc("resolve_crc32_x86_simd_update")));
       |
       | typedef size_t (*crc32_x86_simd_func_t)(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr);
       |
       | ZEND_NO_SANITIZE_ADDRESS
       | ZEND_ATTRIBUTE_UNUSED /* clang mistakenly warns about this */
       | static crc32_x86_simd_func_t resolve_crc32_x86_simd_update(void) {
       |   if (zend_cpu_supports_sse42() && zend_cpu_supports_pclmul()) {
       |     return crc32_sse42_pclmul_update;
       |   }
       |   return crc32_x86_simd_update_default;
       | }
       | # else /* ZEND_INTRIN_SSE4_2_PCLMUL_FUNC_PTR */
       | static size_t (*crc32_x86_simd_ptr)(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr) = crc32_x86_simd_update_default;
       |
   143 | size_t crc32_x86_simd_update(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr) {
   143 |   return crc32_x86_simd_ptr(type, crc, p, nr);
   143 | }
       |
       | /* {{{ PHP_MINIT_FUNCTION */
       | PHP_MINIT_FUNCTION(crc32_x86_intrin)
    16 | {
    16 |   if (zend_cpu_supports_sse42() && zend_cpu_supports_pclmul()) {
    16 |     crc32_x86_simd_ptr = crc32_sse42_pclmul_update;
    16 |   }
    16 |   return SUCCESS;
    16 | }
       | /* }}} */
       | # endif
       | #endif
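
  [editor's note] The two resolver branches make the same dispatch decision
  at different times: the FUNC_PROTO path uses a GNU ifunc, so the dynamic
  loader calls resolve_crc32_x86_simd_update() once at load time (which is
  also why the resolver carries ZEND_NO_SANITIZE_ADDRESS: it can run before
  the sanitizer runtime is initialized), while the FUNC_PTR fallback patches
  a plain function pointer during PHP_MINIT. A stripped-down sketch of the
  ifunc shape, assuming GCC or Clang on an ELF target (all names here are
  illustrative, not from this file):

      static int add_generic(int a, int b) { return a + b; }
      static int add_fast(int a, int b)    { return a + b; /* pretend SIMD */ }

      /* Runs before main(); keep it self-contained. */
      static int (*resolve_add(void))(int, int)
      {
          __builtin_cpu_init();  /* required before __builtin_cpu_supports here */
          return __builtin_cpu_supports("sse4.2") ? add_fast : add_generic;
      }

      int add(int a, int b) __attribute__((ifunc("resolve_add")));
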