Coverage Report

Created: 2025-09-27 06:26

/src/php-src/ext/standard/crc32_x86.c
 Line | Count | Source
------+-------+---------------------------------------------------------------
    1 |       | /*
    2 |       |   +----------------------------------------------------------------------+
    3 |       |   | Copyright (c) The PHP Group                                          |
    4 |       |   +----------------------------------------------------------------------+
    5 |       |   | This source file is subject to version 3.01 of the PHP license,      |
    6 |       |   | that is bundled with this package in the file LICENSE, and is        |
    7 |       |   | available through the world-wide-web at the following url:           |
    8 |       |   | https://www.php.net/license/3_01.txt                                 |
    9 |       |   | If you did not receive a copy of the PHP license and are unable to   |
   10 |       |   | obtain it through the world-wide-web, please send a note to          |
   11 |       |   | license@php.net so we can mail you a copy immediately.               |
   12 |       |   +----------------------------------------------------------------------+
   13 |       |   | Author: Frank Du <frank.du@intel.com>                                |
   14 |       |   +----------------------------------------------------------------------+
   15 |       |   | Compute the crc32 of the buffer. Based on:                           |
   16 |       |   | "Fast CRC Computation for Generic Polynomials Using PCLMULQDQ"       |
   17 |       |   |  V. Gopal, E. Ozturk, et al., 2009, http://intel.ly/2ySEwL0          |
   18 |       | */
   19 |       |
   20 |       | #include "crc32_x86.h"
   21 |       |
   22 |       | #if defined(ZEND_INTRIN_SSE4_2_PCLMUL_NATIVE) || defined(ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER)
   23 |       | # include <nmmintrin.h>
   24 |       | # include <wmmintrin.h>
   25 |       | #endif
   26 |       |
   27 |       | #ifdef ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER
   28 |       | # include "Zend/zend_cpuinfo.h"
   29 |       | #endif
   30 |       |
   31 |       | #if defined(ZEND_INTRIN_SSE4_2_PCLMUL_NATIVE) || defined(ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER)
   32 |       |
   33 |       | typedef struct _crc32_pclmul_bit_consts {
   34 |       |   uint64_t k1k2[2];
   35 |       |   uint64_t k3k4[2];
   36 |       |   uint64_t k5k6[2];
   37 |       |   uint64_t uPx[2];
   38 |       | } crc32_pclmul_consts;
   39 |       |
   40 |       | static const crc32_pclmul_consts crc32_pclmul_consts_maps[X86_CRC32_MAX] = {
   41 |       |   { /* X86_CRC32, polynomial: 0x04C11DB7 */
   42 |       |     {0x00e6228b11, 0x008833794c}, /* endianness swap */
   43 |       |     {0x00e8a45605, 0x00c5b9cd4c}, /* endianness swap */
   44 |       |     {0x00490d678d, 0x00f200aa66}, /* endianness swap */
   45 |       |     {0x0104d101df, 0x0104c11db7}
   46 |       |   },
   47 |       |   { /* X86_CRC32B, polynomial: 0x04C11DB7 with reversed ordering */
   48 |       |     {0x0154442bd4, 0x01c6e41596},
   49 |       |     {0x01751997d0, 0x00ccaa009e},
   50 |       |     {0x0163cd6124, 0x01db710640},
   51 |       |     {0x01f7011641, 0x01db710641},
   52 |       |   },
   53 |       |   { /* X86_CRC32C, polynomial: 0x1EDC6F41 with reversed ordering */
   54 |       |     {0x00740eef02, 0x009e4addf8},
   55 |       |     {0x00f20c0dfe, 0x014cd00bd6},
   56 |       |     {0x00dd45aab8, 0x0000000000},
   57 |       |     {0x00dea713f1, 0x0105ec76f0}
   58 |       |   }
   59 |       | };
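
Note: each 64-bit entry in the table above is a folding constant from the
Gopal/Ozturk paper, a residue of the form x^n mod P(x) computed over GF(2),
where the exponent n comes from how far a fold moves data (for example,
512 bits for the fold-by-4 loop). uPx holds u(x) = floor(x^64 / P(x)) and
P(x) itself for the final Barrett reduction. A minimal sketch of how such a
residue can be derived for the non-reflected polynomial; the helper name
xn_mod_p and the example exponent are illustrative, not taken from the
source:

#include <stdint.h>
#include <stdio.h>

#define P_CRC32 0x04C11DB7u /* P(x) with the implicit x^32 term dropped */

/* Hypothetical helper: computes x^n mod P(x) over GF(2), MSB-first. */
static uint32_t xn_mod_p(unsigned n)
{
	uint32_t r = 1; /* represents x^0 */
	while (n--) {
		uint32_t carry = r & 0x80000000u;
		r <<= 1;            /* multiply by x */
		if (carry)
			r ^= P_CRC32;   /* reduce: x^32 == P(x) - x^32 (mod 2) */
	}
	return r;
}

int main(void)
{
	unsigned n = 576; /* illustrative fold distance, 512 + 64 bits */
	printf("x^%u mod P(x) = 0x%08x\n", n, (unsigned)xn_mod_p(n));
	return 0;
}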
   60 |       |
   61 |       | static uint8_t pclmul_shuf_mask_table[16] = {
   62 |       |   0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
   63 |       |   0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
   64 |       | };
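
Note: the descending byte indices make _mm_shuffle_epi8 reverse all 16
bytes of a register. Loads are little-endian, but the non-reflected
(MSB-first) CRC variant needs the opposite byte order, so every block is
swapped before folding. A standalone check of the mask's effect (compile
with -mssse3 or higher):

#include <stdio.h>
#include <tmmintrin.h> /* SSSE3: _mm_shuffle_epi8 */

int main(void)
{
	const unsigned char mask[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
	unsigned char in[16], out[16];
	for (int i = 0; i < 16; i++) in[i] = (unsigned char)i;
	__m128i v = _mm_loadu_si128((const __m128i *)in);
	v = _mm_shuffle_epi8(v, _mm_loadu_si128((const __m128i *)mask));
	_mm_storeu_si128((__m128i *)out, v);
	for (int i = 0; i < 16; i++) printf("%d ", out[i]); /* 15 14 ... 1 0 */
	putchar('\n');
	return 0;
}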
   65 |       |
   66 |       | /* Folding of 128-bit data chunks */
   67 | 42.0k | #define CRC32_FOLDING_BLOCK_SIZE (16)
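
Note: both batch routines below are built from one primitive, a 128-bit
fold. _mm_clmulepi64_si128(x, k, 0x00) carry-less-multiplies the low 64-bit
halves of x and k, selector 0x11 multiplies the high halves, and XORing the
two 128-bit products with the next data block advances the CRC state across
that block. A scalar model of the step; clmul64 is a hypothetical software
stand-in for the PCLMULQDQ instruction:

#include <stdint.h>

/* 64x64 -> 128-bit carry-less multiply (software model). */
static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	uint64_t h = 0, l = 0;
	for (int i = 0; i < 64; i++) {
		if ((b >> i) & 1) {
			l ^= a << i;
			if (i)
				h ^= a >> (64 - i);
		}
	}
	*hi = h;
	*lo = l;
}

/* One fold: x (x_hi:x_lo) is the 128-bit accumulator, k_lo/k_hi the
 * folding constants (intrinsic selectors 0x00 and 0x11), d_hi:d_lo the
 * next 16-byte block. */
static void fold_128(uint64_t *x_hi, uint64_t *x_lo,
                     uint64_t k_hi, uint64_t k_lo,
                     uint64_t d_hi, uint64_t d_lo)
{
	uint64_t a_hi, a_lo, b_hi, b_lo;
	clmul64(*x_lo, k_lo, &a_hi, &a_lo); /* _mm_clmulepi64_si128(x, k, 0x00) */
	clmul64(*x_hi, k_hi, &b_hi, &b_lo); /* _mm_clmulepi64_si128(x, k, 0x11) */
	*x_hi = a_hi ^ b_hi ^ d_hi;
	*x_lo = a_lo ^ b_lo ^ d_lo;
}

The fold-by-4 loops simply run this step on four independent 128-bit
accumulators so the multiply latency overlaps across lanes.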
   68 |       |
   69 |       | /* PCLMUL version of non-reflected crc32 */
   70 |       | ZEND_INTRIN_SSE4_2_PCLMUL_FUNC_DECL(size_t crc32_pclmul_batch(uint32_t *crc, const unsigned char *p, size_t nr, const crc32_pclmul_consts *consts));
   71 |       | size_t crc32_pclmul_batch(uint32_t *crc, const unsigned char *p, size_t nr, const crc32_pclmul_consts *consts)
   72 |    29 | {
   73 |    29 |   size_t nr_in = nr;
   74 |    29 |   __m128i x0, x1, x2, k, shuf_mask;
   75 |       |
   76 |    29 |   if (nr < CRC32_FOLDING_BLOCK_SIZE) {
   77 |     4 |     return 0;
   78 |     4 |   }
   79 |       |
   80 |    25 |   shuf_mask = _mm_loadu_si128((__m128i *)(pclmul_shuf_mask_table));
   81 |    25 |   x0 = _mm_cvtsi32_si128(*crc);
   82 |    25 |   x1 = _mm_loadu_si128((__m128i *)(p + 0x00));
   83 |    25 |   x0 = _mm_slli_si128(x0, 12);
   84 |    25 |   x1 = _mm_shuffle_epi8(x1, shuf_mask); /* endianness swap */
   85 |    25 |   x0 = _mm_xor_si128(x1, x0);
   86 |    25 |   p += CRC32_FOLDING_BLOCK_SIZE;
   87 |    25 |   nr -= CRC32_FOLDING_BLOCK_SIZE;
   88 |       |
   89 |    25 |   if (nr >= (CRC32_FOLDING_BLOCK_SIZE * 3)) {
   90 |    20 |     __m128i x3, x4;
   91 |       |
   92 |    20 |     x1 = _mm_loadu_si128((__m128i *)(p + 0x00));
   93 |    20 |     x1 = _mm_shuffle_epi8(x1, shuf_mask); /* endianness swap */
   94 |    20 |     x2 = _mm_loadu_si128((__m128i *)(p + 0x10));
   95 |    20 |     x2 = _mm_shuffle_epi8(x2, shuf_mask); /* endianness swap */
   96 |    20 |     x3 = _mm_loadu_si128((__m128i *)(p + 0x20));
   97 |    20 |     x3 = _mm_shuffle_epi8(x3, shuf_mask); /* endianness swap */
   98 |    20 |     p += CRC32_FOLDING_BLOCK_SIZE * 3;
   99 |    20 |     nr -= CRC32_FOLDING_BLOCK_SIZE * 3;
  100 |       |
  101 |    20 |     k = _mm_loadu_si128((__m128i *)consts->k1k2);
  102 |       |     /* parallel folding by 4 */
  103 | 6.93k |     while (nr >= (CRC32_FOLDING_BLOCK_SIZE * 4)) {
  104 | 6.91k |       __m128i x5, x6, x7, x8, x9, x10, x11;
  105 | 6.91k |       x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  106 | 6.91k |       x5 = _mm_clmulepi64_si128(x1, k, 0x00);
  107 | 6.91k |       x6 = _mm_clmulepi64_si128(x2, k, 0x00);
  108 | 6.91k |       x7 = _mm_clmulepi64_si128(x3, k, 0x00);
  109 | 6.91k |       x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  110 | 6.91k |       x1 = _mm_clmulepi64_si128(x1, k, 0x11);
  111 | 6.91k |       x2 = _mm_clmulepi64_si128(x2, k, 0x11);
  112 | 6.91k |       x3 = _mm_clmulepi64_si128(x3, k, 0x11);
  113 | 6.91k |       x8 = _mm_loadu_si128((__m128i *)(p + 0x00));
  114 | 6.91k |       x8 = _mm_shuffle_epi8(x8, shuf_mask); /* endianness swap */
  115 | 6.91k |       x9 = _mm_loadu_si128((__m128i *)(p + 0x10));
  116 | 6.91k |       x9 = _mm_shuffle_epi8(x9, shuf_mask); /* endianness swap */
  117 | 6.91k |       x10 = _mm_loadu_si128((__m128i *)(p + 0x20));
  118 | 6.91k |       x10 = _mm_shuffle_epi8(x10, shuf_mask); /* endianness swap */
  119 | 6.91k |       x11 = _mm_loadu_si128((__m128i *)(p + 0x30));
  120 | 6.91k |       x11 = _mm_shuffle_epi8(x11, shuf_mask); /* endianness swap */
  121 | 6.91k |       x0 = _mm_xor_si128(x0, x4);
  122 | 6.91k |       x1 = _mm_xor_si128(x1, x5);
  123 | 6.91k |       x2 = _mm_xor_si128(x2, x6);
  124 | 6.91k |       x3 = _mm_xor_si128(x3, x7);
  125 | 6.91k |       x0 = _mm_xor_si128(x0, x8);
  126 | 6.91k |       x1 = _mm_xor_si128(x1, x9);
  127 | 6.91k |       x2 = _mm_xor_si128(x2, x10);
  128 | 6.91k |       x3 = _mm_xor_si128(x3, x11);
  129 |       |
  130 | 6.91k |       p += CRC32_FOLDING_BLOCK_SIZE * 4;
  131 | 6.91k |       nr -= CRC32_FOLDING_BLOCK_SIZE * 4;
  132 | 6.91k |     }
  133 |       |
  134 |    20 |     k = _mm_loadu_si128((__m128i *)consts->k3k4);
  135 |       |     /* fold 4 to 1, [x1, x2, x3] -> x0 */
  136 |    20 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  137 |    20 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  138 |    20 |     x0 = _mm_xor_si128(x0, x1);
  139 |    20 |     x0 = _mm_xor_si128(x0, x4);
  140 |    20 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  141 |    20 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  142 |    20 |     x0 = _mm_xor_si128(x0, x2);
  143 |    20 |     x0 = _mm_xor_si128(x0, x4);
  144 |    20 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  145 |    20 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  146 |    20 |     x0 = _mm_xor_si128(x0, x3);
  147 |    20 |     x0 = _mm_xor_si128(x0, x4);
  148 |    20 |   }
  149 |       |
  150 |    25 |   k = _mm_loadu_si128((__m128i *)consts->k3k4);
  151 |       |   /* folding by 1 */
  152 |    51 |   while (nr >= CRC32_FOLDING_BLOCK_SIZE) {
  153 |       |     /* load next to x2, fold to x0, x1 */
  154 |    26 |     x2 = _mm_loadu_si128((__m128i *)(p + 0x00));
  155 |    26 |     x2 = _mm_shuffle_epi8(x2, shuf_mask); /* endianness swap */
  156 |    26 |     x1 = _mm_clmulepi64_si128(x0, k, 0x00);
  157 |    26 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  158 |    26 |     x0 = _mm_xor_si128(x0, x2);
  159 |    26 |     x0 = _mm_xor_si128(x0, x1);
  160 |    26 |     p += CRC32_FOLDING_BLOCK_SIZE;
  161 |    26 |     nr -= CRC32_FOLDING_BLOCK_SIZE;
  162 |    26 |   }
  163 |       |
  164 |       |   /* reduce 128-bits(final fold) to 96-bits */
  165 |    25 |   k = _mm_loadu_si128((__m128i*)consts->k5k6);
  166 |    25 |   x1 = _mm_clmulepi64_si128(x0, k, 0x11);
  167 |    25 |   x0 = _mm_slli_si128(x0, 8);
  168 |    25 |   x0 = _mm_srli_si128(x0, 4);
  169 |    25 |   x0 = _mm_xor_si128(x0, x1);
  170 |       |   /* reduce 96-bits to 64-bits */
  171 |    25 |   x1 = _mm_clmulepi64_si128(x0, k, 0x01);
  172 |    25 |   x0 = _mm_xor_si128(x0, x1);
  173 |       |
  174 |       |   /* barrett reduction */
  175 |    25 |   k = _mm_loadu_si128((__m128i*)consts->uPx);
  176 |    25 |   x1 = _mm_move_epi64(x0);
  177 |    25 |   x1 = _mm_srli_si128(x1, 4);
  178 |    25 |   x1 = _mm_clmulepi64_si128(x1, k, 0x00);
  179 |    25 |   x1 = _mm_srli_si128(x1, 4);
  180 |    25 |   x1 = _mm_clmulepi64_si128(x1, k, 0x10);
  181 |    25 |   x0 = _mm_xor_si128(x1, x0);
  182 |    25 |   *crc =  _mm_extract_epi32(x0, 0);
  183 |    25 |   return (nr_in - nr); /* the nr processed */
  184 |    29 | }
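
Note: the tail of crc32_pclmul_batch narrows the accumulator from 128 to 96
to 64 bits using k5/k6, then finishes with a Barrett reduction against the
u(x) and P(x) pair stored in uPx. Following the paper's formulation, with
all products carry-less (GF(2)):

  T1(x) = floor(R(x) / x^32) * u(x)
  T2(x) = floor(T1(x) / x^32) * P(x)
  CRC   = (R(x) xor T2(x)) mod x^32

where R(x) is the 64-bit remainder after the 96-to-64-bit step and
u(x) = floor(x^64 / P(x)). The two _mm_clmulepi64_si128 calls with
selectors 0x00 and 0x10 perform the multiplications by u(x) and P(x), and
the _mm_srli_si128 byte shifts realize the floor divisions by x^32.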
  185 |       |
  186 |       | /* PCLMUL version of reflected crc32 */
  187 |       | ZEND_INTRIN_SSE4_2_PCLMUL_FUNC_DECL(size_t crc32_pclmul_reflected_batch(uint32_t *crc, const unsigned char *p, size_t nr, const crc32_pclmul_consts *consts));
  188 |       | size_t crc32_pclmul_reflected_batch(uint32_t *crc, const unsigned char *p, size_t nr, const crc32_pclmul_consts *consts)
  189 |   105 | {
  190 |   105 |   size_t nr_in = nr;
  191 |   105 |   __m128i x0, x1, x2, k;
  192 |       |
  193 |   105 |   if (nr < CRC32_FOLDING_BLOCK_SIZE) {
  194 |    47 |     return 0;
  195 |    47 |   }
  196 |       |
  197 |    58 |   x0 = _mm_loadu_si128((__m128i *)(p + 0x00));
  198 |    58 |   x0 = _mm_xor_si128(x0, _mm_cvtsi32_si128(*crc));
  199 |    58 |   p += CRC32_FOLDING_BLOCK_SIZE;
  200 |    58 |   nr -= CRC32_FOLDING_BLOCK_SIZE;
  201 |    58 |   if (nr >= (CRC32_FOLDING_BLOCK_SIZE * 3)) {
  202 |    31 |     __m128i x3, x4;
  203 |       |
  204 |    31 |     x1 = _mm_loadu_si128((__m128i *)(p + 0x00));
  205 |    31 |     x2 = _mm_loadu_si128((__m128i *)(p + 0x10));
  206 |    31 |     x3 = _mm_loadu_si128((__m128i *)(p + 0x20));
  207 |    31 |     p += CRC32_FOLDING_BLOCK_SIZE * 3;
  208 |    31 |     nr -= CRC32_FOLDING_BLOCK_SIZE * 3;
  209 |       |
  210 |    31 |     k = _mm_loadu_si128((__m128i *)consts->k1k2);
  211 |       |     /* parallel folding by 4 */
  212 | 6.84k |     while (nr >= (CRC32_FOLDING_BLOCK_SIZE * 4)) {
  213 | 6.81k |       __m128i x5, x6, x7, x8, x9, x10, x11;
  214 | 6.81k |       x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  215 | 6.81k |       x5 = _mm_clmulepi64_si128(x1, k, 0x00);
  216 | 6.81k |       x6 = _mm_clmulepi64_si128(x2, k, 0x00);
  217 | 6.81k |       x7 = _mm_clmulepi64_si128(x3, k, 0x00);
  218 | 6.81k |       x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  219 | 6.81k |       x1 = _mm_clmulepi64_si128(x1, k, 0x11);
  220 | 6.81k |       x2 = _mm_clmulepi64_si128(x2, k, 0x11);
  221 | 6.81k |       x3 = _mm_clmulepi64_si128(x3, k, 0x11);
  222 | 6.81k |       x8 = _mm_loadu_si128((__m128i *)(p + 0x00));
  223 | 6.81k |       x9 = _mm_loadu_si128((__m128i *)(p + 0x10));
  224 | 6.81k |       x10 = _mm_loadu_si128((__m128i *)(p + 0x20));
  225 | 6.81k |       x11 = _mm_loadu_si128((__m128i *)(p + 0x30));
  226 | 6.81k |       x0 = _mm_xor_si128(x0, x4);
  227 | 6.81k |       x1 = _mm_xor_si128(x1, x5);
  228 | 6.81k |       x2 = _mm_xor_si128(x2, x6);
  229 | 6.81k |       x3 = _mm_xor_si128(x3, x7);
  230 | 6.81k |       x0 = _mm_xor_si128(x0, x8);
  231 | 6.81k |       x1 = _mm_xor_si128(x1, x9);
  232 | 6.81k |       x2 = _mm_xor_si128(x2, x10);
  233 | 6.81k |       x3 = _mm_xor_si128(x3, x11);
  234 |       |
  235 | 6.81k |       p += CRC32_FOLDING_BLOCK_SIZE * 4;
  236 | 6.81k |       nr -= CRC32_FOLDING_BLOCK_SIZE * 4;
  237 | 6.81k |     }
  238 |       |
  239 |    31 |     k = _mm_loadu_si128((__m128i *)consts->k3k4);
  240 |       |     /* fold 4 to 1, [x1, x2, x3] -> x0 */
  241 |    31 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  242 |    31 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  243 |    31 |     x0 = _mm_xor_si128(x0, x1);
  244 |    31 |     x0 = _mm_xor_si128(x0, x4);
  245 |    31 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  246 |    31 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  247 |    31 |     x0 = _mm_xor_si128(x0, x2);
  248 |    31 |     x0 = _mm_xor_si128(x0, x4);
  249 |    31 |     x4 = _mm_clmulepi64_si128(x0, k, 0x00);
  250 |    31 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  251 |    31 |     x0 = _mm_xor_si128(x0, x3);
  252 |    31 |     x0 = _mm_xor_si128(x0, x4);
  253 |    31 |   }
  254 |       |
  255 |    58 |   k = _mm_loadu_si128((__m128i *)consts->k3k4);
  256 |       |   /* folding by 1 */
  257 |   107 |   while (nr >= CRC32_FOLDING_BLOCK_SIZE) {
  258 |       |     /* load next to x2, fold to x0, x1 */
  259 |    49 |     x2 = _mm_loadu_si128((__m128i *)(p + 0x00));
  260 |    49 |     x1 = _mm_clmulepi64_si128(x0, k, 0x00);
  261 |    49 |     x0 = _mm_clmulepi64_si128(x0, k, 0x11);
  262 |    49 |     x0 = _mm_xor_si128(x0, x2);
  263 |    49 |     x0 = _mm_xor_si128(x0, x1);
  264 |    49 |     p += CRC32_FOLDING_BLOCK_SIZE;
  265 |    49 |     nr -= CRC32_FOLDING_BLOCK_SIZE;
  266 |    49 |   }
  267 |       |
  268 |       |   /* reduce 128-bits(final fold) to 96-bits */
  269 |    58 |   x1 = _mm_clmulepi64_si128(x0, k, 0x10);
  270 |    58 |   x0 = _mm_srli_si128(x0, 8);
  271 |    58 |   x0 = _mm_xor_si128(x0, x1);
  272 |       |   /* reduce 96-bits to 64-bits */
  273 |    58 |   x1 = _mm_shuffle_epi32(x0, 0xfc);
  274 |    58 |   x0 = _mm_shuffle_epi32(x0, 0xf9);
  275 |    58 |   k = _mm_loadu_si128((__m128i*)consts->k5k6);
  276 |    58 |   x1 = _mm_clmulepi64_si128(x1, k, 0x00);
  277 |    58 |   x0 = _mm_xor_si128(x0, x1);
  278 |       |
  279 |       |   /* barrett reduction */
  280 |    58 |   x1 = _mm_shuffle_epi32(x0, 0xf3);
  281 |    58 |   x0 = _mm_slli_si128(x0, 4);
  282 |    58 |   k = _mm_loadu_si128((__m128i*)consts->uPx);
  283 |    58 |   x1 = _mm_clmulepi64_si128(x1, k, 0x00);
  284 |    58 |   x1 = _mm_clmulepi64_si128(x1, k, 0x10);
  285 |    58 |   x0 = _mm_xor_si128(x1, x0);
  286 |    58 |   *crc =  _mm_extract_epi32(x0, 2);
  287 |    58 |   return (nr_in - nr); /* the nr processed */
  288 |   105 | }
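
Note: the reflected variant needs no byte swaps, since the bit-reflected
domain matches little-endian loads directly, and its tail moves 32-bit
lanes with _mm_shuffle_epi32 selectors instead of byte shifts; the final
CRC lands in lane 2, hence _mm_extract_epi32(x0, 2). What the three
selectors do, as a standalone check (SSE2 only):

#include <stdio.h>
#include <emmintrin.h> /* SSE2: _mm_shuffle_epi32 */

static void print4(const char *tag, __m128i v)
{
	unsigned out[4];
	_mm_storeu_si128((__m128i *)out, v);
	printf("%s: %u %u %u %u\n", tag, out[0], out[1], out[2], out[3]);
}

int main(void)
{
	__m128i v = _mm_setr_epi32(10, 11, 12, 13); /* dword lanes d0..d3 */
	print4("0xfc", _mm_shuffle_epi32(v, 0xfc)); /* d0 d3 d3 d3 -> 10 13 13 13 */
	print4("0xf9", _mm_shuffle_epi32(v, 0xf9)); /* d1 d2 d3 d3 -> 11 12 13 13 */
	print4("0xf3", _mm_shuffle_epi32(v, 0xf3)); /* d3 d0 d3 d3 -> 13 10 13 13 */
	return 0;
}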
  289 |       |
  290 |       | # if defined(ZEND_INTRIN_SSE4_2_PCLMUL_NATIVE)
  291 |       | size_t crc32_x86_simd_update(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr)
  292 |       | # else /* ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER */
  293 |       | size_t crc32_sse42_pclmul_update(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr)
  294 |       | # endif
  295 |   134 | {
  296 |   134 |   if (type > X86_CRC32_MAX) {
  297 |     0 |     return 0;
  298 |     0 |   }
  299 |   134 |   const crc32_pclmul_consts *consts = &crc32_pclmul_consts_maps[type];
  300 |       |
  301 |   134 |   switch (type) {
  302 |    29 |   case X86_CRC32:
  303 |    29 |     return crc32_pclmul_batch(crc, p, nr, consts);
  304 |    78 |   case X86_CRC32B:
  305 |   105 |   case X86_CRC32C:
  306 |   105 |     return crc32_pclmul_reflected_batch(crc, p, nr, consts);
  307 |     0 |   default:
  308 |     0 |     return 0;
  309 |   134 |   }
  310 |   134 | }
  311 |       | #endif
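
Note: the dispatcher consumes only whole 16-byte blocks and returns how
many bytes it processed, so a caller is expected to finish the remainder
with the scalar table-driven loop. A hypothetical caller sketch; crc32tab
stands in for an assumed 256-entry lookup table for the reflected
polynomial and is not defined here:

#include <stddef.h>
#include <stdint.h>
#include "crc32_x86.h" /* prototype for crc32_x86_simd_update */

extern const uint32_t crc32tab[256]; /* assumed scalar lookup table */

static uint32_t crc32b_buffer(uint32_t crc, const unsigned char *p, size_t nr)
{
	/* fold the bulk with SIMD, then finish the tail byte-wise */
	size_t consumed = crc32_x86_simd_update(X86_CRC32B, &crc, p, nr);
	p += consumed;
	nr -= consumed;
	while (nr-- > 0) {
		crc = crc32tab[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	}
	return crc;
}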
  312 |       |
  313 |       | #ifdef ZEND_INTRIN_SSE4_2_PCLMUL_RESOLVER
  314 |       | static size_t crc32_x86_simd_update_default(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr)
  315 |     0 | {
  316 |     0 |   return 0;
  317 |     0 | }
  318 |       |
  319 |       | # ifdef ZEND_INTRIN_SSE4_2_PCLMUL_FUNC_PROTO
  320 |       | size_t crc32_x86_simd_update(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr) __attribute__((ifunc("resolve_crc32_x86_simd_update")));
  321 |       |
  322 |       | typedef size_t (*crc32_x86_simd_func_t)(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr);
  323 |       |
  324 |       | ZEND_NO_SANITIZE_ADDRESS
  325 |       | ZEND_ATTRIBUTE_UNUSED /* clang mistakenly warns about this */
  326 |       | static crc32_x86_simd_func_t resolve_crc32_x86_simd_update(void) {
  327 |       |   if (zend_cpu_supports_sse42() && zend_cpu_supports_pclmul()) {
  328 |       |     return crc32_sse42_pclmul_update;
  329 |       |   }
  330 |       |   return crc32_x86_simd_update_default;
  331 |       | }
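
Note: the FUNC_PROTO branch uses a GNU indirect function ("ifunc"): the
dynamic linker calls the resolver once at load time and binds
crc32_x86_simd_update directly to whichever implementation it returns, so
later calls pay no dispatch cost. Resolvers run very early, before
sanitizer runtimes are initialized, which is why the function is marked
ZEND_NO_SANITIZE_ADDRESS. A minimal self-contained sketch of the mechanism
(GCC/Clang on ELF targets; all names here are illustrative):

#include <stdio.h>

static int impl_generic(void) { return 0; }
static int impl_fast(void)    { return 1; }

/* the resolver picks an implementation once, at load time;
 * a real one would query CPU features here */
static int (*resolve_pick(void))(void)
{
	return impl_fast;
	(void)impl_generic;
}

int pick(void) __attribute__((ifunc("resolve_pick")));

int main(void)
{
	printf("%d\n", pick()); /* prints 1 */
	return 0;
}

On toolchains without ifunc support, the FUNC_PTR branch below reaches the
same effect with an ordinary function pointer that PHP_MINIT swaps in
after a runtime CPU-feature check.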
  332 |       | # else /* ZEND_INTRIN_SSE4_2_PCLMUL_FUNC_PTR */
  333 |       | static size_t (*crc32_x86_simd_ptr)(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr) = crc32_x86_simd_update_default;
  334 |       |
  335 |   134 | size_t crc32_x86_simd_update(X86_CRC32_TYPE type, uint32_t *crc, const unsigned char *p, size_t nr) {
  336 |   134 |   return crc32_x86_simd_ptr(type, crc, p, nr);
  337 |   134 | }
  338 |       |
  339 |       | /* {{{ PHP_MINIT_FUNCTION */
  340 |       | PHP_MINIT_FUNCTION(crc32_x86_intrin)
  341 |    16 | {
  342 |    16 |   if (zend_cpu_supports_sse42() && zend_cpu_supports_pclmul()) {
  343 |    16 |     crc32_x86_simd_ptr = crc32_sse42_pclmul_update;
  344 |    16 |   }
  345 |    16 |   return SUCCESS;
  346 |    16 | }
  347 |       | /* }}} */
  348 |       | # endif
  349 |       | #endif