Coverage Report

Created: 2025-07-12 06:16

/src/zlib-ng/arch/x86/adler32_avx512_p.h
#ifndef AVX512_FUNCS_H
#define AVX512_FUNCS_H

#include <immintrin.h>
#include <stdint.h>

/* Written because Visual C++ toolchains before v142 have constant overflow in AVX512 intrinsic macros */
#if defined(_MSC_VER) && !defined(_MM_K0_REG8)
#  undef _mm512_extracti64x4_epi64
#  define _mm512_extracti64x4_epi64(v1, e1) _mm512_maskz_extracti64x4_epi64(UINT8_MAX, v1, e1)
#  undef _mm512_set1_epi16
#  define _mm512_set1_epi16(e1) _mm512_maskz_set1_epi16(UINT32_MAX, e1)
#  undef _mm512_maddubs_epi16
#  define _mm512_maddubs_epi16(v1, v2) _mm512_maskz_maddubs_epi16(UINT32_MAX, v1, v2)
#endif

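The workaround above leans on the zero-masked (maskz) intrinsic variants: with an all-ones mask every lane is kept, so the masked form computes exactly what the unmasked macro would, while sidestepping the broken macro expansions in older MSVC. A minimal sketch of that equivalence (an editor's illustration, not part of the header; assumes a compiler and CPU with AVX512BW, e.g. built with -mavx512bw):

#include <immintrin.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>

int main(void) {
    /* UINT32_MAX sets all 32 mask bits, so no 16-bit lane is zeroed:
     * the maskz form and the plain intrinsic produce identical vectors. */
    __m512i masked   = _mm512_maskz_set1_epi16(UINT32_MAX, 42);
    __m512i expected = _mm512_set1_epi16(42);
    assert(memcmp(&masked, &expected, sizeof(masked)) == 0);
    return 0;
}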
/* Written because *_add_epi32(a) sets off ubsan */
static inline uint32_t _mm512_reduce_add_epu32(__m512i x) {
    /* Fold 512 bits down to 256 */
    __m256i a = _mm512_extracti64x4_epi64(x, 1);
    __m256i b = _mm512_extracti64x4_epi64(x, 0);

    /* Then 256 down to 128 */
    __m256i a_plus_b = _mm256_add_epi32(a, b);
    __m128i c = _mm256_extracti128_si256(a_plus_b, 1);
    __m128i d = _mm256_extracti128_si256(a_plus_b, 0);
    __m128i c_plus_d = _mm_add_epi32(c, d);

    /* Finally reduce the four remaining 32-bit lanes to one */
    __m128i sum1 = _mm_unpackhi_epi64(c_plus_d, c_plus_d);
    __m128i sum2 = _mm_add_epi32(sum1, c_plus_d);
    __m128i sum3 = _mm_shuffle_epi32(sum2, 0x01);
    __m128i sum4 = _mm_add_epi32(sum2, sum3);

    return _mm_cvtsi128_si32(sum4);
}
Unexecuted instantiation: adler32_avx512.c:_mm512_reduce_add_epu32
Unexecuted instantiation: adler32_avx512_vnni.c:_mm512_reduce_add_epu32
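For reference, a minimal sketch of what the reduction computes (an editor's illustration, not part of the report; assumes AVX512F hardware, a build with -mavx512f, and that the header above is available on the include path):

#include <stdio.h>
#include "adler32_avx512_p.h"  /* the header shown above */

int main(void) {
    /* Lanes 0..15 sum to 120 */
    __m512i v = _mm512_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7,
                                  8, 9, 10, 11, 12, 13, 14, 15);
    printf("%u\n", _mm512_reduce_add_epu32(v)); /* prints 120 */
    return 0;
}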

static inline uint32_t partial_hsum(__m512i x) {
    /* We need a permutation vector to extract every other integer. The
     * rest are going to be zeros. Marking this const so the compiler stands
     * a better chance of keeping this resident in a register through the
     * entire loop execution. We certainly have enough zmm registers (32) */
    const __m512i perm_vec = _mm512_setr_epi32(0, 2, 4, 6, 8, 10, 12, 14,
                                               1, 1, 1, 1, 1,  1,  1,  1);

    __m512i non_zero = _mm512_permutexvar_epi32(perm_vec, x);

    /* From here, it's a simple 256 bit wide reduction sum */
    __m256i non_zero_avx = _mm512_castsi512_si256(non_zero);

    /* See Agner Fog's vectorclass for a decent reference. Essentially, phadd is
     * pretty slow, much slower than the longer instruction sequence below */
    __m128i sum1 = _mm_add_epi32(_mm256_extracti128_si256(non_zero_avx, 1),
                                 _mm256_castsi256_si128(non_zero_avx));
    __m128i sum2 = _mm_add_epi32(sum1, _mm_unpackhi_epi64(sum1, sum1));
    __m128i sum3 = _mm_add_epi32(sum2, _mm_shuffle_epi32(sum2, 1));
    return (uint32_t)_mm_cvtsi128_si32(sum3);
}
Unexecuted instantiation: adler32_avx512.c:partial_hsum
Unexecuted instantiation: adler32_avx512_vnni.c:partial_hsum

#endif
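As with the reduction above, a short sketch of partial_hsum's contract (an editor's illustration, not from the report; same AVX512F build assumptions). The permutation gathers the even-indexed lanes into the low 256 bits and discards the rest, so the result is the sum of lanes 0, 2, ..., 14; the header's comment notes that the odd lanes are expected to be zero in the adler32 kernels anyway:

#include <stdio.h>
#include "adler32_avx512_p.h"  /* the header shown above */

int main(void) {
    /* Even-indexed lanes hold 1..8, which sum to 36; odd lanes are zero,
     * matching the layout the header's comment describes. */
    __m512i v = _mm512_setr_epi32(1, 0, 2, 0, 3, 0, 4, 0,
                                  5, 0, 6, 0, 7, 0, 8, 0);
    printf("%u\n", partial_hsum(v)); /* prints 36 */
    return 0;
}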