Coverage Report

Created: 2026-02-26 06:53

/src/zlib-ng/arch/x86/chunkset_avx512.c
 Line | Count | Source
    1 |       | /* chunkset_avx512.c -- AVX512 inline functions to copy small data chunks.
    2 |       |  * For conditions of distribution and use, see copyright notice in zlib.h
    3 |       |  */
    4 |       |
    5 |       | #ifdef X86_AVX512
    6 |       |
    7 |       | #include "zbuild.h"
    8 |       | #include "zmemory.h"
    9 |       |
   10 |       | #include "arch/generic/chunk_256bit_perm_idx_lut.h"
   11 |       | #include <immintrin.h>
   12 |       | #include "x86_intrins.h"
   13 |       |
   14 |       | typedef __m256i chunk_t;
   15 |       | typedef __m128i halfchunk_t;
   16 |       | typedef __mmask32 mask_t;
   17 |       | typedef __mmask16 halfmask_t;
   18 |       |
   19 |       | #define HAVE_CHUNKMEMSET_2
   20 |       | #define HAVE_CHUNKMEMSET_4
   21 |       | #define HAVE_CHUNKMEMSET_8
   22 |       | #define HAVE_CHUNKMEMSET_16
   23 |       | #define HAVE_CHUNK_MAG
   24 |       | #define HAVE_HALF_CHUNK
   25 |       | #define HAVE_MASKED_READWRITE
   26 |       | #define HAVE_CHUNKCOPY
   27 |       | #define HAVE_HALFCHUNKCOPY
   28 |       |
   29 |     0 | static inline halfmask_t gen_half_mask(size_t len) {
   30 |     0 |     return (halfmask_t)_bzhi_u32(0xFFFF, (unsigned)len);
   31 |     0 | }
   32 |       |
   33 |     0 | static inline mask_t gen_mask(size_t len) {
   34 |     0 |     return (mask_t)_bzhi_u32(0xFFFFFFFF, (unsigned)len);
   35 |     0 | }
   36 |       |
   37 |     0 | static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
   38 |     0 |     *chunk = _mm256_set1_epi16(zng_memread_2(from));
   39 |     0 | }
   40 |       |
   41 |     0 | static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
   42 |     0 |     *chunk = _mm256_set1_epi32(zng_memread_4(from));
   43 |     0 | }
   44 |       |
   45 |     0 | static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
   46 |     0 |     *chunk = _mm256_set1_epi64x(zng_memread_8(from));
   47 |     0 | }
   48 |       |
   49 |     0 | static inline void chunkmemset_16(uint8_t *from, chunk_t *chunk) {
   50 |       |     /* Unfortunately there seems to be a compiler bug in Visual Studio 2015 where
   51 |       |      * the load is dumped to the stack with an aligned move for this memory-register
   52 |       |      * broadcast. The vbroadcasti128 instruction is 2 fewer cycles and this dump to
   53 |       |      * stack doesn't exist if compiled with optimizations. For the sake of working
   54 |       |      * properly in a debugger, let's take the 2 cycle penalty */
   55 |       | #if defined(_MSC_VER) && _MSC_VER <= 1900
   56 |       |     halfchunk_t half = _mm_loadu_si128((__m128i*)from);
   57 |       |     *chunk = _mm256_inserti128_si256(_mm256_castsi128_si256(half), half, 1);
   58 |       | #else
   59 |     0 |     *chunk = _mm256_broadcastsi128_si256(_mm_loadu_si128((__m128i*)from));
   60 |     0 | #endif
   61 |     0 | }
   62 |       |
   63 |     0 | static inline void loadchunk(uint8_t const *s, chunk_t *chunk) {
   64 |     0 |     *chunk = _mm256_loadu_si256((__m256i *)s);
   65 |     0 | }
   66 |       |
   67 |     0 | static inline void storechunk(uint8_t *out, chunk_t *chunk) {
   68 |     0 |     _mm256_storeu_si256((__m256i *)out, *chunk);
   69 |     0 | }
   70 |       |
   71 |     0 | static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, size_t len) {
   72 |     0 |     Assert(len > 0, "chunkcopy should never have a length 0");
   73 |       |
   74 |     0 |     chunk_t chunk;
   75 |     0 |     size_t rem = len % sizeof(chunk_t);
   76 |       |
   77 |     0 |     if (len < sizeof(chunk_t)) {
   78 |     0 |         mask_t rem_mask = gen_mask(rem);
   79 |     0 |         chunk = _mm256_maskz_loadu_epi8(rem_mask, from);
   80 |     0 |         _mm256_mask_storeu_epi8(out, rem_mask, chunk);
   81 |     0 |         return out + rem;
   82 |     0 |     }
   83 |       |
   84 |     0 |     loadchunk(from, &chunk);
   85 |     0 |     rem = (rem == 0) ? sizeof(chunk_t) : rem;
   86 |     0 |     storechunk(out, &chunk);
   87 |     0 |     out += rem;
   88 |     0 |     from += rem;
   89 |     0 |     len -= rem;
   90 |       |
   91 |     0 |     while (len > 0) {
   92 |     0 |         loadchunk(from, &chunk);
   93 |     0 |         storechunk(out, &chunk);
   94 |     0 |         out += sizeof(chunk_t);
   95 |     0 |         from += sizeof(chunk_t);
   96 |     0 |         len -= sizeof(chunk_t);
   97 |     0 |     }
   98 |       |
   99 |     0 |     return out;
  100 |     0 | }
  101 |       |
  102 |       | /* MSVC compiler decompression bug when optimizing for size */
  103 |       | #if defined(_MSC_VER) && _MSC_VER < 1943
  104 |       | #  pragma optimize("", off)
  105 |       | #endif
  106 |     0 | static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, size_t *chunk_rem, size_t dist) {
  107 |     0 |     lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
  108 |     0 |     __m256i ret_vec;
  109 |     0 |     *chunk_rem = lut_rem.remval;
  110 |       |
  111 |       |     /* See the AVX2 implementation for more detailed comments. This is that + some masked
  112 |       |      * loads to avoid an out of bounds read on the heap */
  113 |       |
  114 |     0 |     if (dist < 16) {
  115 |     0 |         __m256i perm_vec = _mm256_load_si256((__m256i*)(permute_table+lut_rem.idx));
  116 |     0 |         halfmask_t load_mask = gen_half_mask(dist);
  117 |     0 |         __m128i ret_vec0 = _mm_maskz_loadu_epi8(load_mask, buf);
  118 |     0 |         ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
  119 |     0 |         ret_vec = _mm256_shuffle_epi8(ret_vec, perm_vec);
  120 |     0 |     } else {
  121 |     0 |         halfmask_t load_mask = gen_half_mask(dist - 16);
  122 |     0 |         __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
  123 |     0 |         __m128i ret_vec1 = _mm_maskz_loadu_epi8(load_mask, (__m128i*)(buf + 16));
  124 |     0 |         __m128i perm_vec1 = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
  125 |     0 |         halfmask_t xlane_mask = _mm_cmp_epi8_mask(perm_vec1, _mm_set1_epi8(15), _MM_CMPINT_LE);
  126 |     0 |         __m128i latter_half = _mm_mask_shuffle_epi8(ret_vec1, xlane_mask, ret_vec0, perm_vec1);
  127 |     0 |         ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), latter_half, 1);
  128 |     0 |     }
  129 |       |
  130 |     0 |     return ret_vec;
  131 |     0 | }
  132 |       | #if defined(_MSC_VER) && _MSC_VER < 1943
  133 |       | #  pragma optimize("", on)
  134 |       | #endif
  135 |       |
  136 |     0 | static inline void storehalfchunk(uint8_t *out, halfchunk_t *chunk) {
  137 |     0 |     _mm_storeu_si128((__m128i *)out, *chunk);
  138 |     0 | }
  139 |       |
  140 |     0 | static inline chunk_t halfchunk2whole(halfchunk_t *chunk) {
  141 |       |     /* We zero extend mostly to appease some memory sanitizers. These bytes are ultimately
  142 |       |      * unlikely to be actually written or read from */
  143 |     0 |     return _mm256_zextsi128_si256(*chunk);
  144 |     0 | }
  145 |       |
  146 |     0 | static inline halfchunk_t GET_HALFCHUNK_MAG(uint8_t *buf, size_t *chunk_rem, size_t dist) {
  147 |     0 |     lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
  148 |     0 |     __m128i perm_vec, ret_vec;
  149 |     0 |     halfmask_t load_mask = gen_half_mask(dist);
  150 |     0 |     ret_vec = _mm_maskz_loadu_epi8(load_mask, buf);
  151 |     0 |     *chunk_rem = half_rem_vals[dist - 3];
  152 |       |
  153 |     0 |     perm_vec = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
  154 |     0 |     ret_vec = _mm_shuffle_epi8(ret_vec, perm_vec);
  155 |       |
  156 |     0 |     return ret_vec;
  157 |     0 | }
  158 |       |
  159 |     0 | static inline uint8_t* HALFCHUNKCOPY(uint8_t *out, uint8_t const *from, size_t len) {
  160 |     0 |     Assert(len > 0, "chunkcopy should never have a length 0");
  161 |     0 |     halfchunk_t chunk;
  162 |       |
  163 |     0 |     size_t rem = len % sizeof(halfchunk_t);
  164 |     0 |     if (rem == 0) {
  165 |     0 |         rem = sizeof(halfchunk_t);
  166 |     0 |     }
  167 |       |
  168 |     0 |     halfmask_t rem_mask = gen_half_mask(rem);
  169 |     0 |     chunk = _mm_maskz_loadu_epi8(rem_mask, from);
  170 |     0 |     _mm_mask_storeu_epi8(out, rem_mask, chunk);
  171 |       |
  172 |     0 |     return out + rem;
  173 |     0 | }
  174 |       |
  175 |       | #define CHUNKSIZE        chunksize_avx512
  176 |     0 | #define CHUNKUNROLL      chunkunroll_avx512
  177 |     0 | #define CHUNKMEMSET      chunkmemset_avx512
  178 |       | #define CHUNKMEMSET_SAFE chunkmemset_safe_avx512
  179 |       |
  180 |       | #include "chunkset_tpl.h"
  181 |       |
  182 |       | #define INFLATE_FAST     inflate_fast_avx512
  183 |       |
  184 |       | #include "inffast_tpl.h"
  185 |       |
  186 |       | #endif
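
Illustrative note (not part of the file above): the uncovered tail paths in CHUNKCOPY and HALFCHUNKCOPY build a byte mask with BMI2 _bzhi_u32 and then use AVX512 masked loads and stores, so a copy of len bytes never reads or writes past the end of either buffer. The standalone sketch below shows that idea in isolation under stated assumptions: it targets AVX512VL/AVX512BW/BMI2 (e.g. gcc -mavx512vl -mavx512bw -mbmi2), and masked_copy_tail is a hypothetical helper written for this note, not a zlib-ng function.

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy len (< 32) bytes using a mask, same trick as gen_mask() + the
 * _mm256_maskz_loadu_epi8 / _mm256_mask_storeu_epi8 pair in CHUNKCOPY. */
static void masked_copy_tail(uint8_t *out, const uint8_t *from, size_t len) {
    /* Mask with the low `len` bits set (BMI2 bit-zero-high). */
    __mmask32 m = (__mmask32)_bzhi_u32(0xFFFFFFFF, (unsigned)len);
    /* Bytes outside the mask are neither read nor written. */
    __m256i v = _mm256_maskz_loadu_epi8(m, from);
    _mm256_mask_storeu_epi8(out, m, v);
}

int main(void) {
    uint8_t src[32] = "masked tail copy";
    uint8_t dst[32];
    memset(dst, '.', sizeof(dst));
    masked_copy_tail(dst, src, 11);   /* copy only the first 11 bytes */
    printf("%.32s\n", dst);           /* prints "masked tail" followed by dots */
    return 0;
}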