Coverage Report

Created: 2026-01-17 06:26

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/zlib-ng/arch/x86/chunkset_avx2.c
Line
Count
Source
1
/* chunkset_avx2.c -- AVX2 inline functions to copy small data chunks.
2
 * For conditions of distribution and use, see copyright notice in zlib.h
3
 */
4
#include "zbuild.h"
5
#include "zmemory.h"
6
7
#ifdef X86_AVX2
8
#include "arch/generic/chunk_256bit_perm_idx_lut.h"
9
#include <immintrin.h>
10
#include "x86_intrins.h"
11
12
typedef __m256i chunk_t;
13
typedef __m128i halfchunk_t;
14
15
#define HAVE_CHUNKMEMSET_2
16
#define HAVE_CHUNKMEMSET_4
17
#define HAVE_CHUNKMEMSET_8
18
#define HAVE_CHUNKMEMSET_16
19
#define HAVE_CHUNK_MAG
20
#define HAVE_HALF_CHUNK
21
22
175k
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
    /* Replicate the 2 byte pattern at "from" into every 16 bit lane of the chunk */
    uint16_t pat = zng_memread_2(from);
    *chunk = _mm256_set1_epi16((int16_t)pat);
}
25
26
67.9k
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
    /* Replicate the 4 byte pattern at "from" into every 32 bit lane of the chunk */
    uint32_t pat = zng_memread_4(from);
    *chunk = _mm256_set1_epi32((int32_t)pat);
}
29
30
8.77k
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
    /* Replicate the 8 byte pattern at "from" into every 64 bit lane of the chunk */
    uint64_t pat = zng_memread_8(from);
    *chunk = _mm256_set1_epi64x((int64_t)pat);
}
33
34
2.21k
static inline void chunkmemset_16(uint8_t *from, chunk_t *chunk) {
    /* Load 16 bytes once and mirror them into both 128 bit halves of the chunk.
     * See explanation in chunkset_avx512.c */
    __m128i pat = _mm_loadu_si128((__m128i *)from);
#if defined(_MSC_VER) && _MSC_VER <= 1900
    /* Old MSVC: emulate the 128 bit broadcast with a cast plus lane insert */
    *chunk = _mm256_inserti128_si256(_mm256_castsi128_si256(pat), pat, 1);
#else
    *chunk = _mm256_broadcastsi128_si256(pat);
#endif
}
43
44
21.8M
static inline void loadchunk(uint8_t const *s, chunk_t *chunk) {
    /* Unaligned 32 byte load from s into the chunk register */
    const __m256i *src = (const __m256i *)s;
    *chunk = _mm256_loadu_si256(src);
}
47
48
23.3M
static inline void storechunk(uint8_t *out, chunk_t *chunk) {
    /* Unaligned 32 byte store of the chunk register to out */
    __m256i *dst = (__m256i *)out;
    _mm256_storeu_si256(dst, *chunk);
}
51
52
265k
static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
53
265k
    lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
54
265k
    __m256i ret_vec;
55
    /* While technically we only need to read 4 or 8 bytes into this vector register for a lot of cases, GCC is
56
     * compiling this to a shared load for all branches, preferring the simpler code.  Given that the buf value isn't in
57
     * GPRs to begin with the 256 bit load is _probably_ just as inexpensive */
58
265k
    *chunk_rem = lut_rem.remval;
59
60
    /* See note in chunkset_ssse3.c for why this is ok */
61
265k
    __msan_unpoison(buf + dist, 32 - dist);
62
63
265k
    if (dist < 16) {
64
        /* This simpler case still requires us to shuffle in 128 bit lanes, so we must apply a static offset after
65
         * broadcasting the first vector register to both halves. This is _marginally_ faster than doing two separate
66
         * shuffles and combining the halves later */
67
216k
        __m256i perm_vec = _mm256_load_si256((__m256i*)(permute_table+lut_rem.idx));
68
216k
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
69
216k
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
70
216k
        ret_vec = _mm256_shuffle_epi8(ret_vec, perm_vec);
71
216k
    }  else {
72
49.0k
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
73
49.0k
        __m128i ret_vec1 = _mm_loadu_si128((__m128i*)(buf + 16));
74
        /* Take advantage of the fact that only the latter half of the 256 bit vector will actually differ */
75
49.0k
        __m128i perm_vec1 = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
76
49.0k
        __m128i xlane_permutes = _mm_cmpgt_epi8(_mm_set1_epi8(16), perm_vec1);
77
49.0k
        __m128i xlane_res  = _mm_shuffle_epi8(ret_vec0, perm_vec1);
78
        /* Since we can't wrap twice, we can simply keep the later half exactly how it is instead of having to _also_
79
         * shuffle those values */
80
49.0k
        __m128i latter_half = _mm_blendv_epi8(ret_vec1, xlane_res, xlane_permutes);
81
49.0k
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), latter_half, 1);
82
49.0k
    }
83
84
265k
    return ret_vec;
85
265k
}
86
87
5.49k
static inline void loadhalfchunk(uint8_t const *s, halfchunk_t *chunk) {
    /* Unaligned 16 byte load from s into the half-chunk register */
    const __m128i *src = (const __m128i *)s;
    *chunk = _mm_loadu_si128(src);
}
90
91
6.74k
static inline void storehalfchunk(uint8_t *out, halfchunk_t *chunk) {
    /* Unaligned 16 byte store of the half-chunk register to out */
    __m128i *dst = (__m128i *)out;
    _mm_storeu_si128(dst, *chunk);
}
94
95
99.5k
static inline chunk_t halfchunk2whole(halfchunk_t *chunk) {
    /* Widen a 128 bit half-chunk to 256 bits. Zero extending mostly appeases
     * memory sanitizers; the upper bytes are ultimately unlikely to be actually
     * written or read */
    halfchunk_t lo = *chunk;
    return _mm256_zextsi128_si256(lo);
}
100
101
99.5k
static inline halfchunk_t GET_HALFCHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
102
99.5k
    lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
103
99.5k
    __m128i perm_vec, ret_vec;
104
99.5k
    __msan_unpoison(buf + dist, 16 - dist);
105
99.5k
    ret_vec = _mm_loadu_si128((__m128i*)buf);
106
99.5k
    *chunk_rem = half_rem_vals[dist - 3];
107
108
99.5k
    perm_vec = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
109
99.5k
    ret_vec = _mm_shuffle_epi8(ret_vec, perm_vec);
110
111
99.5k
    return ret_vec;
112
99.5k
}
113
114
1.55M
#define CHUNKSIZE        chunksize_avx2
115
14.7M
#define CHUNKCOPY        chunkcopy_avx2
116
0
#define CHUNKUNROLL      chunkunroll_avx2
117
1.60M
#define CHUNKMEMSET      chunkmemset_avx2
118
#define CHUNKMEMSET_SAFE chunkmemset_safe_avx2
119
120
#include "chunkset_tpl.h"
121
122
#define INFLATE_FAST     inflate_fast_avx2
123
124
#include "inffast_tpl.h"
125
126
#endif