/src/zlib-ng/arch/x86/chunkset_avx2.c
/* chunkset_avx2.c -- AVX2 inline functions to copy small data chunks.
 * For conditions of distribution and use, see copyright notice in zlib.h
 */
#include "zbuild.h"
#include "zmemory.h"

#ifdef X86_AVX2
#include "arch/generic/chunk_256bit_perm_idx_lut.h"
#include <immintrin.h>
#include "x86_intrins.h"

typedef __m256i chunk_t;
typedef __m128i halfchunk_t;

#define HAVE_CHUNKMEMSET_2
#define HAVE_CHUNKMEMSET_4
#define HAVE_CHUNKMEMSET_8
#define HAVE_CHUNKMEMSET_16
#define HAVE_CHUNK_MAG
#define HAVE_HALF_CHUNK

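/* Broadcast a 2, 4, 8, or 16 byte pattern read from "from" across the full 256-bit chunk. */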
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
    *chunk = _mm256_set1_epi16(zng_memread_2(from));
}

static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
    *chunk = _mm256_set1_epi32(zng_memread_4(from));
}

static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
    *chunk = _mm256_set1_epi64x(zng_memread_8(from));
}

static inline void chunkmemset_16(uint8_t *from, chunk_t *chunk) {
    /* See explanation in chunkset_avx512.c */
#if defined(_MSC_VER) && _MSC_VER <= 1900
    halfchunk_t half = _mm_loadu_si128((__m128i*)from);
    *chunk = _mm256_inserti128_si256(_mm256_castsi128_si256(half), half, 1);
#else
    *chunk = _mm256_broadcastsi128_si256(_mm_loadu_si128((__m128i*)from));
#endif
}

static inline void loadchunk(uint8_t const *s, chunk_t *chunk) {
    *chunk = _mm256_loadu_si256((__m256i *)s);
}

static inline void storechunk(uint8_t *out, chunk_t *chunk) {
    _mm256_storeu_si256((__m256i *)out, *chunk);
}

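/* Build a 32-byte "magazine": replicate the dist-byte pattern at buf (dist >= 3 and smaller
 * than a full chunk) across the whole 256-bit register using the shared permutation LUT.
 * The LUT's remainder value is handed back through *chunk_rem for the caller in
 * chunkset_tpl.h. */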
static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
    lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
    __m256i ret_vec;
    /* While technically we only need to read 4 or 8 bytes into this vector register for a lot of cases, GCC is
     * compiling this to a shared load for all branches, preferring the simpler code. Given that the buf value isn't
     * in GPRs to begin with, the 256 bit load is _probably_ just as inexpensive. */
    *chunk_rem = lut_rem.remval;

    /* See note in chunkset_ssse3.c for why this is ok */
    __msan_unpoison(buf + dist, 32 - dist);

    if (dist < 16) {
        /* This simpler case still requires us to shuffle in 128 bit lanes, so we must apply a static offset after
         * broadcasting the first vector register to both halves. This is _marginally_ faster than doing two separate
         * shuffles and combining the halves later. */
        const __m256i permute_xform =
            _mm256_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                             16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16);
        __m256i perm_vec = _mm256_load_si256((__m256i*)(permute_table + lut_rem.idx));
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
        perm_vec = _mm256_add_epi8(perm_vec, permute_xform);
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
        ret_vec = _mm256_shuffle_epi8(ret_vec, perm_vec);
    } else {
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
        __m128i ret_vec1 = _mm_loadu_si128((__m128i*)(buf + 16));
        /* Take advantage of the fact that only the latter half of the 256 bit vector will actually differ */
        __m128i perm_vec1 = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
        __m128i xlane_permutes = _mm_cmpgt_epi8(_mm_set1_epi8(16), perm_vec1);
        __m128i xlane_res = _mm_shuffle_epi8(ret_vec0, perm_vec1);
        /* Since we can't wrap twice, we can simply keep the latter half exactly how it is instead of having to
         * _also_ shuffle those values */
        __m128i latter_half = _mm_blendv_epi8(ret_vec1, xlane_res, xlane_permutes);
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), latter_half, 1);
    }

    return ret_vec;
}

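/* 128-bit half-chunk primitives, exposed through HAVE_HALF_CHUNK for the cases where the
 * template only needs a 16-byte wide load/store. */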
static inline void loadhalfchunk(uint8_t const *s, halfchunk_t *chunk) {
    *chunk = _mm_loadu_si128((__m128i *)s);
}

static inline void storehalfchunk(uint8_t *out, halfchunk_t *chunk) {
    _mm_storeu_si128((__m128i *)out, *chunk);
}

static inline chunk_t halfchunk2whole(halfchunk_t *chunk) {
    /* We zero extend mostly to appease some memory sanitizers. These bytes are ultimately
     * unlikely to be actually written or read from */
    return _mm256_zextsi128_si256(*chunk);
}

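/* Half-width counterpart of GET_CHUNK_MAG: fill a single 128-bit register with the repeating
 * dist-byte pattern; the remainder comes from half_rem_vals since only 16 bytes are filled. */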
static inline halfchunk_t GET_HALFCHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
    lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
    __m128i perm_vec, ret_vec;
    __msan_unpoison(buf + dist, 16 - dist);
    ret_vec = _mm_loadu_si128((__m128i*)buf);
    *chunk_rem = half_rem_vals[dist - 3];

    perm_vec = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
    ret_vec = _mm_shuffle_epi8(ret_vec, perm_vec);

    return ret_vec;
}

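/* Instantiate the shared chunkset and inflate_fast templates with AVX2-suffixed names. */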
#define CHUNKSIZE chunksize_avx2
#define CHUNKCOPY chunkcopy_avx2
#define CHUNKUNROLL chunkunroll_avx2
#define CHUNKMEMSET chunkmemset_avx2
#define CHUNKMEMSET_SAFE chunkmemset_safe_avx2

#include "chunkset_tpl.h"

#define INFLATE_FAST inflate_fast_avx2

#include "inffast_tpl.h"

#endif