/src/zlib-ng/arch/x86/chunkset_avx2.c
Line | Count | Source |
1 | | /* chunkset_avx2.c -- AVX2 inline functions to copy small data chunks. |
2 | | * For conditions of distribution and use, see copyright notice in zlib.h |
3 | | */ |
4 | | |
5 | | #ifdef X86_AVX2 |
6 | | |
7 | | #include "zbuild.h" |
8 | | #include "zmemory.h" |
9 | | |
10 | | #include "arch/generic/chunk_256bit_perm_idx_lut.h" |
11 | | #include <immintrin.h> |
12 | | #include "x86_intrins.h" |
13 | | |
14 | | typedef __m256i chunk_t; |
15 | | typedef __m128i halfchunk_t; |
16 | | |
17 | | #define HAVE_CHUNKMEMSET_2 |
18 | | #define HAVE_CHUNKMEMSET_4 |
19 | | #define HAVE_CHUNKMEMSET_8 |
20 | | #define HAVE_CHUNKMEMSET_16 |
21 | | #define HAVE_CHUNK_MAG |
22 | | #define HAVE_HALF_CHUNK |
23 | | |
24 | 0 | static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) { |
25 | 0 | *chunk = _mm256_set1_epi16(zng_memread_2(from)); |
26 | 0 | } |
27 | | |
28 | 0 | static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) { |
29 | 0 | *chunk = _mm256_set1_epi32(zng_memread_4(from)); |
30 | 0 | } |
31 | | |
32 | 0 | static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) { |
33 | 0 | *chunk = _mm256_set1_epi64x(zng_memread_8(from)); |
34 | 0 | } |
35 | | |
36 | 0 | static inline void chunkmemset_16(uint8_t *from, chunk_t *chunk) { |
37 | | /* See explanation in chunkset_avx512.c */ |
38 | | #if defined(_MSC_VER) && _MSC_VER <= 1900 |
39 | | halfchunk_t half = _mm_loadu_si128((__m128i*)from); |
40 | | *chunk = _mm256_inserti128_si256(_mm256_castsi128_si256(half), half, 1); |
41 | | #else |
42 | 0 | *chunk = _mm256_broadcastsi128_si256(_mm_loadu_si128((__m128i*)from)); |
43 | 0 | #endif |
44 | 0 | } |
45 | | |
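/* Editor's sketch, standalone and not part of the instrumented file above: it checks that the
 * MSVC <= 1900 fallback in chunkmemset_16 (cast the 128-bit value to 256 bits, then insert the
 * same 128 bits into the upper lane) produces exactly the same register contents as
 * _mm256_broadcastsi128_si256. Build with AVX2 enabled (e.g. -mavx2). */
#include <immintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    unsigned char src[16];
    for (int i = 0; i < 16; i++)
        src[i] = (unsigned char)i;

    __m128i half = _mm_loadu_si128((const __m128i *)src);

    /* Modern path: broadcast the 128-bit lane into both halves. */
    __m256i bcast = _mm256_broadcastsi128_si256(half);
    /* Fallback path: widen, then insert the same 128 bits into lane 1. */
    __m256i insert = _mm256_inserti128_si256(_mm256_castsi128_si256(half), half, 1);

    unsigned char a[32], b[32];
    _mm256_storeu_si256((__m256i *)a, bcast);
    _mm256_storeu_si256((__m256i *)b, insert);

    printf("identical: %s\n", memcmp(a, b, 32) == 0 ? "yes" : "no");
    return 0;
}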
46 | 0 | static inline void loadchunk(uint8_t const *s, chunk_t *chunk) { |
47 | 0 | *chunk = _mm256_loadu_si256((__m256i *)s); |
48 | 0 | } |
49 | | |
50 | 0 | static inline void storechunk(uint8_t *out, chunk_t *chunk) { |
51 | 0 | _mm256_storeu_si256((__m256i *)out, *chunk); |
52 | 0 | } |
53 | | |
54 | 0 | static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) { |
55 | 0 | lut_rem_pair lut_rem = perm_idx_lut[dist - 3]; |
56 | 0 | __m256i ret_vec; |
57 | | /* While technically we only need to read 4 or 8 bytes into this vector register in many cases, GCC |
58 | | * compiles this to a single shared load for all branches, preferring the simpler code. Given that the buf value isn't in |
59 | | * GPRs to begin with, the 256 bit load is _probably_ just as inexpensive */ |
60 | 0 | *chunk_rem = lut_rem.remval; |
61 | | |
62 | | /* See note in chunkset_ssse3.c for why this is ok */ |
63 | 0 | __msan_unpoison(buf + dist, 32 - dist); |
64 | |
65 | 0 | if (dist < 16) { |
66 | | /* This simpler case still requires us to shuffle in 128 bit lanes, so we must apply a static offset after |
67 | | * broadcasting the first vector register to both halves. This is _marginally_ faster than doing two separate |
68 | | * shuffles and combining the halves later */ |
69 | 0 | __m256i perm_vec = _mm256_load_si256((__m256i*)(permute_table+lut_rem.idx)); |
70 | 0 | __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf); |
71 | 0 | ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1); |
72 | 0 | ret_vec = _mm256_shuffle_epi8(ret_vec, perm_vec); |
73 | 0 | } else { |
74 | 0 | __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf); |
75 | 0 | __m128i ret_vec1 = _mm_loadu_si128((__m128i*)(buf + 16)); |
76 | | /* Take advantage of the fact that only the latter half of the 256 bit vector will actually differ */ |
77 | 0 | __m128i perm_vec1 = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx)); |
78 | 0 | __m128i xlane_permutes = _mm_cmpgt_epi8(_mm_set1_epi8(16), perm_vec1); |
79 | 0 | __m128i xlane_res = _mm_shuffle_epi8(ret_vec0, perm_vec1); |
80 | | /* Since we can't wrap twice, we can simply keep the latter half exactly as it is instead of having to _also_ |
81 | | * shuffle those values */ |
82 | 0 | __m128i latter_half = _mm_blendv_epi8(ret_vec1, xlane_res, xlane_permutes); |
83 | 0 | ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), latter_half, 1); |
84 | 0 | } |
85 | |
86 | 0 | return ret_vec; |
87 | 0 | } |
88 | | |
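/* Editor's sketch, not part of the instrumented file above: a scalar model of the value
 * GET_CHUNK_MAG builds for 3 <= dist < 32. Byte i of the result is buf[i % dist], i.e. the
 * dist-byte pattern repeated across the whole 32-byte chunk; the AVX2 code above reaches the
 * same result with one or two loads plus a table-driven byte shuffle instead of a byte loop.
 * With chunk_size = 16 the same model describes GET_HALFCHUNK_MAG further down. The function
 * name chunk_mag_scalar is hypothetical and used only for illustration. */
#include <stdint.h>

static void chunk_mag_scalar(const uint8_t *buf, uint8_t *out, uint32_t dist, uint32_t chunk_size) {
    for (uint32_t i = 0; i < chunk_size; i++)
        out[i] = buf[i % dist];   /* replicate the dist-byte run across the chunk */
}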
89 | 0 | static inline void loadhalfchunk(uint8_t const *s, halfchunk_t *chunk) { |
90 | 0 | *chunk = _mm_loadu_si128((__m128i *)s); |
91 | 0 | } |
92 | | |
93 | 0 | static inline void storehalfchunk(uint8_t *out, halfchunk_t *chunk) { |
94 | 0 | _mm_storeu_si128((__m128i *)out, *chunk); |
95 | 0 | } |
96 | | |
97 | 0 | static inline chunk_t halfchunk2whole(halfchunk_t *chunk) { |
98 | | /* We zero extend mostly to appease some memory sanitizers. These bytes are ultimately |
99 | | * unlikely to ever be written or read */ |
100 | 0 | return _mm256_zextsi128_si256(*chunk); |
101 | 0 | } |
102 | | |
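/* Editor's sketch, standalone and not part of the instrumented file above: demonstrates the
 * property halfchunk2whole relies on -- _mm256_zextsi128_si256 zeroes the upper 128 bits, so
 * every byte of the widened chunk is defined even though only 16 bytes were loaded. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    unsigned char src[16];
    for (int i = 0; i < 16; i++)
        src[i] = 0xAA;

    __m128i half = _mm_loadu_si128((const __m128i *)src);
    __m256i whole = _mm256_zextsi128_si256(half);   /* low lane = src, high lane = zeros */

    unsigned char out[32];
    _mm256_storeu_si256((__m256i *)out, whole);
    printf("out[0]=0x%02x out[16]=0x%02x out[31]=0x%02x\n", out[0], out[16], out[31]);
    return 0;
}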
103 | 0 | static inline halfchunk_t GET_HALFCHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) { |
104 | 0 | lut_rem_pair lut_rem = perm_idx_lut[dist - 3]; |
105 | 0 | __m128i perm_vec, ret_vec; |
106 | 0 | __msan_unpoison(buf + dist, 16 - dist); |
107 | 0 | ret_vec = _mm_loadu_si128((__m128i*)buf); |
108 | 0 | *chunk_rem = half_rem_vals[dist - 3]; |
109 | |
110 | 0 | perm_vec = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx)); |
111 | 0 | ret_vec = _mm_shuffle_epi8(ret_vec, perm_vec); |
112 | |
113 | 0 | return ret_vec; |
114 | 0 | } |
115 | | |
116 | 0 | #define CHUNKSIZE chunksize_avx2 |
117 | 0 | #define CHUNKCOPY chunkcopy_avx2 |
118 | 0 | #define CHUNKUNROLL chunkunroll_avx2 |
119 | 0 | #define CHUNKMEMSET chunkmemset_avx2 |
120 | | #define CHUNKMEMSET_SAFE chunkmemset_safe_avx2 |
121 | | |
122 | | #include "chunkset_tpl.h" |
123 | | |
124 | | #define INFLATE_FAST inflate_fast_avx2 |
125 | | |
126 | | #include "inffast_tpl.h" |
127 | | |
128 | | #endif |