/src/zlib-ng/chunkset_tpl.h
/* chunkset_tpl.h -- inline functions to copy small data chunks.
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "zbuild.h"
#include <stdlib.h>

/* Returns the chunk size */
static inline size_t CHUNKSIZE(void) {
    return sizeof(chunk_t);
}

/* Behave like memcpy, but assume that it's OK to overwrite at least
   chunk_t bytes of output even if the length is shorter than this,
   that the length is non-zero, and that `from` lags `out` by at least
   sizeof chunk_t bytes (or that they don't overlap at all or simply that
   the distance is less than the length of the copy).

   Aside from better memory bus utilization, this means that short copies
   (chunk_t bytes or fewer) will fall straight through the loop
   without iteration, which will hopefully make the branch prediction more
   reliable. */
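/* Illustrative trace (assuming a 16-byte chunk_t): CHUNKCOPY(out, from, 37)
   computes align = ((37 - 1) % 16) + 1 == 5, so the first store writes 16
   bytes but advances the pointers by only 5, leaving len == 32 for exactly
   two full-chunk iterations. For len <= 16 the loop is skipped entirely,
   which is the short-copy fast path described above. */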
#ifndef HAVE_CHUNKCOPY
static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
    Assert(len > 0, "chunkcopy should never have a length 0");
    chunk_t chunk;
    int32_t align = ((len - 1) % sizeof(chunk_t)) + 1;
    loadchunk(from, &chunk);
    storechunk(out, &chunk);
    out += align;
    from += align;
    len -= align;
    while (len > 0) {
        loadchunk(from, &chunk);
        storechunk(out, &chunk);
        out += sizeof(chunk_t);
        from += sizeof(chunk_t);
        len -= sizeof(chunk_t);
    }
    return out;
}
#endif

/* Perform short copies until distance can be rewritten as being at least
   sizeof chunk_t.

   This assumes that it's OK to overwrite at least the first
   2*sizeof(chunk_t) bytes of output even if the copy is shorter than this.
   This assumption holds because inflate_fast() starts every iteration with at
   least 258 bytes of output space available (258 being the maximum length
   output from a single token; see inflate_fast()'s assumptions below). */
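/* Illustrative trace (assuming a 16-byte chunk_t): for *dist == 3 each pass
   stores a chunk but advances by only *dist, doubling the usable distance
   (3 -> 6 -> 12 -> 24); once *dist >= sizeof(chunk_t), the caller can finish
   with full-width CHUNKCOPY loads that no longer read inside the span being
   written. */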
static inline uint8_t* CHUNKUNROLL(uint8_t *out, unsigned *dist, unsigned *len) {
    unsigned char const *from = out - *dist;
    chunk_t chunk;
    while (*dist < *len && *dist < sizeof(chunk_t)) {
        loadchunk(from, &chunk);
        storechunk(out, &chunk);
        out += *dist;
        *len -= *dist;
        *dist += *dist;
    }
    return out;
}

#ifndef HAVE_CHUNK_MAG
/* Loads a chunk-sized "magazine" register with as many repetitions of the
   pattern as will fit, to feed the pattern into memory. */
static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
    /* This code takes a string of length dist from "buf" and repeats
     * it for as many times as can fit in a chunk_t (vector register) */
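    /* Example (assuming a 16-byte chunk_t): dist == 3 copies 3+3+3+3+3+1
     * bytes, so the register ends up holding five whole copies of the
     * pattern plus one leading byte, and *chunk_rem is left at 1, the size
     * of that final partial copy. */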
    uint64_t cpy_dist;
    uint64_t bytes_remaining = sizeof(chunk_t);
    chunk_t chunk_load;
    uint8_t *cur_chunk = (uint8_t *)&chunk_load;
    while (bytes_remaining) {
        cpy_dist = MIN(dist, bytes_remaining);
        memcpy(cur_chunk, buf, (size_t)cpy_dist);
        bytes_remaining -= cpy_dist;
        cur_chunk += cpy_dist;
        /* This allows us to bypass an expensive integer division since we're effectively
         * counting in this loop, anyway */
        *chunk_rem = (uint32_t)cpy_dist;
    }

    return chunk_load;
}
#endif

#if defined(HAVE_HALF_CHUNK) && !defined(HAVE_HALFCHUNKCOPY)
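/* Mirrors CHUNKCOPY above, but with the half-width halfchunk_t loads and
   stores, so short copies over-read and over-write less. */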
static inline uint8_t* HALFCHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
    halfchunk_t chunk;
    int32_t align = ((len - 1) % sizeof(halfchunk_t)) + 1;
    loadhalfchunk(from, &chunk);
    storehalfchunk(out, &chunk);
    out += align;
    from += align;
    len -= align;
    while (len > 0) {
        loadhalfchunk(from, &chunk);
        storehalfchunk(out, &chunk);
        out += sizeof(halfchunk_t);
        from += sizeof(halfchunk_t);
        len -= sizeof(halfchunk_t);
    }
    return out;
}
#endif

/* Copy DIST bytes from OUT - DIST into OUT + DIST * k, for 0 <= k < LEN/DIST.
   Return OUT + LEN. */
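/* For example (illustrative): with dist == 3, CHUNKMEMSET(out, out - 3, 10)
   repeats the 3-byte pattern at out - 3 across the next 10 bytes of output,
   i.e. the classic LZ77 overlapped match copy. */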
static inline uint8_t* CHUNKMEMSET(uint8_t *out, uint8_t *from, unsigned len) {
    /* Debug performance related issues when len < sizeof(uint64_t):
       Assert(len >= sizeof(uint64_t), "chunkmemset should be called on larger chunks"); */
    Assert(from != out, "chunkmemset cannot have a distance 0");

    chunk_t chunk_load;
    uint32_t chunk_mod = 0;
    uint32_t adv_amount;
    int64_t sdist = out - from;
    uint64_t dist = llabs(sdist);

    /* We support the case where `from` is ahead of `out` in the buffer. This
     * has to be handled here, though it isn't _quite_ clear whether this rare
     * circumstance always needed handling at this level or whether we only
     * see it now because this function is dispatched to more often. */
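    /* e.g. out == buf and from == buf + 2 with len == 10: a forward overlap,
     * where the copy slides memory down rather than repeating a pattern. */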
    if (sdist < 0 && dist < len) {
#ifdef HAVE_MASKED_READWRITE
        /* We can still handle this case if we can mitigate overwriting _and_ we
         * fit the entirety of the copy length with one load */
        if (len <= sizeof(chunk_t)) {
            /* Tempting to add a goto to the block below but hopefully most compilers
             * collapse these identical code segments as one label to jump to */
            return CHUNKCOPY(out, from, len);
        }
#endif
        /* Here the memmove semantics match perfectly, as when this happens we are
         * effectively sliding down the contents of memory by dist bytes */
        memmove(out, from, len);
        return out + len;
    }

    if (dist == 1) {
        memset(out, *from, len);
        return out + len;
    } else if (dist >= sizeof(chunk_t)) {
        return CHUNKCOPY(out, from, len);
    }

    /* Only AVX2 and newer have both 128-bit and 256-bit vectors here. We
     * allow the shorter vector length because it lets more cases fall into
     * chunkcopy: a distance that is safe for the shorter length is still a
     * safe distance. This is rewritten inline rather than calling the ssse3
     * variant directly because that required dispatching to another function
     * and broke inlining of this function entirely. It also lets us merge an
     * assert and some remainder-peeling behavior into the same code blocks,
     * making the code a little smaller. */
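    /* Which distances land here (illustrative, assuming the AVX2 case of a
     * 32-byte chunk_t and 16-byte halfchunk_t): odd distances and 6 build a
     * half-width magazine; distances 2, 4, and 8 use the broadcast helpers
     * below; the remaining short even distances fall through to the
     * full-width magazine. */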
#ifdef HAVE_HALF_CHUNK
    if (len <= sizeof(halfchunk_t)) {
        if (dist >= sizeof(halfchunk_t))
            return HALFCHUNKCOPY(out, from, len);

        if ((dist % 2) != 0 || dist == 6) {
            halfchunk_t halfchunk_load = GET_HALFCHUNK_MAG(from, &chunk_mod, (unsigned)dist);

            if (len == sizeof(halfchunk_t)) {
                storehalfchunk(out, &halfchunk_load);
                len -= sizeof(halfchunk_t);
                out += sizeof(halfchunk_t);
            }

            chunk_load = halfchunk2whole(&halfchunk_load);
            goto rem_bytes;
        }
    }
#endif

#ifdef HAVE_CHUNKMEMSET_2
    if (dist == 2) {
        chunkmemset_2(from, &chunk_load);
    } else
#endif
#ifdef HAVE_CHUNKMEMSET_4
    if (dist == 4) {
        chunkmemset_4(from, &chunk_load);
    } else
#endif
#ifdef HAVE_CHUNKMEMSET_8
    if (dist == 8) {
        chunkmemset_8(from, &chunk_load);
    } else
#endif
#ifdef HAVE_CHUNKMEMSET_16
    if (dist == 16) {
        chunkmemset_16(from, &chunk_load);
    } else
#endif
    chunk_load = GET_CHUNK_MAG(from, &chunk_mod, (unsigned)dist);

    adv_amount = sizeof(chunk_t) - chunk_mod;
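
    /* Worked example (assuming a 16-byte chunk_t): dist == 3 loads the
     * magazine with five whole copies plus one byte, so chunk_mod == 1 and
     * adv_amount == 15, the largest multiple of dist that fits in the
     * register; each full-width store is then followed by a 15-byte advance
     * so the pattern stays phase-aligned. For the broadcast paths above,
     * chunk_mod stays 0 and adv_amount is a whole chunk_t. */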
    while (len >= (2 * sizeof(chunk_t))) {
        storechunk(out, &chunk_load);
        storechunk(out + adv_amount, &chunk_load);
        out += 2 * adv_amount;
        len -= 2 * adv_amount;
    }

    /* If "dist" doesn't divide evenly into the vector register, we can still
     * write the whole register, but must only advance by the length of the
     * whole copies of the string that fit in our chunk_t. If it does divide
     * evenly, adv_amount is simply the chunk_t size. */
    while (len >= sizeof(chunk_t)) {
        storechunk(out, &chunk_load);
        len -= adv_amount;
        out += adv_amount;
    }

#ifdef HAVE_HALF_CHUNK
rem_bytes:
#endif
    if (len) {
        memcpy(out, &chunk_load, len);
        out += len;
    }

    return out;
}

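/* Bounds-checked variant of CHUNKMEMSET: `left` is the number of writable
   bytes remaining in the output buffer. The length is clamped to `left`, the
   output pointer is first byte-aligned on targets with narrow optimal compare
   widths, and without masked read/write support a tail closer than a chunk_t
   to the end is copied one byte at a time. */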
Z_INTERNAL uint8_t* CHUNKMEMSET_SAFE(uint8_t *out, uint8_t *from, unsigned len, unsigned left) {
#if OPTIMAL_CMP < 32
    static const uint32_t align_mask = 7;
#elif OPTIMAL_CMP == 32
    static const uint32_t align_mask = 3;
#endif

    len = MIN(len, left);

#if OPTIMAL_CMP < 64
    while (((uintptr_t)out & align_mask) && (len > 0)) {
        *out++ = *from++;
        --len;
        --left;
    }
#endif

#ifndef HAVE_MASKED_READWRITE
    if (UNLIKELY(left < sizeof(chunk_t))) {
        while (len > 0) {
            *out++ = *from++;
            --len;
        }

        return out;
    }
#endif

    if (len)
        out = CHUNKMEMSET(out, from, len);

    return out;
}

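/* Copy with an explicit safety limit: `safe` marks the end of the writable
   region, the length is clamped to the distance from `out` to `safe`, and
   without masked read/write support a copy that comes within a chunk_t of
   `safe` on either the read or write side degrades to a byte-by-byte loop. */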
static inline uint8_t *CHUNKCOPY_SAFE(uint8_t *out, uint8_t *from, uint64_t len, uint8_t *safe)
{
    if (out == from)
        return out + len;

    uint64_t safelen = (safe - out);
    len = MIN(len, safelen);

#ifndef HAVE_MASKED_READWRITE
    uint64_t from_dist = (uint64_t)llabs(safe - from);
    if (UNLIKELY(from_dist < sizeof(chunk_t) || safelen < sizeof(chunk_t))) {
        while (len--) {
            *out++ = *from++;
        }

        return out;
    }
#endif

    return CHUNKMEMSET(out, from, (unsigned)len);
}