/src/zlib-ng/chunkset_tpl.h
Line | Count | Source (jump to first uncovered line) |
1 | | /* chunkset_tpl.h -- inline functions to copy small data chunks. |
2 | | * For conditions of distribution and use, see copyright notice in zlib.h |
3 | | */ |
4 | | |
5 | | #include "zbuild.h" |
6 | | #include <stdlib.h> |
7 | | |
8 | | /* Returns the chunk size */ |
9 | 0 | Z_INTERNAL uint32_t CHUNKSIZE(void) { |
10 | 0 | return sizeof(chunk_t); |
11 | 0 | } Unexecuted instantiation: chunksize_sse2 Unexecuted instantiation: chunksize_ssse3 Unexecuted instantiation: chunksize_avx2 Unexecuted instantiation: chunksize_avx512 |
12 | | |
13 | | /* Behave like memcpy, but assume that it's OK to overwrite at least |
14 | | chunk_t bytes of output even if the length is shorter than this, |
15 | | that the length is non-zero, and that `from` lags `out` by at least |
16 | | sizeof chunk_t bytes (or that they don't overlap at all or simply that |
17 | | the distance is less than the length of the copy). |
18 | | |
19 | | Aside from better memory bus utilisation, this means that short copies |
20 | | (chunk_t bytes or fewer) will fall straight through the loop |
21 | | without iteration, which will hopefully make the branch prediction more |
22 | | reliable. */ |
23 | | #ifndef HAVE_CHUNKCOPY |
static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
    Assert(len > 0, "chunkcopy should never have a length 0");
    chunk_t chunk;
    /* Map len into 1..sizeof(chunk_t): the first full-chunk store absorbs the
     * non-multiple "ragged" remainder up front, so every following iteration
     * advances by exactly one whole chunk. A len that is already a multiple of
     * sizeof(chunk_t) yields align == sizeof(chunk_t), not 0. */
    int32_t align = ((len - 1) % sizeof(chunk_t)) + 1;
    loadchunk(from, &chunk);
    storechunk(out, &chunk);
    /* NOTE: the store above may write past out+align; callers guarantee that
     * overwriting up to sizeof(chunk_t) bytes of output is safe (see the
     * comment block above this function). */
    out += align;
    from += align;
    len -= align;
    /* len is now an exact multiple of sizeof(chunk_t); short copies
     * (len <= sizeof(chunk_t)) skip this loop entirely. */
    while (len > 0) {
        loadchunk(from, &chunk);
        storechunk(out, &chunk);
        out += sizeof(chunk_t);
        from += sizeof(chunk_t);
        len -= sizeof(chunk_t);
    }
    return out;
}
42 | | #endif |
43 | | |
44 | | /* Perform short copies until distance can be rewritten as being at least |
45 | | sizeof chunk_t. |
46 | | |
47 | | This assumes that it's OK to overwrite at least the first |
48 | | 2*sizeof(chunk_t) bytes of output even if the copy is shorter than this. |
49 | | This assumption holds because inflate_fast() starts every iteration with at |
50 | | least 258 bytes of output space available (258 being the maximum length |
51 | | output from a single token; see inflate_fast()'s assumptions below). */ |
52 | | #ifndef HAVE_CHUNKUNROLL |
static inline uint8_t* CHUNKUNROLL(uint8_t *out, unsigned *dist, unsigned *len) {
    unsigned char const *from = out - *dist;
    chunk_t chunk;
    /* Each pass stores a whole chunk but advances out by only *dist bytes,
     * appending one more repetition of the current pattern. Because the copy
     * source stays fixed at the original out - *dist, the usable period
     * doubles each iteration (*dist += *dist) until it reaches either the
     * remaining length or a full chunk width, at which point CHUNKCOPY can
     * take over safely. Relies on the caller tolerating up to
     * 2*sizeof(chunk_t) bytes of overwrite (see comment above). */
    while (*dist < *len && *dist < sizeof(chunk_t)) {
        loadchunk(from, &chunk);
        storechunk(out, &chunk);
        out += *dist;
        *len -= *dist;
        *dist += *dist;
    }
    return out;
}
65 | | #endif |
66 | | |
67 | | #ifndef HAVE_CHUNK_MAG |
68 | | /* Loads a magazine to feed into memory of the pattern */ |
69 | 0 | static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) { |
70 | | /* This code takes string of length dist from "from" and repeats |
71 | | * it for as many times as can fit in a chunk_t (vector register) */ |
72 | 0 | uint64_t cpy_dist; |
73 | 0 | uint64_t bytes_remaining = sizeof(chunk_t); |
74 | 0 | chunk_t chunk_load; |
75 | 0 | uint8_t *cur_chunk = (uint8_t *)&chunk_load; |
76 | 0 | while (bytes_remaining) { |
77 | 0 | cpy_dist = MIN(dist, bytes_remaining); |
78 | 0 | memcpy(cur_chunk, buf, (size_t)cpy_dist); |
79 | 0 | bytes_remaining -= cpy_dist; |
80 | 0 | cur_chunk += cpy_dist; |
81 | | /* This allows us to bypass an expensive integer division since we're effectively |
82 | | * counting in this loop, anyway */ |
83 | 0 | *chunk_rem = (uint32_t)cpy_dist; |
84 | 0 | } |
85 | |
|
86 | 0 | return chunk_load; |
87 | 0 | } |
88 | | #endif |
89 | | |
90 | | #if defined(HAVE_HALF_CHUNK) && !defined(HAVE_HALFCHUNKCOPY) |
/* Half-width twin of CHUNKCOPY: identical ragged-head-then-whole-chunks
 * structure, operating on halfchunk_t so shorter safe distances still qualify
 * for the vectorized path. Same overwrite caveats as CHUNKCOPY apply. */
static inline uint8_t* HALFCHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
    halfchunk_t chunk;
    /* Map len into 1..sizeof(halfchunk_t) so the first store soaks up the
     * non-multiple remainder; subsequent iterations advance by whole
     * half-chunks. */
    int32_t align = ((len - 1) % sizeof(halfchunk_t)) + 1;
    loadhalfchunk(from, &chunk);
    storehalfchunk(out, &chunk);
    out += align;
    from += align;
    len -= align;
    while (len > 0) {
        loadhalfchunk(from, &chunk);
        storehalfchunk(out, &chunk);
        out += sizeof(halfchunk_t);
        from += sizeof(halfchunk_t);
        len -= sizeof(halfchunk_t);
    }
    return out;
}
108 | | #endif |
109 | | |
110 | | /* Copy DIST bytes from OUT - DIST into OUT + DIST * k, for 0 <= k < LEN/DIST. |
111 | | Return OUT + LEN. */ |
static inline uint8_t* CHUNKMEMSET(uint8_t *out, uint8_t *from, unsigned len) {
    /* Debug performance related issues when len < sizeof(uint64_t):
       Assert(len >= sizeof(uint64_t), "chunkmemset should be called on larger chunks"); */
    Assert(from != out, "chunkmemset cannot have a distance 0");

    chunk_t chunk_load;
    uint32_t chunk_mod = 0;
    uint32_t adv_amount;
    /* dist is the absolute gap between source and destination; sdist's sign
     * tells us whether the source lies ahead of (negative) or behind the
     * destination. */
    int64_t sdist = out - from;
    uint64_t dist = llabs(sdist);

    /* We are supporting the case for when we are reading bytes from ahead in the buffer.
     * We now have to handle this, though it wasn't _quite_ clear if this rare circumstance
     * always needed to be handled here or if we're just now seeing it because we are
     * dispatching to this function, more */
    if (sdist < 0 && dist < len) {
#ifdef HAVE_MASKED_READWRITE
        /* We can still handle this case if we can mitigate over writing _and_ we
         * fit the entirety of the copy length with one load */
        if (len <= sizeof(chunk_t)) {
            /* Tempting to add a goto to the block below but hopefully most compilers
             * collapse these identical code segments as one label to jump to */
            return CHUNKCOPY(out, from, len);
        }
#endif
        /* Here the memmove semantics match perfectly, as when this happens we are
         * effectively sliding down the contents of memory by dist bytes */
        memmove(out, from, len);
        return out + len;
    }

    /* Fast paths: distance 1 is a plain byte fill; distance >= one chunk means
     * loads never read bytes this call has just stored, so CHUNKCOPY is safe. */
    if (dist == 1) {
        memset(out, *from, len);
        return out + len;
    } else if (dist >= sizeof(chunk_t)) {
        return CHUNKCOPY(out, from, len);
    }

    /* Only AVX2+ as there's 128 bit vectors and 256 bit. We allow for shorter vector
     * lengths because they serve to allow more cases to fall into chunkcopy, as the
     * distance of the shorter length is still deemed a safe distance. We rewrite this
     * here rather than calling the ssse3 variant directly now because doing so required
     * dispatching to another function and broke inlining for this function entirely. We
     * also can merge an assert and some remainder peeling behavior into the same code blocks,
     * making the code a little smaller. */
#ifdef HAVE_HALF_CHUNK
    if (len <= sizeof(halfchunk_t)) {
        if (dist >= sizeof(halfchunk_t))
            return HALFCHUNKCOPY(out, from, len);

        /* Distances that are odd or equal to 6 don't divide a power-of-two
         * register evenly, so build a half-width magazine for them. */
        if ((dist % 2) != 0 || dist == 6) {
            halfchunk_t halfchunk_load = GET_HALFCHUNK_MAG(from, &chunk_mod, (unsigned)dist);

            if (len == sizeof(halfchunk_t)) {
                storehalfchunk(out, &halfchunk_load);
                len -= sizeof(halfchunk_t);
                out += sizeof(halfchunk_t);
            }

            /* Widen to a full chunk so the shared tail logic below can finish
             * off whatever bytes remain. */
            chunk_load = halfchunk2whole(&halfchunk_load);
            goto rem_bytes;
        }
    }
#endif

    /* Power-of-two distances that exactly tile a register get dedicated
     * broadcast helpers; everything else falls through to the generic
     * magazine builder, which also sets chunk_mod (the partial-tile size). */
#ifdef HAVE_CHUNKMEMSET_2
    if (dist == 2) {
        chunkmemset_2(from, &chunk_load);
    } else
#endif
#ifdef HAVE_CHUNKMEMSET_4
    if (dist == 4) {
        chunkmemset_4(from, &chunk_load);
    } else
#endif
#ifdef HAVE_CHUNKMEMSET_8
    if (dist == 8) {
        chunkmemset_8(from, &chunk_load);
    } else
#endif
#ifdef HAVE_CHUNKMEMSET_16
    if (dist == 16) {
        chunkmemset_16(from, &chunk_load);
    } else
#endif
    chunk_load = GET_CHUNK_MAG(from, &chunk_mod, (unsigned)dist);

    /* chunk_mod == 0 when dist tiles the register evenly, making
     * adv_amount the full chunk width; otherwise we advance only by the
     * whole number of pattern copies actually in the register. */
    adv_amount = sizeof(chunk_t) - chunk_mod;

    while (len >= (2 * sizeof(chunk_t))) {
        storechunk(out, &chunk_load);
        storechunk(out + adv_amount, &chunk_load);
        out += 2 * adv_amount;
        len -= 2 * adv_amount;
    }

    /* If we don't have a "dist" length that divides evenly into a vector
     * register, we can write the whole vector register but we need only
     * advance by the amount of the whole string that fits in our chunk_t.
     * If we do divide evenly into the vector length, adv_amount = chunk_t size*/
    while (len >= sizeof(chunk_t)) {
        storechunk(out, &chunk_load);
        len -= adv_amount;
        out += adv_amount;
    }

#ifdef HAVE_HALF_CHUNK
rem_bytes:
#endif
    /* Tail: fewer than sizeof(chunk_t) bytes left; a plain memcpy from the
     * magazine cannot overwrite past out+len. */
    if (len) {
        memcpy(out, &chunk_load, len);
        out += len;
    }

    return out;
}
228 | | |
/* Bounds-checked wrapper around CHUNKMEMSET: clamps the copy to the `left`
 * bytes of remaining output space and falls back to byte-at-a-time copying
 * whenever the window is too small to absorb a chunk-sized overwrite. */
Z_INTERNAL uint8_t* CHUNKMEMSET_SAFE(uint8_t *out, uint8_t *from, unsigned len, unsigned left) {
#if OPTIMAL_CMP < 32
    static const uint32_t align_mask = 7;
#elif OPTIMAL_CMP == 32
    static const uint32_t align_mask = 3;
#endif

    len = MIN(len, left);

    /* On targets without fast unaligned 64-bit access, peel single bytes
     * until out reaches the platform's preferred alignment. */
#if OPTIMAL_CMP < 64
    while (((uintptr_t)out & align_mask) && (len > 0)) {
        *out++ = *from++;
        --len;
        --left;
    }
#endif

#ifndef HAVE_MASKED_READWRITE
    /* Without masked stores, a chunk write could spill past the end of the
     * output buffer, so finish a nearly-full buffer one byte at a time. */
    if (UNLIKELY(left < sizeof(chunk_t))) {
        while (len > 0) {
            *out++ = *from++;
            --len;
        }

        return out;
    }
#endif

    if (len)
        out = CHUNKMEMSET(out, from, len);

    return out;
}
262 | | |
/* Copy with an explicit high-water mark: `safe` bounds how far reads/writes
 * may reach. Clamps len accordingly and routes to CHUNKMEMSET when chunk-wide
 * accesses cannot exceed the safe pointer. */
static inline uint8_t *CHUNKCOPY_SAFE(uint8_t *out, uint8_t *from, uint64_t len, uint8_t *safe)
{
    /* Zero distance: nothing to move, just report the advanced cursor. */
    if (out == from)
        return out + len;

    /* NOTE(review): safelen treats `safe` as exclusive of the writable range
     * here (no +1) — confirm against the callers' convention. */
    uint64_t safelen = (safe - out);
    len = MIN(len, safelen);

#ifndef HAVE_MASKED_READWRITE
    /* Without masked loads/stores, chunk-sized accesses near `safe` could
     * touch bytes beyond it from either the source or destination side, so
     * fall back to a scalar byte loop in that case. */
    uint64_t from_dist = (uint64_t)llabs(safe - from);
    if (UNLIKELY(from_dist < sizeof(chunk_t) || safelen < sizeof(chunk_t))) {
        while (len--) {
            *out++ = *from++;
        }

        return out;
    }
#endif

    return CHUNKMEMSET(out, from, (unsigned)len);
}