/src/zlib-ng/chunkset_tpl.h
Line | Count | Source |
1 | | /* chunkset_tpl.h -- inline functions to copy small data chunks. |
2 | | * For conditions of distribution and use, see copyright notice in zlib.h |
3 | | */ |
4 | | |
5 | | #include "zbuild.h" |
6 | | #include <stdlib.h> |
7 | | |
8 | | /* Returns the chunk size */ |
9 | 6.72k | Z_INTERNAL uint32_t CHUNKSIZE(void) { |
10 | 6.72k | return sizeof(chunk_t); |
11 | 6.72k | }
12 | | |
13 | | /* Behave like memcpy, but assume that it's OK to overwrite at least |
14 | | chunk_t bytes of output even if the length is shorter than this, |
15 | | that the length is non-zero, and that `from` lags `out` by at least |
16 | | sizeof chunk_t bytes (or that they don't overlap at all or simply that |
17 | | the distance is less than the length of the copy). |
18 | | |
19 | | Aside from better memory bus utilisation, this means that short copies |
20 | | (chunk_t bytes or fewer) will fall straight through the loop |
21 | | without iteration, which will hopefully make the branch prediction more |
22 | | reliable. */ |
23 | | #ifndef HAVE_CHUNKCOPY |
24 | 479k | static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) { |
25 | 479k | Assert(len > 0, "chunkcopy should never have a length 0"); |
26 | 479k | chunk_t chunk; |
27 | 479k | int32_t align = ((len - 1) % sizeof(chunk_t)) + 1; |
28 | 479k | loadchunk(from, &chunk); |
29 | 479k | storechunk(out, &chunk); |
30 | 479k | out += align; |
31 | 479k | from += align; |
32 | 479k | len -= align; |
33 | 650k | while (len > 0) { |
34 | 170k | loadchunk(from, &chunk); |
35 | 170k | storechunk(out, &chunk); |
36 | 170k | out += sizeof(chunk_t); |
37 | 170k | from += sizeof(chunk_t); |
38 | 170k | len -= sizeof(chunk_t); |
39 | 170k | } |
40 | 479k | return out; |
41 | 479k | }
42 | | #endif |
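
A minimal scalar sketch of the CHUNKCOPY contract described above, assuming a hypothetical 16-byte chunk and using memcpy in place of loadchunk/storechunk; the names (SKETCH_CHUNK, sketch_chunkcopy) are illustrative and not part of the template.

    #include <stdint.h>
    #include <string.h>

    #define SKETCH_CHUNK 16  /* stand-in for sizeof(chunk_t) */

    /* Caller guarantees (per the comment above): len > 0, it is OK to write up to
     * SKETCH_CHUNK bytes even when len is shorter, and `from` lags `out` by at
     * least SKETCH_CHUNK bytes, so each fixed-size memcpy below is non-overlapping. */
    static uint8_t *sketch_chunkcopy(uint8_t *out, const uint8_t *from, unsigned len) {
        unsigned align = ((len - 1) % SKETCH_CHUNK) + 1;   /* 1..SKETCH_CHUNK */
        memcpy(out, from, SKETCH_CHUNK);                   /* one chunk-sized load/store; may run past len */
        out += align; from += align; len -= align;         /* len is now a multiple of SKETCH_CHUNK */
        while (len > 0) {                                  /* whole-chunk copies only */
            memcpy(out, from, SKETCH_CHUNK);
            out += SKETCH_CHUNK; from += SKETCH_CHUNK; len -= SKETCH_CHUNK;
        }
        return out;
    }

A copy of 16 bytes or fewer is finished by the single store before the loop, which is the short-copy fast path the comment refers to.
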
43 | | |
44 | | /* Perform short copies until distance can be rewritten as being at least |
45 | | sizeof chunk_t. |
46 | | |
47 | | This assumes that it's OK to overwrite at least the first |
48 | | 2*sizeof(chunk_t) bytes of output even if the copy is shorter than this. |
49 | | This assumption holds because inflate_fast() starts every iteration with at |
50 | | least 258 bytes of output space available (258 being the maximum length |
51 | | output from a single token; see inflate_fast()'s assumptions below). */ |
52 | | #ifndef HAVE_CHUNKUNROLL |
53 | 850 | static inline uint8_t* CHUNKUNROLL(uint8_t *out, unsigned *dist, unsigned *len) { |
54 | 850 | unsigned char const *from = out - *dist; |
55 | 850 | chunk_t chunk; |
56 | 1.36k | while (*dist < *len && *dist < sizeof(chunk_t)) { |
57 | 512 | loadchunk(from, &chunk); |
58 | 512 | storechunk(out, &chunk); |
59 | 512 | out += *dist; |
60 | 512 | *len -= *dist; |
61 | 512 | *dist += *dist; |
62 | 512 | } |
63 | 850 | return out; |
64 | 850 | }
65 | | #endif |
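
A scalar sketch of what CHUNKUNROLL does for distances shorter than a chunk, reusing the includes and the hypothetical SKETCH_CHUNK from the sketch after CHUNKCOPY: each pass copies one period of the pattern, and the bytes just written double the usable distance (e.g. dist 3 -> 6 -> 12 -> 24, at which point CHUNKCOPY can take over).

    static uint8_t *sketch_chunkunroll(uint8_t *out, unsigned *dist, unsigned *len) {
        const uint8_t *from = out - *dist;     /* start of the repeating pattern */
        while (*dist < *len && *dist < SKETCH_CHUNK) {
            memcpy(out, from, *dist);          /* append one period; source and dest are adjacent, not overlapping */
            out   += *dist;
            *len  -= *dist;
            *dist += *dist;                    /* the bytes just written double the safe distance */
        }
        return out;
    }

The real template stores a whole chunk_t each pass and simply advances by *dist, relying on the overwrite allowance described above; copying exactly *dist bytes has the same logical effect.
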
66 | | |
67 | | #ifndef HAVE_CHUNK_MAG |
68 | | /* Loads a "magazine": a chunk filled with repetitions of the pattern, ready to be fed into memory */ |
69 | 0 | static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) { |
70 | | /* This code takes the string of length dist from "buf" and repeats |
71 | | * it as many times as will fit in a chunk_t (vector register) */ |
72 | 0 | uint64_t cpy_dist; |
73 | 0 | uint64_t bytes_remaining = sizeof(chunk_t); |
74 | 0 | chunk_t chunk_load; |
75 | 0 | uint8_t *cur_chunk = (uint8_t *)&chunk_load; |
76 | 0 | while (bytes_remaining) { |
77 | 0 | cpy_dist = MIN(dist, bytes_remaining); |
78 | 0 | memcpy(cur_chunk, buf, (size_t)cpy_dist); |
79 | 0 | bytes_remaining -= cpy_dist; |
80 | 0 | cur_chunk += cpy_dist; |
81 | | /* This allows us to bypass an expensive integer division since we're effectively |
82 | | * counting in this loop, anyway */ |
83 | 0 | *chunk_rem = (uint32_t)cpy_dist; |
84 | 0 | } |
85 | |
86 | 0 | return chunk_load; |
87 | 0 | } |
88 | | #endif |
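
A scalar sketch of GET_CHUNK_MAG's tiling, again for a hypothetical 16-byte chunk (same includes as the sketches above): the dist-byte pattern is copied repeatedly until the chunk is full, and chunk_rem ends up holding the size of the final, possibly partial, copy. For dist = 3 that is 16 % 3 = 1, so the caller's adv_amount becomes 15, the largest multiple of the pattern that fits in the register.

    static void sketch_chunk_mag(uint8_t chunk[16], const uint8_t *buf,
                                 uint32_t *chunk_rem, uint32_t dist) {
        uint32_t remaining = 16;
        uint32_t cpy = 0;
        uint8_t *cur = chunk;
        while (remaining) {
            cpy = dist < remaining ? dist : remaining;  /* MIN(dist, remaining) */
            memcpy(cur, buf, cpy);                      /* tile one (possibly partial) period */
            cur += cpy;
            remaining -= cpy;
        }
        *chunk_rem = cpy;  /* length of the last copy; sidesteps an explicit 16 % dist */
    }
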
89 | | |
90 | | #if defined(HAVE_HALF_CHUNK) && !defined(HAVE_HALFCHUNKCOPY) |
91 | 1.99k | static inline uint8_t* HALFCHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) { |
92 | 1.99k | halfchunk_t chunk; |
93 | 1.99k | int32_t align = ((len - 1) % sizeof(halfchunk_t)) + 1; |
94 | 1.99k | loadhalfchunk(from, &chunk); |
95 | 1.99k | storehalfchunk(out, &chunk); |
96 | 1.99k | out += align; |
97 | 1.99k | from += align; |
98 | 1.99k | len -= align; |
99 | 1.99k | while (len > 0) { |
100 | 0 | loadhalfchunk(from, &chunk); |
101 | 0 | storehalfchunk(out, &chunk); |
102 | 0 | out += sizeof(halfchunk_t); |
103 | 0 | from += sizeof(halfchunk_t); |
104 | 0 | len -= sizeof(halfchunk_t); |
105 | 0 | } |
106 | 1.99k | return out; |
107 | 1.99k | } |
108 | | #endif |
109 | | |
110 | | /* Copy DIST bytes from OUT - DIST into OUT + DIST * k, for 0 <= k < LEN/DIST. |
111 | | Return OUT + LEN. */ |
112 | 159k | static inline uint8_t* CHUNKMEMSET(uint8_t *out, uint8_t *from, unsigned len) { |
113 | | /* Debug performance related issues when len < sizeof(uint64_t): |
114 | | Assert(len >= sizeof(uint64_t), "chunkmemset should be called on larger chunks"); */ |
115 | 159k | Assert(from != out, "chunkmemset cannot have a distance 0"); |
116 | | |
117 | 159k | chunk_t chunk_load; |
118 | 159k | uint32_t chunk_mod = 0; |
119 | 159k | uint32_t adv_amount; |
120 | 159k | int64_t sdist = out - from; |
121 | 159k | uint64_t dist = llabs(sdist); |
122 | | |
123 | | /* We support the case where bytes are read from ahead of the write position in the buffer. |
124 | | * We now have to handle this, though it wasn't _quite_ clear whether this rare circumstance |
125 | | * always needed to be handled here or whether we're only seeing it now because we are |
126 | | * dispatching to this function more often. */ |
127 | 159k | if (sdist < 0 && dist < len) { |
128 | | #ifdef HAVE_MASKED_READWRITE |
129 | | /* We can still handle this case if we can mitigate overwriting _and_ we can |
130 | | * fit the entire copy length in one load */ |
131 | 0 | if (len <= sizeof(chunk_t)) { |
132 | | /* Tempting to add a goto to the block below but hopefully most compilers |
133 | | * collapse these identical code segments as one label to jump to */ |
134 | 0 | return CHUNKCOPY(out, from, len); |
135 | 0 | } |
136 | 0 | #endif |
137 | | /* Here the memmove semantics match perfectly, as when this happens we are |
138 | | * effectively sliding down the contents of memory by dist bytes */ |
139 | 0 | memmove(out, from, len); |
140 | 0 | return out + len; |
141 | 0 | } |
142 | | |
143 | 159k | if (dist == 1) { |
144 | 106k | memset(out, *from, len); |
145 | 106k | return out + len; |
146 | 106k | } else if (dist >= sizeof(chunk_t)) { |
147 | 18.9k | return CHUNKCOPY(out, from, len); |
148 | 18.9k | } |
149 | | |
150 | | /* Only for AVX2 and higher, where there are both 128-bit and 256-bit vectors. We allow the |
151 | | * shorter vector lengths because they let more cases fall into chunkcopy, as the |
152 | | * shorter length's distance is still deemed a safe distance. We rewrite this |
153 | | * here rather than calling the ssse3 variant directly because doing so required |
154 | | * dispatching to another function and broke inlining for this function entirely. We |
155 | | * can also merge an assert and some remainder-peeling behavior into the same code blocks, |
156 | | * making the code a little smaller. */ |
157 | | #ifdef HAVE_HALF_CHUNK |
158 | 34.3k | if (len <= sizeof(halfchunk_t)) { |
159 | 27.0k | if (dist >= sizeof(halfchunk_t)) |
160 | 1.99k | return HALFCHUNKCOPY(out, from, len); |
161 | | |
162 | 25.0k | if ((dist % 2) != 0 || dist == 6) { |
163 | 6.57k | halfchunk_t halfchunk_load = GET_HALFCHUNK_MAG(from, &chunk_mod, (unsigned)dist); |
164 | | |
165 | 6.57k | if (len == sizeof(halfchunk_t)) { |
166 | 522 | storehalfchunk(out, &halfchunk_load); |
167 | 522 | len -= sizeof(halfchunk_t); |
168 | 522 | out += sizeof(halfchunk_t); |
169 | 522 | } |
170 | | |
171 | 6.57k | chunk_load = halfchunk2whole(&halfchunk_load); |
172 | 6.57k | goto rem_bytes; |
173 | 6.57k | } |
174 | 25.0k | } |
175 | 25.7k | #endif |
176 | | |
177 | 25.7k | #ifdef HAVE_CHUNKMEMSET_2 |
178 | 25.7k | if (dist == 2) { |
179 | 16.0k | chunkmemset_2(from, &chunk_load); |
180 | 16.0k | } else |
181 | 9.72k | #endif |
182 | 9.72k | #ifdef HAVE_CHUNKMEMSET_4 |
183 | 9.72k | if (dist == 4) { |
184 | 2.49k | chunkmemset_4(from, &chunk_load); |
185 | 2.49k | } else |
186 | 7.23k | #endif |
187 | 7.23k | #ifdef HAVE_CHUNKMEMSET_8 |
188 | 7.23k | if (dist == 8) { |
189 | 1.15k | chunkmemset_8(from, &chunk_load); |
190 | 1.15k | } else |
191 | 6.08k | #endif |
192 | | #ifdef HAVE_CHUNKMEMSET_16 |
193 | 6.08k | if (dist == 16) { |
194 | 365 | chunkmemset_16(from, &chunk_load); |
195 | 365 | } else |
196 | 5.71k | #endif |
197 | 5.71k | chunk_load = GET_CHUNK_MAG(from, &chunk_mod, (unsigned)dist); |
198 | |
199 | 0 | adv_amount = sizeof(chunk_t) - chunk_mod; |
200 | |
201 | 29.2k | while (len >= (2 * sizeof(chunk_t))) { |
202 | 3.53k | storechunk(out, &chunk_load); |
203 | 3.53k | storechunk(out + adv_amount, &chunk_load); |
204 | 3.53k | out += 2 * adv_amount; |
205 | 3.53k | len -= 2 * adv_amount; |
206 | 3.53k | } |
207 | | |
208 | | /* If the "dist" length does not divide evenly into a vector |
209 | | * register, we can still write the whole vector register, but we only |
210 | | * advance by the amount of the whole string that fits in our chunk_t. |
211 | | * If it does divide evenly into the vector length, adv_amount = chunk_t size. */ |
212 | 30.1k | while (len >= sizeof(chunk_t)) { |
213 | 4.37k | storechunk(out, &chunk_load); |
214 | 4.37k | len -= adv_amount; |
215 | 4.37k | out += adv_amount; |
216 | 4.37k | } |
217 | |
218 | | #ifdef HAVE_HALF_CHUNK |
219 | 32.3k | rem_bytes: |
220 | 32.3k | #endif |
221 | 32.3k | if (len) { |
222 | 31.6k | memcpy(out, &chunk_load, len); |
223 | 31.6k | out += len; |
224 | 31.6k | } |
225 | | |
226 | 32.3k | return out; |
227 | 25.7k | }
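
A worked example of the store/advance loops near the end of CHUNKMEMSET, assuming a 32-byte chunk_t (as with AVX2): for dist = 3, GET_CHUNK_MAG leaves chunk_rem = 32 % 3 = 2, so adv_amount = 30. Every storechunk writes a full 32-byte register, but the output pointer advances by only 30 bytes, so the two bytes written past the advance point are simply rewritten, in phase, by the next store. A scalar sketch of that tail (names are illustrative; same includes as the sketches above):

    static uint8_t *sketch_pattern_fill(uint8_t *out, const uint8_t chunk[32],
                                        uint32_t chunk_rem, unsigned len) {
        uint32_t adv = 32 - chunk_rem;   /* largest multiple of dist that fits in the register */
        while (len >= 32) {              /* full-register stores, partial advances */
            memcpy(out, chunk, 32);
            out += adv;
            len -= adv;
        }
        if (len) {                       /* leftover bytes come straight from the register */
            memcpy(out, chunk, len);
            out += len;
        }
        return out;
    }

When dist divides the register width, the chunkmemset_2/4/8/16 paths above leave chunk_rem at 0 and the advance is a whole chunk, matching the comment in the source.
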
228 | | |
229 | 28.0k | Z_INTERNAL uint8_t* CHUNKMEMSET_SAFE(uint8_t *out, uint8_t *from, unsigned len, unsigned left) { |
230 | | #if OPTIMAL_CMP < 32 |
231 | | static const uint32_t align_mask = 7; |
232 | | #elif OPTIMAL_CMP == 32 |
233 | | static const uint32_t align_mask = 3; |
234 | | #endif |
235 | | |
236 | 28.0k | len = MIN(len, left); |
237 | | |
238 | | #if OPTIMAL_CMP < 64 |
239 | | while (((uintptr_t)out & align_mask) && (len > 0)) { |
240 | | *out++ = *from++; |
241 | | --len; |
242 | | --left; |
243 | | } |
244 | | #endif |
245 | | |
246 | | #ifndef HAVE_MASKED_READWRITE |
247 | 28.0k | if (UNLIKELY(left < sizeof(chunk_t))) { |
248 | 43.6k | while (len > 0) { |
249 | 37.9k | *out++ = *from++; |
250 | 37.9k | --len; |
251 | 37.9k | } |
252 | | |
253 | 5.71k | return out; |
254 | 5.71k | } |
255 | 22.3k | #endif |
256 | | |
257 | 22.3k | if (len) |
258 | 22.3k | out = CHUNKMEMSET(out, from, len); |
259 | | |
260 | 22.3k | return out; |
261 | 28.0k | }
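
A hypothetical caller sketch for CHUNKMEMSET_SAFE (emit_match, window_end, dist and copy_len are illustrative names, not taken from the template): `left` tells the routine how much output space remains, so when fewer than sizeof(chunk_t) bytes are left (and masked writes are unavailable) it degrades to byte-at-a-time copies instead of risking the chunked overwrite.

    /* Hypothetical caller: copy `copy_len` bytes of an LZ77 back-reference at
     * distance `dist`, without writing past the end of the output window. */
    static uint8_t *emit_match(uint8_t *out, uint8_t *window_end,
                               unsigned dist, unsigned copy_len) {
        unsigned left = (unsigned)(window_end - out);    /* output bytes still writable */
        return CHUNKMEMSET_SAFE(out, out - dist, copy_len, left);  /* copy_len is clamped to left inside */
    }
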
262 | | |
263 | | static inline uint8_t *CHUNKCOPY_SAFE(uint8_t *out, uint8_t *from, uint64_t len, uint8_t *safe) |
264 | 8.37k | { |
265 | 8.37k | if (out == from) |
266 | 0 | return out + len; |
267 | | |
268 | 8.37k | uint64_t safelen = (safe - out); |
269 | 8.37k | len = MIN(len, safelen); |
270 | |
271 | | #ifndef HAVE_MASKED_READWRITE |
272 | | uint64_t from_dist = (uint64_t)llabs(safe - from); |
273 | 8.37k | if (UNLIKELY(from_dist < sizeof(chunk_t) || safelen < sizeof(chunk_t))) { |
274 | 183 | while (len--) { |
275 | 155 | *out++ = *from++; |
276 | 155 | } |
277 | | |
278 | 28 | return out; |
279 | 28 | } |
280 | 8.34k | #endif |
281 | | |
282 | 8.34k | return CHUNKMEMSET(out, from, (unsigned)len); |
283 | 8.37k | }