Coverage Report

Created: 2026-01-10 06:46

/src/zlib-ng/chunkset_tpl.h
Line |  Count | Source
   1 |        | /* chunkset_tpl.h -- inline functions to copy small data chunks.
   2 |        |  * For conditions of distribution and use, see copyright notice in zlib.h
   3 |        |  */
   4 |        |
   5 |        | #include "zbuild.h"
   6 |        | #include <stdlib.h>
   7 |        |
   8 |        | /* Returns the chunk size */
   9 |  1.05M | static inline size_t CHUNKSIZE(void) {
  10 |  1.05M |     return sizeof(chunk_t);
  11 |  1.05M | }
Unexecuted instantiation: chunkset_sse2.c:chunksize_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunksize_ssse3
chunkset_avx2.c:chunksize_avx2 (executed instantiation; per-line counts identical to the listing above)
Unexecuted instantiation: chunkset_avx512.c:chunksize_avx512
  12 |        |
  13 |        | /* Behave like memcpy, but assume that it's OK to overwrite at least
  14 |        |    chunk_t bytes of output even if the length is shorter than this,
  15 |        |    that the length is non-zero, and that `from` lags `out` by at least
  16 |        |    sizeof chunk_t bytes (or that they don't overlap at all or simply that
  17 |        |    the distance is less than the length of the copy).
  18 |        |
  19 |        |    Aside from better memory bus utilisation, this means that short copies
  20 |        |    (chunk_t bytes or fewer) will fall straight through the loop
  21 |        |    without iteration, which will hopefully make the branch prediction more
  22 |        |    reliable. */
  23 |        | #ifndef HAVE_CHUNKCOPY
  24 |  14.6M | static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
  25 |  14.6M |     Assert(len > 0, "chunkcopy should never have a length 0");
  26 |  14.6M |     chunk_t chunk;
  27 |  14.6M |     int32_t align = ((len - 1) % sizeof(chunk_t)) + 1;
  28 |  14.6M |     loadchunk(from, &chunk);
  29 |  14.6M |     storechunk(out, &chunk);
  30 |  14.6M |     out += align;
  31 |  14.6M |     from += align;
  32 |  14.6M |     len -= align;
  33 |  20.5M |     while (len > 0) {
  34 |  5.99M |         loadchunk(from, &chunk);
  35 |  5.99M |         storechunk(out, &chunk);
  36 |  5.99M |         out += sizeof(chunk_t);
  37 |  5.99M |         from += sizeof(chunk_t);
  38 |  5.99M |         len -= sizeof(chunk_t);
  39 |  5.99M |     }
  40 |  14.6M |     return out;
  41 |  14.6M | }
Unexecuted instantiation: chunkset_sse2.c:chunkcopy_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkcopy_ssse3
chunkset_avx2.c:chunkcopy_avx2 (executed instantiation; per-line counts identical to the listing above)
  42 |        | #endif
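
The comment at source lines 13-22 is the key to reading the counts above: the first load/store pair always moves a full chunk_t, and align = ((len - 1) % sizeof(chunk_t)) + 1 maps len into the range 1..sizeof(chunk_t), so whatever remains after the first advance is an exact multiple of the chunk size. A minimal scalar sketch of that arithmetic, assuming a hypothetical 16-byte chunk and plain memcpy in place of the loadchunk()/storechunk() intrinsics (illustration only, not the zlib-ng implementation):

    /* Sketch of the CHUNKCOPY remainder arithmetic with a fake 16-byte chunk. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define FAKE_CHUNK 16  /* stand-in for sizeof(chunk_t) */

    static uint8_t *sketch_chunkcopy(uint8_t *out, const uint8_t *from, unsigned len) {
        /* 'align' maps len into 1..FAKE_CHUNK, so after the first full-chunk copy
         * the remaining length is an exact multiple of FAKE_CHUNK. */
        unsigned align = ((len - 1) % FAKE_CHUNK) + 1;
        memcpy(out, from, FAKE_CHUNK);            /* may write up to FAKE_CHUNK - align extra bytes */
        out += align; from += align; len -= align;
        while (len > 0) {                         /* len is now a multiple of FAKE_CHUNK */
            memcpy(out, from, FAKE_CHUNK);
            out += FAKE_CHUNK; from += FAKE_CHUNK; len -= FAKE_CHUNK;
        }
        return out;
    }

    int main(void) {
        uint8_t src[64], dst[64 + FAKE_CHUNK];    /* destination padded for the deliberate over-write */
        for (int i = 0; i < 64; i++) src[i] = (uint8_t)i;
        for (unsigned len = 1; len <= 48; len++) {
            memset(dst, 0, sizeof(dst));
            uint8_t *end = sketch_chunkcopy(dst, src, len);
            printf("len=%2u advanced=%2td payload_ok=%d\n", len, end - dst, memcmp(dst, src, len) == 0);
        }
        return 0;
    }

With a 16-byte chunk, copies of at most one chunk never enter the while loop, which is consistent with the listing above: the loop body (lines 34-38) runs 5.99M times against 14.6M calls.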
  43 |        |
  44 |        | /* Perform short copies until distance can be rewritten as being at least
  45 |        |    sizeof chunk_t.
  46 |        |
  47 |        |    This assumes that it's OK to overwrite at least the first
  48 |        |    2*sizeof(chunk_t) bytes of output even if the copy is shorter than this.
  49 |        |    This assumption holds because inflate_fast() starts every iteration with at
  50 |        |    least 258 bytes of output space available (258 being the maximum length
  51 |        |    output from a single token; see inflate_fast()'s assumptions below). */
  52 |        | #ifndef HAVE_CHUNKUNROLL
  53 |      0 | static inline uint8_t* CHUNKUNROLL(uint8_t *out, unsigned *dist, unsigned *len) {
  54 |      0 |     unsigned char const *from = out - *dist;
  55 |      0 |     chunk_t chunk;
  56 |      0 |     while (*dist < *len && *dist < sizeof(chunk_t)) {
  57 |      0 |         loadchunk(from, &chunk);
  58 |      0 |         storechunk(out, &chunk);
  59 |      0 |         out += *dist;
  60 |      0 |         *len -= *dist;
  61 |      0 |         *dist += *dist;
  62 |      0 |     }
  63 |      0 |     return out;
  64 |      0 | }
Unexecuted instantiation: chunkset_sse2.c:chunkunroll_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkunroll_ssse3
Unexecuted instantiation: chunkset_avx2.c:chunkunroll_avx2
Unexecuted instantiation: chunkset_avx512.c:chunkunroll_avx512
  65 |        | #endif
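
CHUNKUNROLL (source lines 53-64, never executed in this run) widens a short repetition distance by replaying the bytes already written: each pass copies dist bytes and doubles dist, until the distance is at least one chunk_t and an ordinary chunked copy can no longer read bytes it has not yet written. A scalar sketch of the doubling, assuming a hypothetical 16-byte chunk; sketch_chunkunroll is an illustrative stand-in, not the zlib-ng function:

    #include <stdio.h>
    #include <string.h>

    #define FAKE_CHUNK 16

    static unsigned char *sketch_chunkunroll(unsigned char *out, unsigned *dist, unsigned *len) {
        const unsigned char *from = out - *dist;
        while (*dist < *len && *dist < FAKE_CHUNK) {
            memcpy(out, from, *dist);   /* emit one period of the pattern */
            out   += *dist;
            *len  -= *dist;
            *dist += *dist;             /* the repeated pattern is now twice as long */
        }
        return out;
    }

    int main(void) {
        unsigned char buf[128] = "abc";  /* a 3-byte pattern sits just before 'out' */
        unsigned dist = 3, len = 60;
        unsigned char *out = sketch_chunkunroll(buf + 3, &dist, &len);
        printf("dist=%u len=%u produced=%.*s\n", dist, len, (int)(out - buf), (const char *)buf);
        return 0;
    }

Starting from dist = 3 the distance grows 3 -> 6 -> 12 -> 24, after which a 16-byte chunked copy is safe.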
  66 |        |
  67 |        | #ifndef HAVE_CHUNK_MAG
  68 |        | /* Loads a magazine to feed into memory of the pattern */
  69 |      0 | static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
  70 |        |         /* This code takes string of length dist from "from" and repeats
  71 |        |          * it for as many times as can fit in a chunk_t (vector register) */
  72 |      0 |         uint64_t cpy_dist;
  73 |      0 |         uint64_t bytes_remaining = sizeof(chunk_t);
  74 |      0 |         chunk_t chunk_load;
  75 |      0 |         uint8_t *cur_chunk = (uint8_t *)&chunk_load;
  76 |      0 |         while (bytes_remaining) {
  77 |      0 |             cpy_dist = MIN(dist, bytes_remaining);
  78 |      0 |             memcpy(cur_chunk, buf, (size_t)cpy_dist);
  79 |      0 |             bytes_remaining -= cpy_dist;
  80 |      0 |             cur_chunk += cpy_dist;
  81 |        |             /* This allows us to bypass an expensive integer division since we're effectively
  82 |        |              * counting in this loop, anyway */
  83 |      0 |             *chunk_rem = (uint32_t)cpy_dist;
  84 |      0 |         }
  85 |        |
  86 |      0 |         return chunk_load;
  87 |      0 | }
  88 |        | #endif
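
GET_CHUNK_MAG (source lines 69-87, uncounted here, presumably because every built variant supplies its own HAVE_CHUNK_MAG implementation) tiles the dist-byte pattern into a register-sized "magazine" and leaves the size of the final, possibly partial, copy in chunk_rem, so that sizeof(chunk_t) - chunk_rem is a whole number of pattern repeats and no integer division is needed. A scalar sketch, assuming a hypothetical 16-byte chunk (sketch_chunk_mag and FAKE_CHUNK are illustrative names, not the vectorised zlib-ng variants):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define FAKE_CHUNK 16
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void sketch_chunk_mag(uint8_t mag[FAKE_CHUNK], const uint8_t *buf,
                                 uint32_t *chunk_rem, uint32_t dist) {
        uint64_t bytes_remaining = FAKE_CHUNK, cpy_dist;
        uint8_t *cur = mag;
        while (bytes_remaining) {
            cpy_dist = MIN(dist, bytes_remaining);
            memcpy(cur, buf, (size_t)cpy_dist);
            bytes_remaining -= cpy_dist;
            cur += cpy_dist;
            *chunk_rem = (uint32_t)cpy_dist;   /* size of the last copy, counted as we go */
        }
    }

    int main(void) {
        const uint8_t pattern[] = "ABCDE";     /* dist = 5 */
        uint8_t mag[FAKE_CHUNK];
        uint32_t rem = 0;
        sketch_chunk_mag(mag, pattern, &rem, 5);
        /* prints: magazine=ABCDEABCDEABCDEA chunk_rem=1 adv=15 */
        printf("magazine=%.16s chunk_rem=%u adv=%u\n", (const char *)mag, rem, FAKE_CHUNK - rem);
        return 0;
    }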
  89 |        |
  90 |        | #if defined(HAVE_HALF_CHUNK) && !defined(HAVE_HALFCHUNKCOPY)
  91 |  4.03k | static inline uint8_t* HALFCHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
  92 |  4.03k |     halfchunk_t chunk;
  93 |  4.03k |     int32_t align = ((len - 1) % sizeof(halfchunk_t)) + 1;
  94 |  4.03k |     loadhalfchunk(from, &chunk);
  95 |  4.03k |     storehalfchunk(out, &chunk);
  96 |  4.03k |     out += align;
  97 |  4.03k |     from += align;
  98 |  4.03k |     len -= align;
  99 |  4.03k |     while (len > 0) {
 100 |      0 |         loadhalfchunk(from, &chunk);
 101 |      0 |         storehalfchunk(out, &chunk);
 102 |      0 |         out += sizeof(halfchunk_t);
 103 |      0 |         from += sizeof(halfchunk_t);
 104 |      0 |         len -= sizeof(halfchunk_t);
 105 |      0 |     }
 106 |  4.03k |     return out;
 107 |  4.03k | }
 108 |        | #endif
 109 |        |
 110 |        | /* Copy DIST bytes from OUT - DIST into OUT + DIST * k, for 0 <= k < LEN/DIST.
 111 |        |    Return OUT + LEN. */
 112 |  1.09M | static inline uint8_t* CHUNKMEMSET(uint8_t *out, uint8_t *from, unsigned len) {
 113 |        |     /* Debug performance related issues when len < sizeof(uint64_t):
 114 |        |        Assert(len >= sizeof(uint64_t), "chunkmemset should be called on larger chunks"); */
 115 |  1.09M |     Assert(from != out, "chunkmemset cannot have a distance 0");
 116 |        |
 117 |  1.09M |     chunk_t chunk_load;
 118 |  1.09M |     uint32_t chunk_mod = 0;
 119 |  1.09M |     uint32_t adv_amount;
 120 |  1.09M |     int64_t sdist = out - from;
 121 |  1.09M |     uint64_t dist = llabs(sdist);
 122 |        |
 123 |        |     /* We are supporting the case for when we are reading bytes from ahead in the buffer.
 124 |        |      * We now have to handle this, though it wasn't _quite_ clear if this rare circumstance
 125 |        |      * always needed to be handled here or if we're just now seeing it because we are
 126 |        |      * dispatching to this function, more */
 127 |  1.09M |     if (sdist < 0 && dist < len) {
 128 |        | #ifdef HAVE_MASKED_READWRITE
 129 |        |         /* We can still handle this case if we can mitigate over writing _and_ we
 130 |        |          * fit the entirety of the copy length with one load */
 131 |      0 |         if (len <= sizeof(chunk_t)) {
 132 |        |             /* Tempting to add a goto to the block below but hopefully most compilers
 133 |        |              * collapse these identical code segments as one label to jump to */
 134 |      0 |             return CHUNKCOPY(out, from, len);
 135 |      0 |         }
 136 |      0 | #endif
 137 |        |         /* Here the memmove semantics match perfectly, as when this happens we are
 138 |        |          * effectively sliding down the contents of memory by dist bytes */
 139 |      0 |         memmove(out, from, len);
 140 |      0 |         return out + len;
 141 |      0 |     }
 142 |        |
 143 |  1.09M |     if (dist == 1) {
 144 |   583k |         memset(out, *from, len);
 145 |   583k |         return out + len;
 146 |   583k |     } else if (dist >= sizeof(chunk_t)) {
 147 |  36.7k |         return CHUNKCOPY(out, from, len);
 148 |  36.7k |     }
 149 |        |
 150 |        |     /* Only AVX2+ as there's 128 bit vectors and 256 bit. We allow for shorter vector
 151 |        |      * lengths because they serve to allow more cases to fall into chunkcopy, as the
 152 |        |      * distance of the shorter length is still deemed a safe distance. We rewrite this
 153 |        |      * here rather than calling the ssse3 variant directly now because doing so required
 154 |        |      * dispatching to another function and broke inlining for this function entirely. We
 155 |        |      * also can merge an assert and some remainder peeling behavior into the same code blocks,
 156 |        |      * making the code a little smaller.  */
 157 |        | #ifdef HAVE_HALF_CHUNK
 158 |   472k |     if (len <= sizeof(halfchunk_t)) {
 159 |   208k |         if (dist >= sizeof(halfchunk_t))
 160 |  4.03k |             return HALFCHUNKCOPY(out, from, len);
 161 |        |
 162 |   204k |         if ((dist % 2) != 0 || dist == 6) {
 163 |  59.3k |             halfchunk_t halfchunk_load = GET_HALFCHUNK_MAG(from, &chunk_mod, (unsigned)dist);
 164 |        |
 165 |  59.3k |             if (len == sizeof(halfchunk_t)) {
 166 |  1.31k |                 storehalfchunk(out, &halfchunk_load);
 167 |  1.31k |                 len -= sizeof(halfchunk_t);
 168 |  1.31k |                 out += sizeof(halfchunk_t);
 169 |  1.31k |             }
 170 |        |
 171 |  59.3k |             chunk_load = halfchunk2whole(&halfchunk_load);
 172 |  59.3k |             goto rem_bytes;
 173 |  59.3k |         }
 174 |   204k |     }
 175 |   408k | #endif
 176 |        |
 177 |   408k | #ifdef HAVE_CHUNKMEMSET_2
 178 |   408k |     if (dist == 2) {
 179 |   147k |         chunkmemset_2(from, &chunk_load);
 180 |   147k |     } else
 181 |   261k | #endif
 182 |   261k | #ifdef HAVE_CHUNKMEMSET_4
 183 |   261k |     if (dist == 4) {
 184 |  53.1k |         chunkmemset_4(from, &chunk_load);
 185 |  53.1k |     } else
 186 |   208k | #endif
 187 |   208k | #ifdef HAVE_CHUNKMEMSET_8
 188 |   208k |     if (dist == 8) {
 189 |  7.63k |         chunkmemset_8(from, &chunk_load);
 190 |  7.63k |     } else
 191 |   200k | #endif
 192 |        | #ifdef HAVE_CHUNKMEMSET_16
 193 |   200k |     if (dist == 16) {
 194 |  2.14k |         chunkmemset_16(from, &chunk_load);
 195 |  2.14k |     } else
 196 |   198k | #endif
 197 |   198k |     chunk_load = GET_CHUNK_MAG(from, &chunk_mod, (unsigned)dist);
 198 |        |
 199 |      0 |     adv_amount = sizeof(chunk_t) - chunk_mod;
 200 |        |
 201 |   893k |     while (len >= (2 * sizeof(chunk_t))) {
 202 |   484k |         storechunk(out, &chunk_load);
 203 |   484k |         storechunk(out + adv_amount, &chunk_load);
 204 |   484k |         out += 2 * adv_amount;
 205 |   484k |         len -= 2 * adv_amount;
 206 |   484k |     }
 207 |        |
 208 |        |     /* If we don't have a "dist" length that divides evenly into a vector
 209 |        |      * register, we can write the whole vector register but we need only
 210 |        |      * advance by the amount of the whole string that fits in our chunk_t.
 211 |        |      * If we do divide evenly into the vector length, adv_amount = chunk_t size*/
 212 |   463k |     while (len >= sizeof(chunk_t)) {
 213 |  55.0k |         storechunk(out, &chunk_load);
 214 |  55.0k |         len -= adv_amount;
 215 |  55.0k |         out += adv_amount;
 216 |  55.0k |     }
 217 |        |
 218 |        | #ifdef HAVE_HALF_CHUNK
 219 |   468k | rem_bytes:
 220 |   468k | #endif
 221 |   468k |     if (len) {
 222 |   463k |         memcpy(out, &chunk_load, len);
 223 |   463k |         out += len;
 224 |   463k |     }
 225 |        |
 226 |   468k |     return out;
 227 |   408k | }
Unexecuted instantiation: chunkset_sse2.c:chunkmemset_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkmemset_ssse3
chunkset_avx2.c:chunkmemset_avx2 (executed instantiation; per-line counts match the listing above, except that the HAVE_MASKED_READWRITE block at lines 131-136 is not compiled for this instantiation and lines 157, 199 and 218 are counted at 472k, 408k and 408k respectively)
Unexecuted instantiation: chunkset_avx512.c:chunkmemset_avx512
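
The contract at source lines 110-111 is equivalent to the overlapped byte loop while (len--) *out++ = *from++; with from = out - dist; the body of CHUNKMEMSET exists to produce the same result with whole-chunk stores. The subtle step is the store loop at lines 199-216: the chunk register already holds the repeated pattern, every store writes the full chunk, but the pointer advances only by adv_amount = sizeof(chunk_t) - chunk_rem, a whole number of pattern repeats, so the pattern stays in phase when dist does not divide the chunk evenly (the comment at lines 208-211). A scalar sketch of that mechanism, assuming a hypothetical 16-byte chunk (illustration only, not zlib-ng code):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define FAKE_CHUNK 16

    int main(void) {
        const char *pattern = "abcde";               /* dist = 5 */
        uint8_t chunk[FAKE_CHUNK];
        uint32_t dist = 5, chunk_rem = 0;

        /* Fill the "magazine": copies of 5, 5, 5 and 1 bytes, so chunk_rem = 1. */
        for (uint32_t filled = 0; filled < FAKE_CHUNK; ) {
            uint32_t n = (FAKE_CHUNK - filled < dist) ? FAKE_CHUNK - filled : dist;
            memcpy(chunk + filled, pattern, n);
            filled += n;
            chunk_rem = n;
        }

        uint8_t outbuf[64 + FAKE_CHUNK];             /* padded: whole-chunk stores over-write */
        memset(outbuf, '.', sizeof(outbuf));
        uint8_t *out = outbuf;
        unsigned len = 40;
        uint32_t adv_amount = FAKE_CHUNK - chunk_rem;   /* 15 = three repeats of "abcde" */

        while (len >= FAKE_CHUNK) {                  /* write 16 bytes, advance 15, stay in phase */
            memcpy(out, chunk, FAKE_CHUNK);
            out += adv_amount;
            len -= adv_amount;
        }
        if (len) {                                   /* tail handling, as at source lines 221-224 */
            memcpy(out, chunk, len);
            out += len;
        }
        printf("%.*s\n", (int)(out - outbuf), (const char *)outbuf);  /* 40 bytes of "abcde" repeated */
        return 0;
    }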
 228 |        |
 229 |  79.7k | Z_INTERNAL uint8_t* CHUNKMEMSET_SAFE(uint8_t *out, uint8_t *from, unsigned len, unsigned left) {
 230 |        | #if OPTIMAL_CMP < 32
 231 |        |     static const uint32_t align_mask = 7;
 232 |        | #elif OPTIMAL_CMP == 32
 233 |        |     static const uint32_t align_mask = 3;
 234 |        | #endif
 235 |        |
 236 |  79.7k |     len = MIN(len, left);
 237 |        |
 238 |        | #if OPTIMAL_CMP < 64
 239 |        |     while (((uintptr_t)out & align_mask) && (len > 0)) {
 240 |        |         *out++ = *from++;
 241 |        |         --len;
 242 |        |         --left;
 243 |        |     }
 244 |        | #endif
 245 |        |
 246 |        | #ifndef HAVE_MASKED_READWRITE
 247 |  79.7k |     if (UNLIKELY(left < sizeof(chunk_t))) {
 248 |   130k |         while (len > 0) {
 249 |   113k |             *out++ = *from++;
 250 |   113k |             --len;
 251 |   113k |         }
 252 |        |
 253 |  16.5k |         return out;
 254 |  16.5k |     }
 255 |  63.1k | #endif
 256 |        |
 257 |  63.1k |     if (len)
 258 |  63.1k |         out = CHUNKMEMSET(out, from, len);
 259 |        |
 260 |  63.1k |     return out;
 261 |  79.7k | }
Unexecuted instantiation: chunkmemset_safe_sse2
Unexecuted instantiation: chunkmemset_safe_ssse3
chunkmemset_safe_avx2 (executed instantiation; per-line counts match the listing above, with line 246 additionally counted at 79.7k)
Unexecuted instantiation: chunkmemset_safe_avx512
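
CHUNKMEMSET_SAFE is the bounds-checked entry point: the request is clamped to the space actually left, and when fewer than sizeof(chunk_t) bytes remain (and masked writes are unavailable) it falls back to a byte-at-a-time loop so the over-writing fast path can never spill past the buffer. That split is visible in the counts above: 16.5k calls return from the byte loop at line 253, while 63.1k reach CHUNKMEMSET at line 258. A minimal sketch of the guard, assuming a hypothetical 16-byte chunk; fast_fill and sketch_chunkmemset_safe are illustrative stand-ins, not zlib-ng code:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define FAKE_CHUNK 16
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Stand-in for CHUNKMEMSET: copies exactly len bytes here, but the real fast
     * path may also scribble up to a chunk beyond them, which is why the
     * 'left < FAKE_CHUNK' guard below drops to a byte loop near the buffer end. */
    static uint8_t *fast_fill(uint8_t *out, uint8_t *from, unsigned len) {
        while (len--)
            *out++ = *from++;
        return out;
    }

    static uint8_t *sketch_chunkmemset_safe(uint8_t *out, uint8_t *from, unsigned len, unsigned left) {
        len = MIN(len, left);             /* never produce more than there is room for */
        if (left < FAKE_CHUNK) {          /* too close to the end for over-writing stores */
            while (len > 0) {
                *out++ = *from++;
                --len;
            }
            return out;
        }
        return len ? fast_fill(out, from, len) : out;
    }

    int main(void) {
        uint8_t window[32];
        memset(window, '.', sizeof(window));
        memcpy(window, "ab", 2);
        /* Plenty of room: the 12-byte request is honoured in full. */
        uint8_t *end = sketch_chunkmemset_safe(window + 2, window, 12, 30);
        /* Pretend only 5 bytes of space remain: the next 12-byte request is clamped. */
        end = sketch_chunkmemset_safe(end, end - 2, 12, 5);
        printf("%.32s wrote_last=%td\n", (const char *)window, end - (window + 14));
        return 0;
    }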
 262 |        |
 263 |        | static inline uint8_t *CHUNKCOPY_SAFE(uint8_t *out, uint8_t *from, uint64_t len, uint8_t *safe)
 264 |      0 | {
 265 |      0 |     if (out == from)
 266 |      0 |         return out + len;
 267 |        |
 268 |      0 |     uint64_t safelen = (safe - out);
 269 |      0 |     len = MIN(len, safelen);
 270 |        |
 271 |        | #ifndef HAVE_MASKED_READWRITE
 272 |        |     uint64_t from_dist = (uint64_t)llabs(safe - from);
 273 |      0 |     if (UNLIKELY(from_dist < sizeof(chunk_t) || safelen < sizeof(chunk_t))) {
 274 |      0 |         while (len--) {
 275 |      0 |             *out++ = *from++;
 276 |      0 |         }
 277 |        |
 278 |      0 |         return out;
 279 |      0 |     }
 280 |      0 | #endif
 281 |        |
 282 |      0 |     return CHUNKMEMSET(out, from, (unsigned)len);
 283 |      0 | }
Unexecuted instantiation: chunkset_sse2.c:CHUNKCOPY_SAFE
Unexecuted instantiation: chunkset_ssse3.c:CHUNKCOPY_SAFE
Unexecuted instantiation: chunkset_avx2.c:CHUNKCOPY_SAFE
Unexecuted instantiation: chunkset_avx512.c:CHUNKCOPY_SAFE