Coverage Report

Created: 2026-01-10 06:46

/src/zlib-ng/chunkset_tpl.h
Line|  Count|Source
   1|       |/* chunkset_tpl.h -- inline functions to copy small data chunks.
   2|       | * For conditions of distribution and use, see copyright notice in zlib.h
   3|       | */
   4|       |
   5|       |#include "zbuild.h"
   6|       |#include <stdlib.h>
   7|       |
   8|       |/* Returns the chunk size */
   9|  5.84M|static inline size_t CHUNKSIZE(void) {
  10|  5.84M|    return sizeof(chunk_t);
  11|  5.84M|}
Unexecuted instantiation: chunkset_sse2.c:chunksize_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunksize_ssse3
chunkset_avx2.c:chunksize_avx2
Line|  Count|Source
   9|  5.84M|static inline size_t CHUNKSIZE(void) {
  10|  5.84M|    return sizeof(chunk_t);
  11|  5.84M|}
Unexecuted instantiation: chunkset_avx512.c:chunksize_avx512
  12|       |
  13|       |/* Behave like memcpy, but assume that it's OK to overwrite at least
  14|       |   chunk_t bytes of output even if the length is shorter than this,
  15|       |   that the length is non-zero, and that `from` lags `out` by at least
  16|       |   sizeof chunk_t bytes (or that they don't overlap at all or simply that
  17|       |   the distance is less than the length of the copy).
  18|       |
  19|       |   Aside from better memory bus utilisation, this means that short copies
  20|       |   (chunk_t bytes or fewer) will fall straight through the loop
  21|       |   without iteration, which will hopefully make the branch prediction more
  22|       |   reliable. */
  23|       |#ifndef HAVE_CHUNKCOPY
  24|  25.9M|static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
  25|  25.9M|    Assert(len > 0, "chunkcopy should never have a length 0");
  26|  25.9M|    chunk_t chunk;
  27|  25.9M|    int32_t align = ((len - 1) % sizeof(chunk_t)) + 1;
  28|  25.9M|    loadchunk(from, &chunk);
  29|  25.9M|    storechunk(out, &chunk);
  30|  25.9M|    out += align;
  31|  25.9M|    from += align;
  32|  25.9M|    len -= align;
  33|  34.1M|    while (len > 0) {
  34|  8.21M|        loadchunk(from, &chunk);
  35|  8.21M|        storechunk(out, &chunk);
  36|  8.21M|        out += sizeof(chunk_t);
  37|  8.21M|        from += sizeof(chunk_t);
  38|  8.21M|        len -= sizeof(chunk_t);
  39|  8.21M|    }
  40|  25.9M|    return out;
  41|  25.9M|}
Unexecuted instantiation: chunkset_sse2.c:chunkcopy_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkcopy_ssse3
chunkset_avx2.c:chunkcopy_avx2
Line|  Count|Source
  24|  25.9M|static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
  25|  25.9M|    Assert(len > 0, "chunkcopy should never have a length 0");
  26|  25.9M|    chunk_t chunk;
  27|  25.9M|    int32_t align = ((len - 1) % sizeof(chunk_t)) + 1;
  28|  25.9M|    loadchunk(from, &chunk);
  29|  25.9M|    storechunk(out, &chunk);
  30|  25.9M|    out += align;
  31|  25.9M|    from += align;
  32|  25.9M|    len -= align;
  33|  34.1M|    while (len > 0) {
  34|  8.21M|        loadchunk(from, &chunk);
  35|  8.21M|        storechunk(out, &chunk);
  36|  8.21M|        out += sizeof(chunk_t);
  37|  8.21M|        from += sizeof(chunk_t);
  38|  8.21M|        len -= sizeof(chunk_t);
  39|  8.21M|    }
  40|  25.9M|    return out;
  41|  25.9M|}
  42|       |#endif
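A note on the CHUNKCOPY listing above: `align = ((len - 1) % sizeof(chunk_t)) + 1` maps any non-zero len to a value in 1..sizeof(chunk_t), so the first over-writing store covers the odd-sized remainder and whatever is left is an exact number of whole chunks. The following is a standalone sketch of that control flow under assumed stand-ins, not the template's code: the 16-byte chunk_t and the memcpy-based loadchunk/storechunk replace the real SIMD definitions.

/* Standalone sketch of CHUNKCOPY's remainder-first copy (illustration only). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t b[16]; } chunk_t;            /* stand-in "register" */

static void loadchunk(const uint8_t *s, chunk_t *c)  { memcpy(c->b, s, sizeof(c->b)); }
static void storechunk(uint8_t *d, const chunk_t *c) { memcpy(d, c->b, sizeof(c->b)); }

static uint8_t *chunkcopy_sketch(uint8_t *out, const uint8_t *from, unsigned len) {
    assert(len > 0);
    chunk_t chunk;
    /* len reduced to 1..sizeof(chunk_t); the first full-width store covers it. */
    unsigned align = ((len - 1) % sizeof(chunk_t)) + 1;
    loadchunk(from, &chunk);
    storechunk(out, &chunk);
    out += align;  from += align;  len -= align;      /* now len % sizeof(chunk_t) == 0 */
    while (len > 0) {
        loadchunk(from, &chunk);
        storechunk(out, &chunk);
        out += sizeof(chunk_t);  from += sizeof(chunk_t);  len -= sizeof(chunk_t);
    }
    return out;
}

int main(void) {
    /* dst is padded by one chunk because the sketch, like the real code, may
     * write up to sizeof(chunk_t) bytes even when len is shorter. */
    uint8_t src[64], dst[64 + sizeof(chunk_t)];
    for (unsigned i = 0; i < sizeof(src); i++) src[i] = (uint8_t)i;
    for (unsigned len = 1; len <= 40; len++) {
        memset(dst, 0xAA, sizeof(dst));
        uint8_t *end = chunkcopy_sketch(dst, src, len);
        assert(end == dst + len && memcmp(dst, src, len) == 0);
    }
    puts("chunkcopy sketch ok");
    return 0;
}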
  43|       |
  44|       |/* Perform short copies until distance can be rewritten as being at least
  45|       |   sizeof chunk_t.
  46|       |
  47|       |   This assumes that it's OK to overwrite at least the first
  48|       |   2*sizeof(chunk_t) bytes of output even if the copy is shorter than this.
  49|       |   This assumption holds because inflate_fast() starts every iteration with at
  50|       |   least 258 bytes of output space available (258 being the maximum length
  51|       |   output from a single token; see inflate_fast()'s assumptions below). */
  52|       |#ifndef HAVE_CHUNKUNROLL
  53|  1.54k|static inline uint8_t* CHUNKUNROLL(uint8_t *out, unsigned *dist, unsigned *len) {
  54|  1.54k|    unsigned char const *from = out - *dist;
  55|  1.54k|    chunk_t chunk;
  56|  3.89k|    while (*dist < *len && *dist < sizeof(chunk_t)) {
  57|  2.34k|        loadchunk(from, &chunk);
  58|  2.34k|        storechunk(out, &chunk);
  59|  2.34k|        out += *dist;
  60|  2.34k|        *len -= *dist;
  61|  2.34k|        *dist += *dist;
  62|  2.34k|    }
  63|  1.54k|    return out;
  64|  1.54k|}
Unexecuted instantiation: chunkset_sse2.c:chunkunroll_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkunroll_ssse3
chunkset_avx2.c:chunkunroll_avx2
Line|  Count|Source
  53|  1.54k|static inline uint8_t* CHUNKUNROLL(uint8_t *out, unsigned *dist, unsigned *len) {
  54|  1.54k|    unsigned char const *from = out - *dist;
  55|  1.54k|    chunk_t chunk;
  56|  3.89k|    while (*dist < *len && *dist < sizeof(chunk_t)) {
  57|  2.34k|        loadchunk(from, &chunk);
  58|  2.34k|        storechunk(out, &chunk);
  59|  2.34k|        out += *dist;
  60|  2.34k|        *len -= *dist;
  61|  2.34k|        *dist += *dist;
  62|  2.34k|    }
  63|  1.54k|    return out;
  64|  1.54k|}
Unexecuted instantiation: chunkset_avx512.c:chunkunroll_avx512
  65|       |#endif
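A note on CHUNKUNROLL above: each pass lays down one more copy of the dist-byte pattern, so after the write the same pattern spans twice the distance, which is why `*dist += *dist` is correct; the loop stops once the distance reaches sizeof(chunk_t) and the plain chunk copy becomes safe. Below is a standalone sketch of that doubling using plain memcpy of exactly dist bytes; the real code instead stores a whole chunk_t and relies on the output slack guaranteed by inflate_fast(). CHUNK_SZ is a stand-in for sizeof(chunk_t).

/* Standalone sketch of CHUNKUNROLL's distance doubling (illustration only). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_SZ 16u   /* stand-in for sizeof(chunk_t) */

static uint8_t *chunkunroll_sketch(uint8_t *out, unsigned *dist, unsigned *len) {
    const uint8_t *from = out - *dist;
    while (*dist < *len && *dist < CHUNK_SZ) {
        memcpy(out, from, *dist);   /* replicate the current pattern once */
        out += *dist;
        *len -= *dist;
        *dist += *dist;             /* the pattern now repeats over twice the span */
    }
    return out;
}

int main(void) {
    uint8_t buf[128];
    memset(buf, 0, sizeof(buf));
    buf[0] = 'a'; buf[1] = 'b'; buf[2] = 'c';   /* 3-byte pattern already in the window */
    unsigned dist = 3, len = 60;
    uint8_t *out = chunkunroll_sketch(buf + 3, &dist, &len);
    /* dist has been doubled past CHUNK_SZ (3 -> 6 -> 12 -> 24), and everything
     * written so far is still the repeating "abc" pattern. */
    assert(dist >= CHUNK_SZ);
    assert(memcmp(buf, "abcabcabcabc", 12) == 0);
    printf("dist=%u len=%u written=%u\n", dist, len, (unsigned)(out - (buf + 3)));
    return 0;
}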
  66|       |
  67|       |#ifndef HAVE_CHUNK_MAG
  68|       |/* Loads a magazine to feed into memory of the pattern */
  69|      0|static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
  70|       |        /* This code takes string of length dist from "from" and repeats
  71|       |         * it for as many times as can fit in a chunk_t (vector register) */
  72|      0|        uint64_t cpy_dist;
  73|      0|        uint64_t bytes_remaining = sizeof(chunk_t);
  74|      0|        chunk_t chunk_load;
  75|      0|        uint8_t *cur_chunk = (uint8_t *)&chunk_load;
  76|      0|        while (bytes_remaining) {
  77|      0|            cpy_dist = MIN(dist, bytes_remaining);
  78|      0|            memcpy(cur_chunk, buf, (size_t)cpy_dist);
  79|      0|            bytes_remaining -= cpy_dist;
  80|      0|            cur_chunk += cpy_dist;
  81|       |            /* This allows us to bypass an expensive integer division since we're effectively
  82|       |             * counting in this loop, anyway */
  83|      0|            *chunk_rem = (uint32_t)cpy_dist;
  84|      0|        }
  85|       |
  86|      0|        return chunk_load;
  87|      0|}
  88|       |#endif
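A note on the generic GET_CHUNK_MAG fallback above (shown with zero counts in this report): it fills one chunk_t "magazine" with back-to-back copies of the dist-byte pattern, and because chunk_rem is overwritten on every pass it ends up holding the size of the final, possibly partial, copy without any division. The sketch below is a standalone illustration under assumed stand-ins (a 32-byte chunk_t and a local MIN), not the template's definitions.

/* Standalone sketch of the generic magazine-filling fallback (illustration only). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t b[32]; } chunk_t;    /* stand-in "register" */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static chunk_t get_chunk_mag_sketch(const uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
    chunk_t chunk_load;
    uint64_t bytes_remaining = sizeof(chunk_t);
    uint8_t *cur_chunk = chunk_load.b;
    while (bytes_remaining) {
        uint64_t cpy_dist = MIN(dist, bytes_remaining);
        memcpy(cur_chunk, buf, (size_t)cpy_dist);
        bytes_remaining -= cpy_dist;
        cur_chunk += cpy_dist;
        *chunk_rem = (uint32_t)cpy_dist;      /* last write wins: the leftover size */
    }
    return chunk_load;
}

int main(void) {
    const uint8_t pattern[5] = { 1, 2, 3, 4, 5 };
    uint32_t chunk_rem = 0;
    chunk_t mag = get_chunk_mag_sketch(pattern, &chunk_rem, 5);

    /* 32 = 6*5 + 2, so the magazine holds six whole patterns plus 2 bytes and
     * chunk_rem is 2; adv_amount = sizeof(chunk_t) - chunk_rem = 30 is then the
     * number of whole-pattern bytes one store usefully advances by. */
    assert(chunk_rem == 2);
    for (unsigned i = 0; i < 30; i++)
        assert(mag.b[i] == pattern[i % 5]);
    printf("chunk_rem=%u adv_amount=%u\n", chunk_rem, (unsigned)(sizeof(chunk_t) - chunk_rem));
    return 0;
}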
  89|       |
  90|       |#if defined(HAVE_HALF_CHUNK) && !defined(HAVE_HALFCHUNKCOPY)
  91|  7.39k|static inline uint8_t* HALFCHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
  92|  7.39k|    halfchunk_t chunk;
  93|  7.39k|    int32_t align = ((len - 1) % sizeof(halfchunk_t)) + 1;
  94|  7.39k|    loadhalfchunk(from, &chunk);
  95|  7.39k|    storehalfchunk(out, &chunk);
  96|  7.39k|    out += align;
  97|  7.39k|    from += align;
  98|  7.39k|    len -= align;
  99|  7.39k|    while (len > 0) {
 100|      0|        loadhalfchunk(from, &chunk);
 101|      0|        storehalfchunk(out, &chunk);
 102|      0|        out += sizeof(halfchunk_t);
 103|      0|        from += sizeof(halfchunk_t);
 104|      0|        len -= sizeof(halfchunk_t);
 105|      0|    }
 106|  7.39k|    return out;
 107|  7.39k|}
 108|       |#endif
 109|       |
 110|       |/* Copy DIST bytes from OUT - DIST into OUT + DIST * k, for 0 <= k < LEN/DIST.
 111|       |   Return OUT + LEN. */
 112|  6.19M|static inline uint8_t* CHUNKMEMSET(uint8_t *out, uint8_t *from, unsigned len) {
 113|       |    /* Debug performance related issues when len < sizeof(uint64_t):
 114|       |       Assert(len >= sizeof(uint64_t), "chunkmemset should be called on larger chunks"); */
 115|  6.19M|    Assert(from != out, "chunkmemset cannot have a distance 0");
 116|       |
 117|  6.19M|    chunk_t chunk_load;
 118|  6.19M|    uint32_t chunk_mod = 0;
 119|  6.19M|    uint32_t adv_amount;
 120|  6.19M|    int64_t sdist = out - from;
 121|  6.19M|    uint64_t dist = llabs(sdist);
 122|       |
 123|       |    /* We are supporting the case for when we are reading bytes from ahead in the buffer.
 124|       |     * We now have to handle this, though it wasn't _quite_ clear if this rare circumstance
 125|       |     * always needed to be handled here or if we're just now seeing it because we are
 126|       |     * dispatching to this function, more */
 127|  6.19M|    if (sdist < 0 && dist < len) {
 128|       |#ifdef HAVE_MASKED_READWRITE
 129|       |        /* We can still handle this case if we can mitigate over writing _and_ we
 130|       |         * fit the entirety of the copy length with one load */
 131|      0|        if (len <= sizeof(chunk_t)) {
 132|       |            /* Tempting to add a goto to the block below but hopefully most compilers
 133|       |             * collapse these identical code segments as one label to jump to */
 134|      0|            return CHUNKCOPY(out, from, len);
 135|      0|        }
 136|      0|#endif
 137|       |        /* Here the memmove semantics match perfectly, as when this happens we are
 138|       |         * effectively sliding down the contents of memory by dist bytes */
 139|      0|        memmove(out, from, len);
 140|      0|        return out + len;
 141|      0|    }
 142|       |
 143|  6.19M|    if (dist == 1) {
 144|  5.21M|        memset(out, *from, len);
 145|  5.21M|        return out + len;
 146|  5.21M|    } else if (dist >= sizeof(chunk_t)) {
 147|   327k|        return CHUNKCOPY(out, from, len);
 148|   327k|    }
 149|       |
 150|       |    /* Only AVX2+ as there's 128 bit vectors and 256 bit. We allow for shorter vector
 151|       |     * lengths because they serve to allow more cases to fall into chunkcopy, as the
 152|       |     * distance of the shorter length is still deemed a safe distance. We rewrite this
 153|       |     * here rather than calling the ssse3 variant directly now because doing so required
 154|       |     * dispatching to another function and broke inlining for this function entirely. We
 155|       |     * also can merge an assert and some remainder peeling behavior into the same code blocks,
 156|       |     * making the code a little smaller.  */
 157|       |#ifdef HAVE_HALF_CHUNK
 158|   659k|    if (len <= sizeof(halfchunk_t)) {
 159|   356k|        if (dist >= sizeof(halfchunk_t))
 160|  7.39k|            return HALFCHUNKCOPY(out, from, len);
 161|       |
 162|   349k|        if ((dist % 2) != 0 || dist == 6) {
 163|  80.0k|            halfchunk_t halfchunk_load = GET_HALFCHUNK_MAG(from, &chunk_mod, (unsigned)dist);
 164|       |
 165|  80.0k|            if (len == sizeof(halfchunk_t)) {
 166|  2.79k|                storehalfchunk(out, &halfchunk_load);
 167|  2.79k|                len -= sizeof(halfchunk_t);
 168|  2.79k|                out += sizeof(halfchunk_t);
 169|  2.79k|            }
 170|       |
 171|  80.0k|            chunk_load = halfchunk2whole(&halfchunk_load);
 172|  80.0k|            goto rem_bytes;
 173|  80.0k|        }
 174|   349k|    }
 175|   572k|#endif
 176|       |
 177|   572k|#ifdef HAVE_CHUNKMEMSET_2
 178|   572k|    if (dist == 2) {
 179|   243k|        chunkmemset_2(from, &chunk_load);
 180|   243k|    } else
 181|   328k|#endif
 182|   328k|#ifdef HAVE_CHUNKMEMSET_4
 183|   328k|    if (dist == 4) {
 184|  83.6k|        chunkmemset_4(from, &chunk_load);
 185|  83.6k|    } else
 186|   245k|#endif
 187|   245k|#ifdef HAVE_CHUNKMEMSET_8
 188|   245k|    if (dist == 8) {
 189|  12.7k|        chunkmemset_8(from, &chunk_load);
 190|  12.7k|    } else
 191|   232k|#endif
 192|       |#ifdef HAVE_CHUNKMEMSET_16
 193|   232k|    if (dist == 16) {
 194|  7.16k|        chunkmemset_16(from, &chunk_load);
 195|  7.16k|    } else
 196|   225k|#endif
 197|   225k|    chunk_load = GET_CHUNK_MAG(from, &chunk_mod, (unsigned)dist);
 198|       |
 199|      0|    adv_amount = sizeof(chunk_t) - chunk_mod;
 200|       |
 201|  1.11M|    while (len >= (2 * sizeof(chunk_t))) {
 202|   540k|        storechunk(out, &chunk_load);
 203|   540k|        storechunk(out + adv_amount, &chunk_load);
 204|   540k|        out += 2 * adv_amount;
 205|   540k|        len -= 2 * adv_amount;
 206|   540k|    }
 207|       |
 208|       |    /* If we don't have a "dist" length that divides evenly into a vector
 209|       |     * register, we can write the whole vector register but we need only
 210|       |     * advance by the amount of the whole string that fits in our chunk_t.
 211|       |     * If we do divide evenly into the vector length, adv_amount = chunk_t size*/
 212|   646k|    while (len >= sizeof(chunk_t)) {
 213|  74.0k|        storechunk(out, &chunk_load);
 214|  74.0k|        len -= adv_amount;
 215|  74.0k|        out += adv_amount;
 216|  74.0k|    }
 217|       |
 218|       |#ifdef HAVE_HALF_CHUNK
 219|   652k|rem_bytes:
 220|   652k|#endif
 221|   652k|    if (len) {
 222|   645k|        memcpy(out, &chunk_load, len);
 223|   645k|        out += len;
 224|   645k|    }
 225|       |
 226|   652k|    return out;
 227|   572k|}
Unexecuted instantiation: chunkset_sse2.c:chunkmemset_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkmemset_ssse3
chunkset_avx2.c:chunkmemset_avx2
Line|  Count|Source
 112|  6.19M|static inline uint8_t* CHUNKMEMSET(uint8_t *out, uint8_t *from, unsigned len) {
 113|       |    /* Debug performance related issues when len < sizeof(uint64_t):
 114|       |       Assert(len >= sizeof(uint64_t), "chunkmemset should be called on larger chunks"); */
 115|  6.19M|    Assert(from != out, "chunkmemset cannot have a distance 0");
 116|       |
 117|  6.19M|    chunk_t chunk_load;
 118|  6.19M|    uint32_t chunk_mod = 0;
 119|  6.19M|    uint32_t adv_amount;
 120|  6.19M|    int64_t sdist = out - from;
 121|  6.19M|    uint64_t dist = llabs(sdist);
 122|       |
 123|       |    /* We are supporting the case for when we are reading bytes from ahead in the buffer.
 124|       |     * We now have to handle this, though it wasn't _quite_ clear if this rare circumstance
 125|       |     * always needed to be handled here or if we're just now seeing it because we are
 126|       |     * dispatching to this function, more */
 127|  6.19M|    if (sdist < 0 && dist < len) {
 128|       |#ifdef HAVE_MASKED_READWRITE
 129|       |        /* We can still handle this case if we can mitigate over writing _and_ we
 130|       |         * fit the entirety of the copy length with one load */
 131|       |        if (len <= sizeof(chunk_t)) {
 132|       |            /* Tempting to add a goto to the block below but hopefully most compilers
 133|       |             * collapse these identical code segments as one label to jump to */
 134|       |            return CHUNKCOPY(out, from, len);
 135|       |        }
 136|       |#endif
 137|       |        /* Here the memmove semantics match perfectly, as when this happens we are
 138|       |         * effectively sliding down the contents of memory by dist bytes */
 139|      0|        memmove(out, from, len);
 140|      0|        return out + len;
 141|      0|    }
 142|       |
 143|  6.19M|    if (dist == 1) {
 144|  5.21M|        memset(out, *from, len);
 145|  5.21M|        return out + len;
 146|  5.21M|    } else if (dist >= sizeof(chunk_t)) {
 147|   327k|        return CHUNKCOPY(out, from, len);
 148|   327k|    }
 149|       |
 150|       |    /* Only AVX2+ as there's 128 bit vectors and 256 bit. We allow for shorter vector
 151|       |     * lengths because they serve to allow more cases to fall into chunkcopy, as the
 152|       |     * distance of the shorter length is still deemed a safe distance. We rewrite this
 153|       |     * here rather than calling the ssse3 variant directly now because doing so required
 154|       |     * dispatching to another function and broke inlining for this function entirely. We
 155|       |     * also can merge an assert and some remainder peeling behavior into the same code blocks,
 156|       |     * making the code a little smaller.  */
 157|   659k|#ifdef HAVE_HALF_CHUNK
 158|   659k|    if (len <= sizeof(halfchunk_t)) {
 159|   356k|        if (dist >= sizeof(halfchunk_t))
 160|  7.39k|            return HALFCHUNKCOPY(out, from, len);
 161|       |
 162|   349k|        if ((dist % 2) != 0 || dist == 6) {
 163|  80.0k|            halfchunk_t halfchunk_load = GET_HALFCHUNK_MAG(from, &chunk_mod, (unsigned)dist);
 164|       |
 165|  80.0k|            if (len == sizeof(halfchunk_t)) {
 166|  2.79k|                storehalfchunk(out, &halfchunk_load);
 167|  2.79k|                len -= sizeof(halfchunk_t);
 168|  2.79k|                out += sizeof(halfchunk_t);
 169|  2.79k|            }
 170|       |
 171|  80.0k|            chunk_load = halfchunk2whole(&halfchunk_load);
 172|  80.0k|            goto rem_bytes;
 173|  80.0k|        }
 174|   349k|    }
 175|   572k|#endif
 176|       |
 177|   572k|#ifdef HAVE_CHUNKMEMSET_2
 178|   572k|    if (dist == 2) {
 179|   243k|        chunkmemset_2(from, &chunk_load);
 180|   243k|    } else
 181|   328k|#endif
 182|   328k|#ifdef HAVE_CHUNKMEMSET_4
 183|   328k|    if (dist == 4) {
 184|  83.6k|        chunkmemset_4(from, &chunk_load);
 185|  83.6k|    } else
 186|   245k|#endif
 187|   245k|#ifdef HAVE_CHUNKMEMSET_8
 188|   245k|    if (dist == 8) {
 189|  12.7k|        chunkmemset_8(from, &chunk_load);
 190|  12.7k|    } else
 191|   232k|#endif
 192|   232k|#ifdef HAVE_CHUNKMEMSET_16
 193|   232k|    if (dist == 16) {
 194|  7.16k|        chunkmemset_16(from, &chunk_load);
 195|  7.16k|    } else
 196|   225k|#endif
 197|   225k|    chunk_load = GET_CHUNK_MAG(from, &chunk_mod, (unsigned)dist);
 198|       |
 199|   572k|    adv_amount = sizeof(chunk_t) - chunk_mod;
 200|       |
 201|  1.11M|    while (len >= (2 * sizeof(chunk_t))) {
 202|   540k|        storechunk(out, &chunk_load);
 203|   540k|        storechunk(out + adv_amount, &chunk_load);
 204|   540k|        out += 2 * adv_amount;
 205|   540k|        len -= 2 * adv_amount;
 206|   540k|    }
 207|       |
 208|       |    /* If we don't have a "dist" length that divides evenly into a vector
 209|       |     * register, we can write the whole vector register but we need only
 210|       |     * advance by the amount of the whole string that fits in our chunk_t.
 211|       |     * If we do divide evenly into the vector length, adv_amount = chunk_t size*/
 212|   646k|    while (len >= sizeof(chunk_t)) {
 213|  74.0k|        storechunk(out, &chunk_load);
 214|  74.0k|        len -= adv_amount;
 215|  74.0k|        out += adv_amount;
 216|  74.0k|    }
 217|       |
 218|   572k|#ifdef HAVE_HALF_CHUNK
 219|   652k|rem_bytes:
 220|   652k|#endif
 221|   652k|    if (len) {
 222|   645k|        memcpy(out, &chunk_load, len);
 223|   645k|        out += len;
 224|   645k|    }
 225|       |
 226|   652k|    return out;
 227|   572k|}
Unexecuted instantiation: chunkset_avx512.c:chunkmemset_avx512
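A note on CHUNKMEMSET above: for distances shorter than a chunk it builds a repeating-pattern magazine, then keeps storing the whole chunk while advancing only by adv_amount = sizeof(chunk_t) - chunk_mod, the largest multiple of dist that fits, so successive stores stay in phase with the pattern. The sketch below reproduces only that core path in isolation: it skips the dist == 1, dist >= sizeof(chunk_t), negative-distance and half-chunk special cases, folds the two store loops into one, and uses a local 32-byte chunk_t stand-in, so it is an illustration rather than the template's code.

/* Standalone sketch of the short-distance CHUNKMEMSET path (illustration only). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t b[32]; } chunk_t;    /* stand-in "register" */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static uint8_t *chunkmemset_sketch(uint8_t *out, unsigned dist, unsigned len) {
    /* Build the magazine from the dist bytes just before out. */
    chunk_t mag;
    uint32_t chunk_rem = 0;
    uint64_t remaining = sizeof(chunk_t);
    uint8_t *p = mag.b;
    while (remaining) {
        uint64_t n = MIN(dist, remaining);
        memcpy(p, out - dist, (size_t)n);
        remaining -= n;
        p += n;
        chunk_rem = (uint32_t)n;
    }
    unsigned adv_amount = (unsigned)sizeof(chunk_t) - chunk_rem;

    /* Whole-chunk stores; each advances by adv_amount, a multiple of dist. */
    while (len >= sizeof(chunk_t)) {
        memcpy(out, mag.b, sizeof(chunk_t));
        out += adv_amount;
        len -= adv_amount;
    }
    if (len) {               /* tail: the magazine still starts with the pattern */
        memcpy(out, mag.b, len);
        out += len;
    }
    return out;
}

int main(void) {
    /* Over-allocated to mirror the "OK to overwrite" guarantee from inflate_fast(). */
    uint8_t buf[256 + sizeof(chunk_t)];
    memset(buf, 0, sizeof(buf));
    const unsigned dist = 7, len = 200;
    for (unsigned i = 0; i < dist; i++) buf[i] = (uint8_t)('A' + i);   /* seed pattern */

    uint8_t *end = chunkmemset_sketch(buf + dist, dist, len);
    assert(end == buf + dist + len);
    for (unsigned i = 0; i < dist + len; i++)                           /* reference check */
        assert(buf[i] == (uint8_t)('A' + i % dist));
    puts("chunkmemset sketch ok");
    return 0;
}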
 228|       |
 229|   148k|Z_INTERNAL uint8_t* CHUNKMEMSET_SAFE(uint8_t *out, uint8_t *from, unsigned len, unsigned left) {
 230|       |#if OPTIMAL_CMP < 32
 231|       |    static const uint32_t align_mask = 7;
 232|       |#elif OPTIMAL_CMP == 32
 233|       |    static const uint32_t align_mask = 3;
 234|       |#endif
 235|       |
 236|   148k|    len = MIN(len, left);
 237|       |
 238|       |#if OPTIMAL_CMP < 64
 239|       |    while (((uintptr_t)out & align_mask) && (len > 0)) {
 240|       |        *out++ = *from++;
 241|       |        --len;
 242|       |        --left;
 243|       |    }
 244|       |#endif
 245|       |
 246|       |#ifndef HAVE_MASKED_READWRITE
 247|   148k|    if (UNLIKELY(left < sizeof(chunk_t))) {
 248|   220k|        while (len > 0) {
 249|   192k|            *out++ = *from++;
 250|   192k|            --len;
 251|   192k|        }
 252|       |
 253|  28.1k|        return out;
 254|  28.1k|    }
 255|   120k|#endif
 256|       |
 257|   120k|    if (len)
 258|   120k|        out = CHUNKMEMSET(out, from, len);
 259|       |
 260|   120k|    return out;
 261|   148k|}
Unexecuted instantiation: chunkmemset_safe_sse2
Unexecuted instantiation: chunkmemset_safe_ssse3
chunkmemset_safe_avx2
Line|  Count|Source
 229|   148k|Z_INTERNAL uint8_t* CHUNKMEMSET_SAFE(uint8_t *out, uint8_t *from, unsigned len, unsigned left) {
 230|       |#if OPTIMAL_CMP < 32
 231|       |    static const uint32_t align_mask = 7;
 232|       |#elif OPTIMAL_CMP == 32
 233|       |    static const uint32_t align_mask = 3;
 234|       |#endif
 235|       |
 236|   148k|    len = MIN(len, left);
 237|       |
 238|       |#if OPTIMAL_CMP < 64
 239|       |    while (((uintptr_t)out & align_mask) && (len > 0)) {
 240|       |        *out++ = *from++;
 241|       |        --len;
 242|       |        --left;
 243|       |    }
 244|       |#endif
 245|       |
 246|   148k|#ifndef HAVE_MASKED_READWRITE
 247|   148k|    if (UNLIKELY(left < sizeof(chunk_t))) {
 248|   220k|        while (len > 0) {
 249|   192k|            *out++ = *from++;
 250|   192k|            --len;
 251|   192k|        }
 252|       |
 253|  28.1k|        return out;
 254|  28.1k|    }
 255|   120k|#endif
 256|       |
 257|   120k|    if (len)
 258|   120k|        out = CHUNKMEMSET(out, from, len);
 259|       |
 260|   120k|    return out;
 261|   148k|}
Unexecuted instantiation: chunkmemset_safe_avx512
 262|       |
 263|       |static inline uint8_t *CHUNKCOPY_SAFE(uint8_t *out, uint8_t *from, uint64_t len, uint8_t *safe)
 264|   267k|{
 265|   267k|    if (out == from)
 266|      0|        return out + len;
 267|       |
 268|   267k|    uint64_t safelen = (safe - out);
 269|   267k|    len = MIN(len, safelen);
 270|       |
 271|       |#ifndef HAVE_MASKED_READWRITE
 272|       |    uint64_t from_dist = (uint64_t)llabs(safe - from);
 273|   267k|    if (UNLIKELY(from_dist < sizeof(chunk_t) || safelen < sizeof(chunk_t))) {
 274|    334|        while (len--) {
 275|    291|            *out++ = *from++;
 276|    291|        }
 277|       |
 278|     43|        return out;
 279|     43|    }
 280|   267k|#endif
 281|       |
 282|   267k|    return CHUNKMEMSET(out, from, (unsigned)len);
 283|   267k|}
Unexecuted instantiation: chunkset_sse2.c:CHUNKCOPY_SAFE
Unexecuted instantiation: chunkset_ssse3.c:CHUNKCOPY_SAFE
chunkset_avx2.c:CHUNKCOPY_SAFE
Line|  Count|Source
 264|   267k|{
 265|   267k|    if (out == from)
 266|      0|        return out + len;
 267|       |
 268|   267k|    uint64_t safelen = (safe - out);
 269|   267k|    len = MIN(len, safelen);
 270|       |
 271|   267k|#ifndef HAVE_MASKED_READWRITE
 272|   267k|    uint64_t from_dist = (uint64_t)llabs(safe - from);
 273|   267k|    if (UNLIKELY(from_dist < sizeof(chunk_t) || safelen < sizeof(chunk_t))) {
 274|    334|        while (len--) {
 275|    291|            *out++ = *from++;
 276|    291|        }
 277|       |
 278|     43|        return out;
 279|     43|    }
 280|   267k|#endif
 281|       |
 282|   267k|    return CHUNKMEMSET(out, from, (unsigned)len);
 283|   267k|}
Unexecuted instantiation: chunkset_avx512.c:CHUNKCOPY_SAFE
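A note on CHUNKMEMSET_SAFE and CHUNKCOPY_SAFE above: both exist because the fast chunk routines are allowed to overshoot by up to a whole chunk_t, so near the end of the output buffer (or when the source sits within a chunk of the safe limit) the length is clamped and the copy falls back to exact byte-by-byte stores. The sketch below shows only that guard in isolation; CHUNK_SZ, fast_copy_overshoot and chunkmemset_safe_sketch are hypothetical stand-ins rather than the template's helpers, and the alignment pre-loop and masked-read/write branches are omitted.

/* Standalone sketch of the "safe" length clamp and byte-copy fallback. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_SZ 32u                         /* stand-in for sizeof(chunk_t) */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Pretend fast path: copies len bytes but, like the chunked routines, may also
 * scribble on up to CHUNK_SZ - 1 bytes past the requested end. */
static uint8_t *fast_copy_overshoot(uint8_t *out, const uint8_t *from, unsigned len) {
    memmove(out, from, len);
    memset(out + len, 0xEE, CHUNK_SZ - 1);   /* simulated overshoot */
    return out + len;
}

static uint8_t *chunkmemset_safe_sketch(uint8_t *out, const uint8_t *from,
                                        unsigned len, unsigned left) {
    len = MIN(len, left);                    /* never write more than remains */
    if (left < CHUNK_SZ) {                   /* too close to the end: stay exact */
        while (len > 0) { *out++ = *from++; --len; }
        return out;
    }
    return fast_copy_overshoot(out, from, len);
}

int main(void) {
    uint8_t window[64];
    memset(window, 'x', sizeof(window));

    /* Only 10 bytes of output space remain: the byte loop runs and nothing
     * past window + 64 is touched. */
    uint8_t *end = chunkmemset_safe_sketch(window + 54, window + 40, 10, 10);
    assert(end == window + 64);

    /* Plenty of room left: the overshooting fast path is acceptable. */
    end = chunkmemset_safe_sketch(window + 8, window, 8, 56);
    assert(end == window + 16);
    puts("safe-path sketch ok");
    return 0;
}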