Coverage Report

Created: 2025-08-28 06:40

/src/zlib-ng/chunkset_tpl.h
Line | Count | Source
  1 |       | /* chunkset_tpl.h -- inline functions to copy small data chunks.
  2 |       |  * For conditions of distribution and use, see copyright notice in zlib.h
  3 |       |  */
  4 |       |
  5 |       | #include "zbuild.h"
  6 |       | #include <stdlib.h>
  7 |       |
  8 |       | /* Returns the chunk size */
  9 | 5.48k | Z_INTERNAL uint32_t CHUNKSIZE(void) {
 10 | 5.48k |     return sizeof(chunk_t);
 11 | 5.48k | }
Unexecuted instantiation: chunksize_sse2
Unexecuted instantiation: chunksize_ssse3
Executed instantiation: chunksize_avx2
Unexecuted instantiation: chunksize_avx512
 12 |       |
 13 |       | /* Behave like memcpy, but assume that it's OK to overwrite at least
 14 |       |    chunk_t bytes of output even if the length is shorter than this,
 15 |       |    that the length is non-zero, and that `from` lags `out` by at least
 16 |       |    sizeof chunk_t bytes (or that they don't overlap at all or simply that
 17 |       |    the distance is less than the length of the copy).
 18 |       |
 19 |       |    Aside from better memory bus utilisation, this means that short copies
 20 |       |    (chunk_t bytes or fewer) will fall straight through the loop
 21 |       |    without iteration, which will hopefully make the branch prediction more
 22 |       |    reliable. */
 23 |       | #ifndef HAVE_CHUNKCOPY
 24 | 9.14M | static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
 25 | 9.14M |     Assert(len > 0, "chunkcopy should never have a length 0");
 26 | 9.14M |     chunk_t chunk;
 27 | 9.14M |     int32_t align = ((len - 1) % sizeof(chunk_t)) + 1;
 28 | 9.14M |     loadchunk(from, &chunk);
 29 | 9.14M |     storechunk(out, &chunk);
 30 | 9.14M |     out += align;
 31 | 9.14M |     from += align;
 32 | 9.14M |     len -= align;
 33 | 10.3M |     while (len > 0) {
 34 | 1.20M |         loadchunk(from, &chunk);
 35 | 1.20M |         storechunk(out, &chunk);
 36 | 1.20M |         out += sizeof(chunk_t);
 37 | 1.20M |         from += sizeof(chunk_t);
 38 | 1.20M |         len -= sizeof(chunk_t);
 39 | 1.20M |     }
 40 | 9.14M |     return out;
 41 | 9.14M | }
Unexecuted instantiation: chunkset_sse2.c:chunkcopy_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkcopy_ssse3
Executed instantiation: chunkset_avx2.c:chunkcopy_avx2
 42 |       | #endif
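
A worked example makes the align computation on line 27 concrete. The following minimal sketch is editorial, not part of the covered source, and assumes a 32-byte chunk_t as in an AVX2 build; it shows that after the single unconditional store, the remaining length is always a whole number of chunks:

#include <stdio.h>

int main(void) {
    const unsigned chunk_size = 32;  /* assumed: sizeof(chunk_t) in an AVX2 build */
    const unsigned lens[] = {1, 31, 32, 33, 37, 96};
    for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
        unsigned len = lens[i];
        unsigned align = ((len - 1) % chunk_size) + 1;  /* always in 1..chunk_size */
        /* after the unconditional first store, len - align bytes remain,
         * and that remainder is always an exact multiple of chunk_size */
        printf("len=%2u -> align=%2u, loop iterations=%u\n",
               len, align, (len - align) / chunk_size);
    }
    return 0;
}

Lengths of chunk_size or fewer give align == len, so such copies fall straight through the while loop, which is the branch-prediction point made in the comment on lines 19-22.
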
 43 |       |
 44 |       | /* Perform short copies until distance can be rewritten as being at least
 45 |       |    sizeof chunk_t.
 46 |       |
 47 |       |    This assumes that it's OK to overwrite at least the first
 48 |       |    2*sizeof(chunk_t) bytes of output even if the copy is shorter than this.
 49 |       |    This assumption holds because inflate_fast() starts every iteration with at
 50 |       |    least 258 bytes of output space available (258 being the maximum length
 51 |       |    output from a single token; see inflate_fast()'s assumptions below). */
 52 |       | #ifndef HAVE_CHUNKUNROLL
 53 |   561 | static inline uint8_t* CHUNKUNROLL(uint8_t *out, unsigned *dist, unsigned *len) {
 54 |   561 |     unsigned char const *from = out - *dist;
 55 |   561 |     chunk_t chunk;
 56 |   640 |     while (*dist < *len && *dist < sizeof(chunk_t)) {
 57 |    79 |         loadchunk(from, &chunk);
 58 |    79 |         storechunk(out, &chunk);
 59 |    79 |         out += *dist;
 60 |    79 |         *len -= *dist;
 61 |    79 |         *dist += *dist;
 62 |    79 |     }
 63 |   561 |     return out;
 64 |   561 | }
Unexecuted instantiation: chunkset_sse2.c:chunkunroll_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkunroll_ssse3
chunkset_avx2.c:chunkunroll_avx2
Executed instantiation: chunkset_avx2.c:chunkunroll_avx2
Unexecuted instantiation: chunkset_avx512.c:chunkunroll_avx512
 65 |       | #endif
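
The loop on lines 56-62 stores a whole chunk each pass but advances by only *dist bytes, so every pass doubles the period of the pattern already written. A minimal standalone sketch of that progression, under assumed values (dist = 3, len = 100, 32-byte chunk_t):

#include <stdio.h>

int main(void) {
    const unsigned chunk_size = 32;  /* stand-in for sizeof(chunk_t) */
    unsigned dist = 3, len = 100;    /* hypothetical back-reference */
    while (dist < len && dist < chunk_size) {
        /* the real code stores a full chunk here but advances only `dist`
         * bytes, doubling the usable distance each time */
        printf("advance %2u bytes: dist %2u -> %2u, len %3u -> %3u\n",
               dist, dist, dist * 2, len, len - dist);
        len -= dist;
        dist += dist;
    }
    /* here dist >= 32 (or >= len), matching CHUNKCOPY's distance assumption */
    return 0;
}

Four passes take the distance 3 -> 6 -> 12 -> 24 -> 48; once it reaches at least sizeof(chunk_t), a full-chunk load can no longer read past the write pointer and CHUNKCOPY can finish the copy.
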
 66 |       |
 67 |       | #ifndef HAVE_CHUNK_MAG
 68 |       | /* Loads a magazine to feed into memory of the pattern */
 69 |     0 | static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
 70 |       |         /* This code takes string of length dist from "from" and repeats
 71 |       |          * it for as many times as can fit in a chunk_t (vector register) */
 72 |     0 |         uint64_t cpy_dist;
 73 |     0 |         uint64_t bytes_remaining = sizeof(chunk_t);
 74 |     0 |         chunk_t chunk_load;
 75 |     0 |         uint8_t *cur_chunk = (uint8_t *)&chunk_load;
 76 |     0 |         while (bytes_remaining) {
 77 |     0 |             cpy_dist = MIN(dist, bytes_remaining);
 78 |     0 |             memcpy(cur_chunk, buf, (size_t)cpy_dist);
 79 |     0 |             bytes_remaining -= cpy_dist;
 80 |     0 |             cur_chunk += cpy_dist;
 81 |       |             /* This allows us to bypass an expensive integer division since we're effectively
 82 |       |              * counting in this loop, anyway */
 83 |     0 |             *chunk_rem = (uint32_t)cpy_dist;
 84 |     0 |         }
 85 |       |
 86 |     0 |         return chunk_load;
 87 |     0 | }
 88 |       | #endif
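
The assignment on line 83 is the subtle part: because the last memcpy of the loop is the truncated one, the value left in *chunk_rem equals sizeof(chunk_t) % dist without any division being issued. A standalone sketch of the same loop under assumed values (dist = 6, 32-byte chunk):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))  /* stand-in for zlib-ng's MIN */

int main(void) {
    const uint8_t pattern[6] = {1, 2, 3, 4, 5, 6};  /* the dist-byte string at `buf` */
    uint8_t chunk[32];                              /* stand-in for chunk_t */
    uint32_t dist = 6, chunk_rem = 0;
    uint64_t bytes_remaining = sizeof(chunk);
    uint8_t *cur_chunk = chunk;
    while (bytes_remaining) {
        uint64_t cpy_dist = MIN(dist, bytes_remaining);
        memcpy(cur_chunk, pattern, (size_t)cpy_dist);
        bytes_remaining -= cpy_dist;
        cur_chunk += cpy_dist;
        chunk_rem = (uint32_t)cpy_dist;  /* the last, possibly truncated, copy */
    }
    /* copies of 6,6,6,6,6,2 bytes fill the chunk; the final 2 equals
     * 32 % 6, obtained purely by counting down */
    printf("chunk_rem = %u (32 %% 6 = %u)\n", chunk_rem, 32u % 6u);
    return 0;
}
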
 89 |       |
 90 |       | #if defined(HAVE_HALF_CHUNK) && !defined(HAVE_HALFCHUNKCOPY)
 91 |   744 | static inline uint8_t* HALFCHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
 92 |   744 |     halfchunk_t chunk;
 93 |   744 |     int32_t align = ((len - 1) % sizeof(halfchunk_t)) + 1;
 94 |   744 |     loadhalfchunk(from, &chunk);
 95 |   744 |     storehalfchunk(out, &chunk);
 96 |   744 |     out += align;
 97 |   744 |     from += align;
 98 |   744 |     len -= align;
 99 |   744 |     while (len > 0) {
100 |     0 |         loadhalfchunk(from, &chunk);
101 |     0 |         storehalfchunk(out, &chunk);
102 |     0 |         out += sizeof(halfchunk_t);
103 |     0 |         from += sizeof(halfchunk_t);
104 |     0 |         len -= sizeof(halfchunk_t);
105 |     0 |     }
106 |   744 |     return out;
107 |   744 | }
108 |       | #endif
109 |       |
110 |       | /* Copy DIST bytes from OUT - DIST into OUT + DIST * k, for 0 <= k < LEN/DIST.
111 |       |    Return OUT + LEN. */
112 | 1.98M | static inline uint8_t* CHUNKMEMSET(uint8_t *out, uint8_t *from, unsigned len) {
113 |       |     /* Debug performance related issues when len < sizeof(uint64_t):
114 |       |        Assert(len >= sizeof(uint64_t), "chunkmemset should be called on larger chunks"); */
115 | 1.98M |     Assert(from != out, "chunkmemset cannot have a distance 0");
116 |       |
117 | 1.98M |     chunk_t chunk_load;
118 | 1.98M |     uint32_t chunk_mod = 0;
119 | 1.98M |     uint32_t adv_amount;
120 | 1.98M |     int64_t sdist = out - from;
121 | 1.98M |     uint64_t dist = llabs(sdist);
122 |       |
123 |       |     /* We are supporting the case for when we are reading bytes from ahead in the buffer.
124 |       |      * We now have to handle this, though it wasn't _quite_ clear if this rare circumstance
125 |       |      * always needed to be handled here or if we're just now seeing it because we are
126 |       |      * dispatching to this function, more */
127 | 1.98M |     if (sdist < 0 && dist < len) {
128 |       | #ifdef HAVE_MASKED_READWRITE
129 |       |         /* We can still handle this case if we can mitigate over writing _and_ we
130 |       |          * fit the entirety of the copy length with one load */
131 |     0 |         if (len <= sizeof(chunk_t)) {
132 |       |             /* Tempting to add a goto to the block below but hopefully most compilers
133 |       |              * collapse these identical code segments as one label to jump to */
134 |     0 |             return CHUNKCOPY(out, from, len);
135 |     0 |         }
136 |     0 | #endif
137 |       |         /* Here the memmove semantics match perfectly, as when this happens we are
138 |       |          * effectively sliding down the contents of memory by dist bytes */
139 |     0 |         memmove(out, from, len);
140 |     0 |         return out + len;
141 |     0 |     }
142 |       |
143 | 1.98M |     if (dist == 1) {
144 | 1.56M |         memset(out, *from, len);
145 | 1.56M |         return out + len;
146 | 1.56M |     } else if (dist >= sizeof(chunk_t)) {
147 |  282k |         return CHUNKCOPY(out, from, len);
148 |  282k |     }
149 |       |
150 |       |     /* Only AVX2+ as there's 128 bit vectors and 256 bit. We allow for shorter vector
151 |       |      * lengths because they serve to allow more cases to fall into chunkcopy, as the
152 |       |      * distance of the shorter length is still deemed a safe distance. We rewrite this
153 |       |      * here rather than calling the ssse3 variant directly now because doing so required
154 |       |      * dispatching to another function and broke inlining for this function entirely. We
155 |       |      * also can merge an assert and some remainder peeling behavior into the same code blocks,
156 |       |      * making the code a little smaller. */
157 |  139k | #ifdef HAVE_HALF_CHUNK
158 |  139k |     if (len <= sizeof(halfchunk_t)) {
159 |  116k |         if (dist >= sizeof(halfchunk_t))
160 |   744 |             return HALFCHUNKCOPY(out, from, len);
161 |       |
162 |  115k |         if ((dist % 2) != 0 || dist == 6) {
163 | 9.97k |             halfchunk_t halfchunk_load = GET_HALFCHUNK_MAG(from, &chunk_mod, (unsigned)dist);
164 |       |
165 | 9.97k |             if (len == sizeof(halfchunk_t)) {
166 |   549 |                 storehalfchunk(out, &halfchunk_load);
167 |   549 |                 len -= sizeof(halfchunk_t);
168 |   549 |                 out += sizeof(halfchunk_t);
169 |   549 |             }
170 |       |
171 | 9.97k |             chunk_load = halfchunk2whole(&halfchunk_load);
172 | 9.97k |             goto rem_bytes;
173 | 9.97k |         }
174 |  115k |     }
175 |  128k | #endif
176 |       |
177 |  128k | #ifdef HAVE_CHUNKMEMSET_2
178 |  128k |     if (dist == 2) {
179 | 82.2k |         chunkmemset_2(from, &chunk_load);
180 | 82.2k |     } else
181 | 46.7k | #endif
182 | 46.7k | #ifdef HAVE_CHUNKMEMSET_4
183 | 46.7k |     if (dist == 4) {
184 | 25.9k |         chunkmemset_4(from, &chunk_load);
185 | 25.9k |     } else
186 | 20.7k | #endif
187 | 20.7k | #ifdef HAVE_CHUNKMEMSET_8
188 | 20.7k |     if (dist == 8) {
189 | 1.47k |         chunkmemset_8(from, &chunk_load);
190 | 1.47k |     } else
191 | 19.3k | #endif
192 | 19.3k | #ifdef HAVE_CHUNKMEMSET_16
193 | 19.3k |     if (dist == 16) {
194 | 4.48k |         chunkmemset_16(from, &chunk_load);
195 | 4.48k |     } else
196 | 14.8k | #endif
197 | 14.8k |     chunk_load = GET_CHUNK_MAG(from, &chunk_mod, (unsigned)dist);
198 |       |
199 |  128k |     adv_amount = sizeof(chunk_t) - chunk_mod;
200 |       |
201 |  168k |     while (len >= (2 * sizeof(chunk_t))) {
202 | 39.2k |         storechunk(out, &chunk_load);
203 | 39.2k |         storechunk(out + adv_amount, &chunk_load);
204 | 39.2k |         out += 2 * adv_amount;
205 | 39.2k |         len -= 2 * adv_amount;
206 | 39.2k |     }
207 |       |
208 |       |     /* If we don't have a "dist" length that divides evenly into a vector
209 |       |      * register, we can write the whole vector register but we need only
210 |       |      * advance by the amount of the whole string that fits in our chunk_t.
211 |       |      * If we do divide evenly into the vector length, adv_amount = chunk_t size */
212 |  138k |     while (len >= sizeof(chunk_t)) {
213 | 9.90k |         storechunk(out, &chunk_load);
214 | 9.90k |         len -= adv_amount;
215 | 9.90k |         out += adv_amount;
216 | 9.90k |     }
217 |       |
218 |  128k | #ifdef HAVE_HALF_CHUNK
219 |  138k | rem_bytes:
220 |  138k | #endif
221 |  138k |     if (len) {
222 |  137k |         memcpy(out, &chunk_load, len);
223 |  137k |         out += len;
224 |  137k |     }
225 |       |
226 |  138k |     return out;
227 |  128k | }
Unexecuted instantiation: chunkset_sse2.c:chunkmemset_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkmemset_ssse3
Executed instantiation: chunkset_avx2.c:chunkmemset_avx2
Unexecuted instantiation: chunkset_avx512.c:chunkmemset_avx512
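
Both store loops (lines 201-206 and 212-216) write a full chunk each time but advance by adv_amount = sizeof(chunk_t) - chunk_mod, the largest multiple of dist that fits in a chunk. A minimal sketch, collapsing the two loops into one and assuming the same hypothetical values as above (dist = 6, 32-byte chunk_t):

#include <stdio.h>

int main(void) {
    const unsigned chunk_size = 32;                /* assumed sizeof(chunk_t) */
    const unsigned dist = 6;
    const unsigned chunk_mod = chunk_size % dist;  /* 2, what GET_CHUNK_MAG leaves in chunk_rem */
    const unsigned adv_amount = chunk_size - chunk_mod;  /* 30: five whole copies of the pattern */
    unsigned len = 100, offset = 0;
    /* every store writes a full 32-byte chunk (five repetitions plus a
     * 2-byte partial one); advancing by 30 lands the next store back on a
     * pattern boundary, so the partial tail is simply overwritten */
    while (len >= chunk_size) {
        printf("storechunk at offset %3u\n", offset);
        offset += adv_amount;
        len -= adv_amount;
    }
    printf("final memcpy of %u tail bytes\n", len);
    return 0;
}

When dist divides the chunk size evenly, chunk_mod is 0 and adv_amount equals the full chunk size, as the comment on lines 208-211 notes.
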
228 |       |
229 | 13.2k | Z_INTERNAL uint8_t* CHUNKMEMSET_SAFE(uint8_t *out, uint8_t *from, unsigned len, unsigned left) {
230 |       | #if OPTIMAL_CMP < 32
231 |       |     static const uint32_t align_mask = 7;
232 |       | #elif OPTIMAL_CMP == 32
233 |       |     static const uint32_t align_mask = 3;
234 |       | #endif
235 |       |
236 | 13.2k |     len = MIN(len, left);
237 |       |
238 |       | #if OPTIMAL_CMP < 64
239 |       |     while (((uintptr_t)out & align_mask) && (len > 0)) {
240 |       |         *out++ = *from++;
241 |       |         --len;
242 |       |         --left;
243 |       |     }
244 |       | #endif
245 |       |
246 | 13.2k | #ifndef HAVE_MASKED_READWRITE
247 | 13.2k |     if (UNLIKELY(left < sizeof(chunk_t))) {
248 | 6.48k |         while (len > 0) {
249 | 5.54k |             *out++ = *from++;
250 | 5.54k |             --len;
251 | 5.54k |         }
252 |       |
253 |   939 |         return out;
254 |   939 |     }
255 | 12.3k | #endif
256 |       |
257 | 12.3k |     if (len)
258 | 12.3k |         out = CHUNKMEMSET(out, from, len);
259 |       |
260 | 12.3k |     return out;
261 | 13.2k | }
Unexecuted instantiation: chunkmemset_safe_sse2
Unexecuted instantiation: chunkmemset_safe_ssse3
Executed instantiation: chunkmemset_safe_avx2
Unexecuted instantiation: chunkmemset_safe_avx512
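
The byte loop on lines 248-251 exists because every chunked path above may write up to a full chunk_t past len; once `left` shrinks below sizeof(chunk_t), there is no slack left for that overshoot. A minimal sketch of the decision, with assumed values:

#include <stdio.h>

int main(void) {
    const unsigned chunk_size = 32;  /* assumed sizeof(chunk_t) */
    unsigned len = 20, left = 25;    /* hypothetical: 25 bytes to the window's end */
    if (len > left)
        len = left;                  /* the MIN(len, left) clamp on line 236 */
    if (left < chunk_size)
        printf("byte loop: copy %u bytes, no room for a %u-byte overshoot\n",
               len, chunk_size);
    else
        printf("dispatch to CHUNKMEMSET: over-writing is harmless\n");
    return 0;
}
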
262 |       |
263 |       | static inline uint8_t *CHUNKCOPY_SAFE(uint8_t *out, uint8_t *from, uint64_t len, uint8_t *safe)
264 |  276k | {
265 |  276k |     if (out == from)
266 |     0 |         return out + len;
267 |       |
268 |  276k |     uint64_t safelen = (safe - out);
269 |  276k |     len = MIN(len, safelen);
270 |       |
271 |  276k | #ifndef HAVE_MASKED_READWRITE
272 |  276k |     uint64_t from_dist = (uint64_t)llabs(safe - from);
273 |  276k |     if (UNLIKELY(from_dist < sizeof(chunk_t) || safelen < sizeof(chunk_t))) {
274 |    87 |         while (len--) {
275 |    80 |             *out++ = *from++;
276 |    80 |         }
277 |       |
278 |     7 |         return out;
279 |     7 |     }
280 |  276k | #endif
281 |       |
282 |  276k |     return CHUNKMEMSET(out, from, (unsigned)len);
283 |  276k | }
Unexecuted instantiation: chunkset_sse2.c:CHUNKCOPY_SAFE
Unexecuted instantiation: chunkset_ssse3.c:CHUNKCOPY_SAFE
Executed instantiation: chunkset_avx2.c:CHUNKCOPY_SAFE
Unexecuted instantiation: chunkset_avx512.c:CHUNKCOPY_SAFE
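
CHUNKCOPY_SAFE applies two guards before dispatching: clamp the length to the distance from `safe` (lines 268-269), and byte-copy when either pointer sits within one chunk of that boundary (lines 272-279). A minimal standalone sketch with assumed values and a 32-byte chunk_t:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))  /* stand-in for zlib-ng's MIN */

int main(void) {
    const uint64_t chunk_size = 32;              /* assumed sizeof(chunk_t) */
    uint8_t buf[256] = {0};
    uint8_t *out = buf + 200, *from = buf + 190; /* hypothetical positions */
    uint8_t *safe = buf + sizeof(buf);           /* first byte we must not touch */
    uint64_t len = 80;

    uint64_t safelen = (uint64_t)(safe - out);   /* 56 bytes of room */
    len = MIN(len, safelen);                     /* clamp: 80 -> 56 */

    uint64_t from_dist = (uint64_t)llabs((long long)(safe - from));
    if (from_dist < chunk_size || safelen < chunk_size)
        printf("byte loop: source or destination too close to `safe`\n");
    else
        printf("chunked path: copy %llu bytes\n", (unsigned long long)len);
    return 0;
}

With these values both distances are at least a full chunk, so the clamped 56-byte copy takes the chunked path; the coverage counts above show the byte loop is the rare case (87 entries against 276k calls).
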