Coverage Report

Created: 2025-08-26 06:46

/src/zlib-ng/chunkset_tpl.h
  Line|  Count|Source
     1|       |/* chunkset_tpl.h -- inline functions to copy small data chunks.
     2|       | * For conditions of distribution and use, see copyright notice in zlib.h
     3|       | */
     4|       |
     5|       |#include "zbuild.h"
     6|       |#include <stdlib.h>
     7|       |
     8|       |/* Returns the chunk size */
     9|  41.8k|Z_INTERNAL uint32_t CHUNKSIZE(void) {
    10|  41.8k|    return sizeof(chunk_t);
    11|  41.8k|}
Unexecuted instantiation: chunksize_sse2
Unexecuted instantiation: chunksize_ssse3
chunksize_avx2 (per-line counts identical to the combined listing above)
Unexecuted instantiation: chunksize_avx512
    12|       |
    13|       |/* Behave like memcpy, but assume that it's OK to overwrite at least
    14|       |   chunk_t bytes of output even if the length is shorter than this,
    15|       |   that the length is non-zero, and that `from` lags `out` by at least
    16|       |   sizeof chunk_t bytes (or that they don't overlap at all or simply that
    17|       |   the distance is less than the length of the copy).
    18|       |
    19|       |   Aside from better memory bus utilisation, this means that short copies
    20|       |   (chunk_t bytes or fewer) will fall straight through the loop
    21|       |   without iteration, which will hopefully make the branch prediction more
    22|       |   reliable. */
    23|       |#ifndef HAVE_CHUNKCOPY
    24|  31.6M|static inline uint8_t* CHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
    25|  31.6M|    Assert(len > 0, "chunkcopy should never have a length 0");
    26|  31.6M|    chunk_t chunk;
    27|  31.6M|    int32_t align = ((len - 1) % sizeof(chunk_t)) + 1;
    28|  31.6M|    loadchunk(from, &chunk);
    29|  31.6M|    storechunk(out, &chunk);
    30|  31.6M|    out += align;
    31|  31.6M|    from += align;
    32|  31.6M|    len -= align;
    33|  40.3M|    while (len > 0) {
    34|  8.67M|        loadchunk(from, &chunk);
    35|  8.67M|        storechunk(out, &chunk);
    36|  8.67M|        out += sizeof(chunk_t);
    37|  8.67M|        from += sizeof(chunk_t);
    38|  8.67M|        len -= sizeof(chunk_t);
    39|  8.67M|    }
    40|  31.6M|    return out;
    41|  31.6M|}
Unexecuted instantiation: chunkset_sse2.c:chunkcopy_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkcopy_ssse3
chunkset_avx2.c:chunkcopy_avx2 (per-line counts identical to the combined listing above)
    42|       |#endif
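The loop above relies on the contract spelled out in the comment at lines 13-22: every store may legally spill a full sizeof(chunk_t) bytes past the requested length, and the `align` remainder is peeled off first so that copies of chunk_t bytes or fewer never enter the loop. A minimal scalar sketch of the same idea, assuming an 8-byte chunk_t and plain memcpy standing in for loadchunk/storechunk (not the dispatched SIMD code itself):

    #include <stdint.h>
    #include <string.h>

    #define CHUNK 8  /* stand-in for sizeof(chunk_t) */

    /* Copies len bytes but each store writes a full CHUNK bytes, exactly like
     * CHUNKCOPY; the caller must guarantee that slack exists and that `from`
     * lags `out` by at least CHUNK bytes (or the buffers do not overlap). */
    static uint8_t *chunkcopy_sketch(uint8_t *out, const uint8_t *from, unsigned len) {
        unsigned align = ((len - 1) % CHUNK) + 1;  /* remainder of 1..CHUNK, handled first */
        memcpy(out, from, CHUNK);                  /* may over-write, never under-copies */
        out += align;
        from += align;
        len -= align;
        while (len > 0) {                          /* only reached when len > CHUNK originally */
            memcpy(out, from, CHUNK);
            out += CHUNK;
            from += CHUNK;
            len -= CHUNK;
        }
        return out;
    }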
    43|       |
    44|       |/* Perform short copies until distance can be rewritten as being at least
    45|       |   sizeof chunk_t.
    46|       |
    47|       |   This assumes that it's OK to overwrite at least the first
    48|       |   2*sizeof(chunk_t) bytes of output even if the copy is shorter than this.
    49|       |   This assumption holds because inflate_fast() starts every iteration with at
    50|       |   least 258 bytes of output space available (258 being the maximum length
    51|       |   output from a single token; see inflate_fast()'s assumptions below). */
    52|       |#ifndef HAVE_CHUNKUNROLL
    53|  1.30k|static inline uint8_t* CHUNKUNROLL(uint8_t *out, unsigned *dist, unsigned *len) {
    54|  1.30k|    unsigned char const *from = out - *dist;
    55|  1.30k|    chunk_t chunk;
    56|  1.87k|    while (*dist < *len && *dist < sizeof(chunk_t)) {
    57|    575|        loadchunk(from, &chunk);
    58|    575|        storechunk(out, &chunk);
    59|    575|        out += *dist;
    60|    575|        *len -= *dist;
    61|    575|        *dist += *dist;
    62|    575|    }
    63|  1.30k|    return out;
    64|  1.30k|}
Unexecuted instantiation: chunkset_sse2.c:chunkunroll_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkunroll_ssse3
chunkset_avx2.c:chunkunroll_avx2 (per-line counts identical to the combined listing above)
Unexecuted instantiation: chunkset_avx512.c:chunkunroll_avx512
    65|       |#endif
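CHUNKUNROLL widens a short repetition distance by copying the existing run over itself: each pass stores a whole chunk, advances by the current distance, and then doubles that distance, so after a few passes the distance reaches sizeof(chunk_t) and the caller can switch to full-chunk copies. A scalar sketch of that doubling, again assuming an 8-byte chunk_t, with an explicit temporary mirroring the load-then-store order (the source and destination regions overlap here, so a single direct memcpy would not be safe):

    #include <stdint.h>
    #include <string.h>

    #define CHUNK 8  /* stand-in for sizeof(chunk_t) */

    /* out - *dist holds the repeating pattern; widen *dist until it is at least
     * CHUNK or covers the remaining length. May over-write up to CHUNK bytes. */
    static uint8_t *chunkunroll_sketch(uint8_t *out, unsigned *dist, unsigned *len) {
        const uint8_t *from = out - *dist;
        uint8_t tmp[CHUNK];
        while (*dist < *len && *dist < CHUNK) {
            memcpy(tmp, from, CHUNK);   /* loadchunk: re-reads bytes written on earlier passes */
            memcpy(out, tmp, CHUNK);    /* storechunk: only the first *dist bytes matter */
            out += *dist;
            *len -= *dist;
            *dist += *dist;             /* the run now repeats over twice the distance */
        }
        return out;
    }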
    66|       |
    67|       |#ifndef HAVE_CHUNK_MAG
    68|       |/* Loads a magazine to feed into memory of the pattern */
    69|      0|static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
    70|       |        /* This code takes string of length dist from "from" and repeats
    71|       |         * it for as many times as can fit in a chunk_t (vector register) */
    72|      0|        uint64_t cpy_dist;
    73|      0|        uint64_t bytes_remaining = sizeof(chunk_t);
    74|      0|        chunk_t chunk_load;
    75|      0|        uint8_t *cur_chunk = (uint8_t *)&chunk_load;
    76|      0|        while (bytes_remaining) {
    77|      0|            cpy_dist = MIN(dist, bytes_remaining);
    78|      0|            memcpy(cur_chunk, buf, (size_t)cpy_dist);
    79|      0|            bytes_remaining -= cpy_dist;
    80|      0|            cur_chunk += cpy_dist;
    81|       |            /* This allows us to bypass an expensive integer division since we're effectively
    82|       |             * counting in this loop, anyway */
    83|      0|            *chunk_rem = (uint32_t)cpy_dist;
    84|      0|        }
    85|       |
    86|      0|        return chunk_load;
    87|      0|}
    88|       |#endif
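GET_CHUNK_MAG tiles the dist-byte pattern across a whole chunk and leaves in chunk_rem the size of the final, possibly partial, repeat; for dist smaller than the chunk that is simply sizeof(chunk_t) % dist (or dist itself when dist divides evenly), obtained without an integer division. A stand-alone scalar illustration, assuming a 32-byte chunk so that dist = 6 yields five full repeats, a 2-byte tail, and chunk_rem = 2:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CHUNK 32  /* stand-in for sizeof(chunk_t), e.g. one AVX2 register */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Fill a CHUNK-byte "magazine" by repeating the first dist bytes of buf and
     * report the size of the last (possibly partial) repeat, as GET_CHUNK_MAG does. */
    static void chunk_mag_sketch(uint8_t out[CHUNK], const uint8_t *buf,
                                 uint32_t *chunk_rem, uint32_t dist) {
        uint64_t bytes_remaining = CHUNK;
        uint8_t *cur = out;
        while (bytes_remaining) {
            uint64_t cpy_dist = MIN((uint64_t)dist, bytes_remaining);
            memcpy(cur, buf, (size_t)cpy_dist);
            bytes_remaining -= cpy_dist;
            cur += cpy_dist;
            *chunk_rem = (uint32_t)cpy_dist;  /* last value written is the tail size */
        }
    }

    int main(void) {
        const uint8_t pattern[] = {'a', 'b', 'c', 'd', 'e', 'f'};
        uint8_t mag[CHUNK];
        uint32_t rem = 0;
        chunk_mag_sketch(mag, pattern, &rem, 6);
        printf("chunk_rem = %u\n", (unsigned)rem);  /* prints 2, i.e. 32 mod 6 */
        return 0;
    }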
    89|       |
    90|       |#if defined(HAVE_HALF_CHUNK) && !defined(HAVE_HALFCHUNKCOPY)
    91|  8.43k|static inline uint8_t* HALFCHUNKCOPY(uint8_t *out, uint8_t const *from, unsigned len) {
    92|  8.43k|    halfchunk_t chunk;
    93|  8.43k|    int32_t align = ((len - 1) % sizeof(halfchunk_t)) + 1;
    94|  8.43k|    loadhalfchunk(from, &chunk);
    95|  8.43k|    storehalfchunk(out, &chunk);
    96|  8.43k|    out += align;
    97|  8.43k|    from += align;
    98|  8.43k|    len -= align;
    99|  8.43k|    while (len > 0) {
   100|      0|        loadhalfchunk(from, &chunk);
   101|      0|        storehalfchunk(out, &chunk);
   102|      0|        out += sizeof(halfchunk_t);
   103|      0|        from += sizeof(halfchunk_t);
   104|      0|        len -= sizeof(halfchunk_t);
   105|      0|    }
   106|  8.43k|    return out;
   107|  8.43k|}
   108|       |#endif
   109|       |
   110|       |/* Copy DIST bytes from OUT - DIST into OUT + DIST * k, for 0 <= k < LEN/DIST.
   111|       |   Return OUT + LEN. */
   112|  5.26M|static inline uint8_t* CHUNKMEMSET(uint8_t *out, uint8_t *from, unsigned len) {
   113|       |    /* Debug performance related issues when len < sizeof(uint64_t):
   114|       |       Assert(len >= sizeof(uint64_t), "chunkmemset should be called on larger chunks"); */
   115|  5.26M|    Assert(from != out, "chunkmemset cannot have a distance 0");
   116|       |
   117|  5.26M|    chunk_t chunk_load;
   118|  5.26M|    uint32_t chunk_mod = 0;
   119|  5.26M|    uint32_t adv_amount;
   120|  5.26M|    int64_t sdist = out - from;
   121|  5.26M|    uint64_t dist = llabs(sdist);
   122|       |
   123|       |    /* We are supporting the case for when we are reading bytes from ahead in the buffer.
   124|       |     * We now have to handle this, though it wasn't _quite_ clear if this rare circumstance
   125|       |     * always needed to be handled here or if we're just now seeing it because we are
   126|       |     * dispatching to this function, more */
   127|  5.26M|    if (sdist < 0 && dist < len) {
   128|       |#ifdef HAVE_MASKED_READWRITE
   129|       |        /* We can still handle this case if we can mitigate over writing _and_ we
   130|       |         * fit the entirety of the copy length with one load */
   131|      0|        if (len <= sizeof(chunk_t)) {
   132|       |            /* Tempting to add a goto to the block below but hopefully most compilers
   133|       |             * collapse these identical code segments as one label to jump to */
   134|      0|            return CHUNKCOPY(out, from, len);
   135|      0|        }
   136|      0|#endif
   137|       |        /* Here the memmove semantics match perfectly, as when this happens we are
   138|       |         * effectively sliding down the contents of memory by dist bytes */
   139|      0|        memmove(out, from, len);
   140|      0|        return out + len;
   141|      0|    }
   142|       |
   143|  5.26M|    if (dist == 1) {
   144|  4.12M|        memset(out, *from, len);
   145|  4.12M|        return out + len;
   146|  4.12M|    } else if (dist >= sizeof(chunk_t)) {
   147|   357k|        return CHUNKCOPY(out, from, len);
   148|   357k|    }
   149|       |
   150|       |    /* Only AVX2+ as there's 128 bit vectors and 256 bit. We allow for shorter vector
   151|       |     * lengths because they serve to allow more cases to fall into chunkcopy, as the
   152|       |     * distance of the shorter length is still deemed a safe distance. We rewrite this
   153|       |     * here rather than calling the ssse3 variant directly now because doing so required
   154|       |     * dispatching to another function and broke inlining for this function entirely. We
   155|       |     * also can merge an assert and some remainder peeling behavior into the same code blocks,
   156|       |     * making the code a little smaller.  */
   157|       |#ifdef HAVE_HALF_CHUNK
   158|   786k|    if (len <= sizeof(halfchunk_t)) {
   159|   392k|        if (dist >= sizeof(halfchunk_t))
   160|  8.43k|            return HALFCHUNKCOPY(out, from, len);
   161|       |
   162|   384k|        if ((dist % 2) != 0 || dist == 6) {
   163|  92.1k|            halfchunk_t halfchunk_load = GET_HALFCHUNK_MAG(from, &chunk_mod, (unsigned)dist);
   164|       |
   165|  92.1k|            if (len == sizeof(halfchunk_t)) {
   166|  2.37k|                storehalfchunk(out, &halfchunk_load);
   167|  2.37k|                len -= sizeof(halfchunk_t);
   168|  2.37k|                out += sizeof(halfchunk_t);
   169|  2.37k|            }
   170|       |
   171|  92.1k|            chunk_load = halfchunk2whole(&halfchunk_load);
   172|  92.1k|            goto rem_bytes;
   173|  92.1k|        }
   174|   384k|    }
   175|   686k|#endif
   176|       |
   177|   686k|#ifdef HAVE_CHUNKMEMSET_2
   178|   686k|    if (dist == 2) {
   179|   287k|        chunkmemset_2(from, &chunk_load);
   180|   287k|    } else
   181|   398k|#endif
   182|   398k|#ifdef HAVE_CHUNKMEMSET_4
   183|   398k|    if (dist == 4) {
   184|  94.2k|        chunkmemset_4(from, &chunk_load);
   185|  94.2k|    } else
   186|   303k|#endif
   187|   303k|#ifdef HAVE_CHUNKMEMSET_8
   188|   303k|    if (dist == 8) {
   189|  12.3k|        chunkmemset_8(from, &chunk_load);
   190|  12.3k|    } else
   191|   291k|#endif
   192|       |#ifdef HAVE_CHUNKMEMSET_16
   193|   291k|    if (dist == 16) {
   194|  7.65k|        chunkmemset_16(from, &chunk_load);
   195|  7.65k|    } else
   196|   283k|#endif
   197|   283k|    chunk_load = GET_CHUNK_MAG(from, &chunk_mod, (unsigned)dist);
   198|       |
   199|      0|    adv_amount = sizeof(chunk_t) - chunk_mod;
   200|       |
   201|  1.28M|    while (len >= (2 * sizeof(chunk_t))) {
   202|   603k|        storechunk(out, &chunk_load);
   203|   603k|        storechunk(out + adv_amount, &chunk_load);
   204|   603k|        out += 2 * adv_amount;
   205|   603k|        len -= 2 * adv_amount;
   206|   603k|    }
   207|       |
   208|       |    /* If we don't have a "dist" length that divides evenly into a vector
   209|       |     * register, we can write the whole vector register but we need only
   210|       |     * advance by the amount of the whole string that fits in our chunk_t.
   211|       |     * If we do divide evenly into the vector length, adv_amount = chunk_t size*/
   212|   771k|    while (len >= sizeof(chunk_t)) {
   213|  85.8k|        storechunk(out, &chunk_load);
   214|  85.8k|        len -= adv_amount;
   215|  85.8k|        out += adv_amount;
   216|  85.8k|    }
   217|       |
   218|       |#ifdef HAVE_HALF_CHUNK
   219|   778k|rem_bytes:
   220|   778k|#endif
   221|   778k|    if (len) {
   222|   770k|        memcpy(out, &chunk_load, len);
   223|   770k|        out += len;
   224|   770k|    }
   225|       |
   226|   778k|    return out;
   227|   686k|}
Unexecuted instantiation: chunkset_sse2.c:chunkmemset_sse2
Unexecuted instantiation: chunkset_ssse3.c:chunkmemset_ssse3
chunkset_avx2.c:chunkmemset_avx2 (per-line counts match the combined listing above, except: lines 131 and 134-136 show no count here; line 157 shows 786k; line 192 shows 291k; line 199 shows 686k; line 218 shows 686k)
Unexecuted instantiation: chunkset_avx512.c:chunkmemset_avx512
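The two store loops near the end of CHUNKMEMSET write a full register each time but advance only by adv_amount = sizeof(chunk_t) - chunk_mod, i.e. by the largest whole number of pattern repeats that fit in the register, which keeps the pattern phase intact without any per-iteration division (for dist = 6 and a 32-byte chunk_t the advance is 30 bytes). A scalar sketch of that tail, assuming the magazine was built as in GET_CHUNK_MAG above and that the caller tolerates writes running up to a chunk past out + len:

    #include <stdint.h>
    #include <string.h>

    #define CHUNK 32  /* stand-in for sizeof(chunk_t) */

    /* chunk_mag holds the dist-byte pattern tiled across CHUNK bytes, with a
     * chunk_rem-byte partial repeat at the end (as produced by GET_CHUNK_MAG). */
    static uint8_t *chunkmemset_tail_sketch(uint8_t *out, const uint8_t chunk_mag[CHUNK],
                                            uint32_t chunk_rem, unsigned len) {
        uint32_t adv_amount = CHUNK - chunk_rem;   /* bytes of whole repeats per store */

        while (len >= 2 * CHUNK) {                 /* two overlapping stores per pass */
            memcpy(out, chunk_mag, CHUNK);
            memcpy(out + adv_amount, chunk_mag, CHUNK);
            out += 2 * adv_amount;
            len -= 2 * adv_amount;
        }
        while (len >= CHUNK) {                     /* single store, partial advance */
            memcpy(out, chunk_mag, CHUNK);
            len -= adv_amount;
            out += adv_amount;
        }
        if (len) {                                 /* exact-length copy of the remainder */
            memcpy(out, chunk_mag, len);
            out += len;
        }
        return out;
    }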
   228|       |
   229|   155k|Z_INTERNAL uint8_t* CHUNKMEMSET_SAFE(uint8_t *out, uint8_t *from, unsigned len, unsigned left) {
   230|       |#if OPTIMAL_CMP < 32
   231|       |    static const uint32_t align_mask = 7;
   232|       |#elif OPTIMAL_CMP == 32
   233|       |    static const uint32_t align_mask = 3;
   234|       |#endif
   235|       |
   236|   155k|    len = MIN(len, left);
   237|       |
   238|       |#if OPTIMAL_CMP < 64
   239|       |    while (((uintptr_t)out & align_mask) && (len > 0)) {
   240|       |        *out++ = *from++;
   241|       |        --len;
   242|       |        --left;
   243|       |    }
   244|       |#endif
   245|       |
   246|       |#ifndef HAVE_MASKED_READWRITE
   247|   155k|    if (UNLIKELY(left < sizeof(chunk_t))) {
   248|   227k|        while (len > 0) {
   249|   198k|            *out++ = *from++;
   250|   198k|            --len;
   251|   198k|        }
   252|       |
   253|  28.9k|        return out;
   254|  28.9k|    }
   255|   127k|#endif
   256|       |
   257|   127k|    if (len)
   258|   127k|        out = CHUNKMEMSET(out, from, len);
   259|       |
   260|   127k|    return out;
   261|   155k|}
Unexecuted instantiation: chunkmemset_safe_sse2
Unexecuted instantiation: chunkmemset_safe_ssse3
chunkmemset_safe_avx2 (per-line counts match the combined listing above, except line 246 shows 155k here)
Unexecuted instantiation: chunkmemset_safe_avx512
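CHUNKMEMSET_SAFE exists because the fast paths above are allowed to over-write: it clamps the request to the bytes actually left in the output buffer and, when masked stores are not available and fewer than sizeof(chunk_t) bytes remain, finishes with a plain byte loop so no store can run past the end of the window. A condensed sketch of that guard, assuming an 8-byte chunk_t and omitting the OPTIMAL_CMP alignment peeling; fast_fill is a hypothetical byte-wise stand-in for the vectorized CHUNKMEMSET:

    #include <stdint.h>

    #define CHUNK 8  /* stand-in for sizeof(chunk_t) */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Byte-wise stand-in for CHUNKMEMSET: a forward copy replicates the pattern
     * correctly when from lags out, and never writes past out + len. */
    static uint8_t *fast_fill(uint8_t *out, uint8_t *from, unsigned len) {
        while (len--)
            *out++ = *from++;
        return out;
    }

    static uint8_t *chunkmemset_safe_sketch(uint8_t *out, uint8_t *from,
                                            unsigned len, unsigned left) {
        len = MIN(len, left);        /* never emit more than the window still holds */

        if (left < CHUNK) {          /* too close to the end for an over-writing store */
            while (len > 0) {
                *out++ = *from++;
                --len;
            }
            return out;
        }
        if (len)
            out = fast_fill(out, from, len);
        return out;
    }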
   262|       |
   263|       |static inline uint8_t *CHUNKCOPY_SAFE(uint8_t *out, uint8_t *from, uint64_t len, uint8_t *safe)
   264|   287k|{
   265|   287k|    if (out == from)
   266|      0|        return out + len;
   267|       |
   268|   287k|    uint64_t safelen = (safe - out);
   269|   287k|    len = MIN(len, safelen);
   270|       |
   271|       |#ifndef HAVE_MASKED_READWRITE
   272|       |    uint64_t from_dist = (uint64_t)llabs(safe - from);
   273|   287k|    if (UNLIKELY(from_dist < sizeof(chunk_t) || safelen < sizeof(chunk_t))) {
   274|    317|        while (len--) {
   275|    284|            *out++ = *from++;
   276|    284|        }
   277|       |
   278|     33|        return out;
   279|     33|    }
   280|   287k|#endif
   281|       |
   282|   287k|    return CHUNKMEMSET(out, from, (unsigned)len);
   283|   287k|}
Unexecuted instantiation: chunkset_sse2.c:CHUNKCOPY_SAFE
Unexecuted instantiation: chunkset_ssse3.c:CHUNKCOPY_SAFE
chunkset_avx2.c:CHUNKCOPY_SAFE (per-line counts match the combined listing above, except lines 271 and 272 show 287k here)
Unexecuted instantiation: chunkset_avx512.c:CHUNKCOPY_SAFE
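CHUNKCOPY_SAFE bounds a copy by an explicit `safe` pointer: the length is clamped to safe - out, and when either the destination or the source sits within sizeof(chunk_t) of `safe` the copy degrades to a byte loop, since the chunked paths may both read and write a full register at a time. A hypothetical caller sketch (emit_match, dist, and window_end are illustrative names, not taken from zlib-ng) showing the intended use for an LZ77 back-reference near the end of the output window:

    /* Hypothetical usage: copy a match of len bytes from dist bytes back,
     * letting CHUNKCOPY_SAFE fall back to byte copies near window_end, the
     * limit pointer the caller would pass as `safe`. */
    static inline uint8_t *emit_match(uint8_t *out, unsigned dist, unsigned len,
                                      uint8_t *window_end) {
        uint8_t *from = out - dist;   /* source lags the output by dist bytes */
        return CHUNKCOPY_SAFE(out, from, (uint64_t)len, window_end);
    }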