Coverage Report

Created: 2024-07-27 06:20

/src/c-blosc2/internal-complibs/zstd-1.5.6/compress/zstd_double_fast.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (c) Meta Platforms, Inc. and affiliates.
3
 * All rights reserved.
4
 *
5
 * This source code is licensed under both the BSD-style license (found in the
6
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7
 * in the COPYING file in the root directory of this source tree).
8
 * You may select, at your option, one of the above-listed licenses.
9
 */
10
11
#include "zstd_compress_internal.h"
12
#include "zstd_double_fast.h"
13
14
#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
15
16
static
17
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
18
void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
19
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
20
0
{
21
0
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
22
0
    U32* const hashLarge = ms->hashTable;
23
0
    U32  const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
24
0
    U32  const mls = cParams->minMatch;
25
0
    U32* const hashSmall = ms->chainTable;
26
0
    U32  const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
27
0
    const BYTE* const base = ms->window.base;
28
0
    const BYTE* ip = base + ms->nextToUpdate;
29
0
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
30
0
    const U32 fastHashFillStep = 3;
31
32
    /* Always insert every fastHashFillStep position into the hash tables.
33
     * Insert the other positions into the large hash table if their entry
34
     * is empty.
35
     */
36
0
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
37
0
        U32 const curr = (U32)(ip - base);
38
0
        U32 i;
39
0
        for (i = 0; i < fastHashFillStep; ++i) {
40
0
            size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls);
41
0
            size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8);
42
0
            if (i == 0) {
43
0
                ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i);
44
0
            }
45
0
            if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {
46
0
                ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i);
47
0
            }
48
            /* Only load extra positions for ZSTD_dtlm_full */
49
0
            if (dtlm == ZSTD_dtlm_fast)
50
0
                break;
51
0
    }   }
52
0
}
53
54
static
55
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
56
void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
57
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
58
0
{
59
0
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
60
0
    U32* const hashLarge = ms->hashTable;
61
0
    U32  const hBitsL = cParams->hashLog;
62
0
    U32  const mls = cParams->minMatch;
63
0
    U32* const hashSmall = ms->chainTable;
64
0
    U32  const hBitsS = cParams->chainLog;
65
0
    const BYTE* const base = ms->window.base;
66
0
    const BYTE* ip = base + ms->nextToUpdate;
67
0
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
68
0
    const U32 fastHashFillStep = 3;
69
70
    /* Always insert every fastHashFillStep position into the hash tables.
71
     * Insert the other positions into the large hash table if their entry
72
     * is empty.
73
     */
74
0
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
75
0
        U32 const curr = (U32)(ip - base);
76
0
        U32 i;
77
0
        for (i = 0; i < fastHashFillStep; ++i) {
78
0
            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
79
0
            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
80
0
            if (i == 0)
81
0
                hashSmall[smHash] = curr + i;
82
0
            if (i == 0 || hashLarge[lgHash] == 0)
83
0
                hashLarge[lgHash] = curr + i;
84
            /* Only load extra positions for ZSTD_dtlm_full */
85
0
            if (dtlm == ZSTD_dtlm_fast)
86
0
                break;
87
0
        }   }
88
0
}
89
90
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
91
                        const void* const end,
92
                        ZSTD_dictTableLoadMethod_e dtlm,
93
                        ZSTD_tableFillPurpose_e tfp)
94
0
{
95
0
    if (tfp == ZSTD_tfp_forCDict) {
96
0
        ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm);
97
0
    } else {
98
0
        ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm);
99
0
    }
100
0
}
101
102
103
FORCE_INLINE_TEMPLATE
104
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
105
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
106
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
107
        void const* src, size_t srcSize, U32 const mls /* template */)
108
18.9k
{
109
18.9k
    ZSTD_compressionParameters const* cParams = &ms->cParams;
110
18.9k
    U32* const hashLong = ms->hashTable;
111
18.9k
    const U32 hBitsL = cParams->hashLog;
112
18.9k
    U32* const hashSmall = ms->chainTable;
113
18.9k
    const U32 hBitsS = cParams->chainLog;
114
18.9k
    const BYTE* const base = ms->window.base;
115
18.9k
    const BYTE* const istart = (const BYTE*)src;
116
18.9k
    const BYTE* anchor = istart;
117
18.9k
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
118
    /* presumes that, if there is a dictionary, it must be using Attach mode */
119
18.9k
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
120
18.9k
    const BYTE* const prefixLowest = base + prefixLowestIndex;
121
18.9k
    const BYTE* const iend = istart + srcSize;
122
18.9k
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
123
18.9k
    U32 offset_1=rep[0], offset_2=rep[1];
124
18.9k
    U32 offsetSaved1 = 0, offsetSaved2 = 0;
125
126
18.9k
    size_t mLength;
127
18.9k
    U32 offset;
128
18.9k
    U32 curr;
129
130
    /* how many positions to search before increasing step size */
131
18.9k
    const size_t kStepIncr = 1 << kSearchStrength;
132
    /* the position at which to increment the step size if no match is found */
133
18.9k
    const BYTE* nextStep;
134
18.9k
    size_t step; /* the current step size */
135
136
18.9k
    size_t hl0; /* the long hash at ip */
137
18.9k
    size_t hl1; /* the long hash at ip1 */
138
139
18.9k
    U32 idxl0; /* the long match index for ip */
140
18.9k
    U32 idxl1; /* the long match index for ip1 */
141
142
18.9k
    const BYTE* matchl0; /* the long match for ip */
143
18.9k
    const BYTE* matchs0; /* the short match for ip */
144
18.9k
    const BYTE* matchl1; /* the long match for ip1 */
145
146
18.9k
    const BYTE* ip = istart; /* the current position */
147
18.9k
    const BYTE* ip1; /* the next position */
148
149
18.9k
    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
150
151
    /* init */
152
18.9k
    ip += ((ip - prefixLowest) == 0);
153
18.9k
    {
154
18.9k
        U32 const current = (U32)(ip - base);
155
18.9k
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
156
18.9k
        U32 const maxRep = current - windowLow;
157
18.9k
        if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
158
18.9k
        if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
159
18.9k
    }
160
161
    /* Outer Loop: one iteration per match found and stored */
162
2.53M
    while (1) {
163
2.53M
        step = 1;
164
2.53M
        nextStep = ip + kStepIncr;
165
2.53M
        ip1 = ip + step;
166
167
2.53M
        if (ip1 > ilimit) {
168
15.4k
            goto _cleanup;
169
15.4k
        }
170
171
2.51M
        hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
172
2.51M
        idxl0 = hashLong[hl0];
173
2.51M
        matchl0 = base + idxl0;
174
175
        /* Inner Loop: one iteration per search / position */
176
19.8M
        do {
177
19.8M
            const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
178
19.8M
            const U32 idxs0 = hashSmall[hs0];
179
19.8M
            curr = (U32)(ip-base);
180
19.8M
            matchs0 = base + idxs0;
181
182
19.8M
            hashLong[hl0] = hashSmall[hs0] = curr;   /* update hash tables */
183
184
            /* check noDict repcode */
185
19.8M
            if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
186
714k
                mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
187
714k
                ip++;
188
714k
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
189
714k
                goto _match_stored;
190
714k
            }
191
192
19.0M
            hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
193
194
19.0M
            if (idxl0 > prefixLowestIndex) {
195
                /* check prefix long match */
196
3.26M
                if (MEM_read64(matchl0) == MEM_read64(ip)) {
197
757k
                    mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
198
757k
                    offset = (U32)(ip-matchl0);
199
778k
                    while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
200
757k
                    goto _match_found;
201
757k
                }
202
3.26M
            }
203
204
18.3M
            idxl1 = hashLong[hl1];
205
18.3M
            matchl1 = base + idxl1;
206
207
18.3M
            if (idxs0 > prefixLowestIndex) {
208
                /* check prefix short match */
209
5.04M
                if (MEM_read32(matchs0) == MEM_read32(ip)) {
210
1.04M
                    goto _search_next_long;
211
1.04M
                }
212
5.04M
            }
213
214
17.2M
            if (ip1 >= nextStep) {
215
40.3k
                PREFETCH_L1(ip1 + 64);
216
40.3k
                PREFETCH_L1(ip1 + 128);
217
40.3k
                step++;
218
40.3k
                nextStep += kStepIncr;
219
40.3k
            }
220
17.2M
            ip = ip1;
221
17.2M
            ip1 += step;
222
223
17.2M
            hl0 = hl1;
224
17.2M
            idxl0 = idxl1;
225
17.2M
            matchl0 = matchl1;
226
    #if defined(__aarch64__)
227
            PREFETCH_L1(ip+256);
228
    #endif
229
17.2M
        } while (ip1 <= ilimit);
230
231
18.9k
_cleanup:
232
        /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
233
         * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
234
18.9k
        offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
235
236
        /* save reps for next block */
237
18.9k
        rep[0] = offset_1 ? offset_1 : offsetSaved1;
238
18.9k
        rep[1] = offset_2 ? offset_2 : offsetSaved2;
239
240
        /* Return the last literals size */
241
18.9k
        return (size_t)(iend - anchor);
242
243
1.04M
_search_next_long:
244
245
        /* check prefix long +1 match */
246
1.04M
        if (idxl1 > prefixLowestIndex) {
247
201k
            if (MEM_read64(matchl1) == MEM_read64(ip1)) {
248
62.3k
                ip = ip1;
249
62.3k
                mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
250
62.3k
                offset = (U32)(ip-matchl1);
251
86.2k
                while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
252
62.3k
                goto _match_found;
253
62.3k
            }
254
201k
        }
255
256
        /* if no long +1 match, explore the short match we found */
257
980k
        mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
258
980k
        offset = (U32)(ip - matchs0);
259
1.02M
        while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */
260
261
        /* fall-through */
262
263
1.80M
_match_found: /* requires ip, offset, mLength */
264
1.80M
        offset_2 = offset_1;
265
1.80M
        offset_1 = offset;
266
267
1.80M
        if (step < 4) {
268
            /* It is unsafe to write this value back to the hashtable when ip1 is
269
             * greater than or equal to the new ip we will have after we're done
270
             * processing this match. Rather than perform that test directly
271
             * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler
272
             * more predictable test. The minmatch even if we take a short match is
273
             * 4 bytes, so as long as step, the distance between ip and ip1
274
             * (initially) is less than 4, we know ip1 < new ip. */
275
1.79M
            hashLong[hl1] = (U32)(ip1 - base);
276
1.79M
        }
277
278
1.80M
        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
279
280
2.51M
_match_stored:
281
        /* match found */
282
2.51M
        ip += mLength;
283
2.51M
        anchor = ip;
284
285
2.51M
        if (ip <= ilimit) {
286
            /* Complementary insertion */
287
            /* done after iLimit test, as candidates could be > iend-8 */
288
2.50M
            {   U32 const indexToInsert = curr+2;
289
2.50M
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
290
2.50M
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
291
2.50M
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
292
2.50M
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
293
2.50M
            }
294
295
            /* check immediate repcode */
296
2.74M
            while ( (ip <= ilimit)
297
2.74M
                 && ( (offset_2>0)
298
2.73M
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
299
                /* store sequence */
300
239k
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
301
239k
                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
302
239k
                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
303
239k
                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
304
239k
                ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
305
239k
                ip += rLength;
306
239k
                anchor = ip;
307
239k
                continue;   /* faster when present ... (?) */
308
239k
            }
309
2.50M
        }
310
2.51M
    }
311
18.9k
}
312
313
314
FORCE_INLINE_TEMPLATE
315
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
316
size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
317
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
318
        void const* src, size_t srcSize,
319
        U32 const mls /* template */)
320
0
{
321
0
    ZSTD_compressionParameters const* cParams = &ms->cParams;
322
0
    U32* const hashLong = ms->hashTable;
323
0
    const U32 hBitsL = cParams->hashLog;
324
0
    U32* const hashSmall = ms->chainTable;
325
0
    const U32 hBitsS = cParams->chainLog;
326
0
    const BYTE* const base = ms->window.base;
327
0
    const BYTE* const istart = (const BYTE*)src;
328
0
    const BYTE* ip = istart;
329
0
    const BYTE* anchor = istart;
330
0
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
331
    /* presumes that, if there is a dictionary, it must be using Attach mode */
332
0
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
333
0
    const BYTE* const prefixLowest = base + prefixLowestIndex;
334
0
    const BYTE* const iend = istart + srcSize;
335
0
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
336
0
    U32 offset_1=rep[0], offset_2=rep[1];
337
338
0
    const ZSTD_matchState_t* const dms = ms->dictMatchState;
339
0
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
340
0
    const U32* const dictHashLong  = dms->hashTable;
341
0
    const U32* const dictHashSmall = dms->chainTable;
342
0
    const U32 dictStartIndex       = dms->window.dictLimit;
343
0
    const BYTE* const dictBase     = dms->window.base;
344
0
    const BYTE* const dictStart    = dictBase + dictStartIndex;
345
0
    const BYTE* const dictEnd      = dms->window.nextSrc;
346
0
    const U32 dictIndexDelta       = prefixLowestIndex - (U32)(dictEnd - dictBase);
347
0
    const U32 dictHBitsL           = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
348
0
    const U32 dictHBitsS           = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
349
0
    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
350
351
0
    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");
352
353
    /* if a dictionary is attached, it must be within window range */
354
0
    assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
355
356
0
    if (ms->prefetchCDictTables) {
357
0
        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
358
0
        size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32);
359
0
        PREFETCH_AREA(dictHashLong, hashTableBytes);
360
0
        PREFETCH_AREA(dictHashSmall, chainTableBytes);
361
0
    }
362
363
    /* init */
364
0
    ip += (dictAndPrefixLength == 0);
365
366
    /* dictMatchState repCode checks don't currently handle repCode == 0
367
     * disabling. */
368
0
    assert(offset_1 <= dictAndPrefixLength);
369
0
    assert(offset_2 <= dictAndPrefixLength);
370
371
    /* Main Search Loop */
372
0
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
373
0
        size_t mLength;
374
0
        U32 offset;
375
0
        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
376
0
        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
377
0
        size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8);
378
0
        size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls);
379
0
        U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS];
380
0
        U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS];
381
0
        int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL);
382
0
        int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS);
383
0
        U32 const curr = (U32)(ip-base);
384
0
        U32 const matchIndexL = hashLong[h2];
385
0
        U32 matchIndexS = hashSmall[h];
386
0
        const BYTE* matchLong = base + matchIndexL;
387
0
        const BYTE* match = base + matchIndexS;
388
0
        const U32 repIndex = curr + 1 - offset_1;
389
0
        const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
390
0
                               dictBase + (repIndex - dictIndexDelta) :
391
0
                               base + repIndex;
392
0
        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */
393
394
        /* check repcode */
395
0
        if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
396
0
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
397
0
            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
398
0
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
399
0
            ip++;
400
0
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
401
0
            goto _match_stored;
402
0
        }
403
404
0
        if (matchIndexL > prefixLowestIndex) {
405
            /* check prefix long match */
406
0
            if (MEM_read64(matchLong) == MEM_read64(ip)) {
407
0
                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
408
0
                offset = (U32)(ip-matchLong);
409
0
                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
410
0
                goto _match_found;
411
0
            }
412
0
        } else if (dictTagsMatchL) {
413
            /* check dictMatchState long match */
414
0
            U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS;
415
0
            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
416
0
            assert(dictMatchL < dictEnd);
417
418
0
            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
419
0
                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
420
0
                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
421
0
                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
422
0
                goto _match_found;
423
0
        }   }
424
425
0
        if (matchIndexS > prefixLowestIndex) {
426
            /* check prefix short match */
427
0
            if (MEM_read32(match) == MEM_read32(ip)) {
428
0
                goto _search_next_long;
429
0
            }
430
0
        } else if (dictTagsMatchS) {
431
            /* check dictMatchState short match */
432
0
            U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS;
433
0
            match = dictBase + dictMatchIndexS;
434
0
            matchIndexS = dictMatchIndexS + dictIndexDelta;
435
436
0
            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
437
0
                goto _search_next_long;
438
0
        }   }
439
440
0
        ip += ((ip-anchor) >> kSearchStrength) + 1;
441
#if defined(__aarch64__)
442
        PREFETCH_L1(ip+256);
443
#endif
444
0
        continue;
445
446
0
_search_next_long:
447
0
        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
448
0
            size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
449
0
            U32 const matchIndexL3 = hashLong[hl3];
450
0
            U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS];
451
0
            int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3);
452
0
            const BYTE* matchL3 = base + matchIndexL3;
453
0
            hashLong[hl3] = curr + 1;
454
455
            /* check prefix long +1 match */
456
0
            if (matchIndexL3 > prefixLowestIndex) {
457
0
                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
458
0
                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
459
0
                    ip++;
460
0
                    offset = (U32)(ip-matchL3);
461
0
                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
462
0
                    goto _match_found;
463
0
                }
464
0
            } else if (dictTagsMatchL3) {
465
                /* check dict long +1 match */
466
0
                U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS;
467
0
                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
468
0
                assert(dictMatchL3 < dictEnd);
469
0
                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
470
0
                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
471
0
                    ip++;
472
0
                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
473
0
                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
474
0
                    goto _match_found;
475
0
        }   }   }
476
477
        /* if no long +1 match, explore the short match we found */
478
0
        if (matchIndexS < prefixLowestIndex) {
479
0
            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
480
0
            offset = (U32)(curr - matchIndexS);
481
0
            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
482
0
        } else {
483
0
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
484
0
            offset = (U32)(ip - match);
485
0
            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
486
0
        }
487
488
0
_match_found:
489
0
        offset_2 = offset_1;
490
0
        offset_1 = offset;
491
492
0
        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
493
494
0
_match_stored:
495
        /* match found */
496
0
        ip += mLength;
497
0
        anchor = ip;
498
499
0
        if (ip <= ilimit) {
500
            /* Complementary insertion */
501
            /* done after iLimit test, as candidates could be > iend-8 */
502
0
            {   U32 const indexToInsert = curr+2;
503
0
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
504
0
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
505
0
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
506
0
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
507
0
            }
508
509
            /* check immediate repcode */
510
0
            while (ip <= ilimit) {
511
0
                U32 const current2 = (U32)(ip-base);
512
0
                U32 const repIndex2 = current2 - offset_2;
513
0
                const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
514
0
                        dictBase + repIndex2 - dictIndexDelta :
515
0
                        base + repIndex2;
516
0
                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
517
0
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
518
0
                    const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
519
0
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
520
0
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
521
0
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
522
0
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
523
0
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
524
0
                    ip += repLength2;
525
0
                    anchor = ip;
526
0
                    continue;
527
0
                }
528
0
                break;
529
0
            }
530
0
        }
531
0
    }   /* while (ip < ilimit) */
532
533
    /* save reps for next block */
534
0
    rep[0] = offset_1;
535
0
    rep[1] = offset_2;
536
537
    /* Return the last literals size */
538
0
    return (size_t)(iend - anchor);
539
0
}
540
541
/* ZSTD_GEN_DFAST_FN():
 * Generates a static wrapper `ZSTD_compressBlock_doubleFast_<dictMode>_<mls>`
 * that forwards to `ZSTD_compressBlock_doubleFast_<dictMode>_generic` with a
 * compile-time-constant `mls`, so the generic template gets specialized (and
 * the mls comparisons optimized away) once per (dictMode, mls) pair. */
#define ZSTD_GEN_DFAST_FN(dictMode, mls)                                                                 \
    static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls(                                      \
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
            void const* src, size_t srcSize)                                                             \
    {                                                                                                    \
        return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
    }
548
549
/* Instantiate the specialized compressors for minMatch (mls) in [4,7],
 * for the in-prefix (noDict) and attached-dictionary (dictMatchState) modes. */
ZSTD_GEN_DFAST_FN(noDict, 4)
ZSTD_GEN_DFAST_FN(noDict, 5)
ZSTD_GEN_DFAST_FN(noDict, 6)
ZSTD_GEN_DFAST_FN(noDict, 7)

ZSTD_GEN_DFAST_FN(dictMatchState, 4)
ZSTD_GEN_DFAST_FN(dictMatchState, 5)
ZSTD_GEN_DFAST_FN(dictMatchState, 6)
ZSTD_GEN_DFAST_FN(dictMatchState, 7)
558
559
560
size_t ZSTD_compressBlock_doubleFast(
561
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
562
        void const* src, size_t srcSize)
563
18.9k
{
564
18.9k
    const U32 mls = ms->cParams.minMatch;
565
18.9k
    switch(mls)
566
18.9k
    {
567
0
    default: /* includes case 3 */
568
18.9k
    case 4 :
569
18.9k
        return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
570
0
    case 5 :
571
0
        return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
572
0
    case 6 :
573
0
        return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
574
0
    case 7 :
575
0
        return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
576
18.9k
    }
577
18.9k
}
578
579
580
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
581
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
582
        void const* src, size_t srcSize)
583
0
{
584
0
    const U32 mls = ms->cParams.minMatch;
585
0
    switch(mls)
586
0
    {
587
0
    default: /* includes case 3 */
588
0
    case 4 :
589
0
        return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
590
0
    case 5 :
591
0
        return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
592
0
    case 6 :
593
0
        return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
594
0
    case 7 :
595
0
        return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
596
0
    }
597
0
}
598
599
600
/* ZSTD_compressBlock_doubleFast_extDict_generic():
 * Double-fast block compressor template for extDict mode: matches may live
 * either in the current prefix ([prefixStart, iend)) or in the older,
 * separately-mapped extDict segment ([dictStart, dictEnd)). Every candidate
 * index is therefore remapped through either `base` or `dictBase` depending
 * on whether it falls below prefixStartIndex.
 * Maintains two hash tables: hashLong (8-byte hashes) and hashSmall
 * (mls-byte hashes). `mls` is a template parameter; callers instantiate via
 * ZSTD_GEN_DFAST_FN with a compile-time constant.
 * Writes the found sequences into `seqStore`, updates rep[0]/rep[1] for the
 * next block, and returns the size of the last literals run. */
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;            /* start of the not-yet-emitted literals */
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;    /* leave 8 bytes of headroom for MEM_read64 */
    const BYTE* const base = ms->window.base;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    U32 offset_1=rep[0], offset_2=rep[1];

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);

    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        /* small (mls-byte) hash candidate, remapped into dict or prefix segment */
        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
        const U32 matchIndex = hashSmall[hSmall];
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;

        /* long (8-byte) hash candidate, remapped the same way */
        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
        const U32 matchLongIndex = hashLong[hLong];
        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
        const BYTE* matchLong = matchLongBase + matchLongIndex;

        const U32 curr = (U32)(ip-base);
        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        size_t mLength;
        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */

        /* 1) repcode check at ip+1, highest priority */
        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
            & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
        } else {
            /* 2) long match at ip */
            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
                offset = curr - matchLongIndex;
                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

            /* 3) short match at ip; then try to upgrade to a long match at ip+1 */
            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndex3 = hashLong[h3];
                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
                const BYTE* match3 = match3Base + matchIndex3;
                U32 offset;
                hashLong[h3] = curr + 1;
                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
                    /* long match at ip+1 found: prefer it over the short match */
                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
                    ip++;
                    offset = curr+1 - matchIndex3;
                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
                } else {
                    /* keep the short match found at ip */
                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                    offset = curr - matchIndex;
                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                }
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

            } else {
                /* no match: skip ahead, accelerating with the distance since last match */
                ip += ((ip-anchor) >> kSearchStrength) + 1;
                continue;
        }   }

        /* move to next sequence start */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
                    & (offset_2 <= current2 - dictStartIndex))
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
745
746
/* Instantiate the specialized extDict compressors for minMatch (mls) in [4,7]. */
ZSTD_GEN_DFAST_FN(extDict, 4)
ZSTD_GEN_DFAST_FN(extDict, 5)
ZSTD_GEN_DFAST_FN(extDict, 6)
ZSTD_GEN_DFAST_FN(extDict, 7)
750
751
size_t ZSTD_compressBlock_doubleFast_extDict(
752
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
753
        void const* src, size_t srcSize)
754
0
{
755
0
    U32 const mls = ms->cParams.minMatch;
756
0
    switch(mls)
757
0
    {
758
0
    default: /* includes case 3 */
759
0
    case 4 :
760
0
        return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
761
0
    case 5 :
762
0
        return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
763
0
    case 6 :
764
0
        return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
765
0
    case 7 :
766
0
        return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
767
0
    }
768
0
}
769
770
#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */