Coverage Report

Created: 2023-12-08 06:32

/src/c-blosc2/internal-complibs/zstd-1.5.5/compress/zstd_fast.c
 Line|  Count|Source
    1|       |/*
    2|       | * Copyright (c) Meta Platforms, Inc. and affiliates.
    3|       | * All rights reserved.
    4|       | *
    5|       | * This source code is licensed under both the BSD-style license (found in the
    6|       | * LICENSE file in the root directory of this source tree) and the GPLv2 (found
    7|       | * in the COPYING file in the root directory of this source tree).
    8|       | * You may select, at your option, one of the above-listed licenses.
    9|       | */
   10|       |
   11|       |#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
   12|       |#include "zstd_fast.h"
   13|       |
   14|       |static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
   15|       |                        const void* const end,
   16|       |                        ZSTD_dictTableLoadMethod_e dtlm)
   17|      0|{
   18|      0|    const ZSTD_compressionParameters* const cParams = &ms->cParams;
   19|      0|    U32* const hashTable = ms->hashTable;
   20|      0|    U32  const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
   21|      0|    U32  const mls = cParams->minMatch;
   22|      0|    const BYTE* const base = ms->window.base;
   23|      0|    const BYTE* ip = base + ms->nextToUpdate;
   24|      0|    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
   25|      0|    const U32 fastHashFillStep = 3;
   26|       |
   27|       |    /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
   28|       |     * Feel free to remove this assert if there's a good reason! */
   29|      0|    assert(dtlm == ZSTD_dtlm_full);
   30|       |
   31|       |    /* Always insert every fastHashFillStep position into the hash table.
   32|       |     * Insert the other positions if their hash entry is empty.
   33|       |     */
   34|      0|    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
   35|      0|        U32 const curr = (U32)(ip - base);
   36|      0|        {   size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
   37|      0|            ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr);   }
   38|       |
   39|      0|        if (dtlm == ZSTD_dtlm_fast) continue;
   40|       |        /* Only load extra positions for ZSTD_dtlm_full */
   41|      0|        {   U32 p;
   42|      0|            for (p = 1; p < fastHashFillStep; ++p) {
   43|      0|                size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
   44|      0|                if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {  /* not yet filled */
   45|      0|                    ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
   46|      0|                }   }   }   }
   47|      0|}
   48|       |
   49|       |static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
   50|       |                        const void* const end,
   51|       |                        ZSTD_dictTableLoadMethod_e dtlm)
   52|      0|{
   53|      0|    const ZSTD_compressionParameters* const cParams = &ms->cParams;
   54|      0|    U32* const hashTable = ms->hashTable;
   55|      0|    U32  const hBits = cParams->hashLog;
   56|      0|    U32  const mls = cParams->minMatch;
   57|      0|    const BYTE* const base = ms->window.base;
   58|      0|    const BYTE* ip = base + ms->nextToUpdate;
   59|      0|    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
   60|      0|    const U32 fastHashFillStep = 3;
   61|       |
   62|       |    /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
   63|       |     * Feel free to remove this assert if there's a good reason! */
   64|      0|    assert(dtlm == ZSTD_dtlm_fast);
   65|       |
   66|       |    /* Always insert every fastHashFillStep position into the hash table.
   67|       |     * Insert the other positions if their hash entry is empty.
   68|       |     */
   69|      0|    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
   70|      0|        U32 const curr = (U32)(ip - base);
   71|      0|        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
   72|      0|        hashTable[hash0] = curr;
   73|      0|        if (dtlm == ZSTD_dtlm_fast) continue;
   74|       |        /* Only load extra positions for ZSTD_dtlm_full */
   75|      0|        {   U32 p;
   76|      0|            for (p = 1; p < fastHashFillStep; ++p) {
   77|      0|                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
   78|      0|                if (hashTable[hash] == 0) {  /* not yet filled */
   79|      0|                    hashTable[hash] = curr + p;
   80|      0|    }   }   }   }
   81|      0|}
   82|       |
   83|       |void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
   84|       |                        const void* const end,
   85|       |                        ZSTD_dictTableLoadMethod_e dtlm,
   86|       |                        ZSTD_tableFillPurpose_e tfp)
   87|      0|{
   88|      0|    if (tfp == ZSTD_tfp_forCDict) {
   89|      0|        ZSTD_fillHashTableForCDict(ms, end, dtlm);
   90|      0|    } else {
   91|      0|        ZSTD_fillHashTableForCCtx(ms, end, dtlm);
   92|      0|    }
   93|      0|}
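
Note: the CDict fill path above hashes with hBits = hashLog + ZSTD_SHORT_CACHE_TAG_BITS and stores packed entries via ZSTD_writeTaggedIndex, while the CCtx path stores bare indices. A minimal sketch of the packing implied by the `hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS` reads and the ZSTD_comparePackedTags() check later in this file (illustrative only; the `demo_` names are hypothetical, not the library's API):

    /* Pack a match index with a short tag taken from the low hash bits. */
    #define DEMO_TAG_BITS ZSTD_SHORT_CACHE_TAG_BITS
    #define DEMO_TAG_MASK ((1u << DEMO_TAG_BITS) - 1)

    static U32 demo_packTaggedIndex(size_t hashAndTag, U32 index)
    {
        /* index lives in the high bits of the table entry; tag in the low bits */
        return (index << DEMO_TAG_BITS) | (U32)(hashAndTag & DEMO_TAG_MASK);
    }

    static int demo_tagsMatch(U32 packedEntry, size_t hashAndTag)
    {
        /* cheap pre-filter: compare tags before touching dictionary bytes */
        return (packedEntry & DEMO_TAG_MASK) == (U32)(hashAndTag & DEMO_TAG_MASK);
    }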
   94|       |
   95|       |
   96|       |/**
   97|       | * If you squint hard enough (and ignore repcodes), the search operation at any
   98|       | * given position is broken into 4 stages:
   99|       | *
  100|       | * 1. Hash   (map position to hash value via input read)
  101|       | * 2. Lookup (map hash val to index via hashtable read)
  102|       | * 3. Load   (map index to value at that position via input read)
  103|       | * 4. Compare
  104|       | *
  105|       | * Each of these steps involves a memory read at an address which is computed
  106|       | * from the previous step. This means these steps must be sequenced and their
  107|       | * latencies are cumulative.
  108|       | *
  109|       | * Rather than do 1->2->3->4 sequentially for a single position before moving
  110|       | * onto the next, this implementation interleaves these operations across the
  111|       | * next few positions:
  112|       | *
  113|       | * R = Repcode Read & Compare
  114|       | * H = Hash
  115|       | * T = Table Lookup
  116|       | * M = Match Read & Compare
  117|       | *
  118|       | * Pos | Time -->
  119|       | * ----+-------------------
  120|       | * N   | ... M
  121|       | * N+1 | ...   TM
  122|       | * N+2 |    R H   T M
  123|       | * N+3 |         H    TM
  124|       | * N+4 |           R H   T M
  125|       | * N+5 |                H   ...
  126|       | * N+6 |                  R ...
  127|       | *
  128|       | * This is very much analogous to the pipelining of execution in a CPU. And just
  129|       | * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
  130|       | * branch).
  131|       | *
  132|       | * When this happens, we throw away our current state, and do the following prep
  133|       | * to re-enter the loop:
  134|       | *
  135|       | * Pos | Time -->
  136|       | * ----+-------------------
  137|       | * N   | H T
  138|       | * N+1 |  H
  139|       | *
  140|       | * This is also the work we do at the beginning to enter the loop initially.
  141|       | */
  142|       |FORCE_INLINE_TEMPLATE size_t
  143|       |ZSTD_compressBlock_fast_noDict_generic(
  144|       |        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  145|       |        void const* src, size_t srcSize,
  146|       |        U32 const mls, U32 const hasStep)
  147|  11.2k|{
  148|  11.2k|    const ZSTD_compressionParameters* const cParams = &ms->cParams;
  149|  11.2k|    U32* const hashTable = ms->hashTable;
  150|  11.2k|    U32 const hlog = cParams->hashLog;
  151|       |    /* support stepSize of 0 */
  152|  11.2k|    size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
  153|  11.2k|    const BYTE* const base = ms->window.base;
  154|  11.2k|    const BYTE* const istart = (const BYTE*)src;
  155|  11.2k|    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
  156|  11.2k|    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
  157|  11.2k|    const BYTE* const prefixStart = base + prefixStartIndex;
  158|  11.2k|    const BYTE* const iend = istart + srcSize;
  159|  11.2k|    const BYTE* const ilimit = iend - HASH_READ_SIZE;
  160|       |
  161|  11.2k|    const BYTE* anchor = istart;
  162|  11.2k|    const BYTE* ip0 = istart;
  163|  11.2k|    const BYTE* ip1;
  164|  11.2k|    const BYTE* ip2;
  165|  11.2k|    const BYTE* ip3;
  166|  11.2k|    U32 current0;
  167|       |
  168|  11.2k|    U32 rep_offset1 = rep[0];
  169|  11.2k|    U32 rep_offset2 = rep[1];
  170|  11.2k|    U32 offsetSaved1 = 0, offsetSaved2 = 0;
  171|       |
  172|  11.2k|    size_t hash0; /* hash for ip0 */
  173|  11.2k|    size_t hash1; /* hash for ip1 */
  174|  11.2k|    U32 idx; /* match idx for ip0 */
  175|  11.2k|    U32 mval; /* src value at match idx */
  176|       |
  177|  11.2k|    U32 offcode;
  178|  11.2k|    const BYTE* match0;
  179|  11.2k|    size_t mLength;
  180|       |
  181|       |    /* ip0 and ip1 are always adjacent. The targetLength skipping and
  182|       |     * uncompressibility acceleration is applied to every other position,
  183|       |     * matching the behavior of #1562. step therefore represents the gap
  184|       |     * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
  185|  11.2k|    size_t step;
  186|  11.2k|    const BYTE* nextStep;
  187|  11.2k|    const size_t kStepIncr = (1 << (kSearchStrength - 1));
  188|       |
  189|  11.2k|    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
  190|  11.2k|    ip0 += (ip0 == prefixStart);
  191|  11.2k|    {   U32 const curr = (U32)(ip0 - base);
  192|  11.2k|        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
  193|  11.2k|        U32 const maxRep = curr - windowLow;
  194|  11.2k|        if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
  195|  11.2k|        if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
  196|  11.2k|    }
  197|       |
  198|       |    /* start each op */
  199|   955k|_start: /* Requires: ip0 */
  200|       |
  201|   955k|    step = stepSize;
  202|   955k|    nextStep = ip0 + kStepIncr;
  203|       |
  204|       |    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
  205|   955k|    ip1 = ip0 + 1;
  206|   955k|    ip2 = ip0 + step;
  207|   955k|    ip3 = ip2 + 1;
  208|       |
  209|   955k|    if (ip3 >= ilimit) {
  210|  8.70k|        goto _cleanup;
  211|  8.70k|    }
  212|       |
  213|   946k|    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
  214|   946k|    hash1 = ZSTD_hashPtr(ip1, hlog, mls);
  215|       |
  216|   946k|    idx = hashTable[hash0];
  217|       |
  218|  5.45M|    do {
  219|       |        /* load repcode match for ip[2]*/
  220|  5.45M|        const U32 rval = MEM_read32(ip2 - rep_offset1);
  221|       |
  222|       |        /* write back hash table entry */
  223|  5.45M|        current0 = (U32)(ip0 - base);
  224|  5.45M|        hashTable[hash0] = current0;
  225|       |
  226|       |        /* check repcode at ip[2] */
  227|  5.45M|        if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
  228|   413k|            ip0 = ip2;
  229|   413k|            match0 = ip0 - rep_offset1;
  230|   413k|            mLength = ip0[-1] == match0[-1];
  231|   413k|            ip0 -= mLength;
  232|   413k|            match0 -= mLength;
  233|   413k|            offcode = REPCODE1_TO_OFFBASE;
  234|   413k|            mLength += 4;
  235|       |
  236|       |            /* First write next hash table entry; we've already calculated it.
  237|       |             * This write is known to be safe because the ip1 is before the
  238|       |             * repcode (ip2). */
  239|   413k|            hashTable[hash1] = (U32)(ip1 - base);
  240|       |
  241|   413k|            goto _match;
  242|   413k|        }
  243|       |
  244|       |        /* load match for ip[0] */
  245|  5.04M|        if (idx >= prefixStartIndex) {
  246|   997k|            mval = MEM_read32(base + idx);
  247|  4.04M|        } else {
  248|  4.04M|            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
  249|  4.04M|        }
  250|       |
  251|       |        /* check match at ip[0] */
  252|  5.04M|        if (MEM_read32(ip0) == mval) {
  253|       |            /* found a match! */
  254|       |
  255|       |            /* First write next hash table entry; we've already calculated it.
  256|       |             * This write is known to be safe because the ip1 == ip0 + 1, so
  257|       |             * we know we will resume searching after ip1 */
  258|   393k|            hashTable[hash1] = (U32)(ip1 - base);
  259|       |
  260|   393k|            goto _offset;
  261|   393k|        }
  262|       |
  263|       |        /* lookup ip[1] */
  264|  4.64M|        idx = hashTable[hash1];
  265|       |
  266|       |        /* hash ip[2] */
  267|  4.64M|        hash0 = hash1;
  268|  4.64M|        hash1 = ZSTD_hashPtr(ip2, hlog, mls);
  269|       |
  270|       |        /* advance to next positions */
  271|  4.64M|        ip0 = ip1;
  272|  4.64M|        ip1 = ip2;
  273|  4.64M|        ip2 = ip3;
  274|       |
  275|       |        /* write back hash table entry */
  276|  4.64M|        current0 = (U32)(ip0 - base);
  277|  4.64M|        hashTable[hash0] = current0;
  278|       |
  279|       |        /* load match for ip[0] */
  280|  4.64M|        if (idx >= prefixStartIndex) {
  281|   727k|            mval = MEM_read32(base + idx);
  282|  3.91M|        } else {
  283|  3.91M|            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
  284|  3.91M|        }
  285|       |
  286|       |        /* check match at ip[0] */
  287|  4.64M|        if (MEM_read32(ip0) == mval) {
  288|       |            /* found a match! */
  289|       |
  290|       |            /* first write next hash table entry; we've already calculated it */
  291|   136k|            if (step <= 4) {
  292|       |                /* We need to avoid writing an index into the hash table >= the
  293|       |                 * position at which we will pick up our searching after we've
  294|       |                 * taken this match.
  295|       |                 *
  296|       |                 * The minimum possible match has length 4, so the earliest ip0
  297|       |                 * can be after we take this match will be the current ip0 + 4.
  298|       |                 * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
  299|       |                 * write this position.
  300|       |                 */
  301|   135k|                hashTable[hash1] = (U32)(ip1 - base);
  302|   135k|            }
  303|       |
  304|   136k|            goto _offset;
  305|   136k|        }
  306|       |
  307|       |        /* lookup ip[1] */
  308|  4.51M|        idx = hashTable[hash1];
  309|       |
  310|       |        /* hash ip[2] */
  311|  4.51M|        hash0 = hash1;
  312|  4.51M|        hash1 = ZSTD_hashPtr(ip2, hlog, mls);
  313|       |
  314|       |        /* advance to next positions */
  315|  4.51M|        ip0 = ip1;
  316|  4.51M|        ip1 = ip2;
  317|  4.51M|        ip2 = ip0 + step;
  318|  4.51M|        ip3 = ip1 + step;
  319|       |
  320|       |        /* calculate step */
  321|  4.51M|        if (ip2 >= nextStep) {
  322|  79.8k|            step++;
  323|  79.8k|            PREFETCH_L1(ip1 + 64);
  324|  79.8k|            PREFETCH_L1(ip1 + 128);
  325|  79.8k|            nextStep += kStepIncr;
  326|  79.8k|        }
  327|  4.51M|    } while (ip3 < ilimit);
  328|       |
  329|  11.2k|_cleanup:
  330|       |    /* Note that there are probably still a couple positions we could search.
  331|       |     * However, it seems to be a meaningful performance hit to try to search
  332|       |     * them. So let's not. */
  333|       |
  334|       |    /* When the repcodes are outside of the prefix, we set them to zero before the loop.
  335|       |     * When the offsets are still zero, we need to restore them after the block to have a correct
  336|       |     * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
  337|       |     * offsets were invalid. We need to figure out which offset to refill with.
  338|       |     *     - If both offsets are zero they are in the same order.
  339|       |     *     - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
  340|       |     *     - If only one is zero, we need to decide which offset to restore.
  341|       |     *         - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
  342|       |     *         - It is impossible for rep_offset2 to be non-zero.
  343|       |     *
  344|       |     * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
  345|       |     * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
  346|       |     */
  347|  11.2k|    offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
  348|       |
  349|       |    /* save reps for next block */
  350|  11.2k|    rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
  351|  11.2k|    rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;
  352|       |
  353|       |    /* Return the last literals size */
  354|  11.2k|    return (size_t)(iend - anchor);
  355|       |
  356|   530k|_offset: /* Requires: ip0, idx */
  357|       |
  358|       |    /* Compute the offset code. */
  359|   530k|    match0 = base + idx;
  360|   530k|    rep_offset2 = rep_offset1;
  361|   530k|    rep_offset1 = (U32)(ip0-match0);
  362|   530k|    offcode = OFFSET_TO_OFFBASE(rep_offset1);
  363|   530k|    mLength = 4;
  364|       |
  365|       |    /* Count the backwards match length. */
  366|   562k|    while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
  367|  32.1k|        ip0--;
  368|  32.1k|        match0--;
  369|  32.1k|        mLength++;
  370|  32.1k|    }
  371|       |
  372|   944k|_match: /* Requires: ip0, match0, offcode */
  373|       |
  374|       |    /* Count the forward length. */
  375|   944k|    mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
  376|       |
  377|   944k|    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
  378|       |
  379|   944k|    ip0 += mLength;
  380|   944k|    anchor = ip0;
  381|       |
  382|       |    /* Fill table and check for immediate repcode. */
  383|   944k|    if (ip0 <= ilimit) {
  384|       |        /* Fill Table */
  385|   939k|        assert(base+current0+2 > istart);  /* check base overflow */
  386|   939k|        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
  387|   939k|        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
  388|       |
  389|   939k|        if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
  390|   874k|            while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
  391|       |                /* store sequence */
  392|   115k|                size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
  393|   115k|                { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
  394|   115k|                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
  395|   115k|                ip0 += rLength;
  396|   115k|                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
  397|   115k|                anchor = ip0;
  398|   115k|                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
  399|   115k|    }   }   }
  400|       |
  401|   944k|    goto _start;
  402|   530k|}
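
Note: the loop above implements the software-pipelined search described in the block comment at lines 96-141. For contrast, a naive, fully sequential version of the same 4-stage search at a single position would look roughly like this (a hedged sketch only; bounds checks are omitted and the `demo_` name is hypothetical, not the library's API):

    static U32 demo_searchOnePosition(const BYTE* ip, const BYTE* base,
                                      const U32* hashTable, U32 hlog, U32 mls)
    {
        size_t const h    = ZSTD_hashPtr(ip, hlog, mls);  /* 1. Hash    */
        U32    const idx  = hashTable[h];                 /* 2. Lookup  */
        U32    const mval = MEM_read32(base + idx);       /* 3. Load    */
        return (MEM_read32(ip) == mval) ? idx : 0;        /* 4. Compare; 0 = "no match" for illustration */
    }

Each stage reads memory at an address produced by the previous stage, so done this way the load latencies add up at every position; the real loop overlaps these stages across ip0..ip3 instead.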
  403|       |
  404|       |#define ZSTD_GEN_FAST_FN(dictMode, mls, step)                                                            \
  405|       |    static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step(                                      \
  406|       |            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                    \
  407|       |            void const* src, size_t srcSize)                                                       \
  408|  11.2k|    {                                                                                              \
  409|  11.2k|        return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
  410|  11.2k|    }
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_noDict_4_1
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_noDict_5_1
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_noDict_6_1
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_noDict_7_1
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_noDict_4_0
zstd_fast.c:ZSTD_compressBlock_fast_noDict_5_0
 Line|  Count|Source
  408|  11.2k|    {                                                                                              \
  409|  11.2k|        return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
  410|  11.2k|    }
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_noDict_6_0
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_noDict_7_0
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_dictMatchState_4_0
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_dictMatchState_5_0
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_dictMatchState_6_0
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_dictMatchState_7_0
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_extDict_4_0
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_extDict_5_0
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_extDict_6_0
Unexecuted instantiation: zstd_fast.c:ZSTD_compressBlock_fast_extDict_7_0
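
Note: each instantiation entry above corresponds to one expansion of the ZSTD_GEN_FAST_FN macro at lines 404-410. Hand-expanding the only instantiation exercised in this run (the preprocessor output is the authority, but the expansion is mechanical):

    static size_t ZSTD_compressBlock_fast_noDict_5_0(
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
            void const* src, size_t srcSize)
    {
        return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 5, 0);
    }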
  411|       |
  412|       |ZSTD_GEN_FAST_FN(noDict, 4, 1)
  413|       |ZSTD_GEN_FAST_FN(noDict, 5, 1)
  414|       |ZSTD_GEN_FAST_FN(noDict, 6, 1)
  415|       |ZSTD_GEN_FAST_FN(noDict, 7, 1)
  416|       |
  417|       |ZSTD_GEN_FAST_FN(noDict, 4, 0)
  418|       |ZSTD_GEN_FAST_FN(noDict, 5, 0)
  419|       |ZSTD_GEN_FAST_FN(noDict, 6, 0)
  420|       |ZSTD_GEN_FAST_FN(noDict, 7, 0)
  421|       |
  422|       |size_t ZSTD_compressBlock_fast(
  423|       |        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  424|       |        void const* src, size_t srcSize)
  425|  11.2k|{
  426|  11.2k|    U32 const mls = ms->cParams.minMatch;
  427|  11.2k|    assert(ms->dictMatchState == NULL);
  428|  11.2k|    if (ms->cParams.targetLength > 1) {
  429|      0|        switch(mls)
  430|      0|        {
  431|      0|        default: /* includes case 3 */
  432|      0|        case 4 :
  433|      0|            return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
  434|      0|        case 5 :
  435|      0|            return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
  436|      0|        case 6 :
  437|      0|            return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
  438|      0|        case 7 :
  439|      0|            return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
  440|      0|        }
  441|  11.2k|    } else {
  442|  11.2k|        switch(mls)
  443|  11.2k|        {
  444|      0|        default: /* includes case 3 */
  445|      0|        case 4 :
  446|      0|            return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
  447|  11.2k|        case 5 :
  448|  11.2k|            return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
  449|      0|        case 6 :
  450|      0|            return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
  451|      0|        case 7 :
  452|      0|            return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
  453|  11.2k|        }
  454|       |
  455|  11.2k|    }
  456|  11.2k|}
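
Note: the dispatch above selects the hasStep=1 ("accelerated") variants only when cParams->targetLength > 1; this run used minMatch 5 with targetLength <= 1, so only ZSTD_compressBlock_fast_noDict_5_0 shows coverage. A worked restatement of the step formula from line 152 (a sketch that merely re-expresses that line; `demo_` name is hypothetical):

    static size_t demo_stepSize(U32 targetLength, U32 hasStep)
    {
        /* line 152: stepSize = hasStep ? (targetLength + !(targetLength) + 1) : 2 */
        return hasStep ? (targetLength + !(targetLength) + 1) : 2;
    }
    /* demo_stepSize(0, 0) == 2  -- the covered path in this run
     * demo_stepSize(0, 1) == 2  -- targetLength 0 is bumped to 1, then +1
     * demo_stepSize(4, 1) == 5  -- larger targetLength widens the search stride */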
  457|       |
  458|       |FORCE_INLINE_TEMPLATE
  459|       |size_t ZSTD_compressBlock_fast_dictMatchState_generic(
  460|       |        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  461|       |        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
  462|      0|{
  463|      0|    const ZSTD_compressionParameters* const cParams = &ms->cParams;
  464|      0|    U32* const hashTable = ms->hashTable;
  465|      0|    U32 const hlog = cParams->hashLog;
  466|       |    /* support stepSize of 0 */
  467|      0|    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
  468|      0|    const BYTE* const base = ms->window.base;
  469|      0|    const BYTE* const istart = (const BYTE*)src;
  470|      0|    const BYTE* ip0 = istart;
  471|      0|    const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
  472|      0|    const BYTE* anchor = istart;
  473|      0|    const U32   prefixStartIndex = ms->window.dictLimit;
  474|      0|    const BYTE* const prefixStart = base + prefixStartIndex;
  475|      0|    const BYTE* const iend = istart + srcSize;
  476|      0|    const BYTE* const ilimit = iend - HASH_READ_SIZE;
  477|      0|    U32 offset_1=rep[0], offset_2=rep[1];
  478|       |
  479|      0|    const ZSTD_matchState_t* const dms = ms->dictMatchState;
  480|      0|    const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
  481|      0|    const U32* const dictHashTable = dms->hashTable;
  482|      0|    const U32 dictStartIndex       = dms->window.dictLimit;
  483|      0|    const BYTE* const dictBase     = dms->window.base;
  484|      0|    const BYTE* const dictStart    = dictBase + dictStartIndex;
  485|      0|    const BYTE* const dictEnd      = dms->window.nextSrc;
  486|      0|    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
  487|      0|    const U32 dictAndPrefixLength  = (U32)(istart - prefixStart + dictEnd - dictStart);
  488|      0|    const U32 dictHBits            = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
  489|       |
  490|       |    /* if a dictionary is still attached, it necessarily means that
  491|       |     * it is within window size. So we just check it. */
  492|      0|    const U32 maxDistance = 1U << cParams->windowLog;
  493|      0|    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
  494|      0|    assert(endIndex - prefixStartIndex <= maxDistance);
  495|      0|    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */
  496|       |
  497|      0|    (void)hasStep; /* not currently specialized on whether it's accelerated */
  498|       |
  499|       |    /* ensure there will be no underflow
  500|       |     * when translating a dict index into a local index */
  501|      0|    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
  502|       |
  503|      0|    if (ms->prefetchCDictTables) {
  504|      0|        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
  505|      0|        PREFETCH_AREA(dictHashTable, hashTableBytes)
  506|      0|    }
  507|       |
  508|       |    /* init */
  509|      0|    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
  510|      0|    ip0 += (dictAndPrefixLength == 0);
  511|       |    /* dictMatchState repCode checks don't currently handle repCode == 0
  512|       |     * disabling. */
  513|      0|    assert(offset_1 <= dictAndPrefixLength);
  514|      0|    assert(offset_2 <= dictAndPrefixLength);
  515|       |
  516|       |    /* Outer search loop */
  517|      0|    assert(stepSize >= 1);
  518|      0|    while (ip1 <= ilimit) {   /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
  519|      0|        size_t mLength;
  520|      0|        size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);
  521|       |
  522|      0|        size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
  523|      0|        U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
  524|      0|        int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);
  525|       |
  526|      0|        U32 matchIndex = hashTable[hash0];
  527|      0|        U32 curr = (U32)(ip0 - base);
  528|      0|        size_t step = stepSize;
  529|      0|        const size_t kStepIncr = 1 << kSearchStrength;
  530|      0|        const BYTE* nextStep = ip0 + kStepIncr;
  531|       |
  532|       |        /* Inner search loop */
  533|      0|        while (1) {
  534|      0|            const BYTE* match = base + matchIndex;
  535|      0|            const U32 repIndex = curr + 1 - offset_1;
  536|      0|            const BYTE* repMatch = (repIndex < prefixStartIndex) ?
  537|      0|                                   dictBase + (repIndex - dictIndexDelta) :
  538|      0|                                   base + repIndex;
  539|      0|            const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
  540|      0|            size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
  541|      0|            hashTable[hash0] = curr;   /* update hash table */
  542|       |
  543|      0|            if (((U32) ((prefixStartIndex - 1) - repIndex) >=
  544|      0|                 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
  545|      0|                && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
  546|      0|                const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
  547|      0|                mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
  548|      0|                ip0++;
  549|      0|                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
  550|      0|                break;
  551|      0|            }
  552|       |
  553|      0|            if (dictTagsMatch) {
  554|       |                /* Found a possible dict match */
  555|      0|                const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
  556|      0|                const BYTE* dictMatch = dictBase + dictMatchIndex;
  557|      0|                if (dictMatchIndex > dictStartIndex &&
  558|      0|                    MEM_read32(dictMatch) == MEM_read32(ip0)) {
  559|       |                    /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
  560|      0|                    if (matchIndex <= prefixStartIndex) {
  561|      0|                        U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
  562|      0|                        mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
  563|      0|                        while (((ip0 > anchor) & (dictMatch > dictStart))
  564|      0|                            && (ip0[-1] == dictMatch[-1])) {
  565|      0|                            ip0--;
  566|      0|                            dictMatch--;
  567|      0|                            mLength++;
  568|      0|                        } /* catch up */
  569|      0|                        offset_2 = offset_1;
  570|      0|                        offset_1 = offset;
  571|      0|                        ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
  572|      0|                        break;
  573|      0|                    }
  574|      0|                }
  575|      0|            }
  576|       |
  577|      0|            if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
  578|       |                /* found a regular match */
  579|      0|                U32 const offset = (U32) (ip0 - match);
  580|      0|                mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
  581|      0|                while (((ip0 > anchor) & (match > prefixStart))
  582|      0|                       && (ip0[-1] == match[-1])) {
  583|      0|                    ip0--;
  584|      0|                    match--;
  585|      0|                    mLength++;
  586|      0|                } /* catch up */
  587|      0|                offset_2 = offset_1;
  588|      0|                offset_1 = offset;
  589|      0|                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
  590|      0|                break;
  591|      0|            }
  592|       |
  593|       |            /* Prepare for next iteration */
  594|      0|            dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
  595|      0|            dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
  596|      0|            matchIndex = hashTable[hash1];
  597|       |
  598|      0|            if (ip1 >= nextStep) {
  599|      0|                step++;
  600|      0|                nextStep += kStepIncr;
  601|      0|            }
  602|      0|            ip0 = ip1;
  603|      0|            ip1 = ip1 + step;
  604|      0|            if (ip1 > ilimit) goto _cleanup;
  605|       |
  606|      0|            curr = (U32)(ip0 - base);
  607|      0|            hash0 = hash1;
  608|      0|        }   /* end inner search loop */
  609|       |
  610|       |        /* match found */
  611|      0|        assert(mLength);
  612|      0|        ip0 += mLength;
  613|      0|        anchor = ip0;
  614|       |
  615|      0|        if (ip0 <= ilimit) {
  616|       |            /* Fill Table */
  617|      0|            assert(base+curr+2 > istart);  /* check base overflow */
  618|      0|            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
  619|      0|            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
  620|       |
  621|       |            /* check immediate repcode */
  622|      0|            while (ip0 <= ilimit) {
  623|      0|                U32 const current2 = (U32)(ip0-base);
  624|      0|                U32 const repIndex2 = current2 - offset_2;
  625|      0|                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
  626|      0|                        dictBase - dictIndexDelta + repIndex2 :
  627|      0|                        base + repIndex2;
  628|      0|                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
  629|      0|                   && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
  630|      0|                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
  631|      0|                    size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
  632|      0|                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
  633|      0|                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
  634|      0|                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
  635|      0|                    ip0 += repLength2;
  636|      0|                    anchor = ip0;
  637|      0|                    continue;
  638|      0|                }
  639|      0|                break;
  640|      0|            }
  641|      0|        }
  642|       |
  643|       |        /* Prepare for next iteration */
  644|      0|        assert(ip0 == anchor);
  645|      0|        ip1 = ip0 + stepSize;
  646|      0|    }
  647|       |
  648|      0|_cleanup:
  649|       |    /* save reps for next block */
  650|      0|    rep[0] = offset_1;
  651|      0|    rep[1] = offset_2;
  652|       |
  653|       |    /* Return the last literals size */
  654|      0|    return (size_t)(iend - anchor);
  655|      0|}
  656|       |
  657|       |
  658|       |ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
  659|       |ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
  660|       |ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
  661|       |ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
  662|       |
  663|       |size_t ZSTD_compressBlock_fast_dictMatchState(
  664|       |        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  665|       |        void const* src, size_t srcSize)
  666|      0|{
  667|      0|    U32 const mls = ms->cParams.minMatch;
  668|      0|    assert(ms->dictMatchState != NULL);
  669|      0|    switch(mls)
  670|      0|    {
  671|      0|    default: /* includes case 3 */
  672|      0|    case 4 :
  673|      0|        return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
  674|      0|    case 5 :
  675|      0|        return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
  676|      0|    case 6 :
  677|      0|        return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
  678|      0|    case 7 :
  679|      0|        return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
  680|      0|    }
  681|      0|}
  682|       |
  683|       |
  684|       |static size_t ZSTD_compressBlock_fast_extDict_generic(
  685|       |        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  686|       |        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
  687|      0|{
  688|      0|    const ZSTD_compressionParameters* const cParams = &ms->cParams;
  689|      0|    U32* const hashTable = ms->hashTable;
  690|      0|    U32 const hlog = cParams->hashLog;
  691|       |    /* support stepSize of 0 */
  692|      0|    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
  693|      0|    const BYTE* const base = ms->window.base;
  694|      0|    const BYTE* const dictBase = ms->window.dictBase;
  695|      0|    const BYTE* const istart = (const BYTE*)src;
  696|      0|    const BYTE* anchor = istart;
  697|      0|    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
  698|      0|    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
  699|      0|    const U32   dictStartIndex = lowLimit;
  700|      0|    const BYTE* const dictStart = dictBase + dictStartIndex;
  701|      0|    const U32   dictLimit = ms->window.dictLimit;
  702|      0|    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
  703|      0|    const BYTE* const prefixStart = base + prefixStartIndex;
  704|      0|    const BYTE* const dictEnd = dictBase + prefixStartIndex;
  705|      0|    const BYTE* const iend = istart + srcSize;
  706|      0|    const BYTE* const ilimit = iend - 8;
  707|      0|    U32 offset_1=rep[0], offset_2=rep[1];
  708|      0|    U32 offsetSaved1 = 0, offsetSaved2 = 0;
  709|       |
  710|      0|    const BYTE* ip0 = istart;
  711|      0|    const BYTE* ip1;
  712|      0|    const BYTE* ip2;
  713|      0|    const BYTE* ip3;
  714|      0|    U32 current0;
  715|       |
  716|       |
  717|      0|    size_t hash0; /* hash for ip0 */
  718|      0|    size_t hash1; /* hash for ip1 */
  719|      0|    U32 idx; /* match idx for ip0 */
  720|      0|    const BYTE* idxBase; /* base pointer for idx */
  721|       |
  722|      0|    U32 offcode;
  723|      0|    const BYTE* match0;
  724|      0|    size_t mLength;
  725|      0|    const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */
  726|       |
  727|      0|    size_t step;
  728|      0|    const BYTE* nextStep;
  729|      0|    const size_t kStepIncr = (1 << (kSearchStrength - 1));
  730|       |
  731|      0|    (void)hasStep; /* not currently specialized on whether it's accelerated */
  732|       |
  733|      0|    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
  734|       |
  735|       |    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
  736|      0|    if (prefixStartIndex == dictStartIndex)
  737|      0|        return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
  738|       |
  739|      0|    {   U32 const curr = (U32)(ip0 - base);
  740|      0|        U32 const maxRep = curr - dictStartIndex;
  741|      0|        if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
  742|      0|        if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
  743|      0|    }
  744|       |
  745|       |    /* start each op */
  746|      0|_start: /* Requires: ip0 */
  747|       |
  748|      0|    step = stepSize;
  749|      0|    nextStep = ip0 + kStepIncr;
  750|       |
  751|       |    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
  752|      0|    ip1 = ip0 + 1;
  753|      0|    ip2 = ip0 + step;
  754|      0|    ip3 = ip2 + 1;
  755|       |
  756|      0|    if (ip3 >= ilimit) {
  757|      0|        goto _cleanup;
  758|      0|    }
  759|       |
  760|      0|    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
  761|      0|    hash1 = ZSTD_hashPtr(ip1, hlog, mls);
  762|       |
  763|      0|    idx = hashTable[hash0];
  764|      0|    idxBase = idx < prefixStartIndex ? dictBase : base;
  765|       |
  766|      0|    do {
  767|      0|        {   /* load repcode match for ip[2] */
  768|      0|            U32 const current2 = (U32)(ip2 - base);
  769|      0|            U32 const repIndex = current2 - offset_1;
  770|      0|            const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
  771|      0|            U32 rval;
  772|      0|            if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
  773|      0|                 & (offset_1 > 0) ) {
  774|      0|                rval = MEM_read32(repBase + repIndex);
  775|      0|            } else {
  776|      0|                rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
  777|      0|            }
  778|       |
  779|       |            /* write back hash table entry */
  780|      0|            current0 = (U32)(ip0 - base);
  781|      0|            hashTable[hash0] = current0;
  782|       |
  783|       |            /* check repcode at ip[2] */
  784|      0|            if (MEM_read32(ip2) == rval) {
  785|      0|                ip0 = ip2;
  786|      0|                match0 = repBase + repIndex;
  787|      0|                matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
  788|      0|                assert((match0 != prefixStart) & (match0 != dictStart));
  789|      0|                mLength = ip0[-1] == match0[-1];
  790|      0|                ip0 -= mLength;
  791|      0|                match0 -= mLength;
  792|      0|                offcode = REPCODE1_TO_OFFBASE;
  793|      0|                mLength += 4;
  794|      0|                goto _match;
  795|      0|        }   }
  796|       |
  797|      0|        {   /* load match for ip[0] */
  798|      0|            U32 const mval = idx >= dictStartIndex ?
  799|      0|                    MEM_read32(idxBase + idx) :
  800|      0|                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */
  801|       |
  802|       |            /* check match at ip[0] */
  803|      0|            if (MEM_read32(ip0) == mval) {
  804|       |                /* found a match! */
  805|      0|                goto _offset;
  806|      0|        }   }
  807|       |
  808|       |        /* lookup ip[1] */
  809|      0|        idx = hashTable[hash1];
  810|      0|        idxBase = idx < prefixStartIndex ? dictBase : base;
  811|       |
  812|       |        /* hash ip[2] */
  813|      0|        hash0 = hash1;
  814|      0|        hash1 = ZSTD_hashPtr(ip2, hlog, mls);
  815|       |
  816|       |        /* advance to next positions */
  817|      0|        ip0 = ip1;
  818|      0|        ip1 = ip2;
  819|      0|        ip2 = ip3;
  820|       |
  821|       |        /* write back hash table entry */
  822|      0|        current0 = (U32)(ip0 - base);
  823|      0|        hashTable[hash0] = current0;
  824|       |
  825|      0|        {   /* load match for ip[0] */
  826|      0|            U32 const mval = idx >= dictStartIndex ?
  827|      0|                    MEM_read32(idxBase + idx) :
  828|      0|                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */
  829|       |
  830|       |            /* check match at ip[0] */
  831|      0|            if (MEM_read32(ip0) == mval) {
  832|       |                /* found a match! */
  833|      0|                goto _offset;
  834|      0|        }   }
  835|       |
  836|       |        /* lookup ip[1] */
  837|      0|        idx = hashTable[hash1];
  838|      0|        idxBase = idx < prefixStartIndex ? dictBase : base;
  839|       |
  840|       |        /* hash ip[2] */
  841|      0|        hash0 = hash1;
  842|      0|        hash1 = ZSTD_hashPtr(ip2, hlog, mls);
  843|       |
  844|       |        /* advance to next positions */
  845|      0|        ip0 = ip1;
  846|      0|        ip1 = ip2;
  847|      0|        ip2 = ip0 + step;
  848|      0|        ip3 = ip1 + step;
  849|       |
  850|       |        /* calculate step */
  851|      0|        if (ip2 >= nextStep) {
  852|      0|            step++;
  853|      0|            PREFETCH_L1(ip1 + 64);
  854|      0|            PREFETCH_L1(ip1 + 128);
  855|      0|            nextStep += kStepIncr;
  856|      0|        }
  857|      0|    } while (ip3 < ilimit);
  858|       |
  859|      0|_cleanup:
  860|       |    /* Note that there are probably still a couple positions we could search.
  861|       |     * However, it seems to be a meaningful performance hit to try to search
  862|       |     * them. So let's not. */
  863|       |
  864|       |    /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
  865|       |     * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
  866|      0|    offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
  867|       |
  868|       |    /* save reps for next block */
  869|      0|    rep[0] = offset_1 ? offset_1 : offsetSaved1;
  870|      0|    rep[1] = offset_2 ? offset_2 : offsetSaved2;
  871|       |
  872|       |    /* Return the last literals size */
  873|      0|    return (size_t)(iend - anchor);
  874|       |
  875|      0|_offset: /* Requires: ip0, idx, idxBase */
  876|       |
  877|       |    /* Compute the offset code. */
  878|      0|    {   U32 const offset = current0 - idx;
  879|      0|        const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
  880|      0|        matchEnd = idx < prefixStartIndex ? dictEnd : iend;
  881|      0|        match0 = idxBase + idx;
  882|      0|        offset_2 = offset_1;
  883|      0|        offset_1 = offset;
  884|      0|        offcode = OFFSET_TO_OFFBASE(offset);
  885|      0|        mLength = 4;
  886|       |
  887|       |        /* Count the backwards match length. */
  888|      0|        while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
  889|      0|            ip0--;
  890|      0|            match0--;
  891|      0|            mLength++;
  892|      0|    }   }
  893|       |
  894|      0|_match: /* Requires: ip0, match0, offcode, matchEnd */
  895|       |
  896|       |    /* Count the forward length. */
  897|      0|    assert(matchEnd != 0);
  898|      0|    mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);
  899|       |
  900|      0|    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
  901|       |
  902|      0|    ip0 += mLength;
  903|      0|    anchor = ip0;
  904|       |
  905|       |    /* write next hash table entry */
  906|      0|    if (ip1 < ip0) {
  907|      0|        hashTable[hash1] = (U32)(ip1 - base);
  908|      0|    }
  909|       |
  910|       |    /* Fill table and check for immediate repcode. */
  911|      0|    if (ip0 <= ilimit) {
  912|       |        /* Fill Table */
  913|      0|        assert(base+current0+2 > istart);  /* check base overflow */
  914|      0|        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
  915|      0|        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
  916|       |
  917|      0|        while (ip0 <= ilimit) {
  918|      0|            U32 const repIndex2 = (U32)(ip0-base) - offset_2;
  919|      0|            const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
  920|      0|            if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0))  /* intentional underflow */
  921|      0|                 && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
  922|      0|                const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
  923|      0|                size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
  924|      0|                { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
  925|      0|                ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
  926|      0|                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
  927|      0|                ip0 += repLength2;
  928|      0|                anchor = ip0;
  929|      0|                continue;
  930|      0|            }
  931|      0|            break;
  932|      0|    }   }
  933|       |
  934|      0|    goto _start;
  935|      0|}
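
Note: the extDict path above measures matches with ZSTD_count_2segments(), which lets a match begin in the dictionary segment and continue into the prefix. A minimal sketch of the two-segment counting contract as implied by the call sites (illustrative only; the `demo_` name is hypothetical, while MIN and ZSTD_count are the library's existing helpers):

    static size_t demo_count2segments(const BYTE* ip, const BYTE* match,
                                      const BYTE* iEnd, const BYTE* mEnd,
                                      const BYTE* iStart)
    {
        /* compare within the match's own segment first */
        const BYTE* const vEnd = MIN(ip + (mEnd - match), iEnd);
        size_t const matchLength = ZSTD_count(ip, match, vEnd);
        if (match + matchLength != mEnd) return matchLength; /* match ended in-segment */
        /* match ran to the segment end: continue comparing against the prefix start */
        return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
    }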
  936|       |
  937|       |ZSTD_GEN_FAST_FN(extDict, 4, 0)
  938|       |ZSTD_GEN_FAST_FN(extDict, 5, 0)
  939|       |ZSTD_GEN_FAST_FN(extDict, 6, 0)
  940|       |ZSTD_GEN_FAST_FN(extDict, 7, 0)
  941|       |
  942|       |size_t ZSTD_compressBlock_fast_extDict(
  943|       |        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  944|       |        void const* src, size_t srcSize)
  945|      0|{
  946|      0|    U32 const mls = ms->cParams.minMatch;
  947|      0|    assert(ms->dictMatchState == NULL);
  948|      0|    switch(mls)
  949|      0|    {
  950|      0|    default: /* includes case 3 */
  951|      0|    case 4 :
  952|      0|        return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
  953|      0|    case 5 :
  954|      0|        return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
  955|      0|    case 6 :
  956|      0|        return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
  957|      0|    case 7 :
  958|      0|        return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
  959|      0|    }
  960|      0|}