Coverage Report

Created: 2025-07-12 06:54

/src/zstd/lib/compress/zstd_ldm.c
Line
Count
Source
1
/*
2
 * Copyright (c) Meta Platforms, Inc. and affiliates.
3
 * All rights reserved.
4
 *
5
 * This source code is licensed under both the BSD-style license (found in the
6
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7
 * in the COPYING file in the root directory of this source tree).
8
 * You may select, at your option, one of the above-listed licenses.
9
 */
10
11
#include "zstd_ldm.h"
12
13
#include "../common/debug.h"
14
#include "../common/xxhash.h"
15
#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
16
#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
17
#include "zstd_ldm_geartab.h"
18
19
#define LDM_BUCKET_SIZE_LOG 4
20
0
#define LDM_MIN_MATCH_LENGTH 64
21
#define LDM_HASH_RLOG 7
22
23
typedef struct {
24
    U64 rolling;
25
    U64 stopMask;
26
} ldmRollingHashState_t;
27
28
/** ZSTD_ldm_gear_init():
29
 *
30
 * Initializes the rolling hash state such that it will honor the
31
 * settings in params. */
32
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
33
595k
{
34
595k
    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
35
595k
    unsigned hashRateLog = params->hashRateLog;
36
37
595k
    state->rolling = ~(U32)0;
38
39
    /* The choice of the splitting criterion is subject to two conditions:
40
     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
41
     *   2. ideally, it has to depend on a window of minMatchLength bytes.
42
     *
43
     * In the gear hash algorithm, bit n depends on the last n bytes;
44
     * so in order to obtain a good quality splitting criterion it is
45
     * preferable to use bits with high weight.
46
     *
47
     * To match condition 1 we use a mask with hashRateLog bits set
48
     * and, because of the previous remark, we make sure these bits
49
     * have the highest possible weight while still respecting
50
     * condition 2.
51
     */
52
595k
    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
53
467k
        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
54
467k
    } else {
55
        /* In this degenerate case we simply honor the hash rate. */
56
127k
        state->stopMask = ((U64)1 << hashRateLog) - 1;
57
127k
    }
58
595k
}
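
As a quick sanity check of the mask construction, here is a minimal standalone sketch (not part of zstd; the parameter values are assumptions chosen for illustration). With minMatchLength = 64 and hashRateLog = 7, the seven set bits land at the very top of the 64-bit window:

    /* Minimal sketch of the stopMask computation, with assumed params. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned const minMatchLength = 64;  /* assumed */
        unsigned const hashRateLog    = 7;   /* assumed */
        unsigned const maxBitsInMask  = minMatchLength < 64 ? minMatchLength : 64;
        uint64_t stopMask;

        if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
            /* hashRateLog bits, placed as high as condition 2 allows */
            stopMask = (((uint64_t)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
        } else {
            /* degenerate case: only honor the hash rate */
            stopMask = ((uint64_t)1 << hashRateLog) - 1;
        }
        printf("stopMask = 0x%016llX\n", (unsigned long long)stopMask);
        /* prints 0xFE00000000000000: the 7 highest of the 64 bits */
        return 0;
    }
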
59
60
/** ZSTD_ldm_gear_reset()
61
 * Feeds [data, data + minMatchLength) into the hash without registering any
62
 * splits. This effectively resets the hash state. This is used when skipping
63
 * over data, either at the beginning of a block, or skipping sections.
64
 */
65
static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
66
                                BYTE const* data, size_t minMatchLength)
67
1.13M
{
68
1.13M
    U64 hash = state->rolling;
69
1.13M
    size_t n = 0;
70
71
118M
#define GEAR_ITER_ONCE() do {                                  \
72
118M
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
73
118M
        n += 1;                                                \
74
118M
    } while (0)
75
30.5M
    while (n + 3 < minMatchLength) {
76
29.4M
        GEAR_ITER_ONCE();
77
29.4M
        GEAR_ITER_ONCE();
78
29.4M
        GEAR_ITER_ONCE();
79
29.4M
        GEAR_ITER_ONCE();
80
29.4M
    }
81
1.63M
    while (n < minMatchLength) {
82
505k
        GEAR_ITER_ONCE();
83
505k
    }
84
1.13M
#undef GEAR_ITER_ONCE
85
1.13M
}
86
87
/** ZSTD_ldm_gear_feed():
88
 *
89
 * Registers in the splits array all the split points found in the first
90
 * size bytes following the data pointer. This function terminates when
91
 * either all the data has been processed or LDM_BATCH_SIZE splits are
92
 * present in the splits array.
93
 *
94
 * Precondition: The splits array must not be full.
95
 * Returns: The number of bytes processed. */
96
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
97
                                 BYTE const* data, size_t size,
98
                                 size_t* splits, unsigned* numSplits)
99
3.29M
{
100
3.29M
    size_t n;
101
3.29M
    U64 hash, mask;
102
103
3.29M
    hash = state->rolling;
104
3.29M
    mask = state->stopMask;
105
3.29M
    n = 0;
106
107
1.20G
#define GEAR_ITER_ONCE() do { \
108
1.20G
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
109
1.20G
        n += 1; \
110
1.20G
        if (UNLIKELY((hash & mask) == 0)) { \
111
187M
            splits[*numSplits] = n; \
112
187M
            *numSplits += 1; \
113
187M
            if (*numSplits == LDM_BATCH_SIZE) \
114
187M
                goto done; \
115
187M
        } \
116
1.20G
    } while (0)
117
118
302M
    while (n + 3 < size) {
119
301M
        GEAR_ITER_ONCE();
120
301M
        GEAR_ITER_ONCE();
121
300M
        GEAR_ITER_ONCE();
122
300M
        GEAR_ITER_ONCE();
123
300M
    }
124
1.21M
    while (n < size) {
125
655k
        GEAR_ITER_ONCE();
126
655k
    }
127
128
560k
#undef GEAR_ITER_ONCE
129
130
3.29M
done:
131
3.29M
    state->rolling = hash;
132
3.29M
    return n;
133
561k
}
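
To see why this mask yields one split roughly every 2^hashRateLog bytes, here is a self-contained sketch. The gear table and input bytes are synthetic stand-ins (a small splitmix64 PRNG) for ZSTD_ldm_gearTab and real data; with hashRateLog = 7 it should report about 8192 splits per MiB:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t splitmix64(uint64_t* x)   /* small PRNG for the sketch */
    {
        uint64_t z = (*x += 0x9E3779B97F4A7C15ULL);
        z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL;
        z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL;
        return z ^ (z >> 31);
    }

    int main(void)
    {
        enum { kSize = 1 << 20, kHashRateLog = 7 };   /* 1 MiB, ~1 split / 128 B */
        uint64_t gearTab[256];
        uint64_t seed = 42, hash = 0;
        uint64_t const mask = (((uint64_t)1 << kHashRateLog) - 1) << (64 - kHashRateLog);
        size_t n, numSplits = 0;

        for (n = 0; n < 256; n++) gearTab[n] = splitmix64(&seed);

        for (n = 0; n < kSize; n++) {
            unsigned const byte = (unsigned)(splitmix64(&seed) & 0xff);
            hash = (hash << 1) + gearTab[byte];       /* same update as above */
            if ((hash & mask) == 0) numSplits++;
        }
        printf("splits: %zu (expected ~%d)\n", numSplits, kSize >> kHashRateLog);
        return 0;
    }
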
134
135
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
136
                        const ZSTD_compressionParameters* cParams)
137
203k
{
138
203k
    params->windowLog = cParams->windowLog;
139
203k
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
140
203k
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
141
203k
    if (params->hashRateLog == 0) {
142
66.2k
        if (params->hashLog > 0) {
143
            /* if params->hashLog is set, derive hashRateLog from it */
144
66.2k
            assert(params->hashLog <= ZSTD_HASHLOG_MAX);
145
66.2k
            if (params->windowLog > params->hashLog) {
146
42.8k
                params->hashRateLog = params->windowLog - params->hashLog;
147
42.8k
            }
148
66.2k
        } else {
149
0
            assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
150
            /* mapping from [fast, rate7] to [btultra2, rate4] */
151
0
            params->hashRateLog = 7 - (cParams->strategy/3);
152
0
        }
153
66.2k
    }
154
203k
    if (params->hashLog == 0) {
155
0
        params->hashLog = BOUNDED(ZSTD_HASHLOG_MIN, params->windowLog - params->hashRateLog, ZSTD_HASHLOG_MAX);
156
0
    }
157
203k
    if (params->minMatchLength == 0) {
158
0
        params->minMatchLength = LDM_MIN_MATCH_LENGTH;
159
0
        if (cParams->strategy >= ZSTD_btultra)
160
0
            params->minMatchLength /= 2;
161
0
    }
162
203k
    if (params->bucketSizeLog==0) {
163
74.1k
        assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9);
164
74.1k
        params->bucketSizeLog = BOUNDED(LDM_BUCKET_SIZE_LOG, (U32)cParams->strategy, ZSTD_LDM_BUCKETSIZELOG_MAX);
165
74.1k
    }
166
203k
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
167
203k
}
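
A worked sketch of the derivations above, under assumed inputs (windowLog = 27, hashLog = 20, strategy ZSTD_btultra2 = 9); the constants 4 and 8 stand in for LDM_BUCKET_SIZE_LOG and ZSTD_LDM_BUCKETSIZELOG_MAX:

    #include <stdio.h>

    #define BOUNDED(min, val, max) ((val) < (min) ? (min) : ((val) > (max) ? (max) : (val)))

    int main(void)
    {
        unsigned const windowLog = 27, hashLog = 20;  /* assumed inputs */
        unsigned const strategy  = 9;                 /* ZSTD_btultra2 */
        unsigned const hashRateLog = windowLog > hashLog ? windowLog - hashLog : 0;
        unsigned minMatchLength  = 64;                /* LDM_MIN_MATCH_LENGTH */
        unsigned bucketSizeLog;

        if (strategy >= 8)                            /* >= ZSTD_btultra */
            minMatchLength /= 2;
        bucketSizeLog = BOUNDED(4u, strategy, 8u);

        printf("hashRateLog=%u minMatchLength=%u bucketSizeLog=%u\n",
               hashRateLog, minMatchLength, bucketSizeLog);
        /* -> hashRateLog=7 minMatchLength=32 bucketSizeLog=8 */
        return 0;
    }
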
168
169
size_t ZSTD_ldm_getTableSize(ldmParams_t params)
170
5.09M
{
171
5.09M
    size_t const ldmHSize = ((size_t)1) << params.hashLog;
172
5.09M
    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
173
5.09M
    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
174
5.09M
    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
175
5.09M
                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
176
5.09M
    return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
177
5.09M
}
178
179
size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
180
10.2M
{
181
10.2M
    return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
182
10.2M
}
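
For scale, a back-of-envelope sketch of the allocation above with assumed parameters (hashLog = 20, bucketSizeLog = 8). An ldmEntry_t is two U32 fields (8 bytes), one bucket-offset byte is kept per bucket, and ZSTD_cwksp_alloc_size() rounding is ignored here:

    #include <stdio.h>

    int main(void)
    {
        unsigned const hashLog = 20, bucketSizeLog = 8;       /* assumed */
        size_t const ldmHSize   = (size_t)1 << hashLog;       /* nb entries */
        size_t const nbBuckets  = (size_t)1 << (hashLog - bucketSizeLog);
        size_t const entryBytes = ldmHSize * 8;               /* 8 = entry size */
        printf("hash table: %zu bytes, bucket offsets: %zu bytes\n",
               entryBytes, nbBuckets);                        /* 8 MiB + 4 KiB */
        return 0;
    }
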
183
184
/** ZSTD_ldm_getBucket() :
185
 *  Returns a pointer to the start of the bucket associated with hash. */
186
static ldmEntry_t* ZSTD_ldm_getBucket(
187
        const ldmState_t* ldmState, size_t hash, U32 const bucketSizeLog)
188
364M
{
189
364M
    return ldmState->hashTable + (hash << bucketSizeLog);
190
364M
}
191
192
/** ZSTD_ldm_insertEntry() :
193
 *  Insert the entry with corresponding hash into the hash table */
194
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
195
                                 size_t const hash, const ldmEntry_t entry,
196
                                 U32 const bucketSizeLog)
197
181M
{
198
181M
    BYTE* const pOffset = ldmState->bucketOffsets + hash;
199
181M
    unsigned const offset = *pOffset;
200
201
181M
    *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry;
202
181M
    *pOffset = (BYTE)((offset + 1) & ((1u << bucketSizeLog) - 1));
203
204
181M
}
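
The per-bucket offset byte makes each bucket a small ring: an insert overwrites the slot at the current offset and advances it modulo 2^bucketSizeLog, so the oldest entry in the bucket is evicted first. A minimal sketch with simplified types (not zstd's own):

    #include <stdio.h>
    #include <stdint.h>

    typedef struct { unsigned offset, checksum; } Entry;

    #define BUCKET_SIZE_LOG 2   /* 4 entries per bucket, for illustration */
    #define NB_BUCKETS      4

    static Entry   table[NB_BUCKETS << BUCKET_SIZE_LOG];
    static uint8_t bucketOffsets[NB_BUCKETS];

    static void insertEntry(size_t hash, Entry e)
    {
        uint8_t* const pOffset = bucketOffsets + hash;
        table[(hash << BUCKET_SIZE_LOG) + *pOffset] = e;
        *pOffset = (uint8_t)((*pOffset + 1) & ((1u << BUCKET_SIZE_LOG) - 1));
    }

    int main(void)
    {
        unsigned i;
        for (i = 0; i < 6; i++) {            /* 6 inserts into bucket 1:   */
            Entry e = { i, 0 };              /* slots 0..3 fill, then 0, 1 */
            insertEntry(1, e);               /* are overwritten (FIFO).    */
        }
        for (i = 0; i < 4; i++)
            printf("bucket 1 slot %u: offset=%u\n", i,
                   table[(1u << BUCKET_SIZE_LOG) + i].offset);  /* 4 5 2 3 */
        return 0;
    }
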
205
206
/** ZSTD_ldm_countBackwardsMatch() :
207
 *  Returns the number of bytes that match backwards before pIn and pMatch.
208
 *
209
 *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
210
static size_t ZSTD_ldm_countBackwardsMatch(
211
            const BYTE* pIn, const BYTE* pAnchor,
212
            const BYTE* pMatch, const BYTE* pMatchBase)
213
58.9M
{
214
58.9M
    size_t matchLength = 0;
215
132M
    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
216
73.2M
        pIn--;
217
73.2M
        pMatch--;
218
73.2M
        matchLength++;
219
73.2M
    }
220
58.9M
    return matchLength;
221
58.9M
}
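
A toy illustration of the backwards extension (synthetic buffer; not zstd code): a forward match found at a split point is widened to the left while the preceding bytes agree, stopping at the previous sequence's anchor or the window base.

    #include <stdio.h>

    static size_t countBackwards(const char* pIn, const char* pAnchor,
                                 const char* pMatch, const char* pMatchBase)
    {
        size_t n = 0;
        while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
            pIn--; pMatch--; n++;
        }
        return n;
    }

    int main(void)
    {
        const char* buf = "abcXYZ....abcXYZ";
        /* the match at position 13 ("XYZ") against position 3 is preceded
         * by "abc" on both sides, so it extends 3 bytes backwards */
        printf("%zu\n", countBackwards(buf + 13, buf + 10, buf + 3, buf + 0));
        return 0;
    }
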
222
223
/** ZSTD_ldm_countBackwardsMatch_2segments() :
224
 *  Returns the number of bytes that match backwards from pMatch,
225
 *  even with the backwards match spanning 2 different segments.
226
 *
227
 *  On reaching `pMatchBase`, start counting from pExtDictEnd */
228
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
229
                    const BYTE* pIn, const BYTE* pAnchor,
230
                    const BYTE* pMatch, const BYTE* pMatchBase,
231
                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
232
2.34M
{
233
2.34M
    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
234
2.34M
    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
235
        /* If backwards match is entirely in the extDict or prefix, immediately return */
236
2.34M
        return matchLength;
237
2.34M
    }
238
2.38k
    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
239
2.38k
    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
240
2.38k
    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
241
2.38k
    return matchLength;
242
2.34M
}
243
244
/** ZSTD_ldm_fillFastTables() :
245
 *
246
 *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
247
 *  This is similar to ZSTD_loadDictionaryContent.
248
 *
249
 *  The tables for the other strategies are filled within their
250
 *  block compressors. */
251
static size_t ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms,
252
                                      void const* end)
253
11.6M
{
254
11.6M
    const BYTE* const iend = (const BYTE*)end;
255
256
11.6M
    switch(ms->cParams.strategy)
257
11.6M
    {
258
1.99M
    case ZSTD_fast:
259
1.99M
        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
260
1.99M
        break;
261
262
3.53M
    case ZSTD_dfast:
263
3.53M
#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
264
3.53M
        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
265
#else
266
        assert(0); /* shouldn't be called: cparams should've been adjusted. */
267
#endif
268
3.53M
        break;
269
270
1.20M
    case ZSTD_greedy:
271
2.82M
    case ZSTD_lazy:
272
5.51M
    case ZSTD_lazy2:
273
6.14M
    case ZSTD_btlazy2:
274
6.14M
    case ZSTD_btopt:
275
6.14M
    case ZSTD_btultra:
276
6.14M
    case ZSTD_btultra2:
277
6.14M
        break;
278
0
    default:
279
0
        assert(0);  /* not possible : not a valid strategy id */
280
11.6M
    }
281
282
11.6M
    return 0;
283
11.6M
}
284
285
void ZSTD_ldm_fillHashTable(
286
            ldmState_t* ldmState, const BYTE* ip,
287
            const BYTE* iend, ldmParams_t const* params)
288
8.73k
{
289
8.73k
    U32 const minMatchLength = params->minMatchLength;
290
8.73k
    U32 const bucketSizeLog = params->bucketSizeLog;
291
8.73k
    U32 const hBits = params->hashLog - bucketSizeLog;
292
8.73k
    BYTE const* const base = ldmState->window.base;
293
8.73k
    BYTE const* const istart = ip;
294
8.73k
    ldmRollingHashState_t hashState;
295
8.73k
    size_t* const splits = ldmState->splitIndices;
296
8.73k
    unsigned numSplits;
297
298
8.73k
    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
299
300
8.73k
    ZSTD_ldm_gear_init(&hashState, params);
301
85.4k
    while (ip < iend) {
302
76.7k
        size_t hashed;
303
76.7k
        unsigned n;
304
305
76.7k
        numSplits = 0;
306
76.7k
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, (size_t)(iend - ip), splits, &numSplits);
307
308
4.54M
        for (n = 0; n < numSplits; n++) {
309
4.46M
            if (ip + splits[n] >= istart + minMatchLength) {
310
4.23M
                BYTE const* const split = ip + splits[n] - minMatchLength;
311
4.23M
                U64 const xxhash = XXH64(split, minMatchLength, 0);
312
4.23M
                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
313
4.23M
                ldmEntry_t entry;
314
315
4.23M
                entry.offset = (U32)(split - base);
316
4.23M
                entry.checksum = (U32)(xxhash >> 32);
317
4.23M
                ZSTD_ldm_insertEntry(ldmState, hash, entry, params->bucketSizeLog);
318
4.23M
            }
319
4.46M
        }
320
321
76.7k
        ip += hashed;
322
76.7k
    }
323
8.73k
}
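
Each registered split hashes the preceding minMatchLength bytes with XXH64 and then carves that one 64-bit value in two, as sketched below with assumed numbers (hBits = hashLog - bucketSizeLog): the low hBits select the bucket, and the high 32 bits become the stored checksum used to reject candidates cheaply.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t const xxhash = 0x0123456789ABCDEFULL; /* stand-in for XXH64(split, mml, 0) */
        unsigned const hBits  = 12;                    /* hashLog - bucketSizeLog, assumed */
        uint32_t const hash     = (uint32_t)(xxhash & (((uint32_t)1 << hBits) - 1));
        uint32_t const checksum = (uint32_t)(xxhash >> 32);
        printf("bucket index = 0x%X, checksum = 0x%08X\n",
               (unsigned)hash, (unsigned)checksum);    /* 0xDEF, 0x01234567 */
        return 0;
    }
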
324
325
326
/** ZSTD_ldm_limitTableUpdate() :
327
 *
328
 *  Sets cctx->nextToUpdate to a position closer to anchor
329
 *  if it is far away
330
 *  (after a long match, only update tables a limited amount). */
331
static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, const BYTE* anchor)
332
11.6M
{
333
11.6M
    U32 const curr = (U32)(anchor - ms->window.base);
334
11.6M
    if (curr > ms->nextToUpdate + 1024) {
335
201k
        ms->nextToUpdate =
336
201k
            curr - MIN(512, curr - ms->nextToUpdate - 1024);
337
201k
    }
338
11.6M
}
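
A worked sketch of the clamp above with assumed positions: after skipping a long match, at most 512 bytes immediately before the anchor are left to be indexed.

    #include <stdio.h>

    #define MIN(a,b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned curr = 10000, nextToUpdate = 5000;   /* assumed positions */
        if (curr > nextToUpdate + 1024)
            nextToUpdate = curr - MIN(512, curr - nextToUpdate - 1024);
        printf("nextToUpdate=%u\n", nextToUpdate);    /* -> 9488 */
        return 0;
    }
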
339
340
static
341
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
342
size_t ZSTD_ldm_generateSequences_internal(
343
        ldmState_t* ldmState, RawSeqStore_t* rawSeqStore,
344
        ldmParams_t const* params, void const* src, size_t srcSize)
345
2.08M
{
346
    /* LDM parameters */
347
2.08M
    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
348
2.08M
    U32 const minMatchLength = params->minMatchLength;
349
2.08M
    U32 const entsPerBucket = 1U << params->bucketSizeLog;
350
2.08M
    U32 const hBits = params->hashLog - params->bucketSizeLog;
351
    /* Prefix and extDict parameters */
352
2.08M
    U32 const dictLimit = ldmState->window.dictLimit;
353
2.08M
    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
354
2.08M
    BYTE const* const base = ldmState->window.base;
355
2.08M
    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
356
2.08M
    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
357
2.08M
    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
358
2.08M
    BYTE const* const lowPrefixPtr = base + dictLimit;
359
    /* Input bounds */
360
2.08M
    BYTE const* const istart = (BYTE const*)src;
361
2.08M
    BYTE const* const iend = istart + srcSize;
362
2.08M
    BYTE const* const ilimit = iend - HASH_READ_SIZE;
363
    /* Input positions */
364
2.08M
    BYTE const* anchor = istart;
365
2.08M
    BYTE const* ip = istart;
366
    /* Rolling hash state */
367
2.08M
    ldmRollingHashState_t hashState;
368
    /* Arrays for staged processing */
369
2.08M
    size_t* const splits = ldmState->splitIndices;
370
2.08M
    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
371
2.08M
    unsigned numSplits;
372
373
2.08M
    if (srcSize < minMatchLength)
374
1.49M
        return iend - anchor;
375
376
    /* Initialize the rolling hash state with the first minMatchLength bytes */
377
586k
    ZSTD_ldm_gear_init(&hashState, params);
378
586k
    ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
379
586k
    ip += minMatchLength;
380
381
3.80M
    while (ip < ilimit) {
382
3.22M
        size_t hashed;
383
3.22M
        unsigned n;
384
385
3.22M
        numSplits = 0;
386
3.22M
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
387
3.22M
                                    splits, &numSplits);
388
389
186M
        for (n = 0; n < numSplits; n++) {
390
183M
            BYTE const* const split = ip + splits[n] - minMatchLength;
391
183M
            U64 const xxhash = XXH64(split, minMatchLength, 0);
392
183M
            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
393
394
183M
            candidates[n].split = split;
395
183M
            candidates[n].hash = hash;
396
183M
            candidates[n].checksum = (U32)(xxhash >> 32);
397
183M
            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, params->bucketSizeLog);
398
183M
            PREFETCH_L1(candidates[n].bucket);
399
183M
        }
400
401
179M
        for (n = 0; n < numSplits; n++) {
402
176M
            size_t forwardMatchLength = 0, backwardMatchLength = 0,
403
176M
                   bestMatchLength = 0, mLength;
404
176M
            U32 offset;
405
176M
            BYTE const* const split = candidates[n].split;
406
176M
            U32 const checksum = candidates[n].checksum;
407
176M
            U32 const hash = candidates[n].hash;
408
176M
            ldmEntry_t* const bucket = candidates[n].bucket;
409
176M
            ldmEntry_t const* cur;
410
176M
            ldmEntry_t const* bestEntry = NULL;
411
176M
            ldmEntry_t newEntry;
412
413
176M
            newEntry.offset = (U32)(split - base);
414
176M
            newEntry.checksum = checksum;
415
416
            /* If a split point would generate a sequence overlapping with
417
             * the previous one, we merely register it in the hash table and
418
             * move on */
419
176M
            if (split < anchor) {
420
39.0M
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
421
39.0M
                continue;
422
39.0M
            }
423
424
7.14G
            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
425
7.00G
                size_t curForwardMatchLength, curBackwardMatchLength,
426
7.00G
                       curTotalMatchLength;
427
7.00G
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
428
6.94G
                    continue;
429
6.94G
                }
430
58.9M
                if (extDict) {
431
2.34M
                    BYTE const* const curMatchBase =
432
2.34M
                        cur->offset < dictLimit ? dictBase : base;
433
2.34M
                    BYTE const* const pMatch = curMatchBase + cur->offset;
434
2.34M
                    BYTE const* const matchEnd =
435
2.34M
                        cur->offset < dictLimit ? dictEnd : iend;
436
2.34M
                    BYTE const* const lowMatchPtr =
437
2.34M
                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
438
2.34M
                    curForwardMatchLength =
439
2.34M
                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
440
2.34M
                    if (curForwardMatchLength < minMatchLength) {
441
73
                        continue;
442
73
                    }
443
2.34M
                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
444
2.34M
                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
445
56.5M
                } else { /* !extDict */
446
56.5M
                    BYTE const* const pMatch = base + cur->offset;
447
56.5M
                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
448
56.5M
                    if (curForwardMatchLength < minMatchLength) {
449
934
                        continue;
450
934
                    }
451
56.5M
                    curBackwardMatchLength =
452
56.5M
                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
453
56.5M
                }
454
58.9M
                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
455
456
58.9M
                if (curTotalMatchLength > bestMatchLength) {
457
15.5M
                    bestMatchLength = curTotalMatchLength;
458
15.5M
                    forwardMatchLength = curForwardMatchLength;
459
15.5M
                    backwardMatchLength = curBackwardMatchLength;
460
15.5M
                    bestEntry = cur;
461
15.5M
                }
462
58.9M
            }
463
464
            /* No match found -- insert an entry into the hash table
465
             * and process the next candidate match */
466
137M
            if (bestEntry == NULL) {
467
126M
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
468
126M
                continue;
469
126M
            }
470
471
            /* Match found */
472
11.5M
            offset = (U32)(split - base) - bestEntry->offset;
473
11.5M
            mLength = forwardMatchLength + backwardMatchLength;
474
11.5M
            {
475
11.5M
                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
476
477
                /* Out of sequence storage */
478
11.5M
                if (rawSeqStore->size == rawSeqStore->capacity)
479
0
                    return ERROR(dstSize_tooSmall);
480
11.5M
                seq->litLength = (U32)(split - backwardMatchLength - anchor);
481
11.5M
                seq->matchLength = (U32)mLength;
482
11.5M
                seq->offset = offset;
483
11.5M
                rawSeqStore->size++;
484
11.5M
            }
485
486
            /* Insert the current entry into the hash table --- it must be
487
             * done after the previous block to avoid clobbering bestEntry */
488
0
            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog);
489
490
11.5M
            anchor = split + forwardMatchLength;
491
492
            /* If we find a match that ends after the data that we've hashed
493
             * then we have a repeating, overlapping pattern, e.g. all zeros.
494
             * If one repetition of the pattern matches our `stopMask` then all
495
             * repetitions will. We don't need to insert them all into our table,
496
             * only the first one. So skip over overlapping matches.
497
             * This is a major speed boost (20x) for compressing a single byte
498
             * repeated, when that byte ends up in the table.
499
             */
500
11.5M
            if (anchor > ip + hashed) {
501
544k
                ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
502
                /* Continue the outer loop at anchor (ip + hashed == anchor). */
503
544k
                ip = anchor - hashed;
504
544k
                break;
505
544k
            }
506
11.5M
        }
507
508
3.22M
        ip += hashed;
509
3.22M
    }
510
511
586k
    return iend - anchor;
512
586k
}
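
To make the bookkeeping concrete, here is a sketch with illustrative numbers of how one accepted match becomes a rawSeq (simplified struct, not zstd's): the backwards extension is subtracted from the literal run and added to the match length, and the offset is the distance from the split point back to the stored entry.

    #include <stdio.h>

    typedef struct { unsigned litLength, matchLength, offset; } RawSeq;

    int main(void)
    {
        unsigned const splitPos  = 1000, anchorPos = 900; /* positions, assumed */
        unsigned const forward   = 80,  backward   = 16;  /* match lengths, assumed */
        unsigned const entryOff  = 200;                   /* bestEntry->offset, assumed */
        RawSeq seq;
        seq.litLength   = splitPos - backward - anchorPos; /* 84 literals */
        seq.matchLength = forward + backward;              /* 96-byte match */
        seq.offset      = splitPos - entryOff;             /* distance 800 */
        printf("lit=%u match=%u offset=%u\n",
               seq.litLength, seq.matchLength, seq.offset);
        /* the next anchor then lands at splitPos + forward = 1080 */
        return 0;
    }
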
513
514
/*! ZSTD_ldm_reduceTable() :
515
 *  reduce table indexes by `reducerValue` */
516
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
517
                                 U32 const reducerValue)
518
57.4k
{
519
57.4k
    U32 u;
520
330M
    for (u = 0; u < size; u++) {
521
330M
        if (table[u].offset < reducerValue) table[u].offset = 0;
522
11.5M
        else table[u].offset -= reducerValue;
523
330M
    }
524
57.4k
}
525
526
size_t ZSTD_ldm_generateSequences(
527
        ldmState_t* ldmState, RawSeqStore_t* sequences,
528
        ldmParams_t const* params, void const* src, size_t srcSize)
529
2.32M
{
530
2.32M
    U32 const maxDist = 1U << params->windowLog;
531
2.32M
    BYTE const* const istart = (BYTE const*)src;
532
2.32M
    BYTE const* const iend = istart + srcSize;
533
2.32M
    size_t const kMaxChunkSize = 1 << 20;
534
2.32M
    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
535
2.32M
    size_t chunk;
536
2.32M
    size_t leftoverSize = 0;
537
538
2.32M
    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
539
    /* Check that ZSTD_window_update() has been called for this chunk prior
540
     * to passing it to this function.
541
     */
542
2.32M
    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
543
    /* The input could be very large (in zstdmt), so it must be broken up into
544
     * chunks to enforce the maximum distance and handle overflow correction.
545
     */
546
2.32M
    assert(sequences->pos <= sequences->size);
547
2.32M
    assert(sequences->size <= sequences->capacity);
548
4.40M
    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
549
2.08M
        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
550
2.08M
        size_t const remaining = (size_t)(iend - chunkStart);
551
2.08M
        BYTE const *const chunkEnd =
552
2.08M
            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
553
2.08M
        size_t const chunkSize = chunkEnd - chunkStart;
554
2.08M
        size_t newLeftoverSize;
555
2.08M
        size_t const prevSize = sequences->size;
556
557
2.08M
        assert(chunkStart < iend);
558
        /* 1. Perform overflow correction if necessary. */
559
2.08M
        if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
560
57.4k
            U32 const ldmHSize = 1U << params->hashLog;
561
57.4k
            U32 const correction = ZSTD_window_correctOverflow(
562
57.4k
                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
563
57.4k
            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
564
            /* invalidate dictionaries on overflow correction */
565
57.4k
            ldmState->loadedDictEnd = 0;
566
57.4k
        }
567
        /* 2. We enforce the maximum offset allowed.
568
         *
569
         * kMaxChunkSize should be small enough that we don't lose too much of
570
         * the window through early invalidation.
571
         * TODO: * Test the chunk size.
572
         *       * Try invalidation after the sequence generation and test the
573
         *         offset against maxDist directly.
574
         *
575
         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
576
         * that any offset used is valid at the END of the sequence, since it may
577
         * be split into two sequences. This condition holds when using
578
         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
579
         * against maxDist directly, we'll have to carefully handle that case.
580
         */
581
2.08M
        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
582
        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
583
2.08M
        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
584
2.08M
            ldmState, sequences, params, chunkStart, chunkSize);
585
2.08M
        if (ZSTD_isError(newLeftoverSize))
586
0
            return newLeftoverSize;
587
        /* 4. We add the leftover literals from previous iterations to the first
588
         *    newly generated sequence, or add the `newLeftoverSize` if none are
589
         *    generated.
590
         */
591
        /* Prepend the leftover literals from the last call */
592
2.08M
        if (prevSize < sequences->size) {
593
394k
            sequences->seq[prevSize].litLength += (U32)leftoverSize;
594
394k
            leftoverSize = newLeftoverSize;
595
1.68M
        } else {
596
1.68M
            assert(newLeftoverSize == chunkSize);
597
1.68M
            leftoverSize += chunkSize;
598
1.68M
        }
599
2.08M
    }
600
2.32M
    return 0;
601
2.32M
}
602
603
void
604
ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
605
34.7M
{
606
34.7M
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
607
51.7k
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
608
51.7k
        if (srcSize <= seq->litLength) {
609
            /* Skip past srcSize literals */
610
49.6k
            seq->litLength -= (U32)srcSize;
611
49.6k
            return;
612
49.6k
        }
613
2.07k
        srcSize -= seq->litLength;
614
2.07k
        seq->litLength = 0;
615
2.07k
        if (srcSize < seq->matchLength) {
616
            /* Skip past the first srcSize of the match */
617
2.05k
            seq->matchLength -= (U32)srcSize;
618
2.05k
            if (seq->matchLength < minMatch) {
619
                /* The match is too short, omit it */
620
480
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
621
436
                    seq[1].litLength += seq[0].matchLength;
622
436
                }
623
480
                rawSeqStore->pos++;
624
480
            }
625
2.05k
            return;
626
2.05k
        }
627
16
        srcSize -= seq->matchLength;
628
16
        seq->matchLength = 0;
629
16
        rawSeqStore->pos++;
630
16
    }
631
34.7M
}
632
633
/**
634
 * If the sequence length is longer than remaining then the sequence is split
635
 * between this block and the next.
636
 *
637
 * Returns the current sequence to handle, or if the rest of the block should
638
 * be literals, it returns a sequence with offset == 0.
639
 */
640
static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore,
641
                                 U32 const remaining, U32 const minMatch)
642
10.9M
{
643
10.9M
    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
644
10.9M
    assert(sequence.offset > 0);
645
    /* Likely: No partial sequence */
646
10.9M
    if (remaining >= sequence.litLength + sequence.matchLength) {
647
10.8M
        rawSeqStore->pos++;
648
10.8M
        return sequence;
649
10.8M
    }
650
    /* Cut the sequence short (offset == 0 ==> rest is literals). */
651
51.7k
    if (remaining <= sequence.litLength) {
652
49.6k
        sequence.offset = 0;
653
49.6k
    } else if (remaining < sequence.litLength + sequence.matchLength) {
654
2.05k
        sequence.matchLength = remaining - sequence.litLength;
655
2.05k
        if (sequence.matchLength < minMatch) {
656
463
            sequence.offset = 0;
657
463
        }
658
2.05k
    }
659
    /* Skip past `remaining` bytes for the future sequences. */
660
51.7k
    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
661
51.7k
    return sequence;
662
10.9M
}
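
A self-contained sketch of the boundary cases above (simplified types, zstd-independent): if the block boundary falls inside the literals, the caller gets offset == 0 (the rest of the block is literals); if it cuts the match, the match is shortened and dropped entirely when it falls below minMatch.

    #include <stdio.h>

    typedef struct { unsigned litLength, matchLength, offset; } RawSeq;

    static RawSeq splitAt(RawSeq s, unsigned remaining, unsigned minMatch)
    {
        if (remaining >= s.litLength + s.matchLength) return s;  /* fits whole */
        if (remaining <= s.litLength) {
            s.offset = 0;                         /* rest of block is literals */
        } else {
            s.matchLength = remaining - s.litLength;   /* cut the match short */
            if (s.matchLength < minMatch) s.offset = 0;
        }
        return s;
    }

    int main(void)
    {
        RawSeq const s = { 40, 96, 800 };
        RawSeq const a = splitAt(s, 30, 3);   /* inside literals -> offset 0 */
        RawSeq const b = splitAt(s, 100, 3);  /* cuts match down to 60 bytes */
        printf("a: lit=%u match=%u off=%u\n", a.litLength, a.matchLength, a.offset);
        printf("b: lit=%u match=%u off=%u\n", b.litLength, b.matchLength, b.offset);
        return 0;
    }
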
663
664
1.50M
void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) {
665
1.50M
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
666
2.14M
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
667
657k
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
668
657k
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
669
639k
            currPos -= currSeq.litLength + currSeq.matchLength;
670
639k
            rawSeqStore->pos++;
671
639k
        } else {
672
17.8k
            rawSeqStore->posInSequence = currPos;
673
17.8k
            break;
674
17.8k
        }
675
657k
    }
676
1.50M
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
677
1.48M
        rawSeqStore->posInSequence = 0;
678
1.48M
    }
679
1.50M
}
680
681
size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore,
682
    ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
683
    ZSTD_ParamSwitch_e useRowMatchFinder,
684
    void const* src, size_t srcSize)
685
917k
{
686
917k
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
687
917k
    unsigned const minMatch = cParams->minMatch;
688
917k
    ZSTD_BlockCompressor_f const blockCompressor =
689
917k
        ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
690
    /* Input bounds */
691
917k
    BYTE const* const istart = (BYTE const*)src;
692
917k
    BYTE const* const iend = istart + srcSize;
693
    /* Input positions */
694
917k
    BYTE const* ip = istart;
695
696
917k
    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
697
    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
698
917k
    if (cParams->strategy >= ZSTD_btopt) {
699
144k
        size_t lastLLSize;
700
144k
        ms->ldmSeqStore = rawSeqStore;
701
144k
        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
702
144k
        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
703
144k
        return lastLLSize;
704
144k
    }
705
706
773k
    assert(rawSeqStore->pos <= rawSeqStore->size);
707
773k
    assert(rawSeqStore->size <= rawSeqStore->capacity);
708
    /* Loop through each sequence and apply the block compressor to the literals */
709
11.6M
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
710
        /* maybeSplitSequence updates rawSeqStore->pos */
711
10.9M
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
712
10.9M
                                                   (U32)(iend - ip), minMatch);
713
        /* End signal */
714
10.9M
        if (sequence.offset == 0)
715
50.1k
            break;
716
717
10.8M
        assert(ip + sequence.litLength + sequence.matchLength <= iend);
718
719
        /* Fill tables for block compressor */
720
10.8M
        ZSTD_ldm_limitTableUpdate(ms, ip);
721
10.8M
        ZSTD_ldm_fillFastTables(ms, ip);
722
        /* Run the block compressor */
723
10.8M
        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
724
10.8M
        {
725
10.8M
            int i;
726
10.8M
            size_t const newLitLength =
727
10.8M
                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
728
10.8M
            ip += sequence.litLength;
729
            /* Update the repcodes */
730
32.6M
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
731
21.7M
                rep[i] = rep[i-1];
732
10.8M
            rep[0] = sequence.offset;
733
            /* Store the sequence */
734
10.8M
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
735
10.8M
                          OFFSET_TO_OFFBASE(sequence.offset),
736
0
                          sequence.matchLength);
737
0
            ip += sequence.matchLength;
738
10.8M
        }
739
10.8M
    }
740
    /* Fill the tables for the block compressor */
741
773k
    ZSTD_ldm_limitTableUpdate(ms, ip);
742
773k
    ZSTD_ldm_fillFastTables(ms, ip);
743
    /* Compress the last literals */
744
773k
    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
745
773k
}
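
The repcode update in the loop above amounts to a three-entry history shift, sketched here with assumed values (ZSTD_REP_NUM is 3 in zstd):

    #include <stdio.h>

    #define REP_NUM 3   /* ZSTD_REP_NUM */

    int main(void)
    {
        unsigned rep[REP_NUM] = { 1, 4, 8 };   /* assumed starting repcodes */
        unsigned const newOffset = 800;        /* offset of the LDM sequence */
        int i;
        for (i = REP_NUM - 1; i > 0; i--)      /* shift the history down...  */
            rep[i] = rep[i-1];
        rep[0] = newOffset;                    /* ...and install the new one */
        printf("rep = { %u, %u, %u }\n", rep[0], rep[1], rep[2]);  /* 800, 1, 4 */
        return 0;
    }
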