Coverage Report

Created: 2026-03-12 06:31

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/lz4/lib/lz4hc.c
Line
Count
Source
1
/*
2
    LZ4 HC - High Compression Mode of LZ4
3
    Copyright (c) Yann Collet. All rights reserved.
4
5
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
7
    Redistribution and use in source and binary forms, with or without
8
    modification, are permitted provided that the following conditions are
9
    met:
10
11
    * Redistributions of source code must retain the above copyright
12
    notice, this list of conditions and the following disclaimer.
13
    * Redistributions in binary form must reproduce the above
14
    copyright notice, this list of conditions and the following disclaimer
15
    in the documentation and/or other materials provided with the
16
    distribution.
17
18
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
    You can contact the author at :
31
       - LZ4 source repository : https://github.com/lz4/lz4
32
       - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
33
*/
34
/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
35
36
37
/* *************************************
38
*  Tuning Parameter
39
***************************************/
40
41
/*! HEAPMODE :
42
 *  Select how stateless HC compression functions like `LZ4_compress_HC()`
43
 *  allocate memory for their workspace:
44
 *  in stack (0:fastest), or in heap (1:default, requires malloc()).
45
 *  Since workspace is rather large, heap mode is recommended.
46
**/
47
#ifndef LZ4HC_HEAPMODE
48
#  define LZ4HC_HEAPMODE 1
49
#endif
50
51
52
/*===    Dependency    ===*/
53
#define LZ4_HC_STATIC_LINKING_ONLY
54
#include "lz4hc.h"
55
#include <limits.h>
56
57
58
/*===   Shared lz4.c code   ===*/
59
#ifndef LZ4_SRC_INCLUDED
60
# if defined(__GNUC__)
61
#  pragma GCC diagnostic ignored "-Wunused-function"
62
# endif
63
# if defined (__clang__)
64
#  pragma clang diagnostic ignored "-Wunused-function"
65
# endif
66
# define LZ4_COMMONDEFS_ONLY
67
# include "lz4.c"   /* LZ4_count, constants, mem */
68
#endif
69
70
71
/*===   Enums   ===*/
72
typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
73
74
75
/*===   Constants   ===*/
76
1.63M
#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
77
6.97M
#define LZ4_OPT_NUM   (1<<12)
78
79
80
/*===   Macros   ===*/
81
161M
#define MIN(a,b)   ( (a) < (b) ? (a) : (b) )
82
344M
#define MAX(a,b)   ( (a) > (b) ? (a) : (b) )
83
84
85
/*===   Levels definition   ===*/
86
typedef enum { lz4mid, lz4hc, lz4opt } lz4hc_strat_e;
87
typedef struct {
88
    lz4hc_strat_e strat;
89
    int nbSearches;
90
    U32 targetLength;
91
} cParams_t;
92
static const cParams_t k_clTable[LZ4HC_CLEVEL_MAX+1] = {
93
    { lz4mid,    2, 16 },  /* 0, unused */
94
    { lz4mid,    2, 16 },  /* 1, unused */
95
    { lz4mid,    2, 16 },  /* 2 */
96
    { lz4hc,     4, 16 },  /* 3 */
97
    { lz4hc,     8, 16 },  /* 4 */
98
    { lz4hc,    16, 16 },  /* 5 */
99
    { lz4hc,    32, 16 },  /* 6 */
100
    { lz4hc,    64, 16 },  /* 7 */
101
    { lz4hc,   128, 16 },  /* 8 */
102
    { lz4hc,   256, 16 },  /* 9 */
103
    { lz4opt,   96, 64 },  /*10==LZ4HC_CLEVEL_OPT_MIN*/
104
    { lz4opt,  512,128 },  /*11 */
105
    { lz4opt,16384,LZ4_OPT_NUM },  /* 12==LZ4HC_CLEVEL_MAX */
106
};
107
108
static cParams_t LZ4HC_getCLevelParams(int cLevel)
109
14.5k
{
110
    /* note : clevel convention is a bit different from lz4frame,
111
     * possibly something worth revisiting for consistency */
112
14.5k
    if (cLevel < 1)
113
0
        cLevel = LZ4HC_CLEVEL_DEFAULT;
114
14.5k
    cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
115
14.5k
    return k_clTable[cLevel];
116
14.5k
}
117
118
119
/*===   Hashing   ===*/
120
0
#define LZ4HC_HASHSIZE 4
121
620M
#define HASH_FUNCTION(i)      (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
122
620M
static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
123
124
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
125
/* lie to the compiler about data alignment; use with caution */
126
static U64 LZ4_read64(const void* memPtr) { return *(const U64*) memPtr; }
127
128
#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
129
/* __pack instructions are safer, but compiler specific */
130
LZ4_PACK(typedef struct { U64 u64; }) LZ4_unalign64;
131
30.7M
static U64 LZ4_read64(const void* ptr) { return ((const LZ4_unalign64*)ptr)->u64; }
132
133
#else  /* safe and portable access using memcpy() */
134
static U64 LZ4_read64(const void* memPtr)
135
{
136
    U64 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
137
}
138
139
#endif /* LZ4_FORCE_MEMORY_ACCESS */
140
141
2.47k
#define LZ4MID_HASHSIZE 8
142
54.8M
#define LZ4MID_HASHLOG (LZ4HC_HASH_LOG-1)
143
2.47k
#define LZ4MID_HASHTABLESIZE (1 << LZ4MID_HASHLOG)
144
145
24.1M
static U32 LZ4MID_hash4(U32 v) { return (v * 2654435761U) >> (32-LZ4MID_HASHLOG); }
146
24.1M
static U32 LZ4MID_hash4Ptr(const void* ptr) { return LZ4MID_hash4(LZ4_read32(ptr)); }
147
/* note: hash7 hashes the lower 56-bits.
148
 * It presumes input was read using little endian.*/
149
30.7M
static U32 LZ4MID_hash7(U64 v) { return (U32)(((v  << (64-56)) * 58295818150454627ULL) >> (64-LZ4MID_HASHLOG)) ; }
150
static U64 LZ4_readLE64(const void* memPtr);
151
30.7M
static U32 LZ4MID_hash8Ptr(const void* ptr) { return LZ4MID_hash7(LZ4_readLE64(ptr)); }
152
153
static U64 LZ4_readLE64(const void* memPtr)
154
30.7M
{
155
30.7M
    if (LZ4_isLittleEndian()) {
156
30.7M
        return LZ4_read64(memPtr);
157
30.7M
    } else {
158
0
        const BYTE* p = (const BYTE*)memPtr;
159
        /* note: relies on the compiler to simplify this expression */
160
0
        return (U64)p[0] | ((U64)p[1]<<8) | ((U64)p[2]<<16) | ((U64)p[3]<<24)
161
0
            | ((U64)p[4]<<32) | ((U64)p[5]<<40) | ((U64)p[6]<<48) | ((U64)p[7]<<56);
162
0
    }
163
30.7M
}
164
165
166
/*===   Count match length   ===*/
167
LZ4_FORCE_INLINE
168
unsigned LZ4HC_NbCommonBytes32(U32 val)
169
83.7M
{
170
83.7M
    assert(val != 0);
171
83.7M
    if (LZ4_isLittleEndian()) {
172
#     if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
173
        unsigned long r;
174
        _BitScanReverse(&r, val);
175
        return (unsigned)((31 - r) >> 3);
176
#     elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
177
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
178
                                        !defined(LZ4_FORCE_SW_BITCOUNT)
179
        return (unsigned)__builtin_clz(val) >> 3;
180
#     else
181
        val >>= 8;
182
        val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
183
              (val + 0x00FF0000)) >> 24;
184
        return (unsigned)val ^ 3;
185
#     endif
186
83.7M
    } else {
187
#     if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
188
        unsigned long r;
189
        _BitScanForward(&r, val);
190
        return (unsigned)(r >> 3);
191
#     elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
192
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
193
                                        !defined(LZ4_FORCE_SW_BITCOUNT)
194
        return (unsigned)__builtin_ctz(val) >> 3;
195
#     else
196
        const U32 m = 0x01010101;
197
        return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
198
#     endif
199
0
    }
200
83.7M
}
201
202
/** LZ4HC_countBack() :
203
 * @return : negative value, nb of common bytes before ip/match */
204
LZ4_FORCE_INLINE
205
int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
206
                    const BYTE* const iMin, const BYTE* const mMin)
207
92.1M
{
208
92.1M
    int back = 0;
209
92.1M
    int const min = (int)MAX(iMin - ip, mMin - match);
210
92.1M
    assert(min <= 0);
211
92.1M
    assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
212
92.1M
    assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
213
214
218M
    while ((back - min) > 3) {
215
209M
        U32 const v = LZ4_read32(ip + back - 4) ^ LZ4_read32(match + back - 4);
216
209M
        if (v) {
217
83.7M
            return (back - (int)LZ4HC_NbCommonBytes32(v));
218
126M
        } else back -= 4; /* 4-byte step */
219
209M
    }
220
    /* check remainder if any */
221
14.6M
    while ( (back > min)
222
13.9M
         && (ip[back-1] == match[back-1]) )
223
6.20M
            back--;
224
8.45M
    return back;
225
92.1M
}
226
227
/*===   Chain table updates   ===*/
228
3.99G
#define DELTANEXTU16(table, pos) table[(U16)(pos)]   /* faster */
229
/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
230
12.7M
#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor
231
232
233
/**************************************
234
*  Init
235
**************************************/
236
static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
237
0
{
238
0
    MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
239
0
    MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
240
0
}
241
242
static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
243
11.6k
{
244
11.6k
    size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart);
245
11.6k
    size_t newStartingOffset = bufferSize + hc4->dictLimit;
246
11.6k
    DEBUGLOG(5, "LZ4HC_init_internal");
247
11.6k
    assert(newStartingOffset >= bufferSize);  /* check overflow */
248
11.6k
    if (newStartingOffset > 1 GB) {
249
0
        LZ4HC_clearTables(hc4);
250
0
        newStartingOffset = 0;
251
0
    }
252
11.6k
    newStartingOffset += 64 KB;
253
11.6k
    hc4->nextToUpdate = (U32)newStartingOffset;
254
11.6k
    hc4->prefixStart = start;
255
11.6k
    hc4->end = start;
256
11.6k
    hc4->dictStart = start;
257
11.6k
    hc4->dictLimit = (U32)newStartingOffset;
258
11.6k
    hc4->lowLimit = (U32)newStartingOffset;
259
11.6k
}
260
261
262
/**************************************
263
*  Encode
264
**************************************/
265
#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2)
266
# define RAWLOG(...) fprintf(stderr, __VA_ARGS__)
267
void LZ4HC_hexOut(const void* src, size_t len)
268
{
269
    const BYTE* p = (const BYTE*)src;
270
    size_t n;
271
    for (n=0; n<len; n++) {
272
        RAWLOG("%02X ", p[n]);
273
    }
274
    RAWLOG(" \n");
275
}
276
277
# define HEX_CMP(_lev, _ptr, _ref, _len) \
278
    if (LZ4_DEBUG >= _lev) {            \
279
        RAWLOG("match bytes: ");        \
280
        LZ4HC_hexOut(_ptr, _len);       \
281
        RAWLOG("ref bytes: ");          \
282
        LZ4HC_hexOut(_ref, _len);       \
283
    }
284
285
#else
286
# define HEX_CMP(l,p,r,_l)
287
#endif
288
289
/* LZ4HC_encodeSequence() :
290
 * @return : 0 if ok,
291
 *           1 if buffer issue detected */
292
LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
293
    const BYTE** _ip,
294
    BYTE** _op,
295
    const BYTE** _anchor,
296
    int matchLength,
297
    int offset,
298
    limitedOutput_directive limit,
299
    BYTE* oend)
300
12.7M
{
301
38.1M
#define ip      (*_ip)
302
105M
#define op      (*_op)
303
38.1M
#define anchor  (*_anchor)
304
305
12.7M
    BYTE* const token = op++;
306
307
#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
308
    static const BYTE* start = NULL;
309
    static U32 totalCost = 0;
310
    U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start); /* only works for single segment */
311
    U32 const ll = (U32)(ip - anchor);
312
    U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
313
    U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
314
    U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
315
    if (start==NULL) start = anchor;  /* only works for single segment */
316
    DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5i, cost:%4u + %5u",
317
                pos,
318
                (U32)(ip - anchor), matchLength, offset,
319
                cost, totalCost);
320
# if 1 /* only works on single segment data */
321
    HEX_CMP(7, ip, ip-offset, matchLength);
322
# endif
323
    totalCost += cost;
324
#endif
325
326
    /* Encode Literal length */
327
12.7M
    {   size_t litLen = (size_t)(ip - anchor);
328
12.7M
        LZ4_STATIC_ASSERT(notLimited == 0);
329
        /* Check output limit */
330
12.7M
        if (limit && ((op + (litLen / 255) + litLen + (2 + 1 + LASTLITERALS)) > oend)) {
331
162
            DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
332
162
                    (int)litLen, (int)(oend - op));
333
162
            return 1;
334
162
        }
335
12.7M
        if (litLen >= RUN_MASK) {
336
896k
            size_t len = litLen - RUN_MASK;
337
896k
            *token = (RUN_MASK << ML_BITS);
338
1.26M
            for(; len >= 255 ; len -= 255) *op++ = 255;
339
896k
            *op++ = (BYTE)len;
340
11.8M
        } else {
341
11.8M
            *token = (BYTE)(litLen << ML_BITS);
342
11.8M
        }
343
344
        /* Copy Literals */
345
12.7M
        LZ4_wildCopy8(op, anchor, op + litLen);
346
12.7M
        op += litLen;
347
12.7M
    }
348
349
    /* Encode Offset */
350
12.7M
    assert(offset <= LZ4_DISTANCE_MAX );
351
12.7M
    assert(offset > 0);
352
12.7M
    LZ4_writeLE16(op, (U16)(offset)); op += 2;
353
354
    /* Encode MatchLength */
355
12.7M
    assert(matchLength >= MINMATCH);
356
12.7M
    {   size_t mlCode = (size_t)matchLength - MINMATCH;
357
12.7M
        if (limit && (op + (mlCode / 255) + (1 + LASTLITERALS) > oend)) {
358
50
            DEBUGLOG(6, "Not enough room to write match length");
359
50
            return 1;   /* Check output limit */
360
50
        }
361
12.7M
        if (mlCode >= ML_MASK) {
362
1.26M
            *token += ML_MASK;
363
1.26M
            mlCode -= ML_MASK;
364
1.75M
            for(; mlCode >= 510 ; mlCode -= 510) { *op++ = 255; *op++ = 255; }
365
1.26M
            if (mlCode >= 255) { mlCode -= 255; *op++ = 255; }
366
1.26M
            *op++ = (BYTE)mlCode;
367
11.4M
        } else {
368
11.4M
            *token += (BYTE)(mlCode);
369
11.4M
    }   }
370
371
    /* Prepare next loop */
372
12.7M
    ip += matchLength;
373
12.7M
    anchor = ip;
374
375
12.7M
    return 0;
376
377
12.7M
#undef ip
378
12.7M
#undef op
379
12.7M
#undef anchor
380
12.7M
}
381
382
383
typedef struct {
384
    int off;
385
    int len;
386
    int back;  /* negative value */
387
} LZ4HC_match_t;
388
389
static LZ4HC_match_t LZ4HC_searchExtDict(const BYTE* ip, U32 ipIndex,
390
        const BYTE* const iLowLimit, const BYTE* const iHighLimit,
391
        const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex,
392
        int currentBestML, int nbAttempts)
393
0
{
394
0
    size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
395
0
    U32 lDictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
396
0
    U32 matchIndex = lDictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
397
0
    int offset = 0, sBack = 0;
398
0
    assert(lDictEndIndex <= 1 GB);
399
0
    if (lDictMatchIndex>0)
400
0
        DEBUGLOG(7, "lDictEndIndex = %zu, lDictMatchIndex = %u", lDictEndIndex, lDictMatchIndex);
401
0
    while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
402
0
        const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + lDictMatchIndex;
403
404
0
        if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
405
0
            int mlt;
406
0
            int back = 0;
407
0
            const BYTE* vLimit = ip + (lDictEndIndex - lDictMatchIndex);
408
0
            if (vLimit > iHighLimit) vLimit = iHighLimit;
409
0
            mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
410
0
            back = (ip > iLowLimit) ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
411
0
            mlt -= back;
412
0
            if (mlt > currentBestML) {
413
0
                currentBestML = mlt;
414
0
                offset = (int)(ipIndex - matchIndex);
415
0
                sBack = back;
416
0
                DEBUGLOG(7, "found match of length %i within extDictCtx", currentBestML);
417
0
        }   }
418
419
0
        {   U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, lDictMatchIndex);
420
0
            lDictMatchIndex -= nextOffset;
421
0
            matchIndex -= nextOffset;
422
0
    }   }
423
424
0
    {   LZ4HC_match_t md;
425
0
        md.len = currentBestML;
426
0
        md.off = offset;
427
0
        md.back = sBack;
428
0
        return md;
429
0
    }
430
0
}
431
432
typedef LZ4HC_match_t (*LZ4MID_searchIntoDict_f)(const BYTE* ip, U32 ipIndex,
433
        const BYTE* const iHighLimit,
434
        const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex);
435
436
static LZ4HC_match_t LZ4MID_searchHCDict(const BYTE* ip, U32 ipIndex,
437
        const BYTE* const iHighLimit,
438
        const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex)
439
0
{
440
0
    return LZ4HC_searchExtDict(ip,ipIndex,
441
0
                            ip, iHighLimit,
442
0
                            dictCtx, gDictEndIndex,
443
0
                            MINMATCH-1, 2);
444
0
}
445
446
static LZ4HC_match_t LZ4MID_searchExtDict(const BYTE* ip, U32 ipIndex,
447
        const BYTE* const iHighLimit,
448
        const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex)
449
0
{
450
0
    size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
451
0
    const U32* const hash4Table = dictCtx->hashTable;
452
0
    const U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
453
0
    DEBUGLOG(7, "LZ4MID_searchExtDict (ipIdx=%u)", ipIndex);
454
455
    /* search long match first */
456
0
    {   U32 l8DictMatchIndex = hash8Table[LZ4MID_hash8Ptr(ip)];
457
0
        U32 m8Index = l8DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
458
0
        assert(lDictEndIndex <= 1 GB);
459
0
        if (ipIndex - m8Index <= LZ4_DISTANCE_MAX) {
460
0
            const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l8DictMatchIndex;
461
0
            const size_t safeLen = MIN(lDictEndIndex - l8DictMatchIndex, (size_t)(iHighLimit - ip));
462
0
            int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
463
0
            if (mlt >= MINMATCH) {
464
0
                LZ4HC_match_t md;
465
0
                DEBUGLOG(7, "Found long ExtDict match of len=%u", mlt);
466
0
                md.len = mlt;
467
0
                md.off = (int)(ipIndex - m8Index);
468
0
                md.back = 0;
469
0
                return md;
470
0
            }
471
0
        }
472
0
    }
473
474
    /* search for short match second */
475
0
    {   U32 l4DictMatchIndex = hash4Table[LZ4MID_hash4Ptr(ip)];
476
0
        U32 m4Index = l4DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
477
0
        if (ipIndex - m4Index <= LZ4_DISTANCE_MAX) {
478
0
            const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l4DictMatchIndex;
479
0
            const size_t safeLen = MIN(lDictEndIndex - l4DictMatchIndex, (size_t)(iHighLimit - ip));
480
0
            int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
481
0
            if (mlt >= MINMATCH) {
482
0
                LZ4HC_match_t md;
483
0
                DEBUGLOG(7, "Found short ExtDict match of len=%u", mlt);
484
0
                md.len = mlt;
485
0
                md.off = (int)(ipIndex - m4Index);
486
0
                md.back = 0;
487
0
                return md;
488
0
            }
489
0
        }
490
0
    }
491
492
    /* nothing found */
493
0
    {   LZ4HC_match_t const md = {0, 0, 0 };
494
0
        return md;
495
0
    }
496
0
}
497
498
/**************************************
499
*  Mid Compression (level 2)
500
**************************************/
501
502
LZ4_FORCE_INLINE void
503
LZ4MID_addPosition(U32* hTable, U32 hValue, U32 index)
504
53.2M
{
505
53.2M
    hTable[hValue] = index;
506
53.2M
}
507
508
11.0M
#define ADDPOS8(_p, _idx) LZ4MID_addPosition(hash8Table, LZ4MID_hash8Ptr(_p), _idx)
509
6.63M
#define ADDPOS4(_p, _idx) LZ4MID_addPosition(hash4Table, LZ4MID_hash4Ptr(_p), _idx)
510
511
/* Fill hash tables with references into dictionary.
512
 * The resulting table is only exploitable by LZ4MID (level 2) */
513
static void
514
LZ4MID_fillHTable (LZ4HC_CCtx_internal* cctx, const void* dict, size_t size)
515
0
{
516
0
    U32* const hash4Table = cctx->hashTable;
517
0
    U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
518
0
    const BYTE* const prefixPtr = (const BYTE*)dict;
519
0
    U32 const prefixIdx = cctx->dictLimit;
520
0
    U32 const target = prefixIdx + (U32)size - LZ4MID_HASHSIZE;
521
0
    U32 idx = cctx->nextToUpdate;
522
0
    assert(dict == cctx->prefixStart);
523
0
    DEBUGLOG(4, "LZ4MID_fillHTable (size:%zu)", size);
524
0
    if (size <= LZ4MID_HASHSIZE)
525
0
        return;
526
527
0
    for (; idx < target; idx += 3) {
528
0
        ADDPOS4(prefixPtr+idx-prefixIdx, idx);
529
0
        ADDPOS8(prefixPtr+idx+1-prefixIdx, idx+1);
530
0
    }
531
532
0
    idx = (size > 32 KB + LZ4MID_HASHSIZE) ? target - 32 KB : cctx->nextToUpdate;
533
0
    for (; idx < target; idx += 1) {
534
0
        ADDPOS8(prefixPtr+idx-prefixIdx, idx);
535
0
    }
536
537
0
    cctx->nextToUpdate = target;
538
0
}
539
540
static LZ4MID_searchIntoDict_f select_searchDict_function(const LZ4HC_CCtx_internal* dictCtx)
541
0
{
542
0
    if (dictCtx == NULL) return NULL;
543
0
    if (LZ4HC_getCLevelParams(dictCtx->compressionLevel).strat == lz4mid)
544
0
        return LZ4MID_searchExtDict;
545
0
    return LZ4MID_searchHCDict;
546
0
}
547
548
/* preconditions:
549
 * - *srcSizePtr within [1, LZ4_MAX_INPUT_SIZE]
550
 * - src is valid
551
 * - maxOutputSize >= 1
552
 * - dst is valid
553
 */
554
static int LZ4MID_compress (
555
    LZ4HC_CCtx_internal* const ctx,
556
    const char* const src,
557
    char* const dst,
558
    int* srcSizePtr,
559
    int const maxOutputSize,
560
    const limitedOutput_directive limit,
561
    const dictCtx_directive dict
562
    )
563
2.47k
{
564
2.47k
    U32* const hash4Table = ctx->hashTable;
565
2.47k
    U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
566
2.47k
    const BYTE* ip = (const BYTE*)src;
567
2.47k
    const BYTE* anchor = ip;
568
2.47k
    const BYTE* const iend = ip + *srcSizePtr;
569
2.47k
    const BYTE* const mflimit = iend - MFLIMIT;
570
2.47k
    const BYTE* const matchlimit = (iend - LASTLITERALS);
571
2.47k
    const BYTE* const ilimit = (iend - LZ4MID_HASHSIZE);
572
2.47k
    BYTE* op = (BYTE*)dst;
573
2.47k
    BYTE* oend = op + maxOutputSize;
574
575
2.47k
    const BYTE* const prefixPtr = ctx->prefixStart;
576
2.47k
    const U32 prefixIdx = ctx->dictLimit;
577
2.47k
    const U32 ilimitIdx = (U32)(ilimit - prefixPtr) + prefixIdx;
578
2.47k
    const BYTE* const dictStart = ctx->dictStart;
579
2.47k
    const U32 dictIdx = ctx->lowLimit;
580
2.47k
    const U32 gDictEndIndex = ctx->lowLimit;
581
2.47k
    const LZ4MID_searchIntoDict_f searchIntoDict = (dict == usingDictCtxHc) ? select_searchDict_function(ctx->dictCtx) : NULL;
582
2.47k
    unsigned matchLength;
583
2.47k
    unsigned matchDistance;
584
585
2.47k
    DEBUGLOG(5, "LZ4MID_compress (%i bytes)", *srcSizePtr);
586
587
    /* preconditions verifications */
588
2.47k
    if (dict == usingDictCtxHc) DEBUGLOG(5, "usingDictCtxHc");
589
2.47k
    assert(*srcSizePtr > 0);
590
2.47k
    assert(*srcSizePtr <= LZ4_MAX_INPUT_SIZE);
591
2.47k
    assert(src != NULL);
592
2.47k
    assert(maxOutputSize >= 1);
593
2.47k
    assert(dst != NULL);
594
595
2.47k
    if (limit == fillOutput) oend -= LASTLITERALS;  /* Hack for support LZ4 format restriction */
596
2.47k
    if (*srcSizePtr < LZ4_minLength)
597
27
        goto _lz4mid_last_literals;  /* Input too small, no compression (all literals) */
598
599
    /* main loop */
600
17.9M
    while (ip <= mflimit) {
601
17.9M
        const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
602
        /* search long match */
603
17.9M
        {   U32 const h8 = LZ4MID_hash8Ptr(ip);
604
17.9M
            U32 const pos8 = hash8Table[h8];
605
17.9M
            assert(h8 < LZ4MID_HASHTABLESIZE);
606
17.9M
            assert(pos8 < ipIndex);
607
17.9M
            LZ4MID_addPosition(hash8Table, h8, ipIndex);
608
17.9M
            if (ipIndex - pos8 <= LZ4_DISTANCE_MAX) {
609
                /* match candidate found */
610
12.5M
                if (pos8 >= prefixIdx) {
611
12.5M
                    const BYTE* const matchPtr = prefixPtr + pos8 - prefixIdx;
612
12.5M
                    assert(matchPtr < ip);
613
12.5M
                    matchLength = LZ4_count(ip, matchPtr, matchlimit);
614
12.5M
                    if (matchLength >= MINMATCH) {
615
521k
                        DEBUGLOG(7, "found long match at pos %u (len=%u)", pos8, matchLength);
616
521k
                        matchDistance = ipIndex - pos8;
617
521k
                        goto _lz4mid_encode_sequence;
618
521k
                    }
619
12.5M
                } else {
620
0
                    if (pos8 >= dictIdx) {
621
                        /* extDict match candidate */
622
0
                        const BYTE* const matchPtr = dictStart + (pos8 - dictIdx);
623
0
                        const size_t safeLen = MIN(prefixIdx - pos8, (size_t)(matchlimit - ip));
624
0
                        matchLength = LZ4_count(ip, matchPtr, ip + safeLen);
625
0
                        if (matchLength >= MINMATCH) {
626
0
                            DEBUGLOG(7, "found long match at ExtDict pos %u (len=%u)", pos8, matchLength);
627
0
                            matchDistance = ipIndex - pos8;
628
0
                            goto _lz4mid_encode_sequence;
629
0
                        }
630
0
                    }
631
0
                }
632
12.5M
        }   }
633
        /* search short match */
634
17.4M
        {   U32 const h4 = LZ4MID_hash4Ptr(ip);
635
17.4M
            U32 const pos4 = hash4Table[h4];
636
17.4M
            assert(h4 < LZ4MID_HASHTABLESIZE);
637
17.4M
            assert(pos4 < ipIndex);
638
17.4M
            LZ4MID_addPosition(hash4Table, h4, ipIndex);
639
17.4M
            if (ipIndex - pos4 <= LZ4_DISTANCE_MAX) {
640
                /* match candidate found */
641
12.1M
                if (pos4 >= prefixIdx) {
642
                /* only search within prefix */
643
12.1M
                    const BYTE* const matchPtr = prefixPtr + (pos4 - prefixIdx);
644
12.1M
                    assert(matchPtr < ip);
645
12.1M
                    assert(matchPtr >= prefixPtr);
646
12.1M
                    matchLength = LZ4_count(ip, matchPtr, matchlimit);
647
12.1M
                    if (matchLength >= MINMATCH) {
648
                        /* short match found, let's just check ip+1 for longer */
649
1.69M
                        U32 const h8 = LZ4MID_hash8Ptr(ip+1);
650
1.69M
                        U32 const pos8 = hash8Table[h8];
651
1.69M
                        U32 const m2Distance = ipIndex + 1 - pos8;
652
1.69M
                        matchDistance = ipIndex - pos4;
653
1.69M
                        if ( m2Distance <= LZ4_DISTANCE_MAX
654
1.25M
                        && pos8 >= prefixIdx /* only search within prefix */
655
1.25M
                        && likely(ip < mflimit)
656
1.69M
                        ) {
657
1.25M
                            const BYTE* const m2Ptr = prefixPtr + (pos8 - prefixIdx);
658
1.25M
                            unsigned ml2 = LZ4_count(ip+1, m2Ptr, matchlimit);
659
1.25M
                            if (ml2 > matchLength) {
660
74.2k
                                LZ4MID_addPosition(hash8Table, h8, ipIndex+1);
661
74.2k
                                ip++;
662
74.2k
                                matchLength = ml2;
663
74.2k
                                matchDistance = m2Distance;
664
74.2k
                        }   }
665
1.69M
                        goto _lz4mid_encode_sequence;
666
1.69M
                    }
667
12.1M
                } else {
668
0
                    if (pos4 >= dictIdx) {
669
                        /* extDict match candidate */
670
0
                        const BYTE* const matchPtr = dictStart + (pos4 - dictIdx);
671
0
                        const size_t safeLen = MIN(prefixIdx - pos4, (size_t)(matchlimit - ip));
672
0
                        matchLength = LZ4_count(ip, matchPtr, ip + safeLen);
673
0
                        if (matchLength >= MINMATCH) {
674
0
                            DEBUGLOG(7, "found match at ExtDict pos %u (len=%u)", pos4, matchLength);
675
0
                            matchDistance = ipIndex - pos4;
676
0
                            goto _lz4mid_encode_sequence;
677
0
                        }
678
0
                    }
679
0
                }
680
12.1M
        }   }
681
        /* no match found in prefix */
682
15.7M
        if ( (dict == usingDictCtxHc)
683
0
          && (ipIndex - gDictEndIndex < LZ4_DISTANCE_MAX - 8) ) {
684
            /* search a match into external dictionary */
685
0
            LZ4HC_match_t dMatch = searchIntoDict(ip, ipIndex,
686
0
                    matchlimit,
687
0
                    ctx->dictCtx, gDictEndIndex);
688
0
            if (dMatch.len >= MINMATCH) {
689
0
                DEBUGLOG(7, "found Dictionary match (offset=%i)", dMatch.off);
690
0
                assert(dMatch.back == 0);
691
0
                matchLength = (unsigned)dMatch.len;
692
0
                matchDistance = (unsigned)dMatch.off;
693
0
                goto _lz4mid_encode_sequence;
694
0
            }
695
0
        }
696
        /* no match found */
697
15.7M
        ip += 1 + ((ip-anchor) >> 9);  /* skip faster over incompressible data */
698
15.7M
        continue;
699
700
2.21M
_lz4mid_encode_sequence:
701
        /* catch back */
702
2.37M
        while (((ip > anchor) & ((U32)(ip-prefixPtr) > matchDistance)) && (unlikely(ip[-1] == ip[-(int)matchDistance-1]))) {
703
162k
            ip--;  matchLength++;
704
162k
        };
705
706
        /* fill table with beginning of match */
707
2.21M
        ADDPOS8(ip+1, ipIndex+1);
708
2.21M
        ADDPOS8(ip+2, ipIndex+2);
709
2.21M
        ADDPOS4(ip+1, ipIndex+1);
710
711
        /* encode */
712
2.21M
        {   BYTE* const saved_op = op;
713
            /* LZ4HC_encodeSequence always updates @op; on success, it updates @ip and @anchor */
714
2.21M
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
715
2.21M
                    (int)matchLength, (int)matchDistance,
716
2.21M
                    limit, oend) ) {
717
19
                op = saved_op;  /* restore @op value before failed LZ4HC_encodeSequence */
718
19
                goto _lz4mid_dest_overflow;
719
19
            }
720
2.21M
        }
721
722
        /* fill table with end of match */
723
2.21M
        {   U32 endMatchIdx = (U32)(ip-prefixPtr) + prefixIdx;
724
2.21M
            U32 pos_m2 = endMatchIdx - 2;
725
2.21M
            if (pos_m2 < ilimitIdx) {
726
2.21M
                if (likely(ip - prefixPtr > 5)) {
727
2.21M
                    ADDPOS8(ip-5, endMatchIdx - 5);
728
2.21M
                }
729
2.21M
                ADDPOS8(ip-3, endMatchIdx - 3);
730
2.21M
                ADDPOS8(ip-2, endMatchIdx - 2);
731
2.21M
                ADDPOS4(ip-2, endMatchIdx - 2);
732
2.21M
                ADDPOS4(ip-1, endMatchIdx - 1);
733
2.21M
            }
734
2.21M
        }
735
2.21M
    }
736
737
2.45k
_lz4mid_last_literals:
738
    /* Encode Last Literals */
739
2.45k
    {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
740
2.45k
        size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
741
2.45k
        size_t const totalSize = 1 + llAdd + lastRunSize;
742
2.45k
        if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
743
2.45k
        if (limit && (op + totalSize > oend)) {
744
363
            if (limit == limitedOutput) return 0;  /* not enough space in @dst */
745
            /* adapt lastRunSize to fill 'dest' */
746
0
            lastRunSize  = (size_t)(oend - op) - 1 /*token*/;
747
0
            llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
748
0
            lastRunSize -= llAdd;
749
0
        }
750
2.09k
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
751
2.09k
        ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */
752
753
2.09k
        if (lastRunSize >= RUN_MASK) {
754
375
            size_t accumulator = lastRunSize - RUN_MASK;
755
375
            *op++ = (RUN_MASK << ML_BITS);
756
15.9k
            for(; accumulator >= 255 ; accumulator -= 255)
757
15.5k
                *op++ = 255;
758
375
            *op++ = (BYTE) accumulator;
759
1.72k
        } else {
760
1.72k
            *op++ = (BYTE)(lastRunSize << ML_BITS);
761
1.72k
        }
762
2.09k
        assert(lastRunSize <= (size_t)(oend - op));
763
2.09k
        LZ4_memcpy(op, anchor, lastRunSize);
764
2.09k
        op += lastRunSize;
765
2.09k
    }
766
767
    /* End */
768
2.09k
    DEBUGLOG(5, "compressed %i bytes into %i bytes", *srcSizePtr, (int)((char*)op - dst));
769
2.09k
    assert(ip >= (const BYTE*)src);
770
2.09k
    assert(ip <= iend);
771
2.09k
    *srcSizePtr = (int)(ip - (const BYTE*)src);
772
2.09k
    assert((char*)op >= dst);
773
2.09k
    assert(op <= oend);
774
2.09k
    assert((char*)op - dst < INT_MAX);
775
2.09k
    return (int)((char*)op - dst);
776
777
19
_lz4mid_dest_overflow:
778
19
    if (limit == fillOutput) {
779
        /* Assumption : @ip, @anchor, @optr and @matchLength must be set correctly */
780
0
        size_t const ll = (size_t)(ip - anchor);
781
0
        size_t const ll_addbytes = (ll + 240) / 255;
782
0
        size_t const ll_totalCost = 1 + ll_addbytes + ll;
783
0
        BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
784
0
        DEBUGLOG(6, "Last sequence is overflowing : %u literals, %u remaining space",
785
0
                (unsigned)ll, (unsigned)(oend-op));
786
0
        if (op + ll_totalCost <= maxLitPos) {
787
            /* ll validated; now adjust match length */
788
0
            size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
789
0
            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
790
0
            assert(maxMlSize < INT_MAX);
791
0
            if ((size_t)matchLength > maxMlSize) matchLength= (unsigned)maxMlSize;
792
0
            if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + matchLength >= MFLIMIT) {
793
0
            DEBUGLOG(6, "Let's encode a last sequence (ll=%u, ml=%u)", (unsigned)ll, matchLength);
794
0
                LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
795
0
                        (int)matchLength, (int)matchDistance,
796
0
                        notLimited, oend);
797
0
        }   }
798
0
        DEBUGLOG(6, "Let's finish with a run of literals (%u bytes left)", (unsigned)(oend-op));
799
0
        goto _lz4mid_last_literals;
800
0
    }
801
    /* compression failed */
802
19
    return 0;
803
19
}
804
805
806
/**************************************
807
*  HC Compression - Search
808
**************************************/
809
810
/* Update chains up to ip (excluded) */
811
LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
812
171M
{
813
171M
    U16* const chainTable = hc4->chainTable;
814
171M
    U32* const hashTable  = hc4->hashTable;
815
171M
    const BYTE* const prefixPtr = hc4->prefixStart;
816
171M
    U32 const prefixIdx = hc4->dictLimit;
817
171M
    U32 const target = (U32)(ip - prefixPtr) + prefixIdx;
818
171M
    U32 idx = hc4->nextToUpdate;
819
171M
    assert(ip >= prefixPtr);
820
171M
    assert(target >= prefixIdx);
821
822
620M
    while (idx < target) {
823
449M
        U32 const h = LZ4HC_hashPtr(prefixPtr+idx-prefixIdx);
824
449M
        size_t delta = idx - hashTable[h];
825
449M
        if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
826
449M
        DELTANEXTU16(chainTable, idx) = (U16)delta;
827
449M
        hashTable[h] = idx;
828
449M
        idx++;
829
449M
    }
830
831
171M
    hc4->nextToUpdate = target;
832
171M
}
833
834
/* 32-bit left rotation.
 * NOTE(review): the portable fallback shifts by (32 - r), so callers must
 * guarantee 0 < r < 32 — r == 0 would shift by 32, which is UB in C. */
#if defined(_MSC_VER)
#  define LZ4HC_rotl32(x,r) _rotl(x,r)
#else
#  define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif
839
840
841
/* LZ4HC_rotatePattern() :
 * Rotates a 4-byte @pattern left by (@rotate % 4) bytes, producing the pattern
 * as it would appear when read at an offset of @rotate bytes into the repetition. */
static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
{
    size_t const byteShift = rotate & (sizeof(pattern) - 1);
    if (byteShift != 0)
        return LZ4HC_rotl32(pattern, (int)(byteShift << 3));
    /* rotation by 0 handled separately : LZ4HC_rotl32's shift-based
     * fallback would shift by 32, which is undefined behavior */
    return pattern;
}
847
848
/* LZ4HC_countPattern() :
 * Counts how many bytes, starting at @ip and bounded by @iEnd,
 * match a repetitive pattern.
 * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
 * @return : nb of matching bytes */
static unsigned
LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
{
    const BYTE* const iStart = ip;
    /* On 64-bit targets, replicate the 32-bit pattern into both halves of the
     * register (addition is carry-free : the halves are disjoint),
     * so the scan can proceed one full register at a time. */
    reg_t const pattern = (sizeof(pattern)==8) ?
        (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;

    /* word-at-a-time scan while a full register load fits before @iEnd */
    while (likely(ip < iEnd-(sizeof(pattern)-1))) {
        reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
        if (!diff) { ip+=sizeof(pattern); continue; }
        /* locate the first differing byte within the word */
        ip += LZ4_NbCommonBytes(diff);
        return (unsigned)(ip - iStart);
    }

    /* tail : fewer than sizeof(pattern) bytes remain, compare byte by byte */
    if (LZ4_isLittleEndian()) {
        reg_t patternByte = pattern;
        while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
            /* little endian : next expected byte sits in the next-higher lane */
            ip++; patternByte >>= 8;
        }
    } else {  /* big endian : next expected byte starts at the most significant lane */
        U32 bitOffset = (sizeof(pattern)*8) - 8;
        while (ip < iEnd) {
            BYTE const byte = (BYTE)(pattern >> bitOffset);
            if (*ip != byte) break;
            ip ++; bitOffset -= 8;
    }   }

    return (unsigned)(ip - iStart);
}
879
880
/* LZ4HC_reverseCountPattern() :
881
 * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
882
 * read using natural platform endianness */
883
static unsigned
884
LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
885
252M
{
886
252M
    const BYTE* const iStart = ip;
887
888
1.84G
    while (likely(ip >= iLow+4)) {
889
1.84G
        if (LZ4_read32(ip-4) != pattern) break;
890
1.59G
        ip -= 4;
891
1.59G
    }
892
252M
    {   const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */
893
570M
        while (likely(ip>iLow)) {
894
570M
            if (ip[-1] != *bytePtr) break;
895
317M
            ip--; bytePtr--;
896
317M
    }   }
897
252M
    return (unsigned)(iStart - ip);
898
252M
}
899
900
/* LZ4HC_protectDictEnd() :
901
 * Checks if the match is in the last 3 bytes of the dictionary, so reading the
902
 * 4 byte MINMATCH would overflow.
903
 * @returns true if the match index is okay.
904
 */
905
static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
906
505M
{
907
505M
    return ((U32)((dictLimit - 1) - matchIndex) >= 3);
908
505M
}
909
910
/* repeat_state_e : caches the (lazily evaluated) result of the repeat-pattern
 * test performed inside LZ4HC_InsertAndGetWiderMatch() */
typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
/* HCfavor_e : when favorDecompressionSpeed is selected, the match search
 * skips candidates at offset < 8 (see LZ4HC_InsertAndGetWiderMatch()) */
typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
912
913
914
/* LZ4HC_InsertAndGetWiderMatch() :
 * Inserts all positions up to @ip into the hash chain, then walks the chain
 * (visiting at most @maxNbAttempts candidates) searching for a match strictly
 * longer than @longest. The match start may be extended backward, down to
 * @iLowLimit; the (non-positive) adjustment is returned in .back.
 * @chainSwap : when a candidate merely ties the current best, jump to the
 *              chain position inside the candidate with the largest link gap,
 *              to skip ahead faster.
 * @patternAnalysis : detect runs of a repeated 1/2/4-byte pattern and leap
 *              across them instead of following every single chain link.
 * @favorDecSpeed : skip candidates at offset < 8.
 * @return : LZ4HC_match_t { .len, .off, .back };
 *           .len == input @longest means no better match was found. */
LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_InsertAndGetWiderMatch (
        LZ4HC_CCtx_internal* const hc4,
        const BYTE* const ip,
        const BYTE* const iLowLimit, const BYTE* const iHighLimit,
        int longest,
        const int maxNbAttempts,
        const int patternAnalysis, const int chainSwap,
        const dictCtx_directive dict,
        const HCfavor_e favorDecSpeed)
{
    U16* const chainTable = hc4->chainTable;
    U32* const hashTable = hc4->hashTable;
    const LZ4HC_CCtx_internal* const dictCtx = hc4->dictCtx;
    const BYTE* const prefixPtr = hc4->prefixStart;
    const U32 prefixIdx = hc4->dictLimit;
    const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
    const int withinStartDistance = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex);
    const U32 lowestMatchIndex = (withinStartDistance) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
    const BYTE* const dictStart = hc4->dictStart;
    const U32 dictIdx = hc4->lowLimit;
    const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx;
    int const lookBackLength = (int)(ip-iLowLimit);   /* how far .back may extend */
    int nbAttempts = maxNbAttempts;
    U32 matchChainPos = 0;   /* offset inside best match whose chain we follow (chainSwap) */
    U32 const pattern = LZ4_read32(ip);
    U32 matchIndex;
    repeat_state_e repeat = rep_untested;
    size_t srcPatternLength = 0;   /* length of repeated pattern at ip, once confirmed */
    int offset = 0, sBack = 0;

    DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
    /* First Match */
    LZ4HC_Insert(hc4, ip);  /* insert all prior positions up to ip (excluded) */
    matchIndex = hashTable[LZ4HC_hashPtr(ip)];
    DEBUGLOG(7, "First candidate match for pos %u found at index %u / %u (lowestMatchIndex)",
                ipIndex, matchIndex, lowestMatchIndex);

    while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
        int matchLength=0;
        nbAttempts--;
        assert(matchIndex < ipIndex);
        if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
            /* do nothing:
             * favorDecSpeed intentionally skips matches with offset < 8 */
        } else if (matchIndex >= prefixIdx) {   /* within current Prefix */
            const BYTE* const matchPtr = prefixPtr + (matchIndex - prefixIdx);
            assert(matchPtr < ip);
            assert(longest >= 1);
            /* quick reject : the bytes at position (longest-1) must already match,
             * otherwise this candidate cannot beat the current best */
            if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
                if (LZ4_read32(matchPtr) == pattern) {
                    int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0;
                    matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
                    matchLength -= back;   /* back <= 0, so this lengthens the match */
                    if (matchLength > longest) {
                        longest = matchLength;
                        offset = (int)(ipIndex - matchIndex);
                        sBack = back;
                        DEBUGLOG(7, "Found match of len=%i within prefix, offset=%i, back=%i", longest, offset, -back);
                        HEX_CMP(7, ip + back, ip + back - offset, (size_t)matchLength);
            }   }   }
        } else {   /* lowestMatchIndex <= matchIndex < dictLimit : within Ext Dict */
            const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx);
            assert(matchIndex >= dictIdx);
            if ( likely(matchIndex <= prefixIdx - 4)
              && (LZ4_read32(matchPtr) == pattern) ) {
                int back = 0;
                const BYTE* vLimit = ip + (prefixIdx - matchIndex);   /* virtual end of dict, from ip's perspective */
                if (vLimit > iHighLimit) vLimit = iHighLimit;
                matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                /* match may continue from the end of the dict into the prefix */
                if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
                    matchLength += LZ4_count(ip+matchLength, prefixPtr, iHighLimit);
                back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
                matchLength -= back;
                if (matchLength > longest) {
                    longest = matchLength;
                    offset = (int)(ipIndex - matchIndex);
                    sBack = back;
                    DEBUGLOG(7, "Found match of len=%i within dict, offset=%i, back=%i", longest, offset, -back);
                    HEX_CMP(7, ip + back, matchPtr + back, (size_t)matchLength);
        }   }   }

        if (chainSwap && matchLength==longest) {   /* better match => select a better chain */
            assert(lookBackLength==0);   /* search forward only */
            if (matchIndex + (U32)longest <= ipIndex) {
                int const kTrigger = 4;
                U32 distanceToNextMatch = 1;
                int const end = longest - MINMATCH + 1;
                int step = 1;
                int accel = 1 << kTrigger;   /* step grows geometrically after kTrigger static probes */
                int pos;
                /* find, inside the match, the position with the largest chain gap */
                for (pos = 0; pos < end; pos += step) {
                    U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
                    step = (accel++ >> kTrigger);
                    if (candidateDist > distanceToNextMatch) {
                        distanceToNextMatch = candidateDist;
                        matchChainPos = (U32)pos;
                        accel = 1 << kTrigger;
                }   }
                if (distanceToNextMatch > 1) {
                    if (distanceToNextMatch > matchIndex) break;   /* avoid overflow */
                    matchIndex -= distanceToNextMatch;
                    continue;
        }   }   }

        {   U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
            if (patternAnalysis && distNextMatch==1 && matchChainPos==0) {
                U32 const matchCandidateIdx = matchIndex-1;
                /* may be a repeated pattern */
                if (repeat == rep_untested) {
                    /* pattern is repetitive iff both 16-bit halves and both edge bytes agree */
                    if ( ((pattern & 0xFFFF) == (pattern >> 16))
                      &  ((pattern & 0xFF)   == (pattern >> 24)) ) {
                        DEBUGLOG(7, "Repeat pattern detected, char %02X", pattern >> 24);
                        repeat = rep_confirmed;
                        srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
                    } else {
                        repeat = rep_not;
                }   }
                if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
                  && LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) {
                    const int extDict = matchCandidateIdx < prefixIdx;
                    const BYTE* const matchPtr = extDict ? dictStart + (matchCandidateIdx - dictIdx) : prefixPtr + (matchCandidateIdx - prefixIdx);
                    if (LZ4_read32(matchPtr) == pattern) {  /* good candidate */
                        const BYTE* const iLimit = extDict ? dictEnd : iHighLimit;
                        size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
                        /* pattern run may continue from end of dict into the prefix */
                        if (extDict && matchPtr + forwardPatternLength == iLimit) {
                            U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
                            forwardPatternLength += LZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern);
                        }
                        {   const BYTE* const lowestMatchPtr = extDict ? dictStart : prefixPtr;
                            size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
                            size_t currentSegmentLength;
                            /* pattern run may also continue backward from prefix start into the dict */
                            if (!extDict
                              && matchPtr - backLength == prefixPtr
                              && dictIdx < prefixIdx) {
                                U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
                                backLength += LZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern);
                            }
                            /* Limit backLength not go further than lowestMatchIndex */
                            backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
                            assert(matchCandidateIdx - backLength >= lowestMatchIndex);
                            currentSegmentLength = backLength + forwardPatternLength;
                            /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
                            if ( (currentSegmentLength >= srcPatternLength)   /* current pattern segment large enough to contain full srcPatternLength */
                              && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
                                U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength;  /* best position, full pattern, might be followed by more match */
                                if (LZ4HC_protectDictEnd(prefixIdx, newMatchIndex))
                                    matchIndex = newMatchIndex;
                                else {
                                    /* Can only happen if started in the prefix */
                                    assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
                                    matchIndex = prefixIdx;
                                }
                            } else {
                                U32 const newMatchIndex = matchCandidateIdx - (U32)backLength;   /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
                                if (!LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) {
                                    assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
                                    matchIndex = prefixIdx;
                                } else {
                                    matchIndex = newMatchIndex;
                                    if (lookBackLength==0) {  /* no back possible */
                                        size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
                                        if ((size_t)longest < maxML) {
                                            assert(prefixPtr - prefixIdx + matchIndex != ip);
                                            if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break;
                                            assert(maxML < 2 GB);
                                            longest = (int)maxML;
                                            offset = (int)(ipIndex - matchIndex);
                                            assert(sBack == 0);
                                            DEBUGLOG(7, "Found repeat pattern match of len=%i, offset=%i", longest, offset);
                                        }
                                        {   U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
                                            if (distToNextPattern > matchIndex) break;  /* avoid overflow */
                                            matchIndex -= distToNextPattern;
                        }   }   }   }   }
                        continue;
                }   }
        }   }   /* PA optimization */

        /* follow current chain */
        matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos);

    }  /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */

    /* optional second pass : search the attached dictionary context */
    if ( dict == usingDictCtxHc
      && nbAttempts > 0
      && withinStartDistance) {
        size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
        U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
        assert(dictEndOffset <= 1 GB);
        matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
        if (dictMatchIndex>0) DEBUGLOG(7, "dictEndOffset = %zu, dictMatchIndex = %u => relative matchIndex = %i", dictEndOffset, dictMatchIndex, (int)dictMatchIndex - (int)dictEndOffset);
        while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
            const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex;

            if (LZ4_read32(matchPtr) == pattern) {
                int mlt;
                int back = 0;
                const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
                if (vLimit > iHighLimit) vLimit = iHighLimit;
                mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
                mlt -= back;
                if (mlt > longest) {
                    longest = mlt;
                    offset = (int)(ipIndex - matchIndex);
                    sBack = back;
                    DEBUGLOG(7, "found match of length %i within extDictCtx", longest);
            }   }

            {   U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
                dictMatchIndex -= nextOffset;
                matchIndex -= nextOffset;
    }   }   }

    {   LZ4HC_match_t md;
        assert(longest >= 0);
        md.len = longest;
        md.off = offset;
        md.back = sBack;
        return md;
    }
}
1137
1138
LZ4_FORCE_INLINE LZ4HC_match_t
1139
LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4,   /* Index table will be updated */
1140
                       const BYTE* const ip, const BYTE* const iLimit,
1141
                       const int maxNbAttempts,
1142
                       const int patternAnalysis,
1143
                       const dictCtx_directive dict)
1144
82.3M
{
1145
82.3M
    DEBUGLOG(7, "LZ4HC_InsertAndFindBestMatch");
1146
    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
1147
     * but this won't be the case here, as we define iLowLimit==ip,
1148
     * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
1149
82.3M
    return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
1150
82.3M
}
1151
1152
1153
/* LZ4HC_compress_hashChain() :
 * Compresses @src with a lazy parse : after finding match m1, it searches for
 * an overlapping wider match m2, then m3, and only emits sequences once no
 * better overlapping candidate remains.
 * preconditions:
 * - *srcSizePtr within [1, LZ4_MAX_INPUT_SIZE]
 * - src is valid
 * - maxOutputSize >= 1
 * - dst is valid
 * @return : compressed size, or 0 when output doesn't fit (limitedOutput).
 * On return, *srcSizePtr holds the nb of input bytes consumed
 * (may be < input size when limit==fillOutput).
 */
LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
    LZ4HC_CCtx_internal* const ctx,
    const char* const src,
    char* const dst,
    int* srcSizePtr,
    int const maxOutputSize,
    int maxNbAttempts,
    const limitedOutput_directive limit,
    const dictCtx_directive dict
    )
{
    const int inputSize = *srcSizePtr;
    const int patternAnalysis = (maxNbAttempts > 128);   /* levels 9+ */

    const BYTE* ip = (const BYTE*)src;
    const BYTE* anchor = ip;   /* start of not-yet-emitted literals */
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = (iend - LASTLITERALS);

    BYTE* optr = (BYTE*) dst;  /* saved @op, restored on encode overflow */
    BYTE* op = (BYTE*) dst;
    BYTE* oend = op + maxOutputSize;

    const BYTE* start0;        /* saved position of the original m1 */
    const BYTE* start2 = NULL;
    const BYTE* start3 = NULL;
    LZ4HC_match_t m0, m1, m2, m3;
    const LZ4HC_match_t nomatch = {0, 0, 0};

    /* init */
    DEBUGLOG(5, "LZ4HC_compress_hashChain (dict?=>%i)", dict);

    /* preconditions verifications */
    assert(*srcSizePtr >= 1);
    assert(src != NULL);
    assert(maxOutputSize >= 1);
    assert(dst != NULL);

    *srcSizePtr = 0;
    if (limit == fillOutput) oend -= LASTLITERALS;                  /* Hack for support LZ4 format restriction */
    if (inputSize < LZ4_minLength) goto _last_literals;             /* Input too small, no compression (all literals) */

    /* Main Loop */
    while (ip <= mflimit) {
        m1 = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, maxNbAttempts, patternAnalysis, dict);
        if (m1.len<MINMATCH) { ip++; continue; }

        /* saved, in case we would skip too much */
        start0 = ip; m0 = m1;

_Search2:
        /* look for a wider match m2 overlapping the end of m1 */
        DEBUGLOG(7, "_Search2 (currently found match of size %i)", m1.len);
        if (ip+m1.len <= mflimit) {
            start2 = ip + m1.len - 2;
            m2 = LZ4HC_InsertAndGetWiderMatch(ctx,
                            start2, ip + 0, matchlimit, m1.len,
                            maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
            start2 += m2.back;   /* m2.back <= 0 : backward extension */
        } else {
            m2 = nomatch;  /* do not search further */
        }

        if (m2.len <= m1.len) { /* No better match => encode ML1 immediately */
            optr = op;
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                    m1.len, m1.off,
                    limit, oend) )
                goto _dest_overflow;
            continue;
        }

        if (start0 < ip) {   /* first match was skipped at least once */
            if (start2 < ip + m0.len) {  /* squeezing ML1 between ML0(original ML1) and ML2 */
                ip = start0; m1 = m0;  /* restore initial Match1 */
        }   }

        /* Here, start0==ip */
        if ((start2 - ip) < 3) {  /* First Match too small : removed */
            ip = start2;
            m1 = m2;
            goto _Search2;
        }

_Search3:
        /* m1 and m2 overlap; if m1's useful part would be short, shorten it
         * to OPTIMAL_ML and push start2 forward accordingly */
        if ((start2 - ip) < OPTIMAL_ML) {
            int correction;
            int new_ml = m1.len;
            if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
            if (ip+new_ml > start2 + m2.len - MINMATCH)
                new_ml = (int)(start2 - ip) + m2.len - MINMATCH;   /* keep m2 >= MINMATCH */
            correction = new_ml - (int)(start2 - ip);
            if (correction > 0) {
                start2 += correction;
                m2.len -= correction;
            }
        }

        /* look for a wider match m3 overlapping the end of m2 */
        if (start2 + m2.len <= mflimit) {
            start3 = start2 + m2.len - 3;
            m3 = LZ4HC_InsertAndGetWiderMatch(ctx,
                            start3, start2, matchlimit, m2.len,
                            maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
            start3 += m3.back;
        } else {
            m3 = nomatch;  /* do not search further */
        }

        if (m3.len <= m2.len) {  /* No better match => encode ML1 and ML2 */
            /* ip & ref are known; Now for ml */
            if (start2 < ip+m1.len) m1.len = (int)(start2 - ip);   /* trim m1 so the two sequences abut */
            /* Now, encode 2 sequences */
            optr = op;
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                    m1.len, m1.off,
                    limit, oend) )
                goto _dest_overflow;
            ip = start2;
            optr = op;
            if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                    m2.len, m2.off,
                    limit, oend) ) {
                m1 = m2;   /* overflow path emits the pending match as m1 */
                goto _dest_overflow;
            }
            continue;
        }

        if (start3 < ip+m1.len+3) {  /* Not enough space for match 2 : remove it */
            if (start3 >= (ip+m1.len)) {  /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
                if (start2 < ip+m1.len) {
                    int correction = (int)(ip+m1.len - start2);
                    start2 += correction;
                    m2.len -= correction;
                    if (m2.len < MINMATCH) {   /* m2 became too short : drop it, m3 takes its place */
                        start2 = start3;
                        m2 = m3;
                    }
                }

                optr = op;
                if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                        m1.len, m1.off,
                        limit, oend) )
                    goto _dest_overflow;
                ip  = start3;
                m1 = m3;

                start0 = start2;   /* keep m2 as fallback, like the initial save */
                m0 = m2;
                goto _Search2;
            }

            start2 = start3;
            m2 = m3;
            goto _Search3;
        }

        /*
        * OK, now we have 3 ascending matches;
        * let's write the first one ML1.
        * ip & ref are known; Now decide ml.
        */
        if (start2 < ip+m1.len) {
            if ((start2 - ip) < OPTIMAL_ML) {
                int correction;
                if (m1.len > OPTIMAL_ML) m1.len = OPTIMAL_ML;
                if (ip + m1.len > start2 + m2.len - MINMATCH)
                    m1.len = (int)(start2 - ip) + m2.len - MINMATCH;
                correction = m1.len - (int)(start2 - ip);
                if (correction > 0) {
                    start2 += correction;
                    m2.len -= correction;
                }
            } else {
                m1.len = (int)(start2 - ip);
            }
        }
        optr = op;
        if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
                m1.len, m1.off,
                limit, oend) )
            goto _dest_overflow;

        /* ML2 becomes ML1 */
        ip = start2; m1 = m2;

        /* ML3 becomes ML2 */
        start2 = start3; m2 = m3;

        /* let's find a new ML3 */
        goto _Search3;
    }

_last_literals:
    /* Encode Last Literals */
    {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
        size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;   /* extra length bytes */
        size_t const totalSize = 1 + llAdd + lastRunSize;      /* token + length bytes + literals */
        if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
        if (limit && (op + totalSize > oend)) {
            if (limit == limitedOutput) return 0;
            /* adapt lastRunSize to fill 'dest' */
            lastRunSize  = (size_t)(oend - op) - 1 /*token*/;
            llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
            lastRunSize -= llAdd;
        }
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
        ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */

        if (lastRunSize >= RUN_MASK) {
            /* long run : RUN_MASK in token, remainder in 255-byte increments */
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = (RUN_MASK << ML_BITS);
            for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        } else {
            *op++ = (BYTE)(lastRunSize << ML_BITS);
        }
        LZ4_memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    *srcSizePtr = (int) (((const char*)ip) - src);
    return (int) (((char*)op)-dst);

_dest_overflow:
    if (limit == fillOutput) {
        /* Assumption : @ip, @anchor, @optr and @m1 must be set correctly */
        size_t const ll = (size_t)(ip - anchor);
        size_t const ll_addbytes = (ll + 240) / 255;
        size_t const ll_totalCost = 1 + ll_addbytes + ll;
        BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
        DEBUGLOG(6, "Last sequence overflowing");
        op = optr;  /* restore correct out pointer */
        if (op + ll_totalCost <= maxLitPos) {
            /* ll validated; now adjust match length */
            size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
            size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
            assert(maxMlSize < INT_MAX); assert(m1.len >= 0);
            if ((size_t)m1.len > maxMlSize) m1.len = (int)maxMlSize;
            if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + m1.len >= MFLIMIT) {
                LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, notLimited, oend);
        }   }
        goto _last_literals;
    }
    /* compression failed */
    return 0;
}
1408
1409
1410
static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
1411
    const char* const source, char* dst,
1412
    int* srcSizePtr, int dstCapacity,
1413
    int const nbSearches, size_t sufficient_len,
1414
    const limitedOutput_directive limit, int const fullUpdate,
1415
    const dictCtx_directive dict,
1416
    const HCfavor_e favorDecSpeed);
1417
1418
static int
1419
LZ4HC_compress_generic_internal (
1420
            LZ4HC_CCtx_internal* const ctx,
1421
            const char* const src,
1422
            char* const dst,
1423
            int* const srcSizePtr,
1424
            int const dstCapacity,
1425
            int cLevel,
1426
            const limitedOutput_directive limit,
1427
            const dictCtx_directive dict
1428
            )
1429
14.5k
{
1430
14.5k
    DEBUGLOG(5, "LZ4HC_compress_generic_internal(src=%p, srcSize=%d, dstCapacity=%d)",
1431
14.5k
                src, *srcSizePtr, dstCapacity);
1432
1433
    /* input sanitization */
1434
14.5k
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;  /* Unsupported input size (too large or negative) */
1435
14.5k
    if (dstCapacity < 1) return 0;   /* Invalid: impossible to store anything */
1436
14.5k
    assert(dst); /* since dstCapacity >= 1, dst must be valid */
1437
14.5k
    if (*srcSizePtr == 0) { *dst = 0; return 1; }
1438
14.5k
    assert(src != NULL); /* since *srcSizePtr >= 1, src must be valid */
1439
1440
14.5k
    ctx->end += *srcSizePtr;
1441
14.5k
    {   cParams_t const cParam = LZ4HC_getCLevelParams(cLevel);
1442
14.5k
        HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
1443
14.5k
        int result;
1444
1445
14.5k
        if (cParam.strat == lz4mid) {
1446
2.47k
            result = LZ4MID_compress(ctx,
1447
2.47k
                                src, dst, srcSizePtr, dstCapacity,
1448
2.47k
                                limit, dict);
1449
12.0k
        } else if (cParam.strat == lz4hc) {
1450
6.01k
            result = LZ4HC_compress_hashChain(ctx,
1451
6.01k
                                src, dst, srcSizePtr, dstCapacity,
1452
6.01k
                                cParam.nbSearches, limit, dict);
1453
6.04k
        } else {
1454
6.04k
            assert(cParam.strat == lz4opt);
1455
6.04k
            result = LZ4HC_compress_optimal(ctx,
1456
6.04k
                                src, dst, srcSizePtr, dstCapacity,
1457
6.04k
                                cParam.nbSearches, cParam.targetLength, limit,
1458
6.04k
                                cLevel >= LZ4HC_CLEVEL_MAX,   /* ultra mode */
1459
6.04k
                                dict, favor);
1460
6.04k
        }
1461
14.5k
        if (result <= 0) ctx->dirty = 1;
1462
14.5k
        return result;
1463
14.5k
    }
1464
14.5k
}
1465
1466
static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);
1467
1468
static int
1469
LZ4HC_compress_generic_noDictCtx (
1470
        LZ4HC_CCtx_internal* const ctx,
1471
        const char* const src,
1472
        char* const dst,
1473
        int* const srcSizePtr,
1474
        int const dstCapacity,
1475
        int cLevel,
1476
        limitedOutput_directive limit
1477
        )
1478
14.5k
{
1479
14.5k
    assert(ctx->dictCtx == NULL);
1480
14.5k
    return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
1481
14.5k
}
1482
1483
static int isStateCompatible(const LZ4HC_CCtx_internal* ctx1, const LZ4HC_CCtx_internal* ctx2)
1484
0
{
1485
0
    int const isMid1 = LZ4HC_getCLevelParams(ctx1->compressionLevel).strat == lz4mid;
1486
0
    int const isMid2 = LZ4HC_getCLevelParams(ctx2->compressionLevel).strat == lz4mid;
1487
0
    return !(isMid1 ^ isMid2);
1488
0
}
1489
1490
/* Compression entry point when a dictionary context is attached.
 * Picks one of three strategies depending on how far the stream has
 * already progressed and on the input size. */
static int
LZ4HC_compress_generic_dictCtx (
        LZ4HC_CCtx_internal* const ctx,
        const char* const src,
        char* const dst,
        int* const srcSizePtr,
        int const dstCapacity,
        int cLevel,
        limitedOutput_directive limit
        )
{
    /* current position relative to the start of history (prefix + extDict span) */
    const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit);
    assert(ctx->dictCtx != NULL);
    if (position >= 64 KB) {
        /* dictionary is beyond the 64 KB reference window: detach it */
        ctx->dictCtx = NULL;
        return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    } else if (position == 0 && *srcSizePtr > 4 KB && isStateCompatible(ctx, ctx->dictCtx)) {
        /* fresh stream + large input + compatible search tables:
         * clone the dictionary state so its tables are used directly (faster) */
        LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
        LZ4HC_setExternalDict(ctx, (const BYTE *)src);
        ctx->compressionLevel = (short)cLevel;
        return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
    } else {
        /* otherwise, reference the dictCtx indirectly during match search */
        return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc);
    }
}
1515
1516
static int
1517
LZ4HC_compress_generic (
1518
        LZ4HC_CCtx_internal* const ctx,
1519
        const char* const src,
1520
        char* const dst,
1521
        int* const srcSizePtr,
1522
        int const dstCapacity,
1523
        int cLevel,
1524
        limitedOutput_directive limit
1525
        )
1526
14.5k
{
1527
14.5k
    if (ctx->dictCtx == NULL) {
1528
14.5k
        return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
1529
14.5k
    } else {
1530
0
        return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
1531
0
    }
1532
14.5k
}
1533
1534
1535
9.37k
/* Size of an HC streaming state, for callers allocating it externally. */
int LZ4_sizeofStateHC(void)
{
    return (int)sizeof(LZ4_streamHC_t);
}
1536
1537
/* Returns the alignment requirement (in bytes) of LZ4_streamHC_t,
 * computed with the classic struct-padding trick.  When alignment
 * testing is disabled, returns 1 so every pointer passes the check. */
static size_t LZ4_streamHC_t_alignment(void)
{
#if LZ4_ALIGN_TEST
    typedef struct { char c; LZ4_streamHC_t t; } t_a;
    return sizeof(t_a) - sizeof(LZ4_streamHC_t);
#else
    return 1;  /* effectively disabled */
#endif
}
1546
1547
/* state is presumed correctly initialized,
 * in which case its size and alignment have already been validated */
1549
int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
1550
10.7k
{
1551
10.7k
    LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
1552
10.7k
    if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
1553
10.7k
    LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
1554
10.7k
    LZ4HC_init_internal (ctx, (const BYTE*)src);
1555
10.7k
    if (dstCapacity < LZ4_compressBound(srcSize))
1556
10.7k
        return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
1557
0
    else
1558
0
        return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
1559
10.7k
}
1560
1561
int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
1562
0
{
1563
0
    LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
1564
0
    if (ctx==NULL) return 0;   /* init failure */
1565
0
    return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
1566
0
}
1567
1568
int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
1569
0
{
1570
0
    int cSize;
1571
0
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
1572
0
    LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
1573
0
    if (statePtr==NULL) return 0;
1574
#else
1575
    LZ4_streamHC_t state;
1576
    LZ4_streamHC_t* const statePtr = &state;
1577
#endif
1578
0
    DEBUGLOG(5, "LZ4_compress_HC")
1579
0
    cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
1580
0
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
1581
0
    FREEMEM(statePtr);
1582
0
#endif
1583
0
    return cSize;
1584
0
}
1585
1586
/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
1587
int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
1588
0
{
1589
0
    LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
1590
0
    if (ctx==NULL) return 0;   /* init failure */
1591
0
    LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source);
1592
0
    LZ4_setCompressionLevel(ctx, cLevel);
1593
0
    return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput);
1594
0
}
1595
1596
1597
1598
/**************************************
1599
*  Streaming Functions
1600
**************************************/
1601
/* allocation */
1602
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1603
LZ4_streamHC_t* LZ4_createStreamHC(void)
1604
0
{
1605
0
    LZ4_streamHC_t* const state =
1606
0
        (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
1607
0
    if (state == NULL) return NULL;
1608
0
    LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
1609
0
    return state;
1610
0
}
1611
1612
/* Release a state allocated by LZ4_createStreamHC().
 * Accepts NULL (no-op).  Always returns 0. */
int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
{
    DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
    if (LZ4_streamHCPtr != NULL) {
        FREEMEM(LZ4_streamHCPtr);
    }
    return 0;
}
1619
#endif
1620
1621
1622
/* Initialize a caller-provided buffer as an HC streaming state.
 * Validates pointer, size, and alignment before zeroing the state.
 * @return the typed pointer, or NULL if the buffer is unusable. */
LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
{
    LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
    DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);

    /* check conditions */
    if (buffer == NULL) return NULL;
    if (size < sizeof(LZ4_streamHC_t)) return NULL;
    if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;

    /* zero the internal state, then apply the default compression level */
    MEM_INIT(&LZ4_streamHCPtr->internal_donotuse, 0,
             sizeof(LZ4_streamHCPtr->internal_donotuse));
    LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
    return LZ4_streamHCPtr;
}
1636
1637
/* just a stub */
1638
void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    /* Full re-initialization; init resets the level to the default,
     * so the requested level is re-applied afterwards. */
    LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
1643
1644
/* Cheap reset between streams: instead of clearing the search tables,
 * slide the indexing window forward so stale entries become unreachable.
 * Falls back to a full re-init if the previous compression errored out. */
void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    LZ4HC_CCtx_internal* const s = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(5, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
    if (s->dirty) {
        /* state may be inconsistent after a failed compression: re-init fully */
        LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
    } else {
        assert(s->end >= s->prefixStart);
        /* advance dictLimit past the previous prefix so old table entries
         * are out of range, then detach all memory segments */
        s->dictLimit += (U32)(s->end - s->prefixStart);
        s->prefixStart = NULL;
        s->end = NULL;
        s->dictCtx = NULL;
    }
    LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
1659
1660
/* Store the compression level in the state, clamped to the supported
 * range; any non-positive value selects the default level. */
void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel);
    if (compressionLevel < 1) {
        compressionLevel = LZ4HC_CLEVEL_DEFAULT;
    } else if (compressionLevel > LZ4HC_CLEVEL_MAX) {
        compressionLevel = LZ4HC_CLEVEL_MAX;
    }
    LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
}
1667
1668
/* Record the decompression-speed preference as a 0/1 flag in the state. */
void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
{
    int const flag = (favor != 0);
    LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = flag;
}
1672
1673
/* LZ4_loadDictHC() :
1674
 * LZ4_streamHCPtr is presumed properly initialized */
1675
int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
              const char* dictionary, int dictSize)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    cParams_t cp;
    DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d, clevel=%d)", LZ4_streamHCPtr, dictionary, dictSize, ctxPtr->compressionLevel);
    assert(dictSize >= 0);
    assert(LZ4_streamHCPtr != NULL);
    if (dictSize > 64 KB) {
        /* only the last 64 KB can ever be referenced: keep just that tail */
        dictionary += (size_t)dictSize - 64 KB;
        dictSize = 64 KB;
    }
    /* need a full initialization, there are bad side-effects when using resetFast() */
    {   int const cLevel = ctxPtr->compressionLevel;
        /* init resets the level, so save it and restore it after */
        LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
        LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
        cp = LZ4HC_getCLevelParams(cLevel);
    }
    LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
    ctxPtr->end = (const BYTE*)dictionary + dictSize;
    /* pre-index the dictionary with whichever table the strategy uses,
     * so future blocks can reference its content */
    if (cp.strat == lz4mid) {
        LZ4MID_fillHTable (ctxPtr, dictionary, (size_t)dictSize);
    } else {
        if (dictSize >= LZ4HC_HASHSIZE) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
    }
    return dictSize;
}
1702
1703
0
/* Attach (or detach, when dictionary_stream is NULL) a read-only
 * dictionary context to the working stream. */
void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) {
    if (dictionary_stream != NULL) {
        working_stream->internal_donotuse.dictCtx = &(dictionary_stream->internal_donotuse);
    } else {
        working_stream->internal_donotuse.dictCtx = NULL;
    }
}
1706
1707
/* compression */
1708
1709
/* Demote the current prefix segment to external-dictionary status and
 * start a new prefix at newBlock.  Statement order matters: the index
 * bookkeeping below reads fields before overwriting them. */
static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
    DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
    if ( (ctxPtr->end >= ctxPtr->prefixStart + 4)
      && (LZ4HC_getCLevelParams(ctxPtr->compressionLevel).strat != lz4mid) ) {
        LZ4HC_Insert (ctxPtr, ctxPtr->end-3);  /* Referencing remaining dictionary content */
    }

    /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
    ctxPtr->lowLimit  = ctxPtr->dictLimit;
    ctxPtr->dictStart  = ctxPtr->prefixStart;
    ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart);
    ctxPtr->prefixStart = newBlock;
    ctxPtr->end  = newBlock;
    ctxPtr->nextToUpdate = ctxPtr->dictLimit;   /* match referencing will resume from there */

    /* cannot reference an extDict and a dictCtx at the same time */
    ctxPtr->dictCtx = NULL;
}
1728
1729
/* Streaming compression of one block.  Handles state auto-init, index
 * overflow near 2 GB, non-contiguous blocks, and input overlapping the
 * external dictionary, before delegating to LZ4HC_compress_generic(). */
static int
LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                 const char* src, char* dst,
                                 int* srcSizePtr, int dstCapacity,
                                 limitedOutput_directive limit)
{
    LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
                LZ4_streamHCPtr, src, *srcSizePtr, limit);
    assert(ctxPtr != NULL);
    /* auto-init if forgotten */
    if (ctxPtr->prefixStart == NULL)
        LZ4HC_init_internal (ctxPtr, (const BYTE*) src);

    /* Check overflow : when the absolute index nears 2 GB, rebase the
     * state by reloading (at most) the last 64 KB as a fresh dictionary */
    if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) {
        size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart);
        if (dictSize > 64 KB) dictSize = 64 KB;
        LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
    }

    /* Check if blocks follow each other */
    if ((const BYTE*)src != ctxPtr->end)
        LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);

    /* Check overlapping input/dictionary space : shrink the dictionary
     * window so it never covers bytes the new input will overwrite */
    {   const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
        const BYTE* const dictBegin = ctxPtr->dictStart;
        const BYTE* const dictEnd   = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit);
        if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
            if (sourceEnd > dictEnd) sourceEnd = dictEnd;
            ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart);
            ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart);
            /* invalidate dictionary if it's too small */
            if (ctxPtr->dictLimit - ctxPtr->lowLimit < LZ4HC_HASHSIZE) {
                ctxPtr->lowLimit = ctxPtr->dictLimit;
                ctxPtr->dictStart = ctxPtr->prefixStart;
    }   }   }

    return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
}
1770
1771
/* Streaming compression of one block; selects bound-checked output only
 * when dst may be smaller than the worst-case compressed size. */
int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
{
    limitedOutput_directive const outMode =
        (dstCapacity < LZ4_compressBound(srcSize)) ? limitedOutput : notLimited;
    DEBUGLOG(5, "LZ4_compress_HC_continue");
    return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, outMode);
}
1779
1780
/* Streaming variant of destSize compression: consume as much input as
 * fits into targetDestSize output bytes (fillOutput mode). */
int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
{
    return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst,
                                           srcSizePtr, targetDestSize, fillOutput);
}
1784
1785
1786
/* LZ4_saveDictHC :
1787
 * save history content
1788
 * into a user-provided buffer
1789
 * which is then used to continue compression
1790
 */
1791
int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
    LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
    int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart);
    DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
    assert(prefixSize >= 0);
    /* clamp requested size: at most 64 KB, at least 4 bytes (else nothing),
     * and never more history than actually exists */
    if (dictSize > 64 KB) dictSize = 64 KB;
    if (dictSize < 4) dictSize = 0;
    if (dictSize > prefixSize) dictSize = prefixSize;
    if (safeBuffer == NULL) assert(dictSize == 0); /* a NULL buffer with !0 size is invalid */
    if (dictSize > 0)
        LZ4_memmove(safeBuffer, streamPtr->end - dictSize, (size_t)dictSize);
    /* rebase the state onto safeBuffer, keeping absolute indexes consistent
     * so previously-inserted table entries remain valid */
    {   U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit;
        streamPtr->end = (safeBuffer == NULL) ? NULL : (const BYTE*)safeBuffer + dictSize;
        streamPtr->prefixStart = (const BYTE*)safeBuffer;
        streamPtr->dictLimit = endIndex - (U32)dictSize;
        streamPtr->lowLimit = endIndex - (U32)dictSize;
        streamPtr->dictStart = streamPtr->prefixStart;
        if (streamPtr->nextToUpdate < streamPtr->dictLimit)
            streamPtr->nextToUpdate = streamPtr->dictLimit;
    }
    return dictSize;
}
1814
1815
1816
/* ================================================
1817
 *  LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
1818
 * ===============================================*/
1819
/* One cell of the optimal-parse price table: the cheapest known way to
 * reach this position, and the last step (match or literal) that does so. */
typedef struct {
    int price;    /* cost in bytes to encode the input up to this position */
    int off;      /* offset of the match ending here (0 when last step is a literal) */
    int mlen;     /* length of the match ending here (1 denotes a literal) */
    int litlen;   /* number of consecutive literals ending at this position */
} LZ4HC_optimal_t;
1825
1826
/* price in bytes */
1827
LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
1828
249M
{
1829
249M
    int price = litlen;
1830
249M
    assert(litlen >= 0);
1831
249M
    if (litlen >= (int)RUN_MASK)
1832
6.55M
        price += 1 + ((litlen-(int)RUN_MASK) / 255);
1833
249M
    return price;
1834
249M
}
1835
1836
/* requires mlen >= MINMATCH */
1837
LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
1838
168M
{
1839
168M
    int price = 1 + 2 ; /* token + 16-bit offset */
1840
168M
    assert(litlen >= 0);
1841
168M
    assert(mlen >= MINMATCH);
1842
1843
168M
    price += LZ4HC_literalsPrice(litlen);
1844
1845
168M
    if (mlen >= (int)(ML_MASK+MINMATCH))
1846
121M
        price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255);
1847
1848
168M
    return price;
1849
168M
}
1850
1851
/* Search for a match at ip strictly longer than minLen.
 * Returns a zeroed match (len==0) when no improvement is found.
 * In favorDecSpeed mode, mid-sized matches are shortened to 18 bytes
 * (a decoder-friendly length). */
LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
                      const BYTE* ip, const BYTE* const iHighLimit,
                      int minLen, int nbSearches,
                      const dictCtx_directive dict,
                      const HCfavor_e favorDecSpeed)
{
    LZ4HC_match_t const match0 = { 0 , 0, 0 };
    /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
     * but this won't be the case here, as we define iLowLimit==ip,
    ** so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
    LZ4HC_match_t md = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
    assert(md.back == 0);
    if (md.len <= minLen) return match0;   /* no match longer than minLen */
    if (favorDecSpeed) {
        if ((md.len>18) & (md.len<=36)) md.len=18;   /* favor dec.speed (shortcut) */
    }
    return md;
}
1870
1871
1872
1873
/* preconditions:
1874
 * - *srcSizePtr within [1, LZ4_MAX_INPUT_SIZE]
1875
 * - src is valid
1876
 * - maxOutputSize >= 1
1877
 * - dst is valid
1878
 */
1879
static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
1880
                                    const char* const source,
1881
                                    char* dst,
1882
                                    int* srcSizePtr,
1883
                                    int dstCapacity,
1884
                                    int const nbSearches,
1885
                                    size_t sufficient_len,
1886
                                    const limitedOutput_directive limit,
1887
                                    int const fullUpdate,
1888
                                    const dictCtx_directive dict,
1889
                                    const HCfavor_e favorDecSpeed)
1890
6.04k
{
1891
6.04k
    int retval = 0;
1892
333M
#define TRAILING_LITERALS 3
1893
6.04k
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
1894
6.04k
    LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
1895
#else
1896
    LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS];   /* ~64 KB, which can be a bit large for some stacks... */
1897
#endif
1898
1899
6.04k
    const BYTE* ip = (const BYTE*) source;
1900
6.04k
    const BYTE* anchor = ip;
1901
6.04k
    const BYTE* const iend = ip + *srcSizePtr;
1902
6.04k
    const BYTE* const mflimit = iend - MFLIMIT;
1903
6.04k
    const BYTE* const matchlimit = iend - LASTLITERALS;
1904
6.04k
    BYTE* op = (BYTE*) dst;
1905
6.04k
    BYTE* opSaved = (BYTE*) dst;
1906
6.04k
    BYTE* oend = op + dstCapacity;
1907
6.04k
    int ovml = MINMATCH;  /* overflow - last sequence */
1908
6.04k
    int ovoff = 0;
1909
1910
    /* init */
1911
6.04k
    DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
1912
6.04k
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
1913
6.04k
    if (opt == NULL) goto _return_label;
1914
6.04k
#endif
1915
1916
    /* preconditions verifications */
1917
6.04k
    assert(dstCapacity > 0);
1918
6.04k
    assert(dst != NULL);
1919
6.04k
    assert(*srcSizePtr > 0);
1920
6.04k
    assert(source != NULL);
1921
1922
6.04k
    *srcSizePtr = 0;
1923
6.04k
    if (limit == fillOutput) oend -= LASTLITERALS;   /* Hack for support LZ4 format restriction */
1924
6.04k
    if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
1925
1926
    /* Main Loop */
1927
67.2M
    while (ip <= mflimit) {
1928
67.2M
         int const llen = (int)(ip - anchor);
1929
67.2M
         int best_mlen, best_off;
1930
67.2M
         int cur, last_match_pos = 0;
1931
1932
67.2M
         LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
1933
67.2M
         if (firstMatch.len==0) { ip++; continue; }
1934
1935
2.56M
         if ((size_t)firstMatch.len > sufficient_len) {
1936
             /* good enough solution : immediate encoding */
1937
38.9k
             int const firstML = firstMatch.len;
1938
38.9k
             opSaved = op;
1939
38.9k
             if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, firstMatch.off, limit, oend) ) {  /* updates ip, op and anchor */
1940
20
                 ovml = firstML;
1941
20
                 ovoff = firstMatch.off;
1942
20
                 goto _dest_overflow;
1943
20
             }
1944
38.8k
             continue;
1945
38.9k
         }
1946
1947
         /* set prices for first positions (literals) */
1948
2.52M
         {   int rPos;
1949
12.6M
             for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
1950
10.1M
                 int const cost = LZ4HC_literalsPrice(llen + rPos);
1951
10.1M
                 opt[rPos].mlen = 1;
1952
10.1M
                 opt[rPos].off = 0;
1953
10.1M
                 opt[rPos].litlen = llen + rPos;
1954
10.1M
                 opt[rPos].price = cost;
1955
10.1M
                 DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
1956
10.1M
                             rPos, cost, opt[rPos].litlen);
1957
10.1M
         }   }
1958
         /* set prices using initial match */
1959
2.52M
         {   int const matchML = firstMatch.len;   /* necessarily < sufficient_len < LZ4_OPT_NUM */
1960
2.52M
             int const offset = firstMatch.off;
1961
2.52M
             int mlen;
1962
2.52M
             assert(matchML < LZ4_OPT_NUM);
1963
23.6M
             for (mlen = MINMATCH ; mlen <= matchML ; mlen++) {
1964
21.0M
                 int const cost = LZ4HC_sequencePrice(llen, mlen);
1965
21.0M
                 opt[mlen].mlen = mlen;
1966
21.0M
                 opt[mlen].off = offset;
1967
21.0M
                 opt[mlen].litlen = llen;
1968
21.0M
                 opt[mlen].price = cost;
1969
21.0M
                 DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
1970
21.0M
                             mlen, cost, mlen);
1971
21.0M
         }   }
1972
2.52M
         last_match_pos = firstMatch.len;
1973
2.52M
         {   int addLit;
1974
10.1M
             for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
1975
7.58M
                 opt[last_match_pos+addLit].mlen = 1; /* literal */
1976
7.58M
                 opt[last_match_pos+addLit].off = 0;
1977
7.58M
                 opt[last_match_pos+addLit].litlen = addLit;
1978
7.58M
                 opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
1979
7.58M
                 DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
1980
7.58M
                             last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
1981
7.58M
         }   }
1982
1983
         /* check further positions */
1984
53.1M
         for (cur = 1; cur < last_match_pos; cur++) {
1985
50.6M
             const BYTE* const curPtr = ip + cur;
1986
50.6M
             LZ4HC_match_t newMatch;
1987
1988
50.6M
             if (curPtr > mflimit) break;
1989
50.6M
             DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u",
1990
50.6M
                     cur, opt[cur].price, opt[cur+1].price, cur+1);
1991
50.6M
             if (fullUpdate) {
1992
                 /* not useful to search here if next position has same (or lower) cost */
1993
38.3M
                 if ( (opt[cur+1].price <= opt[cur].price)
1994
                   /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
1995
30.6M
                   && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) )
1996
28.1M
                     continue;
1997
38.3M
             } else {
1998
                 /* not useful to search here if next position has same (or lower) cost */
1999
12.3M
                 if (opt[cur+1].price <= opt[cur].price) continue;
2000
12.3M
             }
2001
2002
14.3M
             DEBUGLOG(7, "search at rPos:%u", cur);
2003
14.3M
             if (fullUpdate)
2004
10.2M
                 newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
2005
4.10M
             else
2006
                 /* only test matches of minimum length; slightly faster, but misses a few bytes */
2007
4.10M
                 newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed);
2008
14.3M
             if (!newMatch.len) continue;
2009
2010
6.97M
             if ( ((size_t)newMatch.len > sufficient_len)
2011
6.97M
               || (newMatch.len + cur >= LZ4_OPT_NUM) ) {
2012
                 /* immediate encoding */
2013
8.16k
                 best_mlen = newMatch.len;
2014
8.16k
                 best_off = newMatch.off;
2015
8.16k
                 last_match_pos = cur + 1;
2016
8.16k
                 goto encode;
2017
8.16k
             }
2018
2019
             /* before match : set price with literals at beginning */
2020
6.96M
             {   int const baseLitlen = opt[cur].litlen;
2021
6.96M
                 int litlen;
2022
27.8M
                 for (litlen = 1; litlen < MINMATCH; litlen++) {
2023
20.9M
                     int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen);
2024
20.9M
                     int const pos = cur + litlen;
2025
20.9M
                     if (price < opt[pos].price) {
2026
0
                         opt[pos].mlen = 1; /* literal */
2027
0
                         opt[pos].off = 0;
2028
0
                         opt[pos].litlen = baseLitlen+litlen;
2029
0
                         opt[pos].price = price;
2030
0
                         DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)",
2031
0
                                     pos, price, opt[pos].litlen);
2032
0
             }   }   }
2033
2034
             /* set prices using match at position = cur */
2035
6.96M
             {   int const matchML = newMatch.len;
2036
6.96M
                 int ml = MINMATCH;
2037
2038
6.96M
                 assert(cur + newMatch.len < LZ4_OPT_NUM);
2039
154M
                 for ( ; ml <= matchML ; ml++) {
2040
147M
                     int const pos = cur + ml;
2041
147M
                     int const offset = newMatch.off;
2042
147M
                     int price;
2043
147M
                     int ll;
2044
147M
                     DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)",
2045
147M
                                 pos, last_match_pos);
2046
147M
                     if (opt[cur].mlen == 1) {
2047
59.9M
                         ll = opt[cur].litlen;
2048
59.9M
                         price = ((cur > ll) ? opt[cur - ll].price : 0)
2049
59.9M
                               + LZ4HC_sequencePrice(ll, ml);
2050
87.9M
                     } else {
2051
87.9M
                         ll = 0;
2052
87.9M
                         price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
2053
87.9M
                     }
2054
2055
147M
                    assert((U32)favorDecSpeed <= 1);
2056
147M
                     if (pos > last_match_pos+TRAILING_LITERALS
2057
130M
                      || price <= opt[pos].price - (int)favorDecSpeed) {
2058
24.9M
                         DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)",
2059
24.9M
                                     pos, price, ml);
2060
24.9M
                         assert(pos < LZ4_OPT_NUM);
2061
24.9M
                         if ( (ml == matchML)  /* last pos of last match */
2062
3.72M
                           && (last_match_pos < pos) )
2063
2.92M
                             last_match_pos = pos;
2064
24.9M
                         opt[pos].mlen = ml;
2065
24.9M
                         opt[pos].off = offset;
2066
24.9M
                         opt[pos].litlen = ll;
2067
24.9M
                         opt[pos].price = price;
2068
24.9M
             }   }   }
2069
             /* complete following positions with literals */
2070
6.96M
             {   int addLit;
2071
27.8M
                 for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
2072
20.9M
                     opt[last_match_pos+addLit].mlen = 1; /* literal */
2073
20.9M
                     opt[last_match_pos+addLit].off = 0;
2074
20.9M
                     opt[last_match_pos+addLit].litlen = addLit;
2075
20.9M
                     opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
2076
20.9M
                     DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
2077
20.9M
             }   }
2078
6.96M
         }  /* for (cur = 1; cur <= last_match_pos; cur++) */
2079
2080
2.52M
         assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS);
2081
2.51M
         best_mlen = opt[last_match_pos].mlen;
2082
2.51M
         best_off = opt[last_match_pos].off;
2083
2.51M
         cur = last_match_pos - best_mlen;
2084
2085
2.52M
encode: /* cur, last_match_pos, best_mlen, best_off must be set */
2086
2.52M
         assert(cur < LZ4_OPT_NUM);
2087
2.52M
         assert(last_match_pos >= 1);  /* == 1 when only one candidate */
2088
2.52M
         DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
2089
2.52M
         {   int candidate_pos = cur;
2090
2.52M
             int selected_matchLength = best_mlen;
2091
2.52M
             int selected_offset = best_off;
2092
5.66M
             while (1) {  /* from end to beginning */
2093
5.66M
                 int const next_matchLength = opt[candidate_pos].mlen;  /* can be 1, means literal */
2094
5.66M
                 int const next_offset = opt[candidate_pos].off;
2095
5.66M
                 DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength);
2096
5.66M
                 opt[candidate_pos].mlen = selected_matchLength;
2097
5.66M
                 opt[candidate_pos].off = selected_offset;
2098
5.66M
                 selected_matchLength = next_matchLength;
2099
5.66M
                 selected_offset = next_offset;
2100
5.66M
                 if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */
2101
5.66M
                 assert(next_matchLength > 0);  /* can be 1, means literal */
2102
3.13M
                 candidate_pos -= next_matchLength;
2103
3.13M
         }   }
2104
2105
         /* encode all recorded sequences in order */
2106
2.52M
         {   int rPos = 0;  /* relative position (to ip) */
2107
8.19M
             while (rPos < last_match_pos) {
2108
5.66M
                 int const ml = opt[rPos].mlen;
2109
5.66M
                 int const offset = opt[rPos].off;
2110
5.66M
                 if (ml == 1) { ip++; rPos++; continue; }  /* literal; note: can end up with several literals, in which case, skip them */
2111
4.23M
                 rPos += ml;
2112
4.23M
                 assert(ml >= MINMATCH);
2113
4.23M
                 assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
2114
4.23M
                 opSaved = op;
2115
4.23M
                 if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, offset, limit, oend) ) {  /* updates ip, op and anchor */
2116
103
                     ovml = ml;
2117
103
                     ovoff = offset;
2118
103
                     goto _dest_overflow;
2119
103
         }   }   }
2120
2.52M
     }  /* while (ip <= mflimit) */
2121
2122
5.92k
_last_literals:
2123
     /* Encode Last Literals */
2124
5.92k
     {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
2125
5.92k
         size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
2126
5.92k
         size_t const totalSize = 1 + llAdd + lastRunSize;
2127
5.92k
         if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
2128
5.92k
         if (limit && (op + totalSize > oend)) {
2129
549
             if (limit == limitedOutput) { /* Check output limit */
2130
549
                retval = 0;
2131
549
                goto _return_label;
2132
549
             }
2133
             /* adapt lastRunSize to fill 'dst' */
2134
0
             lastRunSize  = (size_t)(oend - op) - 1 /*token*/;
2135
0
             llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
2136
0
             lastRunSize -= llAdd;
2137
0
         }
2138
5.37k
         DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
2139
5.37k
         ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
2140
2141
5.37k
         if (lastRunSize >= RUN_MASK) {
2142
478
             size_t accumulator = lastRunSize - RUN_MASK;
2143
478
             *op++ = (RUN_MASK << ML_BITS);
2144
5.61k
             for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
2145
478
             *op++ = (BYTE) accumulator;
2146
4.89k
         } else {
2147
4.89k
             *op++ = (BYTE)(lastRunSize << ML_BITS);
2148
4.89k
         }
2149
5.37k
         LZ4_memcpy(op, anchor, lastRunSize);
2150
5.37k
         op += lastRunSize;
2151
5.37k
     }
2152
2153
     /* End */
2154
0
     *srcSizePtr = (int) (((const char*)ip) - source);
2155
5.37k
     retval = (int) ((char*)op-dst);
2156
5.37k
     goto _return_label;
2157
2158
123
_dest_overflow:
2159
123
if (limit == fillOutput) {
2160
     /* Assumption : ip, anchor, ovml and ovref must be set correctly */
2161
0
     size_t const ll = (size_t)(ip - anchor);
2162
0
     size_t const ll_addbytes = (ll + 240) / 255;
2163
0
     size_t const ll_totalCost = 1 + ll_addbytes + ll;
2164
0
     BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
2165
0
     DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
2166
0
     op = opSaved;  /* restore correct out pointer */
2167
0
     if (op + ll_totalCost <= maxLitPos) {
2168
         /* ll validated; now adjust match length */
2169
0
         size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
2170
0
         size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
2171
0
         assert(maxMlSize < INT_MAX); assert(ovml >= 0);
2172
0
         if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
2173
0
         if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
2174
0
             DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
2175
0
             DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
2176
0
             LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovoff, notLimited, oend);
2177
0
             DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
2178
0
     }   }
2179
0
     goto _last_literals;
2180
0
}
2181
6.04k
_return_label:
2182
6.04k
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
2183
6.04k
     if (opt) FREEMEM(opt);
2184
6.04k
#endif
2185
6.04k
     return retval;
2186
123
}
2187
2188
2189
/***************************************************
2190
*  Deprecated Functions
2191
***************************************************/
2192
2193
/* These functions currently generate deprecation warnings */
2194
2195
/* Wrappers for deprecated compression functions */
2196
0
int LZ4_compressHC(const char* src, char* dst, int srcSize)
{
    /* Deprecated wrapper: compress with default level into a worst-case-sized buffer. */
    return LZ4_compress_HC(src, dst, srcSize, LZ4_compressBound(srcSize), 0);
}
2197
0
int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize)
{
    /* Deprecated wrapper: compress with default level, bounded by caller-provided capacity. */
    return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0);
}
2198
0
int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel)
{
    /* Deprecated wrapper: caller-selected compression level, worst-case output capacity. */
    return LZ4_compress_HC(src, dst, srcSize, LZ4_compressBound(srcSize), cLevel);
}
2199
0
int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel)
{
    /* Deprecated wrapper: caller-selected level and output capacity. */
    return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel);
}
2200
0
int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize)
{
    /* Deprecated wrapper: externally-allocated state, default level, worst-case capacity. */
    return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), 0);
}
2201
0
int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize)
{
    /* Deprecated wrapper: externally-allocated state, default level, bounded output. */
    return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, 0);
}
2202
0
int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel)
{
    /* Deprecated wrapper: externally-allocated state, caller-selected level, worst-case capacity. */
    return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel);
}
2203
0
int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel)
{
    /* Deprecated wrapper: externally-allocated state, caller-selected level and capacity. */
    return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel);
}
2204
0
int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize)
{
    /* Deprecated streaming wrapper: worst-case output capacity assumed. */
    return LZ4_compress_HC_continue(ctx, src, dst, srcSize, LZ4_compressBound(srcSize));
}
2205
0
int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize)
{
    /* Deprecated streaming wrapper: bounded output capacity. */
    return LZ4_compress_HC_continue(ctx, src, dst, srcSize, maxDstSize);
}
2206
2207
2208
/* Deprecated streaming functions */
2209
0
int LZ4_sizeofStreamStateHC(void)
{
    /* Size (in bytes) of the state buffer LZ4_resetStreamStateHC() expects. */
    return sizeof(LZ4_streamHC_t);
}
2210
2211
/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
2212
 * @return : 0 on success, !=0 if error */
2213
int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
2214
0
{
2215
0
    LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
2216
0
    if (hc4 == NULL) return 1;   /* init failed */
2217
0
    LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
2218
0
    return 0;
2219
0
}
2220
2221
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
2222
void* LZ4_createHC (const char* inputBuffer)
2223
0
{
2224
0
    LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
2225
0
    if (hc4 == NULL) return NULL;   /* not enough memory */
2226
0
    LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
2227
0
    return hc4;
2228
0
}
2229
2230
int LZ4_freeHC (void* LZ4HC_Data)
2231
0
{
2232
0
    if (!LZ4HC_Data) return 0;  /* support free on NULL */
2233
0
    FREEMEM(LZ4HC_Data);
2234
0
    return 0;
2235
0
}
2236
#endif
2237
2238
int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
2239
0
{
2240
0
    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
2241
0
}
2242
2243
int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
2244
0
{
2245
0
    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
2246
0
}
2247
2248
char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
2249
0
{
2250
0
    LZ4HC_CCtx_internal* const s = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
2251
0
    const BYTE* const bufferStart = s->prefixStart - s->dictLimit + s->lowLimit;
2252
0
    LZ4_resetStreamHC_fast((LZ4_streamHC_t*)LZ4HC_Data, s->compressionLevel);
2253
    /* ugly conversion trick, required to evade (const char*) -> (char*) cast-qual warning :( */
2254
0
    return (char*)(uptrval)bufferStart;
2255
0
}