Coverage Report

Created: 2025-06-24 07:53

/src/duckdb/third_party/lz4/lz4.cpp
Line | Count | Source
1
/*
2
   LZ4 - Fast LZ compression algorithm
3
   Copyright (C) 2011-2020, Yann Collet.
4
5
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
7
   Redistribution and use in source and binary forms, with or without
8
   modification, are permitted provided that the following conditions are
9
   met:
10
11
       * Redistributions of source code must retain the above copyright
12
   notice, this list of conditions and the following disclaimer.
13
       * Redistributions in binary form must reproduce the above
14
   copyright notice, this list of conditions and the following disclaimer
15
   in the documentation and/or other materials provided with the
16
   distribution.
17
18
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
   You can contact the author at :
31
    - LZ4 homepage : http://www.lz4.org
32
    - LZ4 source repository : https://github.com/lz4/lz4
33
*/
34
35
/*-************************************
36
*  Tuning parameters
37
**************************************/
38
/*
39
 * LZ4_HEAPMODE :
40
 * Select how default compression functions will allocate memory for their hash table,
41
 * on the stack (0: default, fastest), or on the heap (1: requires malloc()).
42
 */
43
#ifndef LZ4_HEAPMODE
44
#  define LZ4_HEAPMODE 0
45
#endif
46
47
/*
48
 * LZ4_ACCELERATION_DEFAULT :
49
 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
50
 */
51
0
#define LZ4_ACCELERATION_DEFAULT 1
52
/*
53
 * LZ4_ACCELERATION_MAX :
54
 * Any "acceleration" value higher than this threshold
55
 * gets treated as LZ4_ACCELERATION_MAX instead (fix #876)
56
 */
57
0
#define LZ4_ACCELERATION_MAX 65537
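/* Illustrative sketch (editor-added, not part of lz4.cpp; assumes the lz4.h
 * declarations): out-of-range "acceleration" values passed to the public API
 * are clamped using the two macros above, so the calls below behave like
 * acceleration == 1 and acceleration == LZ4_ACCELERATION_MAX respectively.
 * Buffer sizes are arbitrary example values. */
static void acceleration_clamping_example(void)
{
    char src[1024] = {0};
    char dst[LZ4_COMPRESSBOUND(1024)];
    (void)LZ4_compress_fast(src, dst, (int)sizeof(src), (int)sizeof(dst), 0);        /* <= 0  -> LZ4_ACCELERATION_DEFAULT */
    (void)LZ4_compress_fast(src, dst, (int)sizeof(src), (int)sizeof(dst), 1 << 20);  /* > max -> LZ4_ACCELERATION_MAX */
}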
58
59
60
/*-************************************
61
*  CPU Feature Detection
62
**************************************/
63
/* LZ4_FORCE_MEMORY_ACCESS
64
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
65
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
66
 * The switch below allows selecting a different access method for improved performance.
67
 * Method 0 (default) : use `memcpy()`. Safe and portable.
68
 * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
69
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
70
 * Method 2 : direct access. This method is portable but violates the C standard.
71
 *            It can generate buggy code on targets whose assembly generation depends on alignment.
72
 *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
73
 * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
74
 * Prefer these methods in priority order (0 > 1 > 2)
75
 */
76
#ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
77
#  if defined(__GNUC__) && \
78
  ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
79
  || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
80
#    define LZ4_FORCE_MEMORY_ACCESS 2
81
#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
82
#    define LZ4_FORCE_MEMORY_ACCESS 1
83
#  endif
84
#endif
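/* Illustrative sketch (editor-added, not part of lz4.cpp): the three access
 * methods described above roughly correspond to the following forms of an
 * unaligned 32-bit read; the real implementations appear further down in this
 * file. */
#include <stdint.h>
#include <string.h>
/* Method 0: fixed-size memcpy(), safe and portable; good compilers turn it
 * into a single load on targets that allow unaligned access. */
static uint32_t unaligned_read32_m0(const void* p) { uint32_t v; memcpy(&v, p, sizeof(v)); return v; }
/* Method 1: packed union, relies on a compiler extension (gcc/icc). */
typedef union { uint32_t u32; } __attribute__((packed)) unalign32_sketch;
static uint32_t unaligned_read32_m1(const void* p) { return ((const unalign32_sketch*)p)->u32; }
/* Method 2: direct dereference; fast on some targets, but undefined behaviour
 * per the C standard when p is not suitably aligned. */
static uint32_t unaligned_read32_m2(const void* p) { return *(const uint32_t*)p; }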
85
86
/*
87
 * LZ4_FORCE_SW_BITCOUNT
88
 * Define this parameter if your target system or compiler does not support hardware bit count
89
 */
90
#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for WinCE doesn't support Hardware bit count */
91
#  undef  LZ4_FORCE_SW_BITCOUNT  /* avoid double def */
92
#  define LZ4_FORCE_SW_BITCOUNT
93
#endif
94
95
96
97
/*-************************************
98
*  Dependency
99
**************************************/
100
/*
101
 * LZ4_SRC_INCLUDED:
102
 * Amalgamation flag, whether lz4.c is included
103
 */
104
#ifndef LZ4_SRC_INCLUDED
105
#  define LZ4_SRC_INCLUDED 1
106
#endif
107
108
#ifndef LZ4_STATIC_LINKING_ONLY
109
#define LZ4_STATIC_LINKING_ONLY
110
#endif
111
112
#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
113
#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
114
#endif
115
116
#define LZ4_STATIC_LINKING_ONLY  /* LZ4_DISTANCE_MAX */
117
#include <stdlib.h>
118
119
#include "lz4.hpp"
120
/* see also "memory routines" below */
121
122
123
/*-************************************
124
*  Compiler Options
125
**************************************/
126
127
#define LZ4_FORCE_INLINE static
128
#define LZ4_FORCE_O2
129
130
#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
131
0
#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
132
#else
133
#  define expect(expr,value)    (expr)
134
#endif
135
136
#ifndef likely
137
0
#define likely(expr)     expect((expr) != 0, 1)
138
#endif
139
#ifndef unlikely
140
0
#define unlikely(expr)   expect((expr) != 0, 0)
141
#endif
142
143
/* Should the alignment test prove unreliable, for some reason,
144
 * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
145
#ifndef LZ4_ALIGN_TEST  /* can be externally provided */
146
# define LZ4_ALIGN_TEST 1
147
#endif
148
149
namespace duckdb_lz4 {
150
/*-************************************
151
*  Memory routines
152
**************************************/
153
154
/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :
155
 *  Disable relatively high-level LZ4/HC functions that use dynamic memory
156
 *  allocation functions (malloc(), calloc(), free()).
157
 *
158
 *  Note that this is a compile-time switch. And since it disables
159
 *  public/stable LZ4 v1 API functions, we don't recommend using this
160
 *  symbol to generate a library for distribution.
161
 *
162
 *  The following public functions are removed when this symbol is defined.
163
 *  - lz4   : LZ4_createStream, LZ4_freeStream,
164
 *            LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)
165
 *  - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,
166
 *            LZ4_createHC (deprecated), LZ4_freeHC  (deprecated)
167
 *  - lz4frame, lz4file : All LZ4F_* functions
168
 */
169
#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
170
#  define ALLOC(s)          lz4_error_memory_allocation_is_disabled
171
#  define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled
172
#  define FREEMEM(p)        lz4_error_memory_allocation_is_disabled
173
#elif defined(LZ4_USER_MEMORY_FUNCTIONS)
174
/* memory management functions can be customized by user project.
175
 * The functions below must exist somewhere in the project
176
 * and be available at link time */
177
void* LZ4_malloc(size_t s);
178
void* LZ4_calloc(size_t n, size_t s);
179
void  LZ4_free(void* p);
180
# define ALLOC(s)          LZ4_malloc(s)
181
# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
182
# define FREEMEM(p)        LZ4_free(p)
183
#else
184
# include <stdlib.h>   /* malloc, calloc, free */
185
0
# define ALLOC(s)          malloc(s)
186
0
# define ALLOC_AND_ZERO(s) calloc(1,s)
187
0
# define FREEMEM(p)        free(p)
188
#endif
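/* Illustrative sketch (editor-added, not part of lz4.cpp): with
 * LZ4_USER_MEMORY_FUNCTIONS defined, the three declarations above must be
 * provided by the embedding project and resolved at link time, e.g. to route
 * LZ4's allocations through a custom allocator. The byte counter below is
 * purely hypothetical bookkeeping. */
#include <stdlib.h>
static size_t g_lz4_bytes_requested = 0;   /* hypothetical statistic */
void* LZ4_malloc(size_t s)           { g_lz4_bytes_requested += s;     return malloc(s); }
void* LZ4_calloc(size_t n, size_t s) { g_lz4_bytes_requested += n * s; return calloc(n, s); }
void  LZ4_free(void* p)              { free(p); }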
189
190
#if ! LZ4_FREESTANDING
191
#  include <string.h>   /* memset, memcpy */
192
#endif
193
#if !defined(LZ4_memset)
194
0
#  define LZ4_memset(p,v,s) memset((p),(v),(s))
195
#endif
196
0
#define MEM_INIT(p,v,s)   LZ4_memset((p),(v),(s))
197
198
199
/*-************************************
200
*  Common Constants
201
**************************************/
202
0
#define MINMATCH 4
203
204
0
#define WILDCOPYLENGTH 8
205
0
#define LASTLITERALS   5   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
206
0
#define MFLIMIT       12   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
207
0
#define MATCH_SAFEGUARD_DISTANCE  ((2*WILDCOPYLENGTH) - MINMATCH)   /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
208
0
#define FASTLOOP_SAFE_DISTANCE 64
209
static const int LZ4_minLength = (MFLIMIT+1);
210
211
0
#define KB *(1 <<10)
212
#define MB *(1 <<20)
213
0
#define GB *(1U<<30)
214
215
0
#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
216
#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX)   /* max supported by LZ4 format */
217
#  error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
218
#endif
219
220
0
#define ML_BITS  4
221
0
#define ML_MASK  ((1U<<ML_BITS)-1)
222
0
#define RUN_BITS (8-ML_BITS)
223
0
#define RUN_MASK ((1U<<RUN_BITS)-1)
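/* Illustrative sketch (editor-added, not part of lz4.cpp): ML_BITS/ML_MASK and
 * RUN_BITS/RUN_MASK split a sequence token into a 4-bit literal-length field
 * (high nibble) and a 4-bit match-length field (low nibble); a field value of
 * 15 (RUN_MASK / ML_MASK) means additional length bytes follow the token. */
static unsigned token_literal_length(unsigned char token) { return (unsigned)(token >> ML_BITS); }  /* 15 -> extra length bytes follow */
static unsigned token_match_code(unsigned char token)     { return (unsigned)(token &  ML_MASK); }  /* real match length = code + MINMATCH */
/* e.g. token 0x54 encodes 5 literals and a match of 4 + MINMATCH = 8 bytes. */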
224
225
226
/*-************************************
227
*  Error detection
228
**************************************/
229
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
230
#  include <assert.h>
231
#else
232
#  ifndef assert
233
0
#    define assert(condition) ((void)0)
234
#  endif
235
#endif
236
237
0
#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use after variable declarations */
238
239
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
240
#  include <stdio.h>
241
   static int g_debuglog_enable = 1;
242
#  define DEBUGLOG(l, ...) {                          \
243
        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {  \
244
            fprintf(stderr, __FILE__ ": ");           \
245
            fprintf(stderr, __VA_ARGS__);             \
246
            fprintf(stderr, " \n");                   \
247
    }   }
248
#else
249
0
#  define DEBUGLOG(l, ...) {}    /* disabled */
250
#endif
251
252
static int LZ4_isAligned(const void* ptr, size_t alignment)
253
0
{
254
0
    return ((size_t)ptr & (alignment -1)) == 0;
255
0
}
256
257
258
/*-************************************
259
*  Types
260
**************************************/
261
#include <limits.h>
262
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
263
# include <stdint.h>
264
  typedef  uint8_t BYTE;
265
  typedef uint16_t U16;
266
  typedef uint32_t U32;
267
  typedef  int32_t S32;
268
  typedef uint64_t U64;
269
  typedef uintptr_t uptrval;
270
#else
271
# if UINT_MAX != 4294967295UL
272
#   error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
273
# endif
274
  typedef unsigned char       BYTE;
275
  typedef unsigned short      U16;
276
  typedef unsigned int        U32;
277
  typedef   signed int        S32;
278
  typedef unsigned long long  U64;
279
  typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
280
#endif
281
282
#if defined(__x86_64__)
283
  typedef U64    reg_t;   /* 64-bits in x32 mode */
284
#else
285
  typedef size_t reg_t;   /* 32-bits in x32 mode */
286
#endif
287
288
typedef enum {
289
    notLimited = 0,
290
    limitedOutput = 1,
291
    fillOutput = 2
292
} limitedOutput_directive;
293
294
295
/*-************************************
296
*  Reading and writing into memory
297
**************************************/
298
299
/**
300
 * LZ4 relies on memcpy with a constant size being inlined. In freestanding
301
 * environments, the compiler can't assume the implementation of memcpy() is
302
 * standard compliant, so it can't apply its specialized memcpy() inlining
303
 * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
304
 * memcpy() as if it were standard compliant, so it can inline it in freestanding
305
 * environments. This is needed when decompressing the Linux Kernel, for example.
306
 */
307
#if !defined(LZ4_memcpy)
308
#  if defined(__GNUC__) && (__GNUC__ >= 4)
309
0
#    define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
310
#  else
311
#    define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
312
#  endif
313
#endif
314
315
#if !defined(LZ4_memmove)
316
#  if defined(__GNUC__) && (__GNUC__ >= 4)
317
0
#    define LZ4_memmove __builtin_memmove
318
#  else
319
#    define LZ4_memmove memmove
320
#  endif
321
#endif
322
323
static unsigned LZ4_isLittleEndian(void)
324
0
{
325
0
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
326
0
    return one.c[0];
327
0
}
328
329
330
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
331
/* lie to the compiler about data alignment; use with caution */
332
333
static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
334
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
335
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
336
337
static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
338
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
339
340
#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
341
342
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
343
/* currently only defined for gcc and icc */
344
typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign;
345
346
0
static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; }
347
0
static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; }
348
0
static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; }
349
350
0
static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; }
351
0
static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; }
352
353
#else  /* safe and portable access using memcpy() */
354
355
static U16 LZ4_read16(const void* memPtr)
356
{
357
    U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
358
}
359
360
static U32 LZ4_read32(const void* memPtr)
361
{
362
    U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
363
}
364
365
static reg_t LZ4_read_ARCH(const void* memPtr)
366
{
367
    reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
368
}
369
370
static void LZ4_write16(void* memPtr, U16 value)
371
{
372
    LZ4_memcpy(memPtr, &value, sizeof(value));
373
}
374
375
static void LZ4_write32(void* memPtr, U32 value)
376
{
377
    LZ4_memcpy(memPtr, &value, sizeof(value));
378
}
379
380
#endif /* LZ4_FORCE_MEMORY_ACCESS */
381
382
383
static U16 LZ4_readLE16(const void* memPtr)
384
0
{
385
0
    if (LZ4_isLittleEndian()) {
386
0
        return LZ4_read16(memPtr);
387
0
    } else {
388
0
        const BYTE* p = (const BYTE*)memPtr;
389
0
        return (U16)((U16)p[0] + (p[1]<<8));
390
0
    }
391
0
}
392
393
static void LZ4_writeLE16(void* memPtr, U16 value)
394
0
{
395
0
    if (LZ4_isLittleEndian()) {
396
0
        LZ4_write16(memPtr, value);
397
0
    } else {
398
0
        BYTE* p = (BYTE*)memPtr;
399
0
        p[0] = (BYTE) value;
400
0
        p[1] = (BYTE)(value>>8);
401
0
    }
402
0
}
403
404
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
405
LZ4_FORCE_INLINE
406
void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
407
0
{
408
0
    BYTE* d = (BYTE*)dstPtr;
409
0
    const BYTE* s = (const BYTE*)srcPtr;
410
0
    BYTE* const e = (BYTE*)dstEnd;
411
412
0
    do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
413
0
}
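/* Illustrative sketch (editor-added, not part of lz4.cpp): because
 * LZ4_wildCopy8 copies in whole 8-byte steps, callers must reserve at least
 * WILDCOPYLENGTH bytes of slack past dstEnd (this is what MFLIMIT and
 * MATCH_SAFEGUARD_DISTANCE guarantee in the compressor and decompressor).
 * The sizes below are arbitrary examples. */
static void wildcopy8_contract_example(void)
{
    char srcbuf[13 + WILDCOPYLENGTH] = {0};
    char dstbuf[13 + WILDCOPYLENGTH];            /* slack reserved after dstEnd */
    LZ4_wildCopy8(dstbuf, srcbuf, dstbuf + 13);  /* copies 13 bytes but may write up to dstbuf + 16 */
}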
414
415
static const unsigned inc32table[8] = {0, 1, 2,  1,  0,  4, 4, 4};
416
static const int      dec64table[8] = {0, 0, 0, -1, -4,  1, 2, 3};
417
418
419
#ifndef LZ4_FAST_DEC_LOOP
420
#  if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
421
#    define LZ4_FAST_DEC_LOOP 1
422
#  elif defined(__aarch64__) && defined(__APPLE__)
423
#    define LZ4_FAST_DEC_LOOP 1
424
#  elif defined(__aarch64__) && !defined(__clang__)
425
     /* On non-Apple aarch64, we disable this optimization for clang because
426
      * on certain mobile chipsets, performance is reduced with clang. For
427
      * more information refer to https://github.com/lz4/lz4/pull/707 */
428
#    define LZ4_FAST_DEC_LOOP 1
429
#  else
430
#    define LZ4_FAST_DEC_LOOP 0
431
#  endif
432
#endif
433
434
#if LZ4_FAST_DEC_LOOP
435
436
LZ4_FORCE_INLINE void
437
LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
438
0
{
439
0
    assert(srcPtr + offset == dstPtr);
440
0
    if (offset < 8) {
441
0
        LZ4_write32(dstPtr, 0);   /* silence an msan warning when offset==0 */
442
0
        dstPtr[0] = srcPtr[0];
443
0
        dstPtr[1] = srcPtr[1];
444
0
        dstPtr[2] = srcPtr[2];
445
0
        dstPtr[3] = srcPtr[3];
446
0
        srcPtr += inc32table[offset];
447
0
        LZ4_memcpy(dstPtr+4, srcPtr, 4);
448
0
        srcPtr -= dec64table[offset];
449
0
        dstPtr += 8;
450
0
    } else {
451
0
        LZ4_memcpy(dstPtr, srcPtr, 8);
452
0
        dstPtr += 8;
453
0
        srcPtr += 8;
454
0
    }
455
456
0
    LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
457
0
}
458
459
/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
460
 * this version copies 16 bytes twice (instead of 32 bytes once)
461
 * because it must be compatible with offsets >= 16. */
462
LZ4_FORCE_INLINE void
463
LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
464
0
{
465
0
    BYTE* d = (BYTE*)dstPtr;
466
0
    const BYTE* s = (const BYTE*)srcPtr;
467
0
    BYTE* const e = (BYTE*)dstEnd;
468
469
0
    do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
470
0
}
471
472
/* LZ4_memcpy_using_offset()  presumes :
473
 * - dstEnd >= dstPtr + MINMATCH
474
 * - there is at least 8 bytes available to write after dstEnd */
475
LZ4_FORCE_INLINE void
476
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
477
0
{
478
0
    BYTE v[8];
479
480
0
    assert(dstEnd >= dstPtr + MINMATCH);
481
482
0
    switch(offset) {
483
0
    case 1:
484
0
        MEM_INIT(v, *srcPtr, 8);
485
0
        break;
486
0
    case 2:
487
0
        LZ4_memcpy(v, srcPtr, 2);
488
0
        LZ4_memcpy(&v[2], srcPtr, 2);
489
#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
490
#  pragma warning(push)
491
#  pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
492
#endif
493
0
        LZ4_memcpy(&v[4], v, 4);
494
#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
495
#  pragma warning(pop)
496
#endif
497
0
        break;
498
0
    case 4:
499
0
        LZ4_memcpy(v, srcPtr, 4);
500
0
        LZ4_memcpy(&v[4], srcPtr, 4);
501
0
        break;
502
0
    default:
503
0
        LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
504
0
        return;
505
0
    }
506
507
0
    LZ4_memcpy(dstPtr, v, 8);
508
0
    dstPtr += 8;
509
0
    while (dstPtr < dstEnd) {
510
0
        LZ4_memcpy(dstPtr, v, 8);
511
0
        dstPtr += 8;
512
0
    }
513
0
}
514
#endif
515
516
517
/*-************************************
518
*  Common functions
519
**************************************/
520
static unsigned LZ4_NbCommonBytes (reg_t val)
521
0
{
522
0
    assert(val != 0);
523
0
    if (LZ4_isLittleEndian()) {
524
0
        if (sizeof(val) == 8) {
525
0
            const U64 m = 0x0101010101010101ULL;
526
0
            val ^= val - 1;
527
0
            return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
528
0
        } else /* 32 bits */ {
529
0
            const U32 m = 0x01010101;
530
0
            return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
531
0
        }
532
0
    } else   /* Big Endian CPU */ {
533
0
    assert(false);
534
0
    return 0;
535
0
    }
536
0
}
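/* Illustrative sketch (editor-added, not part of lz4.cpp): LZ4_NbCommonBytes()
 * takes the XOR of two machine words and returns how many of their low-order
 * bytes (the leading bytes in memory on a little-endian machine) are equal,
 * using a multiply trick instead of a hardware count-trailing-zeros.
 * Worked example for the 64-bit little-endian branch:
 *   val          = 0x0000000000FF0000   (bytes 0 and 1 match, byte 2 differs)
 *   val ^= val-1 -> 0x000000000001FFFF   (mask up to the lowest set bit)
 *   val & (m-1)  -> 0x0000000000010100   (one bit per fully-zero low byte)
 *   (* m) >> 56  -> 2                    (sums those bits: two common bytes) */
static void nb_common_bytes_example(void)
{
    assert(LZ4_NbCommonBytes((reg_t)0x0000000000FF0000ULL) == 2);
}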
537
538
539
0
#define STEPSIZE sizeof(reg_t)
540
LZ4_FORCE_INLINE
541
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
542
0
{
543
0
    const BYTE* const pStart = pIn;
544
545
0
    if (likely(pIn < pInLimit-(STEPSIZE-1))) {
546
0
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
547
0
        if (!diff) {
548
0
            pIn+=STEPSIZE; pMatch+=STEPSIZE;
549
0
        } else {
550
0
            return LZ4_NbCommonBytes(diff);
551
0
    }   }
552
553
0
    while (likely(pIn < pInLimit-(STEPSIZE-1))) {
554
0
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
555
0
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
556
0
        pIn += LZ4_NbCommonBytes(diff);
557
0
        return (unsigned)(pIn - pStart);
558
0
    }
559
560
0
    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
561
0
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
562
0
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
563
0
    return (unsigned)(pIn - pStart);
564
0
}
565
566
567
#ifndef LZ4_COMMONDEFS_ONLY
568
/*-************************************
569
*  Local Constants
570
**************************************/
571
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
572
static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression runs slower on incompressible data */
573
574
575
/*-************************************
576
*  Local Structures and types
577
**************************************/
578
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
579
580
/**
581
 * This enum distinguishes several different modes of accessing previous
582
 * content in the stream.
583
 *
584
 * - noDict        : There is no preceding content.
585
 * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
586
 *                   being compressed are valid and refer to the preceding
587
 *                   content (of length ctx->dictSize), which is available
588
 *                   contiguously preceding in memory the content currently
589
 *                   being compressed.
590
 * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
591
 *                   else in memory, starting at ctx->dictionary with length
592
 *                   ctx->dictSize.
593
 * - usingDictCtx  : Everything concerning the preceding content is
594
 *                   in a separate context, pointed to by ctx->dictCtx.
595
 *                   ctx->dictionary, ctx->dictSize, and table entries
596
 *                   in the current context that refer to positions
597
 *                   preceding the beginning of the current compression are
598
 *                   ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
599
 *                   ->dictSize describe the location and size of the preceding
600
 *                   content, and matches are found by looking in the ctx
601
 *                   ->dictCtx->hashTable.
602
 */
603
typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
604
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
605
606
607
/*-************************************
608
*  Local Utils
609
**************************************/
610
0
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
611
0
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
612
0
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
613
0
int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }
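/* Illustrative sketch (editor-added, not part of lz4.cpp): LZ4_compressBound()
 * gives the worst-case compressed size for a given input size, so a destination
 * buffer of that capacity can never be too small. The 4 KB input is an
 * arbitrary example. */
static int compress_with_bound_example(void)
{
    char src[4096] = {0};                               /* arbitrary example input */
    int const dstCapacity = LZ4_compressBound((int)sizeof(src));
    char* const dst = (char*)malloc((size_t)dstCapacity);
    int const written = dst ? LZ4_compress_default(src, dst, (int)sizeof(src), dstCapacity) : 0;
    free(dst);
    return written;
}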
614
615
616
/*-****************************************
617
*  Internal Definitions, used only in Tests
618
*******************************************/
619
//#if defined (__cplusplus)
620
//extern "C" {
621
//#endif
622
623
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
624
625
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
626
                                     int compressedSize, int maxOutputSize,
627
                                     const void* dictStart, size_t dictSize);
628
int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
629
                                     int compressedSize, int targetOutputSize, int dstCapacity,
630
                                     const void* dictStart, size_t dictSize);
631
//#if defined (__cplusplus)
632
//}
633
//#endif
634
635
/*-******************************
636
*  Compression functions
637
********************************/
638
LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
639
0
{
640
0
    if (tableType == byU16)
641
0
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
642
0
    else
643
0
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
644
0
}
645
646
LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
647
0
{
648
0
    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
649
0
    if (LZ4_isLittleEndian()) {
650
0
        const U64 prime5bytes = 889523592379ULL;
651
0
        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
652
0
    } else {
653
0
        const U64 prime8bytes = 11400714785074694791ULL;
654
0
        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
655
0
    }
656
0
}
657
658
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
659
0
{
660
0
    if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
661
0
    return LZ4_hash4(LZ4_read32(p), tableType);
662
0
}
663
664
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
665
0
{
666
0
    switch (tableType)
667
0
    {
668
0
    default: /* fallthrough */
669
0
    case clearedTable: { /* illegal! */ assert(0); return; }
670
0
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
671
0
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
672
0
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
673
0
    }
674
0
}
675
676
LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
677
0
{
678
0
    switch (tableType)
679
0
    {
680
0
    default: /* fallthrough */
681
0
    case clearedTable: /* fallthrough */
682
0
    case byPtr: { /* illegal! */ assert(0); return; }
683
0
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
684
0
    case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
685
0
    }
686
0
}
687
688
LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
689
                                  void* tableBase, tableType_t const tableType,
690
                            const BYTE* srcBase)
691
0
{
692
0
    switch (tableType)
693
0
    {
694
0
    case clearedTable: { /* illegal! */ assert(0); return; }
695
0
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
696
0
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
697
0
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
698
0
    }
699
0
}
700
701
LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
702
0
{
703
0
    U32 const h = LZ4_hashPosition(p, tableType);
704
0
    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
705
0
}
706
707
/* LZ4_getIndexOnHash() :
708
 * Index of match position registered in hash table.
709
 * hash position must be calculated by using base+index, or dictBase+index.
710
 * Assumption 1 : only valid if tableType == byU32 or byU16.
711
 * Assumption 2 : h is presumed valid (within limits of hash table)
712
 */
713
LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
714
0
{
715
0
    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
716
0
    if (tableType == byU32) {
717
0
        const U32* const hashTable = (const U32*) tableBase;
718
0
        assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
719
0
        return hashTable[h];
720
0
    }
721
0
    if (tableType == byU16) {
722
0
        const U16* const hashTable = (const U16*) tableBase;
723
0
        assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
724
0
        return hashTable[h];
725
0
    }
726
0
    assert(0); return 0;  /* forbidden case */
727
0
}
728
729
static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
730
0
{
731
0
    if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
732
0
    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
733
0
    { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
734
0
}
735
736
LZ4_FORCE_INLINE const BYTE*
737
LZ4_getPosition(const BYTE* p,
738
                const void* tableBase, tableType_t tableType,
739
                const BYTE* srcBase)
740
0
{
741
0
    U32 const h = LZ4_hashPosition(p, tableType);
742
0
    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
743
0
}
744
745
LZ4_FORCE_INLINE void
746
LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
747
           const int inputSize,
748
0
           const tableType_t tableType) {
749
    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
750
     * therefore safe to use no matter what mode we're in. Otherwise, we figure
751
     * out if it's safe to leave as is or whether it needs to be reset.
752
     */
753
0
    if ((tableType_t)cctx->tableType != clearedTable) {
754
0
        assert(inputSize >= 0);
755
0
        if ((tableType_t)cctx->tableType != tableType
756
0
          || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
757
0
          || ((tableType == byU32) && cctx->currentOffset > 1 GB)
758
0
          || tableType == byPtr
759
0
          || inputSize >= 4 KB)
760
0
        {
761
0
            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
762
0
            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
763
0
            cctx->currentOffset = 0;
764
0
            cctx->tableType = (U32)clearedTable;
765
0
        } else {
766
0
            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
767
0
        }
768
0
    }
769
770
    /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
771
     * is faster than compressing without a gap.
772
     * However, compressing with currentOffset == 0 is faster still,
773
     * so we preserve that case.
774
     */
775
0
    if (cctx->currentOffset != 0 && tableType == byU32) {
776
0
        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
777
0
        cctx->currentOffset += 64 KB;
778
0
    }
779
780
    /* Finally, clear history */
781
0
    cctx->dictCtx = NULL;
782
0
    cctx->dictionary = NULL;
783
0
    cctx->dictSize = 0;
784
0
}
785
786
/** LZ4_compress_generic() :
787
 *  inlined, to ensure branches are decided at compilation time.
788
 *  Presumed already validated at this stage:
789
 *  - source != NULL
790
 *  - inputSize > 0
791
 */
792
LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
793
                 LZ4_stream_t_internal* const cctx,
794
                 const char* const source,
795
                 char* const dest,
796
                 const int inputSize,
797
                 int*  inputConsumed, /* only written when outputDirective == fillOutput */
798
                 const int maxOutputSize,
799
                 const limitedOutput_directive outputDirective,
800
                 const tableType_t tableType,
801
                 const dict_directive dictDirective,
802
                 const dictIssue_directive dictIssue,
803
                 const int acceleration)
804
0
{
805
0
    int result;
806
0
    const BYTE* ip = (const BYTE*) source;
807
808
0
    U32 const startIndex = cctx->currentOffset;
809
0
    const BYTE* base = (const BYTE*) source - startIndex;
810
0
    const BYTE* lowLimit;
811
812
0
    const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
813
0
    const BYTE* const dictionary =
814
0
        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
815
0
    const U32 dictSize =
816
0
        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
817
0
    const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */
818
819
0
    int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
820
0
    U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
821
0
    const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
822
0
    const BYTE* anchor = (const BYTE*) source;
823
0
    const BYTE* const iend = ip + inputSize;
824
0
    const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
825
0
    const BYTE* const matchlimit = iend - LASTLITERALS;
826
827
    /* the dictCtx currentOffset is indexed on the start of the dictionary,
828
     * while a dictionary in the current context precedes the currentOffset */
829
0
    const BYTE* dictBase = (dictionary == NULL) ? NULL :
830
0
                           (dictDirective == usingDictCtx) ?
831
0
                            dictionary + dictSize - dictCtx->currentOffset :
832
0
                            dictionary + dictSize - startIndex;
833
834
0
    BYTE* op = (BYTE*) dest;
835
0
    BYTE* const olimit = op + maxOutputSize;
836
837
0
    U32 offset = 0;
838
0
    U32 forwardH;
839
840
0
    DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
841
0
    assert(ip != NULL);
842
    /* If init conditions are not met, we don't have to mark stream
843
     * as having dirty context, since no action was taken yet */
844
0
    if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
845
0
    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; }  /* Size too large (not within 64K limit) */
846
0
    if (tableType==byPtr) assert(dictDirective==noDict);      /* only supported use case with byPtr */
847
0
    assert(acceleration >= 1);
848
849
0
    lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
850
851
    /* Update context state */
852
0
    if (dictDirective == usingDictCtx) {
853
        /* Subsequent linked blocks can't use the dictionary. */
854
        /* Instead, they use the block we just compressed. */
855
0
        cctx->dictCtx = NULL;
856
0
        cctx->dictSize = (U32)inputSize;
857
0
    } else {
858
0
        cctx->dictSize += (U32)inputSize;
859
0
    }
860
0
    cctx->currentOffset += (U32)inputSize;
861
0
    cctx->tableType = (U32)tableType;
862
863
0
    if (inputSize<LZ4_minLength) goto _last_literals;        /* Input too small, no compression (all literals) */
864
865
    /* First Byte */
866
0
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
867
0
    ip++; forwardH = LZ4_hashPosition(ip, tableType);
868
869
    /* Main Loop */
870
0
    for ( ; ; ) {
871
0
        const BYTE* match;
872
0
        BYTE* token;
873
0
        const BYTE* filledIp;
874
875
        /* Find a match */
876
0
        if (tableType == byPtr) {
877
0
            const BYTE* forwardIp = ip;
878
0
            int step = 1;
879
0
            int searchMatchNb = acceleration << LZ4_skipTrigger;
880
0
            do {
881
0
                U32 const h = forwardH;
882
0
                ip = forwardIp;
883
0
                forwardIp += step;
884
0
                step = (searchMatchNb++ >> LZ4_skipTrigger);
885
886
0
                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
887
0
                assert(ip < mflimitPlusOne);
888
889
0
                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
890
0
                forwardH = LZ4_hashPosition(forwardIp, tableType);
891
0
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
892
893
0
            } while ( (match+LZ4_DISTANCE_MAX < ip)
894
0
                   || (LZ4_read32(match) != LZ4_read32(ip)) );
895
896
0
        } else {   /* byU32, byU16 */
897
898
0
            const BYTE* forwardIp = ip;
899
0
            int step = 1;
900
0
            int searchMatchNb = acceleration << LZ4_skipTrigger;
901
0
            do {
902
0
                U32 const h = forwardH;
903
0
                U32 const current = (U32)(forwardIp - base);
904
0
                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
905
0
                assert(matchIndex <= current);
906
0
                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
907
0
                ip = forwardIp;
908
0
                forwardIp += step;
909
0
                step = (searchMatchNb++ >> LZ4_skipTrigger);
910
911
0
                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
912
0
                assert(ip < mflimitPlusOne);
913
914
0
                if (dictDirective == usingDictCtx) {
915
0
                    if (matchIndex < startIndex) {
916
                        /* there was no match, try the dictionary */
917
0
                        assert(tableType == byU32);
918
0
                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
919
0
                        match = dictBase + matchIndex;
920
0
                        matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
921
0
                        lowLimit = dictionary;
922
0
                    } else {
923
0
                        match = base + matchIndex;
924
0
                        lowLimit = (const BYTE*)source;
925
0
                    }
926
0
                } else if (dictDirective == usingExtDict) {
927
0
                    if (matchIndex < startIndex) {
928
0
                        DEBUGLOG(7, "extDict candidate: matchIndex=%5u  <  startIndex=%5u", matchIndex, startIndex);
929
0
                        assert(startIndex - matchIndex >= MINMATCH);
930
0
                        assert(dictBase);
931
0
                        match = dictBase + matchIndex;
932
0
                        lowLimit = dictionary;
933
0
                    } else {
934
0
                        match = base + matchIndex;
935
0
                        lowLimit = (const BYTE*)source;
936
0
                    }
937
0
                } else {   /* single continuous memory segment */
938
0
                    match = base + matchIndex;
939
0
                }
940
0
                forwardH = LZ4_hashPosition(forwardIp, tableType);
941
0
                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
942
943
0
                DEBUGLOG(7, "candidate at pos=%u  (offset=%u \n", matchIndex, current - matchIndex);
944
0
                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; }    /* match outside of valid area */
945
0
                assert(matchIndex < current);
946
0
                if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
947
0
                  && (matchIndex+LZ4_DISTANCE_MAX < current)) {
948
0
                    continue;
949
0
                } /* too far */
950
0
                assert((current - matchIndex) <= LZ4_DISTANCE_MAX);  /* match now expected within distance */
951
952
0
                if (LZ4_read32(match) == LZ4_read32(ip)) {
953
0
                    if (maybe_extMem) offset = current - matchIndex;
954
0
                    break;   /* match found */
955
0
                }
956
957
0
            } while(1);
958
0
        }
959
960
        /* Catch up */
961
0
        filledIp = ip;
962
0
        while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
963
964
        /* Encode Literals */
965
0
        {   unsigned const litLength = (unsigned)(ip - anchor);
966
0
            token = op++;
967
0
            if ((outputDirective == limitedOutput) &&  /* Check output buffer overflow */
968
0
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
969
0
                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
970
0
            }
971
0
            if ((outputDirective == fillOutput) &&
972
0
                (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
973
0
                op--;
974
0
                goto _last_literals;
975
0
            }
976
0
            if (litLength >= RUN_MASK) {
977
0
                int len = (int)(litLength - RUN_MASK);
978
0
                *token = (RUN_MASK<<ML_BITS);
979
0
                for(; len >= 255 ; len-=255) *op++ = 255;
980
0
                *op++ = (BYTE)len;
981
0
            }
982
0
            else *token = (BYTE)(litLength<<ML_BITS);
983
984
            /* Copy Literals */
985
0
            LZ4_wildCopy8(op, anchor, op+litLength);
986
0
            op+=litLength;
987
0
            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
988
0
                        (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
989
0
        }
990
991
0
_next_match:
992
        /* at this stage, the following variables must be correctly set :
993
         * - ip : at start of LZ operation
994
         * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
995
         * - offset : if maybe_ext_memSegment==1 (constant)
996
         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
997
         * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
998
         */
999
1000
0
        if ((outputDirective == fillOutput) &&
1001
0
            (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
1002
            /* the match was too close to the end, rewind and go to last literals */
1003
0
            op = token;
1004
0
            goto _last_literals;
1005
0
        }
1006
1007
        /* Encode Offset */
1008
0
        if (maybe_extMem) {   /* static test */
1009
0
            DEBUGLOG(6, "             with offset=%u  (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
1010
0
            assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
1011
0
            LZ4_writeLE16(op, (U16)offset); op+=2;
1012
0
        } else  {
1013
0
            DEBUGLOG(6, "             with offset=%u  (same segment)", (U32)(ip - match));
1014
0
            assert(ip-match <= LZ4_DISTANCE_MAX);
1015
0
            LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
1016
0
        }
1017
1018
        /* Encode MatchLength */
1019
0
        {   unsigned matchCode;
1020
1021
0
            if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
1022
0
              && (lowLimit==dictionary) /* match within extDict */ ) {
1023
0
                const BYTE* limit = ip + (dictEnd-match);
1024
0
                assert(dictEnd > match);
1025
0
                if (limit > matchlimit) limit = matchlimit;
1026
0
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
1027
0
                ip += (size_t)matchCode + MINMATCH;
1028
0
                if (ip==limit) {
1029
0
                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
1030
0
                    matchCode += more;
1031
0
                    ip += more;
1032
0
                }
1033
0
                DEBUGLOG(6, "             with matchLength=%u starting in extDict", matchCode+MINMATCH);
1034
0
            } else {
1035
0
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
1036
0
                ip += (size_t)matchCode + MINMATCH;
1037
0
                DEBUGLOG(6, "             with matchLength=%u", matchCode+MINMATCH);
1038
0
            }
1039
1040
0
            if ((outputDirective) &&    /* Check output buffer overflow */
1041
0
                (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
1042
0
                if (outputDirective == fillOutput) {
1043
                    /* Match description too long : reduce it */
1044
0
                    U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
1045
0
                    ip -= matchCode - newMatchCode;
1046
0
                    assert(newMatchCode < matchCode);
1047
0
                    matchCode = newMatchCode;
1048
0
                    if (unlikely(ip <= filledIp)) {
1049
                        /* We have already filled up to filledIp so if ip ends up less than filledIp
1050
                         * we have positions in the hash table beyond the current position. This is
1051
                         * a problem if we reuse the hash table. So we have to remove these positions
1052
                         * from the hash table.
1053
                         */
1054
0
                        const BYTE* ptr;
1055
0
                        DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
1056
0
                        for (ptr = ip; ptr <= filledIp; ++ptr) {
1057
0
                            U32 const h = LZ4_hashPosition(ptr, tableType);
1058
0
                            LZ4_clearHash(h, cctx->hashTable, tableType);
1059
0
                        }
1060
0
                    }
1061
0
                } else {
1062
0
                    assert(outputDirective == limitedOutput);
1063
0
                    return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1064
0
                }
1065
0
            }
1066
0
            if (matchCode >= ML_MASK) {
1067
0
                *token += ML_MASK;
1068
0
                matchCode -= ML_MASK;
1069
0
                LZ4_write32(op, 0xFFFFFFFF);
1070
0
                while (matchCode >= 4*255) {
1071
0
                    op+=4;
1072
0
                    LZ4_write32(op, 0xFFFFFFFF);
1073
0
                    matchCode -= 4*255;
1074
0
                }
1075
0
                op += matchCode / 255;
1076
0
                *op++ = (BYTE)(matchCode % 255);
1077
0
            } else
1078
0
                *token += (BYTE)(matchCode);
1079
0
        }
1080
        /* Ensure we have enough space for the last literals. */
1081
0
        assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
1082
1083
0
        anchor = ip;
1084
1085
        /* Test end of chunk */
1086
0
        if (ip >= mflimitPlusOne) break;
1087
1088
        /* Fill table */
1089
0
        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
1090
1091
        /* Test next position */
1092
0
        if (tableType == byPtr) {
1093
1094
0
            match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
1095
0
            LZ4_putPosition(ip, cctx->hashTable, tableType, base);
1096
0
            if ( (match+LZ4_DISTANCE_MAX >= ip)
1097
0
              && (LZ4_read32(match) == LZ4_read32(ip)) )
1098
0
            { token=op++; *token=0; goto _next_match; }
1099
1100
0
        } else {   /* byU32, byU16 */
1101
1102
0
            U32 const h = LZ4_hashPosition(ip, tableType);
1103
0
            U32 const current = (U32)(ip-base);
1104
0
            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
1105
0
            assert(matchIndex < current);
1106
0
            if (dictDirective == usingDictCtx) {
1107
0
                if (matchIndex < startIndex) {
1108
                    /* there was no match, try the dictionary */
1109
0
                    matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
1110
0
                    match = dictBase + matchIndex;
1111
0
                    lowLimit = dictionary;   /* required for match length counter */
1112
0
                    matchIndex += dictDelta;
1113
0
                } else {
1114
0
                    match = base + matchIndex;
1115
0
                    lowLimit = (const BYTE*)source;  /* required for match length counter */
1116
0
                }
1117
0
            } else if (dictDirective==usingExtDict) {
1118
0
                if (matchIndex < startIndex) {
1119
0
                    assert(dictBase);
1120
0
                    match = dictBase + matchIndex;
1121
0
                    lowLimit = dictionary;   /* required for match length counter */
1122
0
                } else {
1123
0
                    match = base + matchIndex;
1124
0
                    lowLimit = (const BYTE*)source;   /* required for match length counter */
1125
0
                }
1126
0
            } else {   /* single memory segment */
1127
0
                match = base + matchIndex;
1128
0
            }
1129
0
            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
1130
0
            assert(matchIndex < current);
1131
0
            if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
1132
0
              && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
1133
0
              && (LZ4_read32(match) == LZ4_read32(ip)) ) {
1134
0
                token=op++;
1135
0
                *token=0;
1136
0
                if (maybe_extMem) offset = current - matchIndex;
1137
0
                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
1138
0
                            (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
1139
0
                goto _next_match;
1140
0
            }
1141
0
        }
1142
1143
        /* Prepare next loop */
1144
0
        forwardH = LZ4_hashPosition(++ip, tableType);
1145
1146
0
    }
1147
1148
0
_last_literals:
1149
    /* Encode Last Literals */
1150
0
    {   size_t lastRun = (size_t)(iend - anchor);
1151
0
        if ( (outputDirective) &&  /* Check output buffer overflow */
1152
0
            (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
1153
0
            if (outputDirective == fillOutput) {
1154
                /* adapt lastRun to fill 'dst' */
1155
0
                assert(olimit >= op);
1156
0
                lastRun  = (size_t)(olimit-op) - 1/*token*/;
1157
0
                lastRun -= (lastRun + 256 - RUN_MASK) / 256;  /*additional length tokens*/
1158
0
            } else {
1159
0
                assert(outputDirective == limitedOutput);
1160
0
                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1161
0
            }
1162
0
        }
1163
0
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
1164
0
        if (lastRun >= RUN_MASK) {
1165
0
            size_t accumulator = lastRun - RUN_MASK;
1166
0
            *op++ = RUN_MASK << ML_BITS;
1167
0
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
1168
0
            *op++ = (BYTE) accumulator;
1169
0
        } else {
1170
0
            *op++ = (BYTE)(lastRun<<ML_BITS);
1171
0
        }
1172
0
        LZ4_memcpy(op, anchor, lastRun);
1173
0
        ip = anchor + lastRun;
1174
0
        op += lastRun;
1175
0
    }
1176
1177
0
    if (outputDirective == fillOutput) {
1178
0
        *inputConsumed = (int) (((const char*)ip)-source);
1179
0
    }
1180
0
    result = (int)(((char*)op) - dest);
1181
0
    assert(result > 0);
1182
0
    DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
1183
0
    return result;
1184
0
}
1185
1186
/** LZ4_compress_generic() :
1187
 *  inlined, to ensure branches are decided at compilation time;
1188
 *  takes care of src == (NULL, 0)
1189
 *  and forward the rest to LZ4_compress_generic_validated */
1190
LZ4_FORCE_INLINE int LZ4_compress_generic(
1191
                 LZ4_stream_t_internal* const cctx,
1192
                 const char* const src,
1193
                 char* const dst,
1194
                 const int srcSize,
1195
                 int *inputConsumed, /* only written when outputDirective == fillOutput */
1196
                 const int dstCapacity,
1197
                 const limitedOutput_directive outputDirective,
1198
                 const tableType_t tableType,
1199
                 const dict_directive dictDirective,
1200
                 const dictIssue_directive dictIssue,
1201
                 const int acceleration)
1202
0
{
1203
0
    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
1204
0
                srcSize, dstCapacity);
1205
1206
0
    if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; }  /* Unsupported srcSize, too large (or negative) */
1207
0
    if (srcSize == 0) {   /* src == NULL supported if srcSize == 0 */
1208
0
        if (outputDirective != notLimited && dstCapacity <= 0) return 0;  /* no output, can't write anything */
1209
0
        DEBUGLOG(5, "Generating an empty block");
1210
0
        assert(outputDirective == notLimited || dstCapacity >= 1);
1211
0
        assert(dst != NULL);
1212
0
        dst[0] = 0;
1213
0
        if (outputDirective == fillOutput) {
1214
0
            assert (inputConsumed != NULL);
1215
0
            *inputConsumed = 0;
1216
0
        }
1217
0
        return 1;
1218
0
    }
1219
0
    assert(src != NULL);
1220
1221
0
    return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
1222
0
                inputConsumed, /* only written into if outputDirective == fillOutput */
1223
0
                dstCapacity, outputDirective,
1224
0
                tableType, dictDirective, dictIssue, acceleration);
1225
0
}
1226
1227
1228
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1229
0
{
1230
0
    LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
1231
0
    assert(ctx != NULL);
1232
0
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
1233
0
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
1234
0
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
1235
0
        if (inputSize < LZ4_64Klimit) {
1236
0
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
1237
0
        } else {
1238
0
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1239
0
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1240
0
        }
1241
0
    } else {
1242
0
        if (inputSize < LZ4_64Klimit) {
1243
0
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
1244
0
        } else {
1245
0
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1246
0
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
1247
0
        }
1248
0
    }
1249
0
}
1250
1251
/**
1252
 * LZ4_compress_fast_extState_fastReset() :
1253
 * A variant of LZ4_compress_fast_extState().
1254
 *
1255
 * Using this variant avoids an expensive initialization step. It is only safe
1256
 * to call if the state buffer is known to be correctly initialized already
1257
 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
1258
 * "correctly initialized").
1259
 */
1260
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
1261
0
{
1262
0
    LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
1263
0
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
1264
0
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
1265
1266
0
    if (dstCapacity >= LZ4_compressBound(srcSize)) {
1267
0
        if (srcSize < LZ4_64Klimit) {
1268
0
            const tableType_t tableType = byU16;
1269
0
            LZ4_prepareTable(ctx, srcSize, tableType);
1270
0
            if (ctx->currentOffset) {
1271
0
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
1272
0
            } else {
1273
0
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1274
0
            }
1275
0
        } else {
1276
0
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1277
0
            LZ4_prepareTable(ctx, srcSize, tableType);
1278
0
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1279
0
        }
1280
0
    } else {
1281
0
        if (srcSize < LZ4_64Klimit) {
1282
0
            const tableType_t tableType = byU16;
1283
0
            LZ4_prepareTable(ctx, srcSize, tableType);
1284
0
            if (ctx->currentOffset) {
1285
0
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
1286
0
            } else {
1287
0
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
1288
0
            }
1289
0
        } else {
1290
0
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1291
0
            LZ4_prepareTable(ctx, srcSize, tableType);
1292
0
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
1293
0
        }
1294
0
    }
1295
0
}
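/* --- Editor's illustrative sketch (standalone, not part of lz4.cpp) ---------
 * Shows one plausible way to use LZ4_compress_fast_extState_fastReset() per the
 * comment above : initialize an LZ4_stream_t once with LZ4_initStream(), then
 * reuse it for many independent blocks without paying the full reset each time.
 * Assumes LZ4_STATIC_LINKING_ONLY is defined before including lz4.h (the
 * _fastReset prototype lives in that section); compress_many_blocks and its
 * buffer layout are hypothetical. */
#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"

static int compress_many_blocks(const char* const* blocks, const int* blockSizes, int nbBlocks,
                                char* dst, int dstCapacity)
{
    LZ4_stream_t state;
    int total = 0;
    if (LZ4_initStream(&state, sizeof(state)) == NULL) return 0;   /* full init, once */
    for (int i = 0; i < nbBlocks; i++) {
        int const written = LZ4_compress_fast_extState_fastReset(
                &state, blocks[i], dst + total,
                blockSizes[i], dstCapacity - total, 1 /* acceleration */);
        if (written == 0) return 0;   /* not enough room left in dst */
        total += written;
    }
    return total;
}
/* --------------------------------------------------------------------------- */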
1296
1297
1298
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1299
0
{
1300
0
    int result;
1301
#if (LZ4_HEAPMODE)
1302
    LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
1303
    if (ctxPtr == NULL) return 0;
1304
#else
1305
0
    LZ4_stream_t ctx;
1306
0
    LZ4_stream_t* const ctxPtr = &ctx;
1307
0
#endif
1308
0
    result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
1309
1310
#if (LZ4_HEAPMODE)
1311
    FREEMEM(ctxPtr);
1312
#endif
1313
0
    return result;
1314
0
}
1315
1316
1317
int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
1318
0
{
1319
0
    return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
1320
0
}
1321
1322
1323
/* Note!: This function leaves the stream in an unclean/broken state!
1324
 * It is not safe to subsequently use the same state with a _fastReset() or
1325
 * _continue() call without resetting it. */
1326
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
1327
0
{
1328
0
    void* const s = LZ4_initStream(state, sizeof (*state));
1329
0
    assert(s != NULL); (void)s;
1330
1331
0
    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {  /* compression success is guaranteed */
1332
0
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
1333
0
    } else {
1334
0
        if (*srcSizePtr < LZ4_64Klimit) {
1335
0
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
1336
0
        } else {
1337
0
            tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1338
0
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
1339
0
    }   }
1340
0
}
1341
1342
1343
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
1344
0
{
1345
#if (LZ4_HEAPMODE)
1346
    LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
1347
    if (ctx == NULL) return 0;
1348
#else
1349
0
    LZ4_stream_t ctxBody;
1350
0
    LZ4_stream_t* ctx = &ctxBody;
1351
0
#endif
1352
1353
0
    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
1354
1355
#if (LZ4_HEAPMODE)
1356
    FREEMEM(ctx);
1357
#endif
1358
0
    return result;
1359
0
}
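/* --- Editor's illustrative sketch (standalone, not part of lz4.cpp) ---------
 * LZ4_compress_destSize() fills a fixed-capacity destination and reports, via
 * *srcSizePtr, how many source bytes it actually managed to represent.  The
 * fill_fixed_page name and its parameters are hypothetical. */
#include "lz4.h"

static int fill_fixed_page(const char* src, int srcSize,
                           char* page, int pageCapacity, int* consumedOut)
{
    int consumed = srcSize;   /* in : bytes available ; out : bytes represented in page */
    int const written = LZ4_compress_destSize(src, page, &consumed, pageCapacity);
    *consumedOut = consumed;
    return written;           /* compressed size written into page, or 0 on failure */
}
/* --------------------------------------------------------------------------- */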
1360
1361
1362
1363
/*-******************************
1364
*  Streaming functions
1365
********************************/
1366
1367
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1368
LZ4_stream_t* LZ4_createStream(void)
1369
0
{
1370
0
    LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
1371
0
    LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
1372
0
    DEBUGLOG(4, "LZ4_createStream %p", lz4s);
1373
0
    if (lz4s == NULL) return NULL;
1374
0
    LZ4_initStream(lz4s, sizeof(*lz4s));
1375
0
    return lz4s;
1376
0
}
1377
#endif
1378
1379
static size_t LZ4_stream_t_alignment(void)
1380
0
{
1381
0
#if LZ4_ALIGN_TEST
1382
0
    typedef struct { char c; LZ4_stream_t t; } t_a;
1383
0
    return sizeof(t_a) - sizeof(LZ4_stream_t);
1384
#else
1385
    return 1;  /* effectively disabled */
1386
#endif
1387
0
}
1388
1389
LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
1390
0
{
1391
0
    DEBUGLOG(5, "LZ4_initStream");
1392
0
    if (buffer == NULL) { return NULL; }
1393
0
    if (size < sizeof(LZ4_stream_t)) { return NULL; }
1394
0
    if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
1395
0
    MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
1396
0
    return (LZ4_stream_t*)buffer;
1397
0
}
1398
1399
/* resetStream is now deprecated,
1400
 * prefer initStream() which is more general */
1401
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
1402
0
{
1403
0
    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
1404
0
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
1405
0
}
1406
1407
0
void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
1408
0
    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
1409
0
}
1410
1411
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1412
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
1413
0
{
1414
0
    if (!LZ4_stream) return 0;   /* support free on NULL */
1415
0
    DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
1416
0
    FREEMEM(LZ4_stream);
1417
0
    return (0);
1418
0
}
1419
#endif
1420
1421
1422
0
#define HASH_UNIT sizeof(reg_t)
1423
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
1424
0
{
1425
0
    LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
1426
0
    const tableType_t tableType = byU32;
1427
0
    const BYTE* p = (const BYTE*)dictionary;
1428
0
    const BYTE* const dictEnd = p + dictSize;
1429
0
    const BYTE* base;
1430
1431
0
    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
1432
1433
    /* It's necessary to reset the context,
1434
     * and not just continue it with prepareTable()
1435
     * to avoid any risk of generating overflowing matchIndex
1436
     * when compressing using this dictionary */
1437
0
    LZ4_resetStream(LZ4_dict);
1438
1439
    /* We always increment the offset by 64 KB, since, if the dict is longer,
1440
     * we truncate it to the last 64k, and if it's shorter, we still want to
1441
     * advance by a whole window length so we can provide the guarantee that
1442
     * there are only valid offsets in the window, which allows an optimization
1443
     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
1444
     * dictionary isn't a full 64k. */
1445
0
    dict->currentOffset += 64 KB;
1446
1447
0
    if (dictSize < (int)HASH_UNIT) {
1448
0
        return 0;
1449
0
    }
1450
1451
0
    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
1452
0
    base = dictEnd - dict->currentOffset;
1453
0
    dict->dictionary = p;
1454
0
    dict->dictSize = (U32)(dictEnd - p);
1455
0
    dict->tableType = (U32)tableType;
1456
1457
0
    while (p <= dictEnd-HASH_UNIT) {
1458
0
        LZ4_putPosition(p, dict->hashTable, tableType, base);
1459
0
        p+=3;
1460
0
    }
1461
1462
0
    return (int)dict->dictSize;
1463
0
}
1464
1465
void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
1466
0
{
1467
0
    const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
1468
0
        &(dictionaryStream->internal_donotuse);
1469
1470
0
    DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
1471
0
             workingStream, dictionaryStream,
1472
0
             dictCtx != NULL ? dictCtx->dictSize : 0);
1473
1474
0
    if (dictCtx != NULL) {
1475
        /* If the current offset is zero, we will never look in the
1476
         * external dictionary context, since there is no value a table
1477
         * entry can take that indicates a miss. In that case, we need
1478
         * to bump the offset to something non-zero.
1479
         */
1480
0
        if (workingStream->internal_donotuse.currentOffset == 0) {
1481
0
            workingStream->internal_donotuse.currentOffset = 64 KB;
1482
0
        }
1483
1484
        /* Don't actually attach an empty dictionary.
1485
         */
1486
0
        if (dictCtx->dictSize == 0) {
1487
0
            dictCtx = NULL;
1488
0
        }
1489
0
    }
1490
0
    workingStream->internal_donotuse.dictCtx = dictCtx;
1491
0
}
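/* --- Editor's illustrative sketch (standalone, not part of lz4.cpp) ---------
 * A plausible pairing of LZ4_loadDict() with LZ4_attach_dictionary() : the
 * dictionary is loaded once into its own stream, then cheaply attached to a
 * working stream before compressing an independent block.  Assumes
 * LZ4_STATIC_LINKING_ONLY is defined before including lz4.h, since
 * LZ4_attach_dictionary() is declared in that section; names are hypothetical. */
#define LZ4_STATIC_LINKING_ONLY
#include "lz4.h"

static int compress_with_shared_dict(const LZ4_stream_t* dictStream,  /* prepared once via LZ4_loadDict() */
                                     const char* src, int srcSize,
                                     char* dst, int dstCapacity)
{
    LZ4_stream_t work;
    if (LZ4_initStream(&work, sizeof(work)) == NULL) return 0;
    LZ4_attach_dictionary(&work, dictStream);   /* cheap : only a reference is shared */
    return LZ4_compress_fast_continue(&work, src, dst, srcSize, dstCapacity, 1);
}
/* --------------------------------------------------------------------------- */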
1492
1493
1494
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
1495
0
{
1496
0
    assert(nextSize >= 0);
1497
0
    if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
1498
        /* rescale hash table */
1499
0
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
1500
0
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
1501
0
        int i;
1502
0
        DEBUGLOG(4, "LZ4_renormDictT");
1503
0
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
1504
0
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
1505
0
            else LZ4_dict->hashTable[i] -= delta;
1506
0
        }
1507
0
        LZ4_dict->currentOffset = 64 KB;
1508
0
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
1509
0
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
1510
0
    }
1511
0
}
1512
1513
1514
int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
1515
                                const char* source, char* dest,
1516
                                int inputSize, int maxOutputSize,
1517
                                int acceleration)
1518
0
{
1519
0
    const tableType_t tableType = byU32;
1520
0
    LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;
1521
0
    const char* dictEnd = streamPtr->dictSize ? (const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;
1522
1523
0
    DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize);
1524
1525
0
    LZ4_renormDictT(streamPtr, inputSize);   /* fix index overflow */
1526
0
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
1527
0
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
1528
1529
    /* invalidate tiny dictionaries */
1530
0
    if ( (streamPtr->dictSize < 4)     /* tiny dictionary : not enough for a hash */
1531
0
      && (dictEnd != source)           /* not prefix mode */
1532
0
      && (inputSize > 0)               /* tolerance : don't lose history, in case next invocation would use prefix mode */
1533
0
      && (streamPtr->dictCtx == NULL)  /* not using an external dictCtx */
1534
0
      ) {
1535
0
        DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
1536
        /* remove dictionary existence from history, to employ faster prefix mode */
1537
0
        streamPtr->dictSize = 0;
1538
0
        streamPtr->dictionary = (const BYTE*)source;
1539
0
        dictEnd = source;
1540
0
    }
1541
1542
    /* Check overlapping input/dictionary space */
1543
0
    {   const char* const sourceEnd = source + inputSize;
1544
0
        if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {
1545
0
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
1546
0
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
1547
0
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
1548
0
            streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;
1549
0
        }
1550
0
    }
1551
1552
    /* prefix mode : source data follows dictionary */
1553
0
    if (dictEnd == source) {
1554
0
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1555
0
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
1556
0
        else
1557
0
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
1558
0
    }
1559
1560
    /* external dictionary mode */
1561
0
    {   int result;
1562
0
        if (streamPtr->dictCtx) {
1563
            /* We depend here on the fact that dictCtx'es (produced by
1564
             * LZ4_loadDict) guarantee that their tables contain no references
1565
             * to offsets between dictCtx->currentOffset - 64 KB and
1566
             * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
1567
             * to use noDictIssue even when the dict isn't a full 64 KB.
1568
             */
1569
0
            if (inputSize > 4 KB) {
1570
                /* For compressing large blobs, it is faster to pay the setup
1571
                 * cost to copy the dictionary's tables into the active context,
1572
                 * so that the compression loop is only looking into one table.
1573
                 */
1574
0
                LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
1575
0
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
1576
0
            } else {
1577
0
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
1578
0
            }
1579
0
        } else {  /* small data <= 4 KB */
1580
0
            if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1581
0
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
1582
0
            } else {
1583
0
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
1584
0
            }
1585
0
        }
1586
0
        streamPtr->dictionary = (const BYTE*)source;
1587
0
        streamPtr->dictSize = (U32)inputSize;
1588
0
        return result;
1589
0
    }
1590
0
}
1591
1592
1593
/* Hidden debug function, to force-test external dictionary mode */
1594
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
1595
0
{
1596
0
    LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
1597
0
    int result;
1598
1599
0
    LZ4_renormDictT(streamPtr, srcSize);
1600
1601
0
    if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1602
0
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
1603
0
    } else {
1604
0
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
1605
0
    }
1606
1607
0
    streamPtr->dictionary = (const BYTE*)source;
1608
0
    streamPtr->dictSize = (U32)srcSize;
1609
1610
0
    return result;
1611
0
}
1612
1613
1614
/*! LZ4_saveDict() :
1615
 *  If the previously compressed data block is not guaranteed to remain available at its memory location,
1616
 *  save it into a safer place (char* safeBuffer).
1617
 *  Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,
1618
 *         one can therefore call LZ4_compress_fast_continue() right after.
1619
 * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
1620
 */
1621
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
1622
0
{
1623
0
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
1624
1625
0
    DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, safeBuffer);
1626
1627
0
    if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
1628
0
    if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
1629
1630
0
    if (safeBuffer == NULL) assert(dictSize == 0);
1631
0
    if (dictSize > 0) {
1632
0
        const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
1633
0
        assert(dict->dictionary);
1634
0
        LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);
1635
0
    }
1636
1637
0
    dict->dictionary = (const BYTE*)safeBuffer;
1638
0
    dict->dictSize = (U32)dictSize;
1639
1640
0
    return dictSize;
1641
0
}
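/* --- Editor's illustrative sketch (standalone, not part of lz4.cpp) ---------
 * Streaming compression where each message buffer may be reused afterwards :
 * following the LZ4_saveDict() note above, the last <= 64 KB of history is
 * copied into a stable buffer after every block so it remains addressable.
 * The stream_compressor struct and compress_next_message() are hypothetical;
 * c->stream is assumed to have been initialized once with LZ4_initStream(). */
#include "lz4.h"

#define DICT_CAPACITY (64 * 1024)

typedef struct {
    LZ4_stream_t stream;              /* initialized once with LZ4_initStream() */
    char dictBuffer[DICT_CAPACITY];
} stream_compressor;

static int compress_next_message(stream_compressor* c, const char* msg, int msgSize,
                                 char* dst, int dstCapacity)
{
    int const written = LZ4_compress_fast_continue(&c->stream, msg, dst, msgSize, dstCapacity, 1);
    if (written <= 0) return written;
    /* msg's memory may now be overwritten : preserve the history in dictBuffer */
    (void)LZ4_saveDict(&c->stream, c->dictBuffer, DICT_CAPACITY);
    return written;
}
/* --------------------------------------------------------------------------- */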
1642
1643
1644
1645
/*-*******************************
1646
 *  Decompression functions
1647
 ********************************/
1648
1649
typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
1650
1651
#undef MIN
1652
0
#define MIN(a,b)    ( (a) < (b) ? (a) : (b) )
1653
1654
1655
/* variant for decompress_unsafe()
1656
 * does not know end of input
1657
 * presumes input is well formed
1658
 * note : will consume at least one byte */
1659
size_t read_long_length_no_check(const BYTE** pp)
1660
0
{
1661
0
    size_t b, l = 0;
1662
0
    do { b = **pp; (*pp)++; l += b; } while (b==255);
1663
0
    DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1)
1664
0
    return l;
1665
0
}
1666
1667
/* core decoder variant for LZ4_decompress_fast*()
1668
 * for legacy support only : these entry points are deprecated.
1669
 * - Presumes input is correctly formed (no defense vs malformed inputs)
1670
 * - Does not know input size (presume input buffer is "large enough")
1671
 * - Decompress a full block (only)
1672
 * @return : nb of bytes read from input.
1673
 * Note : this variant is not optimized for speed, just for maintenance.
1674
 *        the goal is to remove support of decompress_fast*() variants by v2.0
1675
**/
1676
LZ4_FORCE_INLINE int
1677
LZ4_decompress_unsafe_generic(
1678
                 const BYTE* const istart,
1679
                 BYTE* const ostart,
1680
                 int decompressedSize,
1681
1682
                 size_t prefixSize,
1683
                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
1684
                 const size_t dictSize         /* note: =0 if dictStart==NULL */
1685
                 )
1686
0
{
1687
0
    const BYTE* ip = istart;
1688
0
    BYTE* op = (BYTE*)ostart;
1689
0
    BYTE* const oend = ostart + decompressedSize;
1690
0
    const BYTE* const prefixStart = ostart - prefixSize;
1691
1692
0
    DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
1693
0
    if (dictStart == NULL) assert(dictSize == 0);
1694
1695
0
    while (1) {
1696
        /* start new sequence */
1697
0
        unsigned token = *ip++;
1698
1699
        /* literals */
1700
0
        {   size_t ll = token >> ML_BITS;
1701
0
            if (ll==15) {
1702
                /* long literal length */
1703
0
                ll += read_long_length_no_check(&ip);
1704
0
            }
1705
0
            if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */
1706
0
            LZ4_memmove(op, ip, ll); /* support in-place decompression */
1707
0
            op += ll;
1708
0
            ip += ll;
1709
0
            if ((size_t)(oend-op) < MFLIMIT) {
1710
0
                if (op==oend) break;  /* end of block */
1711
0
                DEBUGLOG(5, "invalid: literals end at distance %zi from end of block", oend-op);
1712
                /* incorrect end of block :
1713
                 * last match must start at least MFLIMIT==12 bytes before end of output block */
1714
0
                return -1;
1715
0
        }   }
1716
1717
        /* match */
1718
0
        {   size_t ml = token & 15;
1719
0
            size_t const offset = LZ4_readLE16(ip);
1720
0
            ip+=2;
1721
1722
0
            if (ml==15) {
1723
                /* long match length */
1724
0
                ml += read_long_length_no_check(&ip);
1725
0
            }
1726
0
            ml += MINMATCH;
1727
1728
0
            if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */
1729
1730
0
            {   const BYTE* match = op - offset;
1731
1732
                /* out of range */
1733
0
                if (offset > (size_t)(op - prefixStart) + dictSize) {
1734
0
                    DEBUGLOG(6, "offset out of range");
1735
0
                    return -1;
1736
0
                }
1737
1738
                /* check special case : extDict */
1739
0
                if (offset > (size_t)(op - prefixStart)) {
1740
                    /* extDict scenario */
1741
0
                    const BYTE* const dictEnd = dictStart + dictSize;
1742
0
                    const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));
1743
0
                    size_t const extml = (size_t)(dictEnd - extMatch);
1744
0
                    if (extml > ml) {
1745
                        /* match entirely within extDict */
1746
0
                        LZ4_memmove(op, extMatch, ml);
1747
0
                        op += ml;
1748
0
                        ml = 0;
1749
0
                    } else {
1750
                        /* match split between extDict & prefix */
1751
0
                        LZ4_memmove(op, extMatch, extml);
1752
0
                        op += extml;
1753
0
                        ml -= extml;
1754
0
                    }
1755
0
                    match = prefixStart;
1756
0
                }
1757
1758
                /* match copy - slow variant, supporting overlap copy */
1759
0
                {   size_t u;
1760
0
                    for (u=0; u<ml; u++) {
1761
0
                        op[u] = match[u];
1762
0
            }   }   }
1763
0
            op += ml;
1764
0
            if ((size_t)(oend-op) < LASTLITERALS) {
1765
0
                DEBUGLOG(5, "invalid: match ends at distance %zi from end of block", oend-op);
1766
                /* incorrect end of block :
1767
                 * last match must stop at least LASTLITERALS==5 bytes before end of output block */
1768
0
                return -1;
1769
0
            }
1770
0
        } /* match */
1771
0
    } /* main loop */
1772
0
    return (int)(ip - istart);
1773
0
}
1774
1775
1776
/* Read the variable-length literal or match length.
1777
 *
1778
 * @ip : input pointer
1779
 * @ilimit : position after which if length is not decoded, the input is necessarily corrupted.
1780
 * @initial_check - check ip >= ilimit before start of loop.  Returns rvl_error if so.
1781
 * @return : decoded length, or rvl_error if the input is detected as corrupted.
1782
**/
1783
typedef size_t Rvl_t;
1784
static const Rvl_t rvl_error = (Rvl_t)(-1);
1785
LZ4_FORCE_INLINE Rvl_t
1786
read_variable_length(const BYTE** ip, const BYTE* ilimit,
1787
                     int initial_check)
1788
0
{
1789
0
    Rvl_t s, length = 0;
1790
0
    assert(ip != NULL);
1791
0
    assert(*ip !=  NULL);
1792
0
    assert(ilimit != NULL);
1793
0
    if (initial_check && unlikely((*ip) >= ilimit)) {    /* read limit reached */
1794
0
        return rvl_error;
1795
0
    }
1796
0
    do {
1797
0
        s = **ip;
1798
0
        (*ip)++;
1799
0
        length += s;
1800
0
        if (unlikely((*ip) > ilimit)) {    /* read limit reached */
1801
0
            return rvl_error;
1802
0
        }
1803
        /* accumulator overflow detection (32-bit mode only) */
1804
0
        if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
1805
0
            return rvl_error;
1806
0
        }
1807
0
    } while (s==255);
1808
1809
0
    return length;
1810
0
}
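/* --- Editor's note (illustration, not part of lz4.cpp) ----------------------
 * Worked example of the variable-length encoding read above : when the 4-bit
 * token field saturates at 15, each following byte adds 0..255 and the run
 * stops at the first byte below 255.  So a literal length of 299 is stored as
 * token nibble 15 plus the bytes {255, 29}, since 15 + 255 + 29 = 299, while a
 * length of 14 or less needs no extra byte at all. */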
1811
1812
/*! LZ4_decompress_generic() :
1813
 *  This generic decompression function covers all use cases.
1814
 *  It shall be instantiated several times, using different sets of directives.
1815
 *  Note that it is important for performance that this function really get inlined,
1816
 *  in order to remove useless branches during compilation optimization.
1817
 */
1818
LZ4_FORCE_INLINE int
1819
LZ4_decompress_generic(
1820
                 const char* const src,
1821
                 char* const dst,
1822
                 int srcSize,
1823
                 int outputSize,         /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
1824
1825
                 earlyEnd_directive partialDecoding,  /* full, partial */
1826
                 dict_directive dict,                 /* noDict, withPrefix64k, usingExtDict */
1827
                 const BYTE* const lowPrefix,  /* always <= dst, == dst when no prefix */
1828
                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
1829
                 const size_t dictSize         /* note : = 0 if noDict */
1830
                 )
1831
0
{
1832
0
    if ((src == NULL) || (outputSize < 0)) { return -1; }
1833
1834
0
    {   const BYTE* ip = (const BYTE*) src;
1835
0
        const BYTE* const iend = ip + srcSize;
1836
1837
0
        BYTE* op = (BYTE*) dst;
1838
0
        BYTE* const oend = op + outputSize;
1839
0
        BYTE* cpy;
1840
1841
0
        const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
1842
1843
0
        const int checkOffset = (dictSize < (int)(64 KB));
1844
1845
1846
        /* Set up the "end" pointers for the shortcut. */
1847
0
        const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;
1848
0
        const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;
1849
1850
0
        const BYTE* match;
1851
0
        size_t offset;
1852
0
        unsigned token;
1853
0
        size_t length;
1854
1855
1856
0
        DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
1857
1858
        /* Special cases */
1859
0
        assert(lowPrefix <= op);
1860
0
        if (unlikely(outputSize==0)) {
1861
            /* Empty output buffer */
1862
0
            if (partialDecoding) return 0;
1863
0
            return ((srcSize==1) && (*ip==0)) ? 0 : -1;
1864
0
        }
1865
0
        if (unlikely(srcSize==0)) { return -1; }
1866
1867
    /* LZ4_FAST_DEC_LOOP:
1868
     * designed for modern OoO performance cpus,
1869
     * where reliably copying 32 bytes is preferable to an unpredictable branch.
1870
     * note : fast loop may show a regression for some client arm chips. */
1871
0
#if LZ4_FAST_DEC_LOOP
1872
0
        if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
1873
0
            DEBUGLOG(6, "skip fast decode loop");
1874
0
            goto safe_decode;
1875
0
        }
1876
1877
        /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
1878
0
        while (1) {
1879
            /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
1880
0
            assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
1881
0
            assert(ip < iend);
1882
0
            token = *ip++;
1883
0
            length = token >> ML_BITS;  /* literal length */
1884
1885
            /* decode literal length */
1886
0
            if (length == RUN_MASK) {
1887
0
                size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
1888
0
                if (addl == rvl_error) { goto _output_error; }
1889
0
                length += addl;
1890
0
                if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
1891
0
                if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
1892
1893
                /* copy literals */
1894
0
                cpy = op+length;
1895
0
                LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
1896
0
                if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
1897
0
                LZ4_wildCopy32(op, ip, cpy);
1898
0
                ip += length; op = cpy;
1899
0
            } else {
1900
0
                cpy = op+length;
1901
0
                DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
1902
                /* We don't need to check oend, since we check it once for each loop below */
1903
0
                if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
1904
                /* Literals can only be <= 14, but we copy 16 bytes anyway, hoping compilers optimize better with a register-sized copy */
1905
0
                LZ4_memcpy(op, ip, 16);
1906
0
                ip += length; op = cpy;
1907
0
            }
1908
1909
            /* get offset */
1910
0
            offset = LZ4_readLE16(ip); ip+=2;
1911
0
            match = op - offset;
1912
0
            assert(match <= op);  /* overflow check */
1913
1914
            /* get matchlength */
1915
0
            length = token & ML_MASK;
1916
1917
0
            if (length == ML_MASK) {
1918
0
                size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
1919
0
                if (addl == rvl_error) { goto _output_error; }
1920
0
                length += addl;
1921
0
                length += MINMATCH;
1922
0
                if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
1923
0
                if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1924
0
                if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1925
0
                    goto safe_match_copy;
1926
0
                }
1927
0
            } else {
1928
0
                length += MINMATCH;
1929
0
                if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1930
0
                    goto safe_match_copy;
1931
0
                }
1932
1933
                /* Fastpath check: skip LZ4_wildCopy32 when true */
1934
0
                if ((dict == withPrefix64k) || (match >= lowPrefix)) {
1935
0
                    if (offset >= 8) {
1936
0
                        assert(match >= lowPrefix);
1937
0
                        assert(match <= op);
1938
0
                        assert(op + 18 <= oend);
1939
1940
0
                        LZ4_memcpy(op, match, 8);
1941
0
                        LZ4_memcpy(op+8, match+8, 8);
1942
0
                        LZ4_memcpy(op+16, match+16, 2);
1943
0
                        op += length;
1944
0
                        continue;
1945
0
            }   }   }
1946
1947
0
            if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1948
            /* match starting within external dictionary */
1949
0
            if ((dict==usingExtDict) && (match < lowPrefix)) {
1950
0
                assert(dictEnd != NULL);
1951
0
                if (unlikely(op+length > oend-LASTLITERALS)) {
1952
0
                    if (partialDecoding) {
1953
0
                        DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
1954
0
                        length = MIN(length, (size_t)(oend-op));
1955
0
                    } else {
1956
0
                        goto _output_error;  /* end-of-block condition violated */
1957
0
                }   }
1958
1959
0
                if (length <= (size_t)(lowPrefix-match)) {
1960
                    /* match fits entirely within external dictionary : just copy */
1961
0
                    LZ4_memmove(op, dictEnd - (lowPrefix-match), length);
1962
0
                    op += length;
1963
0
                } else {
1964
                    /* match stretches into both external dictionary and current block */
1965
0
                    size_t const copySize = (size_t)(lowPrefix - match);
1966
0
                    size_t const restSize = length - copySize;
1967
0
                    LZ4_memcpy(op, dictEnd - copySize, copySize);
1968
0
                    op += copySize;
1969
0
                    if (restSize > (size_t)(op - lowPrefix)) {  /* overlap copy */
1970
0
                        BYTE* const endOfMatch = op + restSize;
1971
0
                        const BYTE* copyFrom = lowPrefix;
1972
0
                        while (op < endOfMatch) { *op++ = *copyFrom++; }
1973
0
                    } else {
1974
0
                        LZ4_memcpy(op, lowPrefix, restSize);
1975
0
                        op += restSize;
1976
0
                }   }
1977
0
                continue;
1978
0
            }
1979
1980
            /* copy match within block */
1981
0
            cpy = op + length;
1982
1983
0
            assert((op <= oend) && (oend-op >= 32));
1984
0
            if (unlikely(offset<16)) {
1985
0
                LZ4_memcpy_using_offset(op, match, cpy, offset);
1986
0
            } else {
1987
0
                LZ4_wildCopy32(op, match, cpy);
1988
0
            }
1989
1990
0
            op = cpy;   /* wildcopy correction */
1991
0
        }
1992
0
    safe_decode:
1993
0
#endif
1994
1995
        /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
1996
0
        while (1) {
1997
0
            assert(ip < iend);
1998
0
            token = *ip++;
1999
0
            length = token >> ML_BITS;  /* literal length */
2000
2001
            /* A two-stage shortcut for the most common case:
2002
             * 1) If the literal length is 0..14, and there is enough space,
2003
             * enter the shortcut and copy 16 bytes on behalf of the literals
2004
             * (in the fast mode, only 8 bytes can be safely copied this way).
2005
             * 2) Further if the match length is 4..18, copy 18 bytes in a similar
2006
             * manner; but we ensure that there's enough space in the output for
2007
             * those 18 bytes earlier, upon entering the shortcut (in other words,
2008
             * there is a combined check for both stages).
2009
             */
2010
0
            if ( (length != RUN_MASK)
2011
                /* strictly "less than" on input, to re-enter the loop with at least one byte */
2012
0
              && likely((ip < shortiend) & (op <= shortoend)) ) {
2013
                /* Copy the literals */
2014
0
                LZ4_memcpy(op, ip, 16);
2015
0
                op += length; ip += length;
2016
2017
                /* The second stage: prepare for match copying, decode full info.
2018
                 * If it doesn't work out, the info won't be wasted. */
2019
0
                length = token & ML_MASK; /* match length */
2020
0
                offset = LZ4_readLE16(ip); ip += 2;
2021
0
                match = op - offset;
2022
0
                assert(match <= op); /* check overflow */
2023
2024
                /* Do not deal with overlapping matches. */
2025
0
                if ( (length != ML_MASK)
2026
0
                  && (offset >= 8)
2027
0
                  && (dict==withPrefix64k || match >= lowPrefix) ) {
2028
                    /* Copy the match. */
2029
0
                    LZ4_memcpy(op + 0, match + 0, 8);
2030
0
                    LZ4_memcpy(op + 8, match + 8, 8);
2031
0
                    LZ4_memcpy(op +16, match +16, 2);
2032
0
                    op += length + MINMATCH;
2033
                    /* Both stages worked, load the next token. */
2034
0
                    continue;
2035
0
                }
2036
2037
                /* The second stage didn't work out, but the info is ready.
2038
                 * Propel it right to the point of match copying. */
2039
0
                goto _copy_match;
2040
0
            }
2041
2042
            /* decode literal length */
2043
0
            if (length == RUN_MASK) {
2044
0
                size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
2045
0
                if (addl == rvl_error) { goto _output_error; }
2046
0
                length += addl;
2047
0
                if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
2048
0
                if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
2049
0
            }
2050
2051
            /* copy literals */
2052
0
            cpy = op+length;
2053
0
#if LZ4_FAST_DEC_LOOP
2054
0
        safe_literal_copy:
2055
0
#endif
2056
0
            LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
2057
0
            if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {
2058
                /* We've either hit the input parsing restriction or the output parsing restriction.
2059
                 * In the normal scenario, decoding a full block, it must be the last sequence,
2060
                 * otherwise it's an error (invalid input or dimensions).
2061
                 * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
2062
                 */
2063
0
                if (partialDecoding) {
2064
                    /* Since we are doing partial decoding, we may be in this block because of the output parsing
2065
                     * restriction, which is not valid since the output buffer is allowed to be undersized.
2066
                     */
2067
0
                    DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
2068
0
                    DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
2069
0
                    DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
2070
0
                    DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
2071
                    /* Finishing in the middle of a literals segment,
2072
                     * due to lack of input.
2073
                     */
2074
0
                    if (ip+length > iend) {
2075
0
                        length = (size_t)(iend-ip);
2076
0
                        cpy = op + length;
2077
0
                    }
2078
                    /* Finishing in the middle of a literals segment,
2079
                     * due to lack of output space.
2080
                     */
2081
0
                    if (cpy > oend) {
2082
0
                        cpy = oend;
2083
0
                        assert(op<=oend);
2084
0
                        length = (size_t)(oend-op);
2085
0
                    }
2086
0
                } else {
2087
                     /* We must be on the last sequence (or invalid) because of the parsing limitations
2088
                      * so check that we exactly consume the input and don't overrun the output buffer.
2089
                      */
2090
0
                    if ((ip+length != iend) || (cpy > oend)) {
2091
0
                        DEBUGLOG(6, "should have been last run of literals")
2092
0
                        DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
2093
0
                        DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
2094
0
                        goto _output_error;
2095
0
                    }
2096
0
                }
2097
0
                LZ4_memmove(op, ip, length);  /* supports overlapping memory regions, for in-place decompression scenarios */
2098
0
                ip += length;
2099
0
                op += length;
2100
                /* Necessarily EOF when !partialDecoding.
2101
                 * When partialDecoding, it is EOF if we've either
2102
                 * filled the output buffer or
2103
                 * can't proceed with reading an offset for following match.
2104
                 */
2105
0
                if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
2106
0
                    break;
2107
0
                }
2108
0
            } else {
2109
0
                LZ4_wildCopy8(op, ip, cpy);   /* can overwrite up to 8 bytes beyond cpy */
2110
0
                ip += length; op = cpy;
2111
0
            }
2112
2113
            /* get offset */
2114
0
            offset = LZ4_readLE16(ip); ip+=2;
2115
0
            match = op - offset;
2116
2117
            /* get matchlength */
2118
0
            length = token & ML_MASK;
2119
2120
0
    _copy_match:
2121
0
            if (length == ML_MASK) {
2122
0
                size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
2123
0
                if (addl == rvl_error) { goto _output_error; }
2124
0
                length += addl;
2125
0
                if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
2126
0
            }
2127
0
            length += MINMATCH;
2128
2129
0
#if LZ4_FAST_DEC_LOOP
2130
0
        safe_match_copy:
2131
0
#endif
2132
0
            if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error;   /* Error : offset outside buffers */
2133
            /* match starting within external dictionary */
2134
0
            if ((dict==usingExtDict) && (match < lowPrefix)) {
2135
0
                assert(dictEnd != NULL);
2136
0
                if (unlikely(op+length > oend-LASTLITERALS)) {
2137
0
                    if (partialDecoding) length = MIN(length, (size_t)(oend-op));
2138
0
                    else goto _output_error;   /* doesn't respect parsing restriction */
2139
0
                }
2140
2141
0
                if (length <= (size_t)(lowPrefix-match)) {
2142
                    /* match fits entirely within external dictionary : just copy */
2143
0
                    LZ4_memmove(op, dictEnd - (lowPrefix-match), length);
2144
0
                    op += length;
2145
0
                } else {
2146
                    /* match stretches into both external dictionary and current block */
2147
0
                    size_t const copySize = (size_t)(lowPrefix - match);
2148
0
                    size_t const restSize = length - copySize;
2149
0
                    LZ4_memcpy(op, dictEnd - copySize, copySize);
2150
0
                    op += copySize;
2151
0
                    if (restSize > (size_t)(op - lowPrefix)) {  /* overlap copy */
2152
0
                        BYTE* const endOfMatch = op + restSize;
2153
0
                        const BYTE* copyFrom = lowPrefix;
2154
0
                        while (op < endOfMatch) *op++ = *copyFrom++;
2155
0
                    } else {
2156
0
                        LZ4_memcpy(op, lowPrefix, restSize);
2157
0
                        op += restSize;
2158
0
                }   }
2159
0
                continue;
2160
0
            }
2161
0
            assert(match >= lowPrefix);
2162
2163
            /* copy match within block */
2164
0
            cpy = op + length;
2165
2166
            /* partialDecoding : may end anywhere within the block */
2167
0
            assert(op<=oend);
2168
0
            if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
2169
0
                size_t const mlen = MIN(length, (size_t)(oend-op));
2170
0
                const BYTE* const matchEnd = match + mlen;
2171
0
                BYTE* const copyEnd = op + mlen;
2172
0
                if (matchEnd > op) {   /* overlap copy */
2173
0
                    while (op < copyEnd) { *op++ = *match++; }
2174
0
                } else {
2175
0
                    LZ4_memcpy(op, match, mlen);
2176
0
                }
2177
0
                op = copyEnd;
2178
0
                if (op == oend) { break; }
2179
0
                continue;
2180
0
            }
2181
2182
0
            if (unlikely(offset<8)) {
2183
0
                LZ4_write32(op, 0);   /* silence msan warning when offset==0 */
2184
0
                op[0] = match[0];
2185
0
                op[1] = match[1];
2186
0
                op[2] = match[2];
2187
0
                op[3] = match[3];
2188
0
                match += inc32table[offset];
2189
0
                LZ4_memcpy(op+4, match, 4);
2190
0
                match -= dec64table[offset];
2191
0
            } else {
2192
0
                LZ4_memcpy(op, match, 8);
2193
0
                match += 8;
2194
0
            }
2195
0
            op += 8;
2196
2197
0
            if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
2198
0
                BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
2199
0
                if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
2200
0
                if (op < oCopyLimit) {
2201
0
                    LZ4_wildCopy8(op, match, oCopyLimit);
2202
0
                    match += oCopyLimit - op;
2203
0
                    op = oCopyLimit;
2204
0
                }
2205
0
                while (op < cpy) { *op++ = *match++; }
2206
0
            } else {
2207
0
                LZ4_memcpy(op, match, 8);
2208
0
                if (length > 16)  { LZ4_wildCopy8(op+8, match+8, cpy); }
2209
0
            }
2210
0
            op = cpy;   /* wildcopy correction */
2211
0
        }
2212
2213
        /* end of decoding */
2214
0
        DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
2215
0
        return (int) (((char*)op)-dst);     /* Nb of output bytes decoded */
2216
2217
        /* Overflow error detected */
2218
0
    _output_error:
2219
0
        return (int) (-(((const char*)ip)-src))-1;
2220
0
    }
2221
0
}
2222
2223
2224
/*===== Instantiate the API decoding functions. =====*/
2225
2226
LZ4_FORCE_O2
2227
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
2228
0
{
2229
0
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
2230
0
                                  decode_full_block, noDict,
2231
0
                                  (BYTE*)dest, NULL, 0);
2232
0
}
2233
2234
LZ4_FORCE_O2
2235
int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
2236
0
{
2237
0
    dstCapacity = MIN(targetOutputSize, dstCapacity);
2238
0
    return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
2239
0
                                  partial_decode,
2240
0
                                  noDict, (BYTE*)dst, NULL, 0);
2241
0
}
2242
2243
LZ4_FORCE_O2
2244
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
2245
0
{
2246
0
    DEBUGLOG(5, "LZ4_decompress_fast");
2247
0
    return LZ4_decompress_unsafe_generic(
2248
0
                (const BYTE*)source, (BYTE*)dest, originalSize,
2249
0
                0, NULL, 0);
2250
0
}
2251
2252
/*===== Instantiate a few more decoding cases, used more than once. =====*/
2253
2254
LZ4_FORCE_O2 /* Exported, an obsolete API function. */
2255
int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
2256
0
{
2257
0
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2258
0
                                  decode_full_block, withPrefix64k,
2259
0
                                  (BYTE*)dest - 64 KB, NULL, 0);
2260
0
}
2261
2262
LZ4_FORCE_O2
2263
static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)
2264
0
{
2265
0
    dstCapacity = MIN(targetOutputSize, dstCapacity);
2266
0
    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
2267
0
                                  partial_decode, withPrefix64k,
2268
0
                                  (BYTE*)dest - 64 KB, NULL, 0);
2269
0
}
2270
2271
/* Another obsolete API function, paired with the previous one. */
2272
int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
2273
0
{
2274
0
    return LZ4_decompress_unsafe_generic(
2275
0
                (const BYTE*)source, (BYTE*)dest, originalSize,
2276
0
                64 KB, NULL, 0);
2277
0
}
2278
2279
LZ4_FORCE_O2
2280
static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
2281
                                               size_t prefixSize)
2282
0
{
2283
0
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2284
0
                                  decode_full_block, noDict,
2285
0
                                  (BYTE*)dest-prefixSize, NULL, 0);
2286
0
}
2287
2288
LZ4_FORCE_O2
2289
static int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,
2290
                                               size_t prefixSize)
2291
0
{
2292
0
    dstCapacity = MIN(targetOutputSize, dstCapacity);
2293
0
    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
2294
0
                                  partial_decode, noDict,
2295
0
                                  (BYTE*)dest-prefixSize, NULL, 0);
2296
0
}
2297
2298
LZ4_FORCE_O2
2299
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
2300
                                     int compressedSize, int maxOutputSize,
2301
                                     const void* dictStart, size_t dictSize)
2302
0
{
2303
0
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2304
0
                                  decode_full_block, usingExtDict,
2305
0
                                  (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2306
0
}
2307
2308
LZ4_FORCE_O2
2309
int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
2310
                                     int compressedSize, int targetOutputSize, int dstCapacity,
2311
                                     const void* dictStart, size_t dictSize)
2312
0
{
2313
0
    dstCapacity = MIN(targetOutputSize, dstCapacity);
2314
0
    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
2315
0
                                  partial_decode, usingExtDict,
2316
0
                                  (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2317
0
}
2318
2319
LZ4_FORCE_O2
2320
static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
2321
                                       const void* dictStart, size_t dictSize)
2322
0
{
2323
0
    return LZ4_decompress_unsafe_generic(
2324
0
                (const BYTE*)source, (BYTE*)dest, originalSize,
2325
0
                0, (const BYTE*)dictStart, dictSize);
2326
0
}
2327
2328
/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
2329
 * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
2330
 * These routines are used only once, in LZ4_decompress_*_continue().
2331
 */
2332
LZ4_FORCE_INLINE
2333
int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
2334
                                   size_t prefixSize, const void* dictStart, size_t dictSize)
2335
0
{
2336
0
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2337
0
                                  decode_full_block, usingExtDict,
2338
0
                                  (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
2339
0
}
2340
2341
/*===== streaming decompression functions =====*/
2342
2343
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
2344
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
2345
0
{
2346
0
    LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal));
2347
0
    return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
2348
0
}
2349
2350
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
2351
0
{
2352
0
    if (LZ4_stream == NULL) { return 0; }  /* support free on NULL */
2353
0
    FREEMEM(LZ4_stream);
2354
0
    return 0;
2355
0
}
2356
#endif
2357
2358
/*! LZ4_setStreamDecode() :
2359
 *  Use this function to indicate where to find the dictionary.
2360
 *  This function is not necessary if previous data is still available where it was decoded.
2361
 *  Loading a size of 0 is allowed (same effect as no dictionary).
2362
 * @return : 1 if OK, 0 if error
2363
 */
2364
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
2365
0
{
2366
0
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2367
0
    lz4sd->prefixSize = (size_t)dictSize;
2368
0
    if (dictSize) {
2369
0
        assert(dictionary != NULL);
2370
0
        lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
2371
0
    } else {
2372
0
        lz4sd->prefixEnd = (const BYTE*) dictionary;
2373
0
    }
2374
0
    lz4sd->externalDict = NULL;
2375
0
    lz4sd->extDictSize  = 0;
2376
0
    return 1;
2377
0
}
2378
2379
/*! LZ4_decoderRingBufferSize() :
2380
 *  when setting a ring buffer for streaming decompression (optional scenario),
2381
 *  provides the minimum size of this ring buffer
2382
 *  to be compatible with any source respecting maxBlockSize condition.
2383
 *  Note : in a ring buffer scenario,
2384
 *  blocks are presumed decompressed next to each other.
2385
 *  When not enough space remains for next block (remainingSize < maxBlockSize),
2386
 *  decoding resumes from beginning of ring buffer.
2387
 * @return : minimum ring buffer size,
2388
 *           or 0 if there is an error (invalid maxBlockSize).
2389
 */
2390
int LZ4_decoderRingBufferSize(int maxBlockSize)
2391
0
{
2392
0
    if (maxBlockSize < 0) return 0;
2393
0
    if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
2394
0
    if (maxBlockSize < 16) maxBlockSize = 16;
2395
0
    return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
2396
0
}
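/* --- Editor's illustrative sketch (standalone, not part of lz4.cpp) ---------
 * Sizing a decoder ring buffer : for a stream whose blocks never exceed 4 KB,
 * either call LZ4_decoderRingBufferSize(4096) at runtime or use the
 * LZ4_DECODER_RING_BUFFER_SIZE() macro from lz4.h for a static allocation, as
 * sketched below (the 4096 figure is just an example value). */
#include "lz4.h"

static char decodeRing[LZ4_DECODER_RING_BUFFER_SIZE(4096)];   /* compile-time sizing */
/* --------------------------------------------------------------------------- */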
2397
2398
/*
2399
*_continue() :
2400
    These decoding functions allow decompression of multiple blocks in "streaming" mode.
2401
    Previously decoded blocks must still be available at the memory position where they were decoded.
2402
    If it's not possible, save the relevant part of decoded data into a safe buffer,
2403
    and indicate where it stands using LZ4_setStreamDecode()
2404
*/
2405
LZ4_FORCE_O2
2406
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
2407
0
{
2408
0
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2409
0
    int result;
2410
2411
0
    if (lz4sd->prefixSize == 0) {
2412
        /* The first call, no dictionary yet. */
2413
0
        assert(lz4sd->extDictSize == 0);
2414
0
        result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
2415
0
        if (result <= 0) return result;
2416
0
        lz4sd->prefixSize = (size_t)result;
2417
0
        lz4sd->prefixEnd = (BYTE*)dest + result;
2418
0
    } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2419
        /* They're rolling the current segment. */
2420
0
        if (lz4sd->prefixSize >= 64 KB - 1)
2421
0
            result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
2422
0
        else if (lz4sd->extDictSize == 0)
2423
0
            result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
2424
0
                                                         lz4sd->prefixSize);
2425
0
        else
2426
0
            result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
2427
0
                                                    lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
2428
0
        if (result <= 0) return result;
2429
0
        lz4sd->prefixSize += (size_t)result;
2430
0
        lz4sd->prefixEnd  += result;
2431
0
    } else {
2432
        /* The buffer wraps around, or they're switching to another buffer. */
2433
0
        lz4sd->extDictSize = lz4sd->prefixSize;
2434
0
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2435
0
        result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
2436
0
                                                  lz4sd->externalDict, lz4sd->extDictSize);
2437
0
        if (result <= 0) return result;
2438
0
        lz4sd->prefixSize = (size_t)result;
2439
0
        lz4sd->prefixEnd  = (BYTE*)dest + result;
2440
0
    }
2441
2442
0
    return result;
2443
0
}
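/* Illustrative caller-side sketch (not part of lz4.cpp; the helper below is
 * hypothetical) : decoding a sequence of blocks back-to-back with
 * LZ4_decompress_safe_continue(). Each block is written immediately after the
 * previous one, so the already-decoded data stays addressable and acts as the
 * prefix dictionary for the next block. */
static int example_decode_blocks_contiguously(const char* const cBlocks[], const int cSizes[],
                                              int nbBlocks, char* dst, int dstCapacity)
{
    LZ4_streamDecode_t* const sd = LZ4_createStreamDecode();
    int total = 0;
    int b;
    if (sd == NULL) return -1;
    for (b = 0; b < nbBlocks; b++) {
        int const r = LZ4_decompress_safe_continue(sd, cBlocks[b], dst + total,
                                                   cSizes[b], dstCapacity - total);
        if (r < 0) { total = r; break; }   /* propagate the decoding error */
        total += r;
    }
    LZ4_freeStreamDecode(sd);
    return total;   /* total decoded bytes, or a negative error code */
}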
2444
2445
LZ4_FORCE_O2 int
2446
LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,
2447
                        const char* source, char* dest, int originalSize)
2448
0
{
2449
0
    LZ4_streamDecode_t_internal* const lz4sd =
2450
0
        (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);
2451
0
    int result;
2452
2453
0
    DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize);
2454
0
    assert(originalSize >= 0);
2455
2456
0
    if (lz4sd->prefixSize == 0) {
2457
0
        DEBUGLOG(5, "first invocation : no prefix nor extDict");
2458
0
        assert(lz4sd->extDictSize == 0);
2459
0
        result = LZ4_decompress_fast(source, dest, originalSize);
2460
0
        if (result <= 0) return result;
2461
0
        lz4sd->prefixSize = (size_t)originalSize;
2462
0
        lz4sd->prefixEnd = (BYTE*)dest + originalSize;
2463
0
    } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2464
0
        DEBUGLOG(5, "continue using existing prefix");
2465
0
        result = LZ4_decompress_unsafe_generic(
2466
0
                        (const BYTE*)source, (BYTE*)dest, originalSize,
2467
0
                        lz4sd->prefixSize,
2468
0
                        lz4sd->externalDict, lz4sd->extDictSize);
2469
0
        if (result <= 0) return result;
2470
0
        lz4sd->prefixSize += (size_t)originalSize;
2471
0
        lz4sd->prefixEnd  += originalSize;
2472
0
    } else {
2473
0
        DEBUGLOG(5, "prefix becomes extDict");
2474
0
        lz4sd->extDictSize = lz4sd->prefixSize;
2475
0
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2476
0
        result = LZ4_decompress_fast_extDict(source, dest, originalSize,
2477
0
                                             lz4sd->externalDict, lz4sd->extDictSize);
2478
0
        if (result <= 0) return result;
2479
0
        lz4sd->prefixSize = (size_t)originalSize;
2480
0
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
2481
0
    }
2482
2483
0
    return result;
2484
0
}
2485
2486
2487
/*
2488
Advanced decoding functions :
2489
*_usingDict() :
2490
    These decoding functions work the same way as the "_continue" ones,
2491
    except that the dictionary must be provided explicitly as a parameter.
2492
*/
2493
2494
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
2495
0
{
2496
0
    if (dictSize==0)
2497
0
        return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
2498
0
    if (dictStart+dictSize == dest) {
2499
0
        if (dictSize >= 64 KB - 1) {
2500
0
            return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
2501
0
        }
2502
0
        assert(dictSize >= 0);
2503
0
        return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
2504
0
    }
2505
0
    assert(dictSize >= 0);
2506
0
    return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
2507
0
}
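/* Illustrative caller-side sketch (not part of lz4.cpp; the wrapper is
 * hypothetical) : decompressing a single block against an external dictionary.
 * dict/dictSize must contain exactly the bytes that were supplied as the
 * dictionary when the block was compressed. */
static int example_decode_with_dict(const char* src, int srcSize,
                                    char* dst, int dstCapacity,
                                    const char* dict, int dictSize)
{
    /* returns the number of decoded bytes, or a negative value on error */
    return LZ4_decompress_safe_usingDict(src, dst, srcSize, dstCapacity, dict, dictSize);
}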
2508
2509
int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)
2510
0
{
2511
0
    if (dictSize==0)
2512
0
        return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);
2513
0
    if (dictStart+dictSize == dest) {
2514
0
        if (dictSize >= 64 KB - 1) {
2515
0
            return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);
2516
0
        }
2517
0
        assert(dictSize >= 0);
2518
0
        return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize);
2519
0
    }
2520
0
    assert(dictSize >= 0);
2521
0
    return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);
2522
0
}
2523
2524
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
2525
0
{
2526
0
    if (dictSize==0 || dictStart+dictSize == dest)
2527
0
        return LZ4_decompress_unsafe_generic(
2528
0
                        (const BYTE*)source, (BYTE*)dest, originalSize,
2529
0
                        (size_t)dictSize, NULL, 0);
2530
0
    assert(dictSize >= 0);
2531
0
    return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
2532
0
}
2533
2534
2535
/*=*************************************************
2536
*  Obsolete Functions
2537
***************************************************/
2538
/* obsolete compression functions */
2539
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
2540
0
{
2541
0
    return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
2542
0
}
2543
int LZ4_compress(const char* src, char* dest, int srcSize)
2544
0
{
2545
0
    return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
2546
0
}
2547
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
2548
0
{
2549
0
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
2550
0
}
2551
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
2552
0
{
2553
0
    return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
2554
0
}
2555
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
2556
0
{
2557
0
    return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
2558
0
}
2559
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
2560
0
{
2561
0
    return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
2562
0
}
2563
2564
/*
2565
These decompression functions are deprecated and should no longer be used.
2566
They are only provided here for compatibility with older user programs.
2567
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
2568
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
2569
*/
2570
int LZ4_uncompress (const char* source, char* dest, int outputSize)
2571
0
{
2572
0
    return LZ4_decompress_fast(source, dest, outputSize);
2573
0
}
2574
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
2575
0
{
2576
0
    return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
2577
0
}
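/* Illustrative migration sketch (not part of lz4.cpp; the wrappers are
 * hypothetical) : the obsolete entry points above forward one-for-one to the
 * current API, so existing callers can substitute the calls directly. */
static int example_migrated_uncompress(const char* src, char* dst, int originalSize)
{
    /* formerly LZ4_uncompress(src, dst, originalSize) */
    return LZ4_decompress_fast(src, dst, originalSize);
}
static int example_migrated_uncompress_unknownOutputSize(const char* src, char* dst,
                                                         int isize, int maxOutputSize)
{
    /* formerly LZ4_uncompress_unknownOutputSize(src, dst, isize, maxOutputSize) */
    return LZ4_decompress_safe(src, dst, isize, maxOutputSize);
}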
2578
2579
/* Obsolete Streaming functions */
2580
2581
0
int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }
2582
2583
int LZ4_resetStreamState(void* state, char* inputBuffer)
2584
0
{
2585
0
    (void)inputBuffer;
2586
0
    LZ4_resetStream((LZ4_stream_t*)state);
2587
0
    return 0;
2588
0
}
2589
2590
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
2591
void* LZ4_create (char* inputBuffer)
2592
0
{
2593
0
    (void)inputBuffer;
2594
0
    return LZ4_createStream();
2595
0
}
2596
#endif
2597
2598
char* LZ4_slideInputBuffer (void* state)
2599
0
{
2600
    /* avoid const char * -> char * conversion warning */
2601
0
    return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
2602
0
}
2603
2604
#endif   /* LZ4_COMMONDEFS_ONLY */
2605
}