Coverage Report

Created: 2025-06-12 06:11

/src/lz4/lib/lz4.c
Line
Count
Source
1
/*
2
   LZ4 - Fast LZ compression algorithm
3
   Copyright (c) Yann Collet. All rights reserved.
4
5
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
7
   Redistribution and use in source and binary forms, with or without
8
   modification, are permitted provided that the following conditions are
9
   met:
10
11
       * Redistributions of source code must retain the above copyright
12
   notice, this list of conditions and the following disclaimer.
13
       * Redistributions in binary form must reproduce the above
14
   copyright notice, this list of conditions and the following disclaimer
15
   in the documentation and/or other materials provided with the
16
   distribution.
17
18
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
   You can contact the author at :
31
    - LZ4 homepage : http://www.lz4.org
32
    - LZ4 source repository : https://github.com/lz4/lz4
33
*/
34
35
/*-************************************
36
*  Tuning parameters
37
**************************************/
38
/*
39
 * LZ4_HEAPMODE :
40
 * Select how stateless compression functions like `LZ4_compress_default()`
41
 * allocate memory for their hash table,
42
 * on the stack (0: default, fastest) or on the heap (1: requires malloc()).
43
 */
44
#ifndef LZ4_HEAPMODE
45
#  define LZ4_HEAPMODE 0
46
#endif
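A hedged usage sketch, assuming the public lz4.h API: whichever mode is selected, stateless compression can fail (heap mode adds a possible malloc() failure), so the return value should be checked either way.

/* illustrative only: LZ4_compress_default() returns 0 on failure
 * (e.g. dst too small, or allocation failure when LZ4_HEAPMODE=1) */
#include "lz4.h"
static int compress_checked(const char* src, int srcSize, char* dst, int dstCapacity)
{
    int const cSize = LZ4_compress_default(src, dst, srcSize, dstCapacity);
    return (cSize > 0) ? cSize : -1;
}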
47
48
/*
49
 * LZ4_ACCELERATION_DEFAULT :
50
 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
51
 */
52
327k
#define LZ4_ACCELERATION_DEFAULT 1
53
/*
54
 * LZ4_ACCELERATION_MAX :
55
 * Any "acceleration" value higher than this threshold
56
 * gets treated as LZ4_ACCELERATION_MAX instead (fix #876)
57
 */
58
340k
#define LZ4_ACCELERATION_MAX 65537
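A short usage sketch, assuming the public LZ4_compress_fast() API: out-of-range acceleration values are remapped as described above, so all three calls below are valid.

#include "lz4.h"
static void acceleration_demo(const char* src, int srcSize, char* dst, int dstCapacity)
{
    (void)LZ4_compress_fast(src, dst, srcSize, dstCapacity, 0);      /* <= 0 -> LZ4_ACCELERATION_DEFAULT */
    (void)LZ4_compress_fast(src, dst, srcSize, dstCapacity, 8);      /* trades ratio for speed */
    (void)LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1<<20);  /* clamped to LZ4_ACCELERATION_MAX */
}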
59
60
61
/*-************************************
62
*  CPU Feature Detection
63
**************************************/
64
/* LZ4_FORCE_MEMORY_ACCESS
65
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
66
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
67
 * The switch below allows selecting a different access method for improved performance.
68
 * Method 0 (default) : use `memcpy()`. Safe and portable.
69
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
70
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
71
 * Method 2 : direct access. This method is portable but violates the C standard.
72
 *            It can generate buggy code on targets whose assembly generation depends on alignment.
73
 *            But in some circumstances, it's the only known way to get the most performance (i.e., GCC + ARMv6).
74
 * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
75
 * Prefer these methods in priority order (0 > 1 > 2)
76
 */
77
#ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
78
#  if defined(__GNUC__) && \
79
  ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
80
  || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
81
#    define LZ4_FORCE_MEMORY_ACCESS 2
82
#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)
83
#    define LZ4_FORCE_MEMORY_ACCESS 1
84
#  endif
85
#endif
86
87
/*
88
 * LZ4_FORCE_SW_BITCOUNT
89
 * Define this parameter if your target system or compiler does not support hardware bit count
90
 */
91
#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for WinCE doesn't support Hardware bit count */
92
#  undef  LZ4_FORCE_SW_BITCOUNT  /* avoid double def */
93
#  define LZ4_FORCE_SW_BITCOUNT
94
#endif
95
96
97
98
/*-************************************
99
*  Dependency
100
**************************************/
101
/*
102
 * LZ4_SRC_INCLUDED:
103
 * Amalgamation flag, indicating whether lz4.c has been included
104
 */
105
#ifndef LZ4_SRC_INCLUDED
106
#  define LZ4_SRC_INCLUDED 1
107
#endif
108
109
#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
110
#  define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
111
#endif
112
113
#ifndef LZ4_STATIC_LINKING_ONLY
114
#  define LZ4_STATIC_LINKING_ONLY
115
#endif
116
#include "lz4.h"
117
/* see also "memory routines" below */
118
119
120
/*-************************************
121
*  Compiler Options
122
**************************************/
123
#if defined(_MSC_VER) && (_MSC_VER >= 1400)  /* Visual Studio 2005+ */
124
#  include <intrin.h>               /* only present in VS2005+ */
125
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
126
#  pragma warning(disable : 6237)   /* disable: C6237: conditional expression is always 0 */
127
#  pragma warning(disable : 6239)   /* disable: C6239: (<non-zero constant> && <expression>) always evaluates to the result of <expression> */
128
#  pragma warning(disable : 6240)   /* disable: C6240: (<expression> && <non-zero constant>) always evaluates to the result of <expression> */
129
#  pragma warning(disable : 6326)   /* disable: C6326: Potential comparison of a constant with another constant */
130
#endif  /* _MSC_VER */
131
132
#ifndef LZ4_FORCE_INLINE
133
#  if defined (_MSC_VER) && !defined (__clang__)    /* MSVC */
134
#    define LZ4_FORCE_INLINE static __forceinline
135
#  else
136
#    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
137
#      if defined (__GNUC__) || defined (__clang__)
138
#        define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
139
#      else
140
#        define LZ4_FORCE_INLINE static inline
141
#      endif
142
#    else
143
#      define LZ4_FORCE_INLINE static
144
#    endif /* __STDC_VERSION__ */
145
#  endif  /* _MSC_VER */
146
#endif /* LZ4_FORCE_INLINE */
147
148
/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
149
 * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
150
 * together with a simple 8-byte copy loop as a fall-back path.
151
 * However, this optimization hurts the decompression speed by >30%,
152
 * because the execution does not go to the optimized loop
153
 * for typical compressible data, and all of the preamble checks
154
 * before going to the fall-back path become useless overhead.
155
 * This optimization happens only with the -O3 flag, and -O2 generates
156
 * a simple 8-byte copy loop.
157
 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
158
 * functions are annotated with __attribute__((optimize("O2"))),
159
 * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
160
 * of LZ4_wildCopy8 does not affect the compression speed.
161
 */
162
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
163
#  define LZ4_FORCE_O2  __attribute__((optimize("O2")))
164
#  undef LZ4_FORCE_INLINE
165
#  define LZ4_FORCE_INLINE  static __inline __attribute__((optimize("O2"),always_inline))
166
#else
167
#  define LZ4_FORCE_O2
168
#endif
169
170
#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
171
19.7G
#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
172
#else
173
#  define expect(expr,value)    (expr)
174
#endif
175
176
#ifndef likely
177
621M
#define likely(expr)     expect((expr) != 0, 1)
178
#endif
179
#ifndef unlikely
180
517M
#define unlikely(expr)   expect((expr) != 0, 0)
181
#endif
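A minimal sketch of how these hints are typically applied (hypothetical helper, not part of this file); the predicate's value is unchanged, only the compiler's block layout is influenced.

static int next_byte(const unsigned char* ip, const unsigned char* iend)
{
    if (likely(ip < iend))   /* hot path in a parsing loop */
        return *ip;
    return -1;               /* cold path: end of input */
}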
182
183
/* Should the alignment test prove unreliable, for some reason,
184
 * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
185
#ifndef LZ4_ALIGN_TEST  /* can be externally provided */
186
# define LZ4_ALIGN_TEST 1
187
#endif
188
189
190
/*-************************************
191
*  Memory routines
192
**************************************/
193
194
/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :
195
 *  Disable relatively high-level LZ4/HC functions that use dynamic memory
196
 *  allocation functions (malloc(), calloc(), free()).
197
 *
198
 *  Note that this is a compile-time switch. Since it disables
199
 *  public/stable LZ4 v1 API functions, we don't recommend using this
200
 *  symbol to generate a library for distribution.
201
 *
202
 *  The following public functions are removed when this symbol is defined.
203
 *  - lz4   : LZ4_createStream, LZ4_freeStream,
204
 *            LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create (deprecated)
205
 *  - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,
206
 *            LZ4_createHC (deprecated), LZ4_freeHC  (deprecated)
207
 *  - lz4frame, lz4file : All LZ4F_* functions
208
 */
209
#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
210
#  define ALLOC(s)          lz4_error_memory_allocation_is_disabled
211
#  define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled
212
#  define FREEMEM(p)        lz4_error_memory_allocation_is_disabled
213
#elif defined(LZ4_USER_MEMORY_FUNCTIONS)
214
/* memory management functions can be customized by the user project.
215
 * The functions below must exist somewhere in the project
216
 * and be available at link time */
217
void* LZ4_malloc(size_t s);
218
void* LZ4_calloc(size_t n, size_t s);
219
void  LZ4_free(void* p);
220
# define ALLOC(s)          LZ4_malloc(s)
221
# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
222
# define FREEMEM(p)        LZ4_free(p)
223
#else
224
# include <stdlib.h>   /* malloc, calloc, free */
225
168k
# define ALLOC(s)          malloc(s)
226
46.5k
# define ALLOC_AND_ZERO(s) calloc(1,s)
227
215k
# define FREEMEM(p)        free(p)
228
#endif
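When LZ4_USER_MEMORY_FUNCTIONS is defined, the three declarations above must be resolved at link time. A minimal sketch, assuming the standard allocator is an acceptable backing store:

#include <stdlib.h>
void* LZ4_malloc(size_t s)           { return malloc(s); }
void* LZ4_calloc(size_t n, size_t s) { return calloc(n, s); }
void  LZ4_free(void* p)              { free(p); }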
229
230
#if ! LZ4_FREESTANDING
231
#  include <string.h>   /* memset, memcpy */
232
#endif
233
#if !defined(LZ4_memset)
234
1.28M
#  define LZ4_memset(p,v,s) memset((p),(v),(s))
235
#endif
236
1.28M
#define MEM_INIT(p,v,s)   LZ4_memset((p),(v),(s))
237
238
239
/*-************************************
240
*  Common Constants
241
**************************************/
242
7.16G
#define MINMATCH 4
243
244
84.6k
#define WILDCOPYLENGTH 8
245
43.4M
#define LASTLITERALS   5   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
246
1.83M
#define MFLIMIT       12   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
247
18.2k
#define MATCH_SAFEGUARD_DISTANCE  ((2*WILDCOPYLENGTH) - MINMATCH)   /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
248
66.0M
#define FASTLOOP_SAFE_DISTANCE 64
249
static const int LZ4_minLength = (MFLIMIT+1);
250
251
2.22M
#define KB *(1 <<10)
252
#define MB *(1 <<20)
253
570k
#define GB *(1U<<30)
254
255
14.2M
#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
256
#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX)   /* max supported by LZ4 format */
257
#  error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
258
#endif
259
260
2.84G
#define ML_BITS  4
261
1.67G
#define ML_MASK  ((1U<<ML_BITS)-1)
262
1.05G
#define RUN_BITS (8-ML_BITS)
263
1.05G
#define RUN_MASK ((1U<<RUN_BITS)-1)
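These constants encode the block-format token: the high RUN_BITS carry the literal-run length and the low ML_BITS carry the match length minus MINMATCH, with a saturated field (15) signalling that extra length bytes follow. A worked sketch with a hypothetical helper:

static unsigned char make_token(unsigned litLen, unsigned matchLenMinusMin)
{
    unsigned const lit = (litLen           < RUN_MASK) ? litLen           : RUN_MASK;
    unsigned const ml  = (matchLenMinusMin < ML_MASK)  ? matchLenMinusMin : ML_MASK;
    return (unsigned char)((lit << ML_BITS) | ml);   /* 15 in a field => extra length bytes */
}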
264
265
266
/*-************************************
267
*  Error detection
268
**************************************/
269
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
270
#  include <assert.h>
271
#else
272
#  ifndef assert
273
#    define assert(condition) ((void)0)
274
#  endif
275
#endif
276
277
189M
#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use after variable declarations */
278
279
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
280
#  include <stdio.h>
281
   static int g_debuglog_enable = 1;
282
#  define DEBUGLOG(l, ...) {                          \
283
        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {  \
284
            fprintf(stderr, __FILE__  " %i: ", __LINE__); \
285
            fprintf(stderr, __VA_ARGS__);             \
286
            fprintf(stderr, " \n");                   \
287
    }   }
288
#else
289
3.43G
#  define DEBUGLOG(l, ...) {}    /* disabled */
290
#endif
291
292
static int LZ4_isAligned(const void* ptr, size_t alignment)
293
166k
{
294
166k
    return ((size_t)ptr & (alignment -1)) == 0;
295
166k
}
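A hedged usage sketch (the alignment value below is illustrative, not the library's actual requirement): callers can validate externally supplied buffers before reusing them in place.

static int buffer_usable(const void* buffer)
{
    return LZ4_isAligned(buffer, 8);   /* placeholder alignment for illustration */
}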
296
297
298
/*-************************************
299
*  Types
300
**************************************/
301
#include <limits.h>
302
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
303
# include <stdint.h>
304
  typedef unsigned char BYTE; /* uint8_t is not necessarily blessed to alias arbitrary types */
305
  typedef uint16_t      U16;
306
  typedef uint32_t      U32;
307
  typedef  int32_t      S32;
308
  typedef uint64_t      U64;
309
  typedef uintptr_t     uptrval;
310
#else
311
# if UINT_MAX != 4294967295UL
312
#   error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
313
# endif
314
  typedef unsigned char       BYTE;
315
  typedef unsigned short      U16;
316
  typedef unsigned int        U32;
317
  typedef   signed int        S32;
318
  typedef unsigned long long  U64;
319
  typedef size_t              uptrval;   /* generally true, except OpenVMS-64 */
320
#endif
321
322
#if defined(__x86_64__)
323
  typedef U64    reg_t;   /* 64-bits in x32 mode */
324
#else
325
  typedef size_t reg_t;   /* 32-bits in x32 mode */
326
#endif
327
328
typedef enum {
329
    notLimited = 0,
330
    limitedOutput = 1,
331
    fillOutput = 2
332
} limitedOutput_directive;
333
334
335
/*-************************************
336
*  Reading and writing into memory
337
**************************************/
338
339
/**
340
 * LZ4 relies on memcpy with a constant size being inlined. In freestanding
341
 * environments, the compiler can't assume the implementation of memcpy() is
342
 * standard compliant, so it can't apply its specialized memcpy() inlining
343
 * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
344
 * memcpy() as if it were standard compliant, so it can inline it in freestanding
345
 * environments. This is needed when decompressing the Linux Kernel, for example.
346
 */
347
#if !defined(LZ4_memcpy)
348
#  if defined(__GNUC__) && (__GNUC__ >= 4)
349
738M
#    define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
350
#  else
351
#    define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
352
#  endif
353
#endif
354
355
#if !defined(LZ4_memmove)
356
#  if defined(__GNUC__) && (__GNUC__ >= 4)
357
1.18M
#    define LZ4_memmove __builtin_memmove
358
#  else
359
#    define LZ4_memmove memmove
360
#  endif
361
#endif
362
363
static unsigned LZ4_isLittleEndian(void)
364
1.49G
{
365
1.49G
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
366
1.49G
    return one.c[0];
367
1.49G
}
368
369
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
370
#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))
371
#elif defined(_MSC_VER)
372
#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))
373
#endif
374
375
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
376
/* lie to the compiler about data alignment; use with caution */
377
378
static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
379
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
380
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
381
382
static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
383
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
384
385
#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
386
387
/* __pack instructions are safer, but compiler-specific, hence potentially problematic for some compilers */
388
/* currently only defined for gcc and icc */
389
LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;
390
LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;
391
LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;
392
393
5.87G
static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }
394
18.2G
static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; }
395
11.6G
static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; }
396
397
59.0M
static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; }
398
10.5M
static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; }
399
400
#else  /* safe and portable access using memcpy() */
401
402
static U16 LZ4_read16(const void* memPtr)
403
{
404
    U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
405
}
406
407
static U32 LZ4_read32(const void* memPtr)
408
{
409
    U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
410
}
411
412
static reg_t LZ4_read_ARCH(const void* memPtr)
413
{
414
    reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
415
}
416
417
static void LZ4_write16(void* memPtr, U16 value)
418
{
419
    LZ4_memcpy(memPtr, &value, sizeof(value));
420
}
421
422
static void LZ4_write32(void* memPtr, U32 value)
423
{
424
    LZ4_memcpy(memPtr, &value, sizeof(value));
425
}
426
427
#endif /* LZ4_FORCE_MEMORY_ACCESS */
428
429
430
static U16 LZ4_readLE16(const void* memPtr)
431
65.8M
{
432
65.8M
    if (LZ4_isLittleEndian()) {
433
65.8M
        return LZ4_read16(memPtr);
434
65.8M
    } else {
435
0
        const BYTE* p = (const BYTE*)memPtr;
436
0
        return (U16)((U16)p[0] | (p[1]<<8));
437
0
    }
438
65.8M
}
439
440
#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
441
static U32 LZ4_readLE32(const void* memPtr)
442
{
443
    if (LZ4_isLittleEndian()) {
444
        return LZ4_read32(memPtr);
445
    } else {
446
        const BYTE* p = (const BYTE*)memPtr;
447
        return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24);
448
    }
449
}
450
#endif
451
452
static void LZ4_writeLE16(void* memPtr, U16 value)
453
59.0M
{
454
59.0M
    if (LZ4_isLittleEndian()) {
455
59.0M
        LZ4_write16(memPtr, value);
456
59.0M
    } else {
457
0
        BYTE* p = (BYTE*)memPtr;
458
0
        p[0] = (BYTE) value;
459
0
        p[1] = (BYTE)(value>>8);
460
0
    }
461
59.0M
}
462
463
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
464
LZ4_FORCE_INLINE
465
void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
466
48.7M
{
467
48.7M
    BYTE* d = (BYTE*)dstPtr;
468
48.7M
    const BYTE* s = (const BYTE*)srcPtr;
469
48.7M
    BYTE* const e = (BYTE*)dstEnd;
470
471
191M
    do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
472
48.7M
}
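Since the final 8-byte copy may overshoot dstEnd, the caller must guarantee WILDCOPYLENGTH bytes of writable slack. A sketch of that contract with a hypothetical helper:

static void copy_literals(BYTE* dst, const BYTE* src, size_t len, BYTE* oend)
{
    if (dst + len + WILDCOPYLENGTH <= oend)
        LZ4_wildCopy8(dst, src, dst + len);   /* may write up to 8 bytes past dst+len */
    else
        LZ4_memcpy(dst, src, len);            /* exact copy near the end of the buffer */
}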
473
474
static const unsigned inc32table[8] = {0, 1, 2,  1,  0,  4, 4, 4};
475
static const int      dec64table[8] = {0, 0, 0, -1, -4,  1, 2, 3};
476
477
478
#ifndef LZ4_FAST_DEC_LOOP
479
#  if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
480
#    define LZ4_FAST_DEC_LOOP 1
481
#  elif defined(__aarch64__)
482
#    if defined(__clang__) && defined(__ANDROID__)
483
     /* On Android aarch64, we disable this optimization for clang because
484
      * on certain mobile chipsets, performance is reduced with clang. For
485
      * more information refer to https://github.com/lz4/lz4/pull/707 */
486
#      define LZ4_FAST_DEC_LOOP 0
487
#    else
488
#      define LZ4_FAST_DEC_LOOP 1
489
#    endif
490
#  else
491
#    define LZ4_FAST_DEC_LOOP 0
492
#  endif
493
#endif
494
495
#if LZ4_FAST_DEC_LOOP
496
497
LZ4_FORCE_INLINE void
498
LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
499
2.81M
{
500
2.81M
    assert(srcPtr + offset == dstPtr);
501
2.81M
    if (offset < 8) {
502
2.70M
        LZ4_write32(dstPtr, 0);   /* silence an msan warning when offset==0 */
503
2.70M
        dstPtr[0] = srcPtr[0];
504
2.70M
        dstPtr[1] = srcPtr[1];
505
2.70M
        dstPtr[2] = srcPtr[2];
506
2.70M
        dstPtr[3] = srcPtr[3];
507
2.70M
        srcPtr += inc32table[offset];
508
2.70M
        LZ4_memcpy(dstPtr+4, srcPtr, 4);
509
2.70M
        srcPtr -= dec64table[offset];
510
2.70M
        dstPtr += 8;
511
2.70M
    } else {
512
110k
        LZ4_memcpy(dstPtr, srcPtr, 8);
513
110k
        dstPtr += 8;
514
110k
        srcPtr += 8;
515
110k
    }
516
517
2.81M
    LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
518
2.81M
}
519
520
/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
521
 * this version copies 16 bytes twice (instead of 32 bytes once)
522
 * because it must be compatible with offsets >= 16. */
523
LZ4_FORCE_INLINE void
524
LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
525
19.9M
{
526
19.9M
    BYTE* d = (BYTE*)dstPtr;
527
19.9M
    const BYTE* s = (const BYTE*)srcPtr;
528
19.9M
    BYTE* const e = (BYTE*)dstEnd;
529
530
103M
    do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
531
19.9M
}
532
533
/* LZ4_memcpy_using_offset()  presumes :
534
 * - dstEnd >= dstPtr + MINMATCH
535
 * - there are at least 12 bytes available to write after dstEnd */
536
LZ4_FORCE_INLINE void
537
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
538
7.62M
{
539
7.62M
    BYTE v[8];
540
541
7.62M
    assert(dstEnd >= dstPtr + MINMATCH);
542
543
7.62M
    switch(offset) {
544
1.13M
    case 1:
545
1.13M
        MEM_INIT(v, *srcPtr, 8);
546
1.13M
        break;
547
3.27M
    case 2:
548
3.27M
        LZ4_memcpy(v, srcPtr, 2);
549
3.27M
        LZ4_memcpy(&v[2], srcPtr, 2);
550
#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
551
#  pragma warning(push)
552
#  pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
553
#endif
554
3.27M
        LZ4_memcpy(&v[4], v, 4);
555
#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
556
#  pragma warning(pop)
557
#endif
558
3.27M
        break;
559
409k
    case 4:
560
409k
        LZ4_memcpy(v, srcPtr, 4);
561
409k
        LZ4_memcpy(&v[4], srcPtr, 4);
562
409k
        break;
563
2.81M
    default:
564
2.81M
        LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
565
2.81M
        return;
566
7.62M
    }
567
568
4.81M
    LZ4_memcpy(dstPtr, v, 8);
569
4.81M
    dstPtr += 8;
570
139M
    while (dstPtr < dstEnd) {
571
134M
        LZ4_memcpy(dstPtr, v, 8);
572
134M
        dstPtr += 8;
573
134M
    }
574
4.81M
}
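A worked sketch of the offset==2 path: the switch builds the 8-byte pattern buffer v as "abababab", then the tail loop stamps it every 8 bytes; the illustrative buffer below satisfies both preconditions listed above.

static void offset2_demo(void)
{
    BYTE buf[32];   /* 16 pattern bytes + slack required after dstEnd */
    buf[0] = 'a'; buf[1] = 'b';
    LZ4_memcpy_using_offset(buf + 2, buf, buf + 16, 2);
    /* buf[0..15] now reads "abababababababab" */
}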
575
#endif
576
577
578
/*-************************************
579
*  Common functions
580
**************************************/
581
static unsigned LZ4_NbCommonBytes (reg_t val)
582
887M
{
583
887M
    assert(val != 0);
584
887M
    if (LZ4_isLittleEndian()) {
585
887M
        if (sizeof(val) == 8) {
586
#       if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)
587
/*-*************************************************************************************************
588
* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.
589
* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics
590
* including _tzcnt_u64. Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.
591
****************************************************************************************************/
592
#         if defined(__clang__) && (__clang_major__ < 10)
593
            /* Avoid undefined clang-cl intrinsics issue.
594
             * See https://github.com/lz4/lz4/pull/1017 for details. */
595
            return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;
596
#         else
597
            /* x64 CPUs without BMI support interpret `TZCNT` as `REP BSF` */
598
            return (unsigned)_tzcnt_u64(val) >> 3;
599
#         endif
600
#       elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
601
            unsigned long r = 0;
602
            _BitScanForward64(&r, (U64)val);
603
            return (unsigned)r >> 3;
604
#       elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
605
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
606
                                        !defined(LZ4_FORCE_SW_BITCOUNT)
607
            return (unsigned)__builtin_ctzll((U64)val) >> 3;
608
#       else
609
            const U64 m = 0x0101010101010101ULL;
610
            val ^= val - 1;
611
            return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
612
#       endif
613
887M
        } else /* 32 bits */ {
614
#       if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
615
            unsigned long r;
616
            _BitScanForward(&r, (U32)val);
617
            return (unsigned)r >> 3;
618
#       elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
619
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
620
                        !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
621
            return (unsigned)__builtin_ctz((U32)val) >> 3;
622
#       else
623
            const U32 m = 0x01010101;
624
            return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
625
#       endif
626
0
        }
627
887M
    } else   /* Big Endian CPU */ {
628
0
        if (sizeof(val)==8) {
629
0
#       if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
630
0
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
631
0
                        !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
632
0
            return (unsigned)__builtin_clzll((U64)val) >> 3;
633
#       else
634
#if 1
635
            /* this method is probably faster,
636
             * but adds a 128-byte lookup table */
637
            static const unsigned char ctz7_tab[128] = {
638
                7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
639
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
640
                5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
641
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
642
                6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
643
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
644
                5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
645
                4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
646
            };
647
            U64 const mask = 0x0101010101010101ULL;
648
            U64 const t = (((val >> 8) - mask) | val) & mask;
649
            return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
650
#else
651
            /* this method doesn't consume memory space like the previous one,
652
             * but it contains several branches
653
             * that may end up slowing execution */
654
            static const U32 by32 = sizeof(val)*4;  /* 32 on 64 bits (goal), 16 on 32 bits.
655
            Just to avoid some static analyzers complaining about a shift by 32 on 32-bit targets.
656
            Note that this code path is never triggered in 32-bit mode. */
657
            unsigned r;
658
            if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
659
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
660
            r += (!val);
661
            return r;
662
#endif
663
#       endif
664
0
        } else /* 32 bits */ {
665
0
#       if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
666
0
                            ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
667
0
                                        !defined(LZ4_FORCE_SW_BITCOUNT)
668
0
            return (unsigned)__builtin_clz((U32)val) >> 3;
669
#       else
670
            val >>= 8;
671
            val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
672
              (val + 0x00FF0000)) >> 24;
673
            return (unsigned)val ^ 3;
674
#       endif
675
0
        }
676
0
    }
677
887M
}
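In effect, the function converts "index of the first differing byte" into a bit scan: XOR the two words, find the lowest set bit (little-endian) or highest (big-endian), and divide by 8. A hedged sketch:

static unsigned common_bytes_demo(void)
{
    reg_t a, b;
    LZ4_memcpy(&a, "abcdefgh", sizeof(a));
    LZ4_memcpy(&b, "abcXefgh", sizeof(b));
    return LZ4_NbCommonBytes(a ^ b);   /* 3: the strings agree on bytes 0..2 */
}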
678
679
680
9.55G
#define STEPSIZE sizeof(reg_t)
681
LZ4_FORCE_INLINE
682
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
683
471M
{
684
471M
    const BYTE* const pStart = pIn;
685
686
471M
    if (likely(pIn < pInLimit-(STEPSIZE-1))) {
687
468M
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
688
468M
        if (!diff) {
689
126M
            pIn+=STEPSIZE; pMatch+=STEPSIZE;
690
342M
        } else {
691
342M
            return LZ4_NbCommonBytes(diff);
692
342M
    }   }
693
694
4.75G
    while (likely(pIn < pInLimit-(STEPSIZE-1))) {
695
4.75G
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
696
4.75G
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
697
111M
        pIn += LZ4_NbCommonBytes(diff);
698
111M
        return (unsigned)(pIn - pStart);
699
4.75G
    }
700
701
17.3M
    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
702
17.3M
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
703
17.3M
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
704
17.3M
    return (unsigned)(pIn - pStart);
705
128M
}
Unexecuted instantiation: compress_frame_fuzzer.c:LZ4_count
Unexecuted instantiation: lz4_helpers.c:LZ4_count
Unexecuted instantiation: fuzz_data_producer.c:LZ4_count
lz4hc.c:LZ4_count
Line
Count
Source
683
445M
{
684
445M
    const BYTE* const pStart = pIn;
685
686
445M
    if (likely(pIn < pInLimit-(STEPSIZE-1))) {
687
443M
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
688
443M
        if (!diff) {
689
117M
            pIn+=STEPSIZE; pMatch+=STEPSIZE;
690
326M
        } else {
691
326M
            return LZ4_NbCommonBytes(diff);
692
326M
    }   }
693
694
4.59G
    while (likely(pIn < pInLimit-(STEPSIZE-1))) {
695
4.59G
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
696
4.59G
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
697
102M
        pIn += LZ4_NbCommonBytes(diff);
698
102M
        return (unsigned)(pIn - pStart);
699
4.59G
    }
700
701
17.1M
    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
702
17.1M
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
703
17.1M
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
704
17.1M
    return (unsigned)(pIn - pStart);
705
119M
}
lz4.c:LZ4_count
Line
Count
Source
683
25.5M
{
684
25.5M
    const BYTE* const pStart = pIn;
685
686
25.5M
    if (likely(pIn < pInLimit-(STEPSIZE-1))) {
687
25.5M
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
688
25.5M
        if (!diff) {
689
9.16M
            pIn+=STEPSIZE; pMatch+=STEPSIZE;
690
16.3M
        } else {
691
16.3M
            return LZ4_NbCommonBytes(diff);
692
16.3M
    }   }
693
694
161M
    while (likely(pIn < pInLimit-(STEPSIZE-1))) {
695
161M
        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
696
161M
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
697
9.08M
        pIn += LZ4_NbCommonBytes(diff);
698
9.08M
        return (unsigned)(pIn - pStart);
699
161M
    }
700
701
132k
    if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
702
132k
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
703
132k
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
704
132k
    return (unsigned)(pIn - pStart);
705
9.21M
}
Unexecuted instantiation: round_trip_stream_fuzzer.c:LZ4_count
Unexecuted instantiation: decompress_fuzzer.c:LZ4_count
Unexecuted instantiation: round_trip_hc_fuzzer.c:LZ4_count
Unexecuted instantiation: round_trip_frame_uncompressed_fuzzer.c:LZ4_count
Unexecuted instantiation: decompress_frame_fuzzer.c:LZ4_count
Unexecuted instantiation: round_trip_fuzzer.c:LZ4_count
Unexecuted instantiation: compress_fuzzer.c:LZ4_count
Unexecuted instantiation: compress_hc_fuzzer.c:LZ4_count
Unexecuted instantiation: round_trip_frame_fuzzer.c:LZ4_count
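LZ4_count() extends a match as far as possible: one word compare up front (the common short-match case), a word-at-a-time loop, then 4/2/1-byte tail steps near pInLimit so it never reads past the buffer. A hedged standalone restatement of that stepping strategy, again assuming a little-endian host and GCC/Clang builtins (match_length is an invented name):

#include <stdint.h>
#include <string.h>

static unsigned match_length(const unsigned char* in,
                             const unsigned char* match,
                             const unsigned char* inLimit)
{
    const unsigned char* const start = in;
    /* wide compares while a full 8-byte load still fits before inLimit */
    while (in + 8 <= inLimit) {
        uint64_t a, b;
        memcpy(&a, in, 8);
        memcpy(&b, match, 8);
        if (a != b)   /* locate the first differing byte inside the word */
            return (unsigned)(in - start) + ((unsigned)__builtin_ctzll(a ^ b) >> 3);
        in += 8; match += 8;
    }
    /* byte-wise tail, standing in for the 4/2/1 steps above */
    while (in < inLimit && *in == *match) { in++; match++; }
    return (unsigned)(in - start);
}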
706
707
708
#ifndef LZ4_COMMONDEFS_ONLY
709
/*-************************************
710
*  Local Constants
711
**************************************/
712
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
713
static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression runs slower on incompressible data */
714
715
716
/*-************************************
717
*  Local Structures and types
718
**************************************/
719
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
720
721
/**
722
 * This enum distinguishes several different modes of accessing previous
723
 * content in the stream.
724
 *
725
 * - noDict        : There is no preceding content.
726
 * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
727
 *                   being compressed are valid and refer to the preceding
728
 *                   content (of length ctx->dictSize), which is available
729
 *                   contiguously preceding in memory the content currently
730
 *                   being compressed.
731
 * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
732
 *                   else in memory, starting at ctx->dictionary with length
733
 *                   ctx->dictSize.
734
 * - usingDictCtx  : Everything concerning the preceding content is
735
 *                   in a separate context, pointed to by ctx->dictCtx.
736
 *                   ctx->dictionary, ctx->dictSize, and table entries
737
 *                   in the current context that refer to positions
738
 *                   preceding the beginning of the current compression are
739
 *                   ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
740
 *                   ->dictSize describe the location and size of the preceding
741
 *                   content, and matches are found by looking in the ctx
742
 *                   ->dictCtx->hashTable.
743
 */
744
typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
745
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
746
747
748
/*-************************************
749
*  Local Utils
750
**************************************/
751
0
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
752
0
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
753
436k
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
754
6.08k
int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }
755
756
757
/*-****************************************
758
*  Internal Definitions, used only in Tests
759
*******************************************/
760
#if defined (__cplusplus)
761
extern "C" {
762
#endif
763
764
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
765
766
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
767
                                     int compressedSize, int maxOutputSize,
768
                                     const void* dictStart, size_t dictSize);
769
int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
770
                                     int compressedSize, int targetOutputSize, int dstCapacity,
771
                                     const void* dictStart, size_t dictSize);
772
#if defined (__cplusplus)
773
}
774
#endif
775
776
/*-******************************
777
*  Compression functions
778
********************************/
779
LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
780
19.2M
{
781
19.2M
    if (tableType == byU16)
782
19.2M
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
783
0
    else
784
0
        return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
785
19.2M
}
786
787
LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
788
228M
{
789
228M
    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
790
228M
    if (LZ4_isLittleEndian()) {
791
228M
        const U64 prime5bytes = 889523592379ULL;
792
228M
        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
793
228M
    } else {
794
0
        const U64 prime8bytes = 11400714785074694791ULL;
795
0
        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
796
0
    }
797
228M
}
798
799
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
800
248M
{
801
248M
    if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
802
803
#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
804
    return LZ4_hash4(LZ4_readLE32(p), tableType);
805
#else
806
19.2M
    return LZ4_hash4(LZ4_read32(p), tableType);
807
248M
#endif
808
248M
}
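Both hash routines are multiplicative ("Fibonacci-style") hashes: multiply the raw byte pattern by a large odd constant so that input entropy spreads into the high bits, then keep the top hashLog bits as the table index. A sketch of the 4-byte path, assuming the library-default LZ4_MEMORY_USAGE of 14, i.e. LZ4_HASHLOG == 12:

#include <assert.h>
#include <stdint.h>

#define MINMATCH 4
#define HASHLOG  12   /* assumption: LZ4_MEMORY_USAGE == 14 gives LZ4_HASHLOG == 12 */

static uint32_t hash4(uint32_t sequence)
{
    /* keep the top HASHLOG bits of the 32-bit product */
    return (sequence * 2654435761U) >> ((MINMATCH * 8) - HASHLOG);
}

int main(void)
{
    assert(hash4(0x12345678) < (1u << HASHLOG));      /* always a valid byU32 index */
    assert(hash4(0x12345678) == hash4(0x12345678));   /* deterministic */
    return 0;
}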
809
810
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
811
0
{
812
0
    switch (tableType)
813
0
    {
814
0
    default: /* fallthrough */
815
0
    case clearedTable: { /* illegal! */ assert(0); return; }
816
0
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
817
0
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
818
0
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
819
0
    }
820
0
}
821
822
LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
823
236M
{
824
236M
    switch (tableType)
825
236M
    {
826
0
    default: /* fallthrough */
827
0
    case clearedTable: /* fallthrough */
828
0
    case byPtr: { /* illegal! */ assert(0); return; }
829
218M
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
830
18.1M
    case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
831
236M
    }
832
236M
}
833
834
/* LZ4_putPosition*() : only used in byPtr mode */
835
LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
836
                                  void* tableBase, tableType_t const tableType)
837
0
{
838
0
    const BYTE** const hashTable = (const BYTE**)tableBase;
839
0
    assert(tableType == byPtr); (void)tableType;
840
0
    hashTable[h] = p;
841
0
}
842
843
LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType)
844
0
{
845
0
    U32 const h = LZ4_hashPosition(p, tableType);
846
0
    LZ4_putPositionOnHash(p, h, tableBase, tableType);
847
0
}
848
849
/* LZ4_getIndexOnHash() :
850
 * Index of match position registered in hash table.
851
 * hash position must be calculated by using base+index, or dictBase+index.
852
 * Assumption 1 : only valid if tableType == byU32 or byU16.
853
 * Assumption 2 : h is presumed valid (within limits of hash table)
854
 */
855
LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
856
151M
{
857
151M
    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
858
151M
    if (tableType == byU32) {
859
136M
        const U32* const hashTable = (const U32*) tableBase;
860
136M
        assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
861
136M
        return hashTable[h];
862
136M
    }
863
14.3M
    if (tableType == byU16) {
864
14.3M
        const U16* const hashTable = (const U16*) tableBase;
865
14.3M
        assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
866
14.3M
        return hashTable[h];
867
14.3M
    }
868
0
    assert(0); return 0;  /* forbidden case */
869
0
}
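The two asserts above encode the table geometry: for the same LZ4_HASHTABLESIZE byte budget, byU32 holds 1 << (LZ4_MEMORY_USAGE-2) four-byte indexes while byU16 holds 1 << (LZ4_MEMORY_USAGE-1) two-byte indexes, which is why the byU16 path can afford one extra bit of hash. A small check under the default configuration (assumes 4-byte unsigned int and 2-byte unsigned short):

#include <assert.h>

#define LZ4_MEMORY_USAGE  14                       /* assumption: library default */
#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)  /* as defined in lz4.h */

int main(void)
{
    /* both layouts fill exactly the same number of bytes */
    assert((1u << (LZ4_MEMORY_USAGE - 2)) * sizeof(unsigned int)   == LZ4_HASHTABLESIZE);
    assert((1u << (LZ4_MEMORY_USAGE - 1)) * sizeof(unsigned short) == LZ4_HASHTABLESIZE);
    return 0;
}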
870
871
static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType)
872
0
{
873
0
    assert(tableType == byPtr); (void)tableType;
874
0
    { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
875
0
}
876
877
LZ4_FORCE_INLINE const BYTE*
878
LZ4_getPosition(const BYTE* p,
879
                const void* tableBase, tableType_t tableType)
880
0
{
881
0
    U32 const h = LZ4_hashPosition(p, tableType);
882
0
    return LZ4_getPositionOnHash(h, tableBase, tableType);
883
0
}
884
885
LZ4_FORCE_INLINE void
886
LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
887
           const int inputSize,
888
131k
           const tableType_t tableType) {
889
    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
890
     * therefore safe to use no matter what mode we're in. Otherwise, we figure
891
     * out if it's safe to leave as is or whether it needs to be reset.
892
     */
893
131k
    if ((tableType_t)cctx->tableType != clearedTable) {
894
111k
        assert(inputSize >= 0);
895
111k
        if ((tableType_t)cctx->tableType != tableType
896
111k
          || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
897
111k
          || ((tableType == byU32) && cctx->currentOffset > 1 GB)
898
111k
          || tableType == byPtr
899
111k
          || inputSize >= 4 KB)
900
2.52k
        {
901
2.52k
            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", (void*)cctx);
902
2.52k
            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
903
2.52k
            cctx->currentOffset = 0;
904
2.52k
            cctx->tableType = (U32)clearedTable;
905
109k
        } else {
906
109k
            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
907
109k
        }
908
111k
    }
909
910
    /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
911
     * is faster than compressing without a gap.
912
     * However, compressing with currentOffset == 0 is faster still,
913
     * so we preserve that case.
914
     */
915
131k
    if (cctx->currentOffset != 0 && tableType == byU32) {
916
108k
        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
917
108k
        cctx->currentOffset += 64 KB;
918
108k
    }
919
920
    /* Finally, clear history */
921
131k
    cctx->dictCtx = NULL;
922
131k
    cctx->dictionary = NULL;
923
131k
    cctx->dictSize = 0;
924
131k
}
925
926
/** LZ4_compress_generic_validated() :
927
 *  inlined, to ensure branches are decided at compilation time.
928
 *  The following conditions are presumed already validated:
929
 *  - source != NULL
930
 *  - inputSize > 0
931
 */
932
LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
933
                 LZ4_stream_t_internal* const cctx,
934
                 const char* const source,
935
                 char* const dest,
936
                 const int inputSize,
937
                 int*  inputConsumed, /* only written when outputDirective == fillOutput */
938
                 const int maxOutputSize,
939
                 const limitedOutput_directive outputDirective,
940
                 const tableType_t tableType,
941
                 const dict_directive dictDirective,
942
                 const dictIssue_directive dictIssue,
943
                 const int acceleration)
944
296k
{
945
296k
    int result;
946
296k
    const BYTE* ip = (const BYTE*)source;
947
948
296k
    U32 const startIndex = cctx->currentOffset;
949
296k
    const BYTE* base = (const BYTE*)source - startIndex;
950
296k
    const BYTE* lowLimit;
951
952
296k
    const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
953
296k
    const BYTE* const dictionary =
954
296k
        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
955
296k
    const U32 dictSize =
956
296k
        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
957
296k
    const U32 dictDelta =
958
296k
        (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with indexes in current context */
959
960
296k
    int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
961
296k
    U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
962
296k
    const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
963
296k
    const BYTE* anchor = (const BYTE*) source;
964
296k
    const BYTE* const iend = ip + inputSize;
965
296k
    const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
966
296k
    const BYTE* const matchlimit = iend - LASTLITERALS;
967
968
    /* the dictCtx currentOffset is indexed on the start of the dictionary,
969
     * while a dictionary in the current context precedes the currentOffset */
970
296k
    const BYTE* dictBase = (dictionary == NULL) ? NULL :
971
296k
                           (dictDirective == usingDictCtx) ?
972
8.59k
                            dictionary + dictSize - dictCtx->currentOffset :
973
284k
                            dictionary + dictSize - startIndex;
974
975
296k
    BYTE* op = (BYTE*) dest;
976
296k
    BYTE* const olimit = op + maxOutputSize;
977
978
296k
    U32 offset = 0;
979
296k
    U32 forwardH;
980
981
296k
    DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
982
296k
    assert(ip != NULL);
983
296k
    if (tableType == byU16) assert(inputSize<LZ4_64Klimit);  /* Size too large (not within 64K limit) */
984
296k
    if (tableType == byPtr) assert(dictDirective==noDict);   /* only supported use case with byPtr */
985
    /* If init conditions are not met, we don't have to mark stream
986
     * as having dirty context, since no action was taken yet */
987
296k
    if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
988
296k
    assert(acceleration >= 1);
989
990
296k
    lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
991
992
    /* Update context state */
993
296k
    if (dictDirective == usingDictCtx) {
994
        /* Subsequent linked blocks can't use the dictionary. */
995
        /* Instead, they use the block we just compressed. */
996
8.59k
        cctx->dictCtx = NULL;
997
8.59k
        cctx->dictSize = (U32)inputSize;
998
287k
    } else {
999
287k
        cctx->dictSize += (U32)inputSize;
1000
287k
    }
1001
296k
    cctx->currentOffset += (U32)inputSize;
1002
296k
    cctx->tableType = (U32)tableType;
1003
1004
296k
    if (inputSize<LZ4_minLength) goto _last_literals;        /* Input too small, no compression (all literals) */
1005
1006
    /* First Byte */
1007
169k
    {   U32 const h = LZ4_hashPosition(ip, tableType);
1008
169k
        if (tableType == byPtr) {
1009
0
            LZ4_putPositionOnHash(ip, h, cctx->hashTable, byPtr);
1010
169k
        } else {
1011
169k
            LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);
1012
169k
    }   }
1013
169k
    ip++; forwardH = LZ4_hashPosition(ip, tableType);
1014
1015
    /* Main Loop */
1016
11.8M
    for ( ; ; ) {
1017
11.8M
        const BYTE* match;
1018
11.8M
        BYTE* token;
1019
11.8M
        const BYTE* filledIp;
1020
1021
        /* Find a match */
1022
11.8M
        if (tableType == byPtr) {
1023
0
            const BYTE* forwardIp = ip;
1024
0
            int step = 1;
1025
0
            int searchMatchNb = acceleration << LZ4_skipTrigger;
1026
0
            do {
1027
0
                U32 const h = forwardH;
1028
0
                ip = forwardIp;
1029
0
                forwardIp += step;
1030
0
                step = (searchMatchNb++ >> LZ4_skipTrigger);
1031
1032
0
                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
1033
0
                assert(ip < mflimitPlusOne);
1034
1035
0
                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);
1036
0
                forwardH = LZ4_hashPosition(forwardIp, tableType);
1037
0
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);
1038
1039
0
            } while ( (match+LZ4_DISTANCE_MAX < ip)
1040
0
                   || (LZ4_read32(match) != LZ4_read32(ip)) );
1041
1042
11.8M
        } else {   /* byU32, byU16 */
1043
1044
11.8M
            const BYTE* forwardIp = ip;
1045
11.8M
            int step = 1;
1046
11.8M
            int searchMatchNb = acceleration << LZ4_skipTrigger;
1047
125M
            do {
1048
125M
                U32 const h = forwardH;
1049
125M
                U32 const current = (U32)(forwardIp - base);
1050
125M
                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
1051
125M
                assert(matchIndex <= current);
1052
125M
                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
1053
125M
                ip = forwardIp;
1054
125M
                forwardIp += step;
1055
125M
                step = (searchMatchNb++ >> LZ4_skipTrigger);
1056
1057
125M
                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
1058
125M
                assert(ip < mflimitPlusOne);
1059
1060
125M
                if (dictDirective == usingDictCtx) {
1061
282k
                    if (matchIndex < startIndex) {
1062
                        /* there was no match, try the dictionary */
1063
239k
                        assert(tableType == byU32);
1064
239k
                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
1065
239k
                        match = dictBase + matchIndex;
1066
239k
                        matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
1067
239k
                        lowLimit = dictionary;
1068
239k
                    } else {
1069
43.3k
                        match = base + matchIndex;
1070
43.3k
                        lowLimit = (const BYTE*)source;
1071
43.3k
                    }
1072
125M
                } else if (dictDirective == usingExtDict) {
1073
33.2M
                    if (matchIndex < startIndex) {
1074
10.8M
                        DEBUGLOG(7, "extDict candidate: matchIndex=%5u  <  startIndex=%5u", matchIndex, startIndex);
1075
10.8M
                        assert(startIndex - matchIndex >= MINMATCH);
1076
10.8M
                        assert(dictBase);
1077
10.8M
                        match = dictBase + matchIndex;
1078
10.8M
                        lowLimit = dictionary;
1079
22.3M
                    } else {
1080
22.3M
                        match = base + matchIndex;
1081
22.3M
                        lowLimit = (const BYTE*)source;
1082
22.3M
                    }
1083
92.0M
                } else {   /* single continuous memory segment */
1084
92.0M
                    match = base + matchIndex;
1085
92.0M
                }
1086
125M
                forwardH = LZ4_hashPosition(forwardIp, tableType);
1087
125M
                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
1088
1089
125M
                DEBUGLOG(7, "candidate at pos=%u  (offset=%u \n", matchIndex, current - matchIndex);
1090
125M
                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; }    /* match outside of valid area */
1091
114M
                assert(matchIndex < current);
1092
114M
                if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
1093
114M
                  && (matchIndex+LZ4_DISTANCE_MAX < current)) {
1094
10.6M
                    continue;
1095
10.6M
                } /* too far */
1096
103M
                assert((current - matchIndex) <= LZ4_DISTANCE_MAX);  /* match now expected within distance */
1097
1098
103M
                if (LZ4_read32(match) == LZ4_read32(ip)) {
1099
11.7M
                    if (maybe_extMem) offset = current - matchIndex;
1100
11.7M
                    break;   /* match found */
1101
11.7M
                }
1102
1103
113M
            } while(1);
1104
11.8M
        }
1105
1106
        /* Catch up */
1107
11.7M
        filledIp = ip;
1108
11.7M
        assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */
1109
11.7M
        if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {
1110
5.34M
            do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));
1111
2.09M
        }
1112
1113
        /* Encode Literals */
1114
11.7M
        {   unsigned const litLength = (unsigned)(ip - anchor);
1115
11.7M
            token = op++;
1116
11.7M
            if ((outputDirective == limitedOutput) &&  /* Check output buffer overflow */
1117
11.7M
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
1118
288
                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1119
288
            }
1120
11.7M
            if ((outputDirective == fillOutput) &&
1121
11.7M
                (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
1122
232
                op--;
1123
232
                goto _last_literals;
1124
232
            }
1125
11.7M
            if (litLength >= RUN_MASK) {
1126
1.49M
                unsigned len = litLength - RUN_MASK;
1127
1.49M
                *token = (RUN_MASK<<ML_BITS);
1128
2.29M
                for(; len >= 255 ; len-=255) *op++ = 255;
1129
1.49M
                *op++ = (BYTE)len;
1130
1.49M
            }
1131
10.2M
            else *token = (BYTE)(litLength<<ML_BITS);
1132
1133
            /* Copy Literals */
1134
11.7M
            LZ4_wildCopy8(op, anchor, op+litLength);
1135
11.7M
            op+=litLength;
1136
11.7M
            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
1137
11.7M
                        (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
1138
11.7M
        }
1139
1140
25.5M
_next_match:
1141
        /* at this stage, the following variables must be correctly set :
1142
         * - ip : at start of LZ operation
1143
         * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
1144
         * - offset : only set/used if maybe_extMem==1 (constant)
1145
         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
1146
         * - token and *token : position to write the 4 bits of match length; the upper 4 bits (literal length) are assumed already written
1147
         */
1148
1149
25.5M
        if ((outputDirective == fillOutput) &&
1150
25.5M
            (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
1151
            /* the match was too close to the end, rewind and go to last literals */
1152
178
            op = token;
1153
178
            goto _last_literals;
1154
178
        }
1155
1156
        /* Encode Offset */
1157
25.5M
        if (maybe_extMem) {   /* static test */
1158
7.10M
            DEBUGLOG(6, "             with offset=%u  (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
1159
7.10M
            assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
1160
7.10M
            LZ4_writeLE16(op, (U16)offset); op+=2;
1161
18.4M
        } else  {
1162
18.4M
            DEBUGLOG(6, "             with offset=%u  (same segment)", (U32)(ip - match));
1163
18.4M
            assert(ip-match <= LZ4_DISTANCE_MAX);
1164
18.4M
            LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
1165
18.4M
        }
1166
1167
        /* Encode MatchLength */
1168
25.5M
        {   unsigned matchCode;
1169
1170
25.5M
            if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
1171
25.5M
              && (lowLimit==dictionary) /* match within extDict */ ) {
1172
481k
                const BYTE* limit = ip + (dictEnd-match);
1173
481k
                assert(dictEnd > match);
1174
481k
                if (limit > matchlimit) limit = matchlimit;
1175
481k
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
1176
481k
                ip += (size_t)matchCode + MINMATCH;
1177
481k
                if (ip==limit) {
1178
18.2k
                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
1179
18.2k
                    matchCode += more;
1180
18.2k
                    ip += more;
1181
18.2k
                }
1182
481k
                DEBUGLOG(6, "             with matchLength=%u starting in extDict", matchCode+MINMATCH);
1183
25.0M
            } else {
1184
25.0M
                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
1185
25.0M
                ip += (size_t)matchCode + MINMATCH;
1186
25.0M
                DEBUGLOG(6, "             with matchLength=%u", matchCode+MINMATCH);
1187
25.0M
            }
1188
1189
25.5M
            if ((outputDirective) &&    /* Check output buffer overflow */
1190
25.5M
                (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
1191
335
                if (outputDirective == fillOutput) {
1192
                    /* Match description too long : reduce it */
1193
122
                    U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
1194
122
                    ip -= matchCode - newMatchCode;
1195
122
                    assert(newMatchCode < matchCode);
1196
122
                    matchCode = newMatchCode;
1197
122
                    if (unlikely(ip <= filledIp)) {
1198
                        /* We have already filled up to filledIp so if ip ends up less than filledIp
1199
                         * we have positions in the hash table beyond the current position. This is
1200
                         * a problem if we reuse the hash table. So we have to remove these positions
1201
                         * from the hash table.
1202
                         */
1203
0
                        const BYTE* ptr;
1204
0
                        DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
1205
0
                        for (ptr = ip; ptr <= filledIp; ++ptr) {
1206
0
                            U32 const h = LZ4_hashPosition(ptr, tableType);
1207
0
                            LZ4_clearHash(h, cctx->hashTable, tableType);
1208
0
                        }
1209
0
                    }
1210
213
                } else {
1211
213
                    assert(outputDirective == limitedOutput);
1212
213
                    return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1213
213
                }
1214
335
            }
1215
25.5M
            if (matchCode >= ML_MASK) {
1216
6.83M
                *token += ML_MASK;
1217
6.83M
                matchCode -= ML_MASK;
1218
6.83M
                LZ4_write32(op, 0xFFFFFFFF);
1219
7.60M
                while (matchCode >= 4*255) {
1220
768k
                    op+=4;
1221
768k
                    LZ4_write32(op, 0xFFFFFFFF);
1222
768k
                    matchCode -= 4*255;
1223
768k
                }
1224
6.83M
                op += matchCode / 255;
1225
6.83M
                *op++ = (BYTE)(matchCode % 255);
1226
6.83M
            } else
1227
18.7M
                *token += (BYTE)(matchCode);
1228
25.5M
        }
1229
        /* Ensure we have enough space for the last literals. */
1230
0
        assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
1231
1232
25.5M
        anchor = ip;
1233
1234
        /* Test end of chunk */
1235
25.5M
        if (ip >= mflimitPlusOne) break;
1236
1237
        /* Fill table */
1238
25.4M
        {   U32 const h = LZ4_hashPosition(ip-2, tableType);
1239
25.4M
            if (tableType == byPtr) {
1240
0
                LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);
1241
25.4M
            } else {
1242
25.4M
                U32 const idx = (U32)((ip-2) - base);
1243
25.4M
                LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);
1244
25.4M
        }   }
1245
1246
        /* Test next position */
1247
25.4M
        if (tableType == byPtr) {
1248
1249
0
            match = LZ4_getPosition(ip, cctx->hashTable, tableType);
1250
0
            LZ4_putPosition(ip, cctx->hashTable, tableType);
1251
0
            if ( (match+LZ4_DISTANCE_MAX >= ip)
1252
0
              && (LZ4_read32(match) == LZ4_read32(ip)) )
1253
0
            { token=op++; *token=0; goto _next_match; }
1254
1255
25.4M
        } else {   /* byU32, byU16 */
1256
1257
25.4M
            U32 const h = LZ4_hashPosition(ip, tableType);
1258
25.4M
            U32 const current = (U32)(ip-base);
1259
25.4M
            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
1260
25.4M
            assert(matchIndex < current);
1261
25.4M
            if (dictDirective == usingDictCtx) {
1262
61.7k
                if (matchIndex < startIndex) {
1263
                    /* there was no match, try the dictionary */
1264
32.1k
                    assert(tableType == byU32);
1265
32.1k
                    matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
1266
32.1k
                    match = dictBase + matchIndex;
1267
32.1k
                    lowLimit = dictionary;   /* required for match length counter */
1268
32.1k
                    matchIndex += dictDelta;
1269
32.1k
                } else {
1270
29.6k
                    match = base + matchIndex;
1271
29.6k
                    lowLimit = (const BYTE*)source;  /* required for match length counter */
1272
29.6k
                }
1273
25.3M
            } else if (dictDirective==usingExtDict) {
1274
6.99M
                if (matchIndex < startIndex) {
1275
1.29M
                    assert(dictBase);
1276
1.29M
                    match = dictBase + matchIndex;
1277
1.29M
                    lowLimit = dictionary;   /* required for match length counter */
1278
5.70M
                } else {
1279
5.70M
                    match = base + matchIndex;
1280
5.70M
                    lowLimit = (const BYTE*)source;   /* required for match length counter */
1281
5.70M
                }
1282
18.3M
            } else {   /* single memory segment */
1283
18.3M
                match = base + matchIndex;
1284
18.3M
            }
1285
25.4M
            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
1286
25.4M
            assert(matchIndex < current);
1287
25.4M
            if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
1288
25.4M
              && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
1289
25.4M
              && (LZ4_read32(match) == LZ4_read32(ip)) ) {
1290
13.7M
                token=op++;
1291
13.7M
                *token=0;
1292
13.7M
                if (maybe_extMem) offset = current - matchIndex;
1293
13.7M
                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
1294
13.7M
                            (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
1295
13.7M
                goto _next_match;
1296
13.7M
            }
1297
25.4M
        }
1298
1299
        /* Prepare next loop */
1300
11.6M
        forwardH = LZ4_hashPosition(++ip, tableType);
1301
1302
11.6M
    }
1303
1304
295k
_last_literals:
1305
    /* Encode Last Literals */
1306
295k
    {   size_t lastRun = (size_t)(iend - anchor);
1307
295k
        if ( (outputDirective) &&  /* Check output buffer overflow */
1308
295k
            (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
1309
2.67k
            if (outputDirective == fillOutput) {
1310
                /* adapt lastRun to fill 'dst' */
1311
577
                assert(olimit >= op);
1312
577
                lastRun  = (size_t)(olimit-op) - 1/*token*/;
1313
577
                lastRun -= (lastRun + 256 - RUN_MASK) / 256;  /*additional length tokens*/
1314
2.09k
            } else {
1315
2.09k
                assert(outputDirective == limitedOutput);
1316
2.09k
                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1317
2.09k
            }
1318
2.67k
        }
1319
293k
        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
1320
293k
        if (lastRun >= RUN_MASK) {
1321
31.2k
            size_t accumulator = lastRun - RUN_MASK;
1322
31.2k
            *op++ = RUN_MASK << ML_BITS;
1323
492k
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
1324
31.2k
            *op++ = (BYTE) accumulator;
1325
262k
        } else {
1326
262k
            *op++ = (BYTE)(lastRun<<ML_BITS);
1327
262k
        }
1328
293k
        LZ4_memcpy(op, anchor, lastRun);
1329
293k
        ip = anchor + lastRun;
1330
293k
        op += lastRun;
1331
293k
    }
1332
1333
293k
    if (outputDirective == fillOutput) {
1334
1.41k
        *inputConsumed = (int) (((const char*)ip)-source);
1335
1.41k
    }
1336
293k
    result = (int)(((char*)op) - dest);
1337
293k
    assert(result > 0);
1338
293k
    DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
1339
293k
    return result;
1340
293k
}
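Everything this loop emits follows the LZ4 sequence layout: a token byte with the literal length in its high 4 bits and (match length - MINMATCH) in its low 4 bits, optional 255-run bytes extending the literal length when that nibble saturates at 15, the literals, a 2-byte little-endian offset, then optional 255-run bytes extending the match length. A small sketch of decoding one such length field (read_length is an illustrative helper, not lz4.c code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Decode a token-prefixed length: a nibble of 15 (RUN_MASK / ML_MASK) means
 * "keep adding the following bytes until one of them is != 255".
 * Advances *p past any extension bytes. */
static size_t read_length(unsigned nibble, const uint8_t** p)
{
    size_t len = nibble;
    if (nibble == 15) {
        uint8_t b;
        do { b = *(*p)++; len += b; } while (b == 255);
    }
    return len;
}

int main(void)
{
    const uint8_t ext[] = { 255, 20 };   /* one full run + terminator */
    const uint8_t* p = ext;
    assert(read_length(15, &p) == 15 + 255 + 20);   /* == 290 */
    assert(p == ext + 2);
    return 0;
}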
1341
1342
/** LZ4_compress_generic() :
1343
 *  inlined, to ensure branches are decided at compilation time;
1344
 *  takes care of the src == NULL, srcSize == 0 special case,
1345
 *  and forwards the rest to LZ4_compress_generic_validated */
1346
LZ4_FORCE_INLINE int LZ4_compress_generic(
1347
                 LZ4_stream_t_internal* const cctx,
1348
                 const char* const src,
1349
                 char* const dst,
1350
                 const int srcSize,
1351
                 int *inputConsumed, /* only written when outputDirective == fillOutput */
1352
                 const int dstCapacity,
1353
                 const limitedOutput_directive outputDirective,
1354
                 const tableType_t tableType,
1355
                 const dict_directive dictDirective,
1356
                 const dictIssue_directive dictIssue,
1357
                 const int acceleration)
1358
341k
{
1359
341k
    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
1360
341k
                srcSize, dstCapacity);
1361
1362
341k
    if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; }  /* Unsupported srcSize, too large (or negative) */
1363
341k
    if (srcSize == 0) {   /* src == NULL supported if srcSize == 0 */
1364
45.3k
        if (outputDirective != notLimited && dstCapacity <= 0) return 0;  /* no output, can't write anything */
1365
45.3k
        DEBUGLOG(5, "Generating an empty block");
1366
45.3k
        assert(outputDirective == notLimited || dstCapacity >= 1);
1367
45.3k
        assert(dst != NULL);
1368
45.3k
        dst[0] = 0;
1369
45.3k
        if (outputDirective == fillOutput) {
1370
7
            assert (inputConsumed != NULL);
1371
7
            *inputConsumed = 0;
1372
7
        }
1373
45.3k
        return 1;
1374
45.3k
    }
1375
296k
    assert(src != NULL);
1376
1377
296k
    return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
1378
296k
                inputConsumed, /* only written into if outputDirective == fillOutput */
1379
296k
                dstCapacity, outputDirective,
1380
296k
                tableType, dictDirective, dictIssue, acceleration);
1381
296k
}
1382
1383
1384
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1385
4.26k
{
1386
4.26k
    LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
1387
4.26k
    assert(ctx != NULL);
1388
4.26k
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
1389
4.26k
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
1390
4.26k
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
1391
2.81k
        if (inputSize < LZ4_64Klimit) {
1392
2.52k
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
1393
2.52k
        } else {
1394
286
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1395
286
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1396
286
        }
1397
2.81k
    } else {
1398
1.45k
        if (inputSize < LZ4_64Klimit) {
1399
1.18k
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
1400
1.18k
        } else {
1401
267
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1402
267
            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
1403
267
        }
1404
1.45k
    }
1405
4.26k
}
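A hedged usage sketch for the entry point above: callers compressing many buffers can allocate the state once, sized by LZ4_sizeofState(), and reuse it across calls instead of paying LZ4_HEAPMODE-style allocation each time (demo is an invented name):

#include <stdlib.h>
#include "lz4.h"

int demo(const char* src, int srcSize, char* dst, int dstCapacity)
{
    void* const state = malloc((size_t)LZ4_sizeofState());  /* malloc alignment suffices */
    int csize = 0;
    if (state != NULL) {
        /* acceleration 1 == LZ4_ACCELERATION_DEFAULT */
        csize = LZ4_compress_fast_extState(state, src, dst, srcSize, dstCapacity, 1);
        free(state);
    }
    return csize;   /* 0 means failure (e.g. dst too small) */
}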
1406
1407
/**
1408
 * LZ4_compress_fast_extState_fastReset() :
1409
 * A variant of LZ4_compress_fast_extState().
1410
 *
1411
 * Using this variant avoids an expensive initialization step. It is only safe
1412
 * to call if the state buffer is known to be correctly initialized already
1413
 * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
1414
 * "correctly initialized").
1415
 */
1416
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
1417
6.40k
{
1418
6.40k
    LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;
1419
6.40k
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
1420
6.40k
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
1421
6.40k
    assert(ctx != NULL);
1422
1423
6.40k
    if (dstCapacity >= LZ4_compressBound(srcSize)) {
1424
0
        if (srcSize < LZ4_64Klimit) {
1425
0
            const tableType_t tableType = byU16;
1426
0
            LZ4_prepareTable(ctx, srcSize, tableType);
1427
0
            if (ctx->currentOffset) {
1428
0
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
1429
0
            } else {
1430
0
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1431
0
            }
1432
0
        } else {
1433
0
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1434
0
            LZ4_prepareTable(ctx, srcSize, tableType);
1435
0
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1436
0
        }
1437
6.40k
    } else {
1438
6.40k
        if (srcSize < LZ4_64Klimit) {
1439
5.77k
            const tableType_t tableType = byU16;
1440
5.77k
            LZ4_prepareTable(ctx, srcSize, tableType);
1441
5.77k
            if (ctx->currentOffset) {
1442
533
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
1443
5.24k
            } else {
1444
5.24k
                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
1445
5.24k
            }
1446
5.77k
        } else {
1447
631
            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1448
631
            LZ4_prepareTable(ctx, srcSize, tableType);
1449
631
            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
1450
631
        }
1451
6.40k
    }
1452
6.40k
}
1453
1454
1455
int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration)
1456
3.70k
{
1457
3.70k
    int result;
1458
#if (LZ4_HEAPMODE)
1459
    LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
1460
    if (ctxPtr == NULL) return 0;
1461
#else
1462
3.70k
    LZ4_stream_t ctx;
1463
3.70k
    LZ4_stream_t* const ctxPtr = &ctx;
1464
3.70k
#endif
1465
3.70k
    result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration);
1466
1467
#if (LZ4_HEAPMODE)
1468
    FREEMEM(ctxPtr);
1469
#endif
1470
3.70k
    return result;
1471
3.70k
}
1472
1473
1474
int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)
1475
3.70k
{
1476
3.70k
    return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
1477
3.70k
}
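Usage sketch for the one-shot API: when the destination is sized with LZ4_compressBound(), the notLimited fast path above applies and compression cannot fail for any srcSize <= LZ4_MAX_INPUT_SIZE (bounded_compress is an invented name; the caller frees *out):

#include <stdlib.h>
#include "lz4.h"

int bounded_compress(const char* src, int srcSize, char** out)
{
    int const bound = LZ4_compressBound(srcSize);   /* worst-case compressed size */
    char* const dst = (char*)malloc((size_t)bound);
    int csize;
    if (dst == NULL) return 0;
    csize = LZ4_compress_default(src, dst, srcSize, bound);  /* cannot fail here */
    *out = dst;                                     /* holds csize compressed bytes */
    return csize;
}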
1478
1479
1480
/* Note!: This function leaves the stream in an unclean/broken state!
1481
 * It is not safe to subsequently use the same state with a _fastReset() or
1482
 * _continue() call without resetting it. */
1483
static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
1484
1.98k
{
1485
1.98k
    void* const s = LZ4_initStream(state, sizeof (*state));
1486
1.98k
    assert(s != NULL); (void)s;
1487
1488
1.98k
    if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) {  /* compression success is guaranteed */
1489
563
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);
1490
1.42k
    } else {
1491
1.42k
        if (*srcSizePtr < LZ4_64Klimit) {
1492
1.16k
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);
1493
1.16k
        } else {
1494
257
            tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1495
257
            return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);
1496
257
    }   }
1497
1.98k
}
1498
1499
int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
1500
0
{
1501
0
    int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);
1502
    /* clean the state on exit */
1503
0
    LZ4_initStream(state, sizeof (LZ4_stream_t));
1504
0
    return r;
1505
0
}
1506
1507
1508
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
1509
1.98k
{
1510
#if (LZ4_HEAPMODE)
1511
    LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
1512
    if (ctx == NULL) return 0;
1513
#else
1514
1.98k
    LZ4_stream_t ctxBody;
1515
1.98k
    LZ4_stream_t* const ctx = &ctxBody;
1516
1.98k
#endif
1517
1518
1.98k
    int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);
1519
1520
#if (LZ4_HEAPMODE)
1521
    FREEMEM(ctx);
1522
#endif
1523
1.98k
    return result;
1524
1.98k
}
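Usage sketch for destSize mode: the destination capacity is fixed and *srcSizePtr is rewritten to report how much input actually fit, which suits packing fixed-size packets or storage sectors (fill_packet is an invented name):

#include <stdio.h>
#include "lz4.h"

void fill_packet(const char* src, int srcSize, char* packet, int packetCapacity)
{
    int consumed = srcSize;   /* in: input available; out: input actually used */
    int const written = LZ4_compress_destSize(src, packet, &consumed, packetCapacity);
    printf("packed %d of %d input bytes into %d/%d output bytes\n",
           consumed, srcSize, written, packetCapacity);
    /* any remaining srcSize - consumed bytes belong in the next packet */
}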
1525
1526
1527
1528
/*-******************************
1529
*  Streaming functions
1530
********************************/
1531
1532
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1533
LZ4_stream_t* LZ4_createStream(void)
1534
31.0k
{
1535
31.0k
    LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
1536
31.0k
    LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
1537
31.0k
    DEBUGLOG(4, "LZ4_createStream %p", (void*)lz4s);
1538
31.0k
    if (lz4s == NULL) return NULL;
1539
31.0k
    LZ4_initStream(lz4s, sizeof(*lz4s));
1540
31.0k
    return lz4s;
1541
31.0k
}
1542
#endif
1543
1544
static size_t LZ4_stream_t_alignment(void)
1545
41.2k
{
1546
41.2k
#if LZ4_ALIGN_TEST
1547
41.2k
    typedef struct { char c; LZ4_stream_t t; } t_a;
1548
41.2k
    return sizeof(t_a) - sizeof(LZ4_stream_t);
1549
#else
1550
    return 1;  /* effectively disabled */
1551
#endif
1552
41.2k
}
1553
1554
LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
1555
41.2k
{
1556
41.2k
    DEBUGLOG(5, "LZ4_initStream");
1557
41.2k
    if (buffer == NULL) { return NULL; }
1558
41.2k
    if (size < sizeof(LZ4_stream_t)) { return NULL; }
1559
41.2k
    if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
1560
41.2k
    MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
1561
41.2k
    return (LZ4_stream_t*)buffer;
1562
41.2k
}
1563
1564
/* resetStream is now deprecated,
1565
 * prefer initStream() which is more general */
1566
void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
1567
31.0k
{
1568
31.0k
    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", (void*)LZ4_stream);
1569
31.0k
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
1570
31.0k
}
1571
1572
124k
void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
1573
124k
    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
1574
124k
}
1575
1576
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1577
int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
1578
31.0k
{
1579
31.0k
    if (!LZ4_stream) return 0;   /* support free on NULL */
1580
31.0k
    DEBUGLOG(5, "LZ4_freeStream %p", (void*)LZ4_stream);
1581
31.0k
    FREEMEM(LZ4_stream);
1582
31.0k
    return (0);
1583
31.0k
}
1584
#endif
1585
1586
1587
typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;
1588
59.7M
#define HASH_UNIT sizeof(reg_t)
1589
int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,
1590
                    const char* dictionary, int dictSize,
1591
                    LoadDict_mode_e _ld)
1592
31.0k
{
1593
31.0k
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
1594
31.0k
    const tableType_t tableType = byU32;
1595
31.0k
    const BYTE* p = (const BYTE*)dictionary;
1596
31.0k
    const BYTE* const dictEnd = p + dictSize;
1597
31.0k
    U32 idx32;
1598
1599
31.0k
    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, (void*)dictionary, (void*)LZ4_dict);
1600
1601
    /* It's necessary to reset the context,
1602
     * and not just continue it with prepareTable()
1603
     * to avoid any risk of generating overflowing matchIndex
1604
     * when compressing using this dictionary */
1605
31.0k
    LZ4_resetStream(LZ4_dict);
1606
1607
    /* We always increment the offset by 64 KB, since, if the dict is longer,
1608
     * we truncate it to the last 64k, and if it's shorter, we still want to
1609
     * advance by a whole window length so we can provide the guarantee that
1610
     * there are only valid offsets in the window, which allows an optimization
1611
     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
1612
     * dictionary isn't a full 64k. */
1613
31.0k
    dict->currentOffset += 64 KB;
1614
1615
31.0k
    if (dictSize < (int)HASH_UNIT) {
1616
9.13k
        return 0;
1617
9.13k
    }
1618
1619
21.9k
    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
1620
21.9k
    dict->dictionary = p;
1621
21.9k
    dict->dictSize = (U32)(dictEnd - p);
1622
21.9k
    dict->tableType = (U32)tableType;
1623
21.9k
    idx32 = dict->currentOffset - dict->dictSize;
1624
1625
59.7M
    while (p <= dictEnd-HASH_UNIT) {
1626
59.7M
        U32 const h = LZ4_hashPosition(p, tableType);
1627
        /* Note: overwriting => favors positions end of dictionary */
1628
59.7M
        LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
1629
59.7M
        p+=3; idx32+=3;
1630
59.7M
    }
1631
1632
21.9k
    if (_ld == _ld_slow) {
1633
        /* Fill hash table with additional references, to improve compression capability */
1634
0
        p = dict->dictionary;
1635
0
        idx32 = dict->currentOffset - dict->dictSize;
1636
0
        while (p <= dictEnd-HASH_UNIT) {
1637
0
            U32 const h = LZ4_hashPosition(p, tableType);
1638
0
            U32 const limit = dict->currentOffset - 64 KB;
1639
0
            if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {
1640
                /* Note: not overwriting => favors positions beginning of dictionary */
1641
0
                LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
1642
0
            }
1643
0
            p++; idx32++;
1644
0
        }
1645
0
    }
1646
1647
21.9k
    return (int)dict->dictSize;
1648
31.0k
}
1649
1650
int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
1651
31.0k
{
1652
31.0k
    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);
1653
31.0k
}
1654
1655
int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
1656
0
{
1657
0
    return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);
1658
0
}
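Usage sketch for dictionary compression: load the dictionary into a stream, then compress against it with LZ4_compress_fast_continue(); only the last 64 KB is retained, and the decompressor must be handed the same dictionary (e.g. via LZ4_decompress_safe_usingDict). compress_with_dict is an invented name:

#include "lz4.h"

int compress_with_dict(const char* dict, int dictSize,
                       const char* src, int srcSize,
                       char* dst, int dstCapacity)
{
    LZ4_stream_t stream;
    if (LZ4_initStream(&stream, sizeof(stream)) == NULL) return 0;
    LZ4_loadDict(&stream, dict, dictSize);   /* keeps at most the last 64 KB */
    return LZ4_compress_fast_continue(&stream, src, dst, srcSize, dstCapacity, 1);
}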
1659
1660
void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
1661
15.5k
{
1662
15.5k
    const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
1663
15.5k
        &(dictionaryStream->internal_donotuse);
1664
1665
15.5k
    DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
1666
15.5k
             (void*)workingStream, (void*)dictionaryStream,
1667
15.5k
             dictCtx != NULL ? dictCtx->dictSize : 0);
1668
1669
15.5k
    if (dictCtx != NULL) {
1670
        /* If the current offset is zero, we will never look in the
1671
         * external dictionary context, since there is no value a table
1672
         * entry can take that indicates a miss. In that case, we need
1673
         * to bump the offset to something non-zero.
1674
         */
1675
15.5k
        if (workingStream->internal_donotuse.currentOffset == 0) {
1676
0
            workingStream->internal_donotuse.currentOffset = 64 KB;
1677
0
        }
1678
1679
        /* Don't actually attach an empty dictionary.
1680
         */
1681
15.5k
        if (dictCtx->dictSize == 0) {
1682
4.56k
            dictCtx = NULL;
1683
4.56k
        }
1684
15.5k
    }
1685
15.5k
    workingStream->internal_donotuse.dictCtx = dictCtx;
1686
15.5k
}
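Usage sketch for the attach path: a dictionary stream prepared once with LZ4_loadDict() can be attached to many short-lived working streams, which avoids re-hashing the dictionary for each one. Note that LZ4_attach_dictionary() required LZ4_STATIC_LINKING_ONLY in older lz4 releases (compress_attached is an invented name):

#include "lz4.h"

int compress_attached(const LZ4_stream_t* dictStream,   /* prepared via LZ4_loadDict() */
                      const char* src, int srcSize,
                      char* dst, int dstCapacity)
{
    LZ4_stream_t work;
    if (LZ4_initStream(&work, sizeof(work)) == NULL) return 0;
    LZ4_attach_dictionary(&work, dictStream);   /* shares the dict tables via dictCtx */
    return LZ4_compress_fast_continue(&work, src, dst, srcSize, dstCapacity, 1);
}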
1687
1688
1689
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
1690
329k
{
1691
329k
    assert(nextSize >= 0);
1692
329k
    if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
1693
        /* rescale hash table */
1694
0
        U32 const delta = LZ4_dict->currentOffset - 64 KB;
1695
0
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
1696
0
        int i;
1697
0
        DEBUGLOG(4, "LZ4_renormDictT");
1698
0
        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
1699
0
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
1700
0
            else LZ4_dict->hashTable[i] -= delta;
1701
0
        }
1702
0
        LZ4_dict->currentOffset = 64 KB;
1703
0
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
1704
0
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
1705
0
    }
1706
329k
}
1707
1708
1709
int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
1710
                                const char* source, char* dest,
1711
                                int inputSize, int maxOutputSize,
1712
                                int acceleration)
1713
329k
{
1714
329k
    const tableType_t tableType = byU32;
1715
329k
    LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;
1716
329k
    const char* dictEnd = streamPtr->dictSize ? (const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;
1717
1718
329k
    DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize);
1719
1720
329k
    LZ4_renormDictT(streamPtr, inputSize);   /* fix index overflow */
1721
329k
    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
1722
329k
    if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
1723
1724
    /* invalidate tiny dictionaries */
1725
329k
    if ( (streamPtr->dictSize < 4)     /* tiny dictionary : not enough for a hash */
1726
329k
      && (dictEnd != source)           /* prefix mode */
1727
329k
      && (inputSize > 0)               /* tolerance : don't lose history, in case next invocation would use prefix mode */
1728
329k
      && (streamPtr->dictCtx == NULL)  /* usingDictCtx */
1729
329k
      ) {
1730
70.2k
        DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, (void*)streamPtr->dictionary);
1731
        /* remove dictionary existence from history, to employ faster prefix mode */
1732
70.2k
        streamPtr->dictSize = 0;
1733
70.2k
        streamPtr->dictionary = (const BYTE*)source;
1734
70.2k
        dictEnd = source;
1735
70.2k
    }
1736
1737
    /* Check overlapping input/dictionary space */
1738
329k
    {   const char* const sourceEnd = source + inputSize;
1739
329k
        if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {
1740
0
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
1741
0
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
1742
0
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
1743
0
            streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;
1744
0
        }
1745
329k
    }
1746
1747
    /* prefix mode : source data follows dictionary */
1748
329k
    if (dictEnd == source) {
1749
201k
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1750
99.7k
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
1751
101k
        else
1752
101k
            return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
1753
201k
    }
1754
1755
    /* external dictionary mode */
1756
127k
    {   int result;
1757
127k
        if (streamPtr->dictCtx) {
1758
            /* We depend here on the fact that dictCtx'es (produced by
1759
             * LZ4_loadDict) guarantee that their tables contain no references
1760
             * to offsets between dictCtx->currentOffset - 64 KB and
1761
             * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
1762
             * to use noDictIssue even when the dict isn't a full 64 KB.
1763
             */
1764
10.6k
            if (inputSize > 4 KB) {
1765
                /* For compressing large blobs, it is faster to pay the setup
1766
                 * cost to copy the dictionary's tables into the active context,
1767
                 * so that the compression loop is only looking into one table.
1768
                 */
1769
1.64k
                LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
1770
1.64k
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
1771
9.03k
            } else {
1772
9.03k
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
1773
9.03k
            }
1774
117k
        } else {  /* no dictCtx : regular external dictionary mode */
1775
117k
            if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1776
111k
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
1777
111k
            } else {
1778
5.38k
                result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
1779
5.38k
            }
1780
117k
        }
1781
127k
        streamPtr->dictionary = (const BYTE*)source;
1782
127k
        streamPtr->dictSize = (U32)inputSize;
1783
127k
        return result;
1784
329k
    }
1785
329k
}
1786
1787
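To make the calling convention concrete, here is a minimal double-buffer sketch driving this entry point. read_block/write_block stand in for application I/O and are hypothetical; the LZ4 calls are the public lz4.h API.

#include "lz4.h"

#define BLOCK_SIZE (64 * 1024)

extern int  read_block(char* buf, int capacity);    /* hypothetical reader : bytes read, 0 at EOF */
extern void write_block(const char* buf, int size); /* hypothetical writer */

/* Sketch : compress a sequence of blocks that share history.
 * Two input buffers alternate, so the previous block remains addressable
 * and can be referenced as an external dictionary by the next call. */
static void stream_compress_sketch(void)
{
    static char inBuf[2][BLOCK_SIZE];
    static char dst[LZ4_COMPRESSBOUND(BLOCK_SIZE)];
    LZ4_stream_t* const stream = LZ4_createStream();
    int idx = 0, n;

    if (stream == NULL) return;
    while ((n = read_block(inBuf[idx], BLOCK_SIZE)) > 0) {
        int const cSize = LZ4_compress_fast_continue(stream, inBuf[idx], dst,
                                                     n, (int)sizeof(dst), 1);
        if (cSize <= 0) break;          /* compression error */
        write_block(dst, cSize);
        idx ^= 1;                       /* flip buffers : last block stays live as history */
    }
    LZ4_freeStream(stream);
}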
1788
/* Hidden debug function, to force-test external dictionary mode */
1789
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
1790
0
{
1791
0
    LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;
1792
0
    int result;
1793
1794
0
    LZ4_renormDictT(streamPtr, srcSize);
1795
1796
0
    if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1797
0
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
1798
0
    } else {
1799
0
        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
1800
0
    }
1801
1802
0
    streamPtr->dictionary = (const BYTE*)source;
1803
0
    streamPtr->dictSize = (U32)srcSize;
1804
1805
0
    return result;
1806
0
}
1807
1808
1809
/*! LZ4_saveDict() :
1810
 *  If previously compressed data block is not guaranteed to remain available at its memory location,
1811
 *  save it into a safer place (char* safeBuffer).
1812
 *  Note : no need to call LZ4_loadDict() afterwards, dictionary is immediately usable,
1813
 *         one can therefore call LZ4_compress_fast_continue() right after.
1814
 * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
1815
 */
1816
int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
1817
0
{
1818
0
    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
1819
1820
0
    DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, (void*)safeBuffer);
1821
1822
0
    if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
1823
0
    if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
1824
1825
0
    if (safeBuffer == NULL) assert(dictSize == 0);
1826
0
    if (dictSize > 0) {
1827
0
        const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
1828
0
        assert(dict->dictionary);
1829
0
        LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);
1830
0
    }
1831
1832
0
    dict->dictionary = (const BYTE*)safeBuffer;
1833
0
    dict->dictSize = (U32)dictSize;
1834
1835
0
    return dictSize;
1836
0
}
1837
1838
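When the application reuses a single input buffer instead, the calling pattern documented above looks roughly like the following sketch; it reuses BLOCK_SIZE and the hypothetical I/O helpers from the previous sketch, and dictBuf is illustrative.

/* Sketch : same loop with one reusable input buffer; history survives
 * each overwrite via LZ4_saveDict(). */
static void stream_compress_saveDict_sketch(LZ4_stream_t* stream)
{
    static char block[BLOCK_SIZE];      /* overwritten on every iteration */
    static char dictBuf[64 * 1024];     /* safe home for the history */
    static char dst[LZ4_COMPRESSBOUND(BLOCK_SIZE)];
    int n;

    while ((n = read_block(block, BLOCK_SIZE)) > 0) {
        int const cSize = LZ4_compress_fast_continue(stream, block, dst,
                                                     n, (int)sizeof(dst), 1);
        if (cSize <= 0) break;
        write_block(dst, cSize);
        /* `block` is about to be overwritten : move the usable history aside.
         * As documented above, no LZ4_loadDict() is needed afterwards. */
        (void)LZ4_saveDict(stream, dictBuf, (int)sizeof(dictBuf));
    }
}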
1839
1840
/*-*******************************
1841
 *  Decompression functions
1842
 ********************************/
1843
1844
typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
1845
1846
#undef MIN
1847
24.6k
#define MIN(a,b)    ( (a) < (b) ? (a) : (b) )
1848
1849
1850
/* length-reading helper for LZ4_decompress_unsafe_generic()
1851
 * does not know end of input
1852
 * presumes input is well formed
1853
 * note : will consume at least one byte */
1854
static size_t read_long_length_no_check(const BYTE** pp)
1855
0
{
1856
0
    size_t b, l = 0;
1857
0
    do { b = **pp; (*pp)++; l += b; } while (b==255);
1858
0
    DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1)
1859
0
    return l;
1860
0
}
1861
1862
/* core decoder variant for LZ4_decompress_fast*()
1863
 * for legacy support only : these entry points are deprecated.
1864
 * - Presumes input is correctly formed (no defense vs malformed inputs)
1865
 * - Does not know input size (presume input buffer is "large enough")
1866
 * - Decompress a full block (only)
1867
 * @return : nb of bytes read from input.
1868
 * Note : this variant is not optimized for speed, just for maintenance.
1869
 *        the goal is to remove support of decompress_fast*() variants by v2.0
1870
**/
1871
LZ4_FORCE_INLINE int
1872
LZ4_decompress_unsafe_generic(
1873
                 const BYTE* const istart,
1874
                 BYTE* const ostart,
1875
                 int decompressedSize,
1876
1877
                 size_t prefixSize,
1878
                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
1879
                 const size_t dictSize         /* note: =0 if dictStart==NULL */
1880
                 )
1881
0
{
1882
0
    const BYTE* ip = istart;
1883
0
    BYTE* op = (BYTE*)ostart;
1884
0
    BYTE* const oend = ostart + decompressedSize;
1885
0
    const BYTE* const prefixStart = ostart - prefixSize;
1886
1887
0
    DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
1888
0
    if (dictStart == NULL) assert(dictSize == 0);
1889
1890
0
    while (1) {
1891
        /* start new sequence */
1892
0
        unsigned token = *ip++;
1893
1894
        /* literals */
1895
0
        {   size_t ll = token >> ML_BITS;
1896
0
            if (ll==15) {
1897
                /* long literal length */
1898
0
                ll += read_long_length_no_check(&ip);
1899
0
            }
1900
0
            if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */
1901
0
            LZ4_memmove(op, ip, ll); /* support in-place decompression */
1902
0
            op += ll;
1903
0
            ip += ll;
1904
0
            if ((size_t)(oend-op) < MFLIMIT) {
1905
0
                if (op==oend) break;  /* end of block */
1906
0
                DEBUGLOG(5, "invalid: literals end at distance %zi from end of block", oend-op);
1907
                /* incorrect end of block :
1908
                 * last match must start at least MFLIMIT==12 bytes before end of output block */
1909
0
                return -1;
1910
0
        }   }
1911
1912
        /* match */
1913
0
        {   size_t ml = token & 15;
1914
0
            size_t const offset = LZ4_readLE16(ip);
1915
0
            ip+=2;
1916
1917
0
            if (ml==15) {
1918
                /* long match length */
1919
0
                ml += read_long_length_no_check(&ip);
1920
0
            }
1921
0
            ml += MINMATCH;
1922
1923
0
            if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */
1924
1925
0
            {   const BYTE* match = op - offset;
1926
1927
                /* out of range */
1928
0
                if (offset > (size_t)(op - prefixStart) + dictSize) {
1929
0
                    DEBUGLOG(6, "offset out of range");
1930
0
                    return -1;
1931
0
                }
1932
1933
                /* check special case : extDict */
1934
0
                if (offset > (size_t)(op - prefixStart)) {
1935
                    /* extDict scenario */
1936
0
                    const BYTE* const dictEnd = dictStart + dictSize;
1937
0
                    const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));
1938
0
                    size_t const extml = (size_t)(dictEnd - extMatch);
1939
0
                    if (extml > ml) {
1940
                        /* match entirely within extDict */
1941
0
                        LZ4_memmove(op, extMatch, ml);
1942
0
                        op += ml;
1943
0
                        ml = 0;
1944
0
                    } else {
1945
                        /* match split between extDict & prefix */
1946
0
                        LZ4_memmove(op, extMatch, extml);
1947
0
                        op += extml;
1948
0
                        ml -= extml;
1949
0
                    }
1950
0
                    match = prefixStart;
1951
0
                }
1952
1953
                /* match copy - slow variant, supporting overlap copy */
1954
0
                {   size_t u;
1955
0
                    for (u=0; u<ml; u++) {
1956
0
                        op[u] = match[u];
1957
0
            }   }   }
1958
0
            op += ml;
1959
0
            if ((size_t)(oend-op) < LASTLITERALS) {
1960
0
                DEBUGLOG(5, "invalid: match ends at distance %zi from end of block", oend-op);
1961
                /* incorrect end of block :
1962
                 * last match must stop at least LASTLITERALS==5 bytes before end of output block */
1963
0
                return -1;
1964
0
            }
1965
0
        } /* match */
1966
0
    } /* main loop */
1967
0
    return (int)(ip - istart);
1968
0
}
1969
1970
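As a concrete instance of the token / literals / offset layout the decoder above walks through, the following hand-assembled block (illustrative, not produced by the compressor) decodes with the safe entry point instantiated later in this file.

static int decode_handmade_block_sketch(char* out /* >= 19 bytes */)
{
    /* sequence 1 : token 0x70 => 7 literals, match-length nibble 0 (4+0 bytes);
     *              literals "abcdefg", then offset 7 (little-endian) re-copies "abcd".
     * sequence 2 : token 0x80 => 8 final literals, no match (a block always ends
     *              on literals). */
    static const char block[19] = {
        0x70, 'a','b','c','d','e','f','g',            /* token + literals  */
        0x07, 0x00,                                   /* match offset = 7  */
        (char)0x80, 'h','i','j','k','l','m','n','o'   /* last token + literals */
    };
    /* expected : returns 19, out holds "abcdefgabcdhijklmno" */
    return LZ4_decompress_safe(block, out, (int)sizeof(block), 19);
}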
1971
/* Read the variable-length literal or match length.
1972
 *
1973
 * @ip : input pointer
1974
 * @ilimit : position after which if length is not decoded, the input is necessarily corrupted.
1975
 * @initial_check - check ip >= ilimit before start of loop.  Returns rvl_error if so.
1976
 * @return : decoded length, or rvl_error if the input is corrupted.
1977
**/
1978
typedef size_t Rvl_t;
1979
static const Rvl_t rvl_error = (Rvl_t)(-1);
1980
LZ4_FORCE_INLINE Rvl_t
1981
read_variable_length(const BYTE** ip, const BYTE* ilimit,
1982
                     int initial_check)
1983
21.9M
{
1984
21.9M
    Rvl_t s, length = 0;
1985
21.9M
    assert(ip != NULL);
1986
21.9M
    assert(*ip !=  NULL);
1987
21.9M
    assert(ilimit != NULL);
1988
21.9M
    if (initial_check && unlikely((*ip) >= ilimit)) {    /* read limit reached */
1989
1.24k
        return rvl_error;
1990
1.24k
    }
1991
21.9M
    s = **ip;
1992
21.9M
    (*ip)++;
1993
21.9M
    length += s;
1994
21.9M
    if (unlikely((*ip) > ilimit)) {    /* read limit reached */
1995
136
        return rvl_error;
1996
136
    }
1997
    /* accumulator overflow detection (32-bit mode only) */
1998
21.9M
    if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
1999
0
        return rvl_error;
2000
0
    }
2001
21.9M
    if (likely(s != 255)) return length;
2002
232M
    do {
2003
232M
        s = **ip;
2004
232M
        (*ip)++;
2005
232M
        length += s;
2006
232M
        if (unlikely((*ip) > ilimit)) {    /* read limit reached */
2007
807
            return rvl_error;
2008
807
        }
2009
        /* accumulator overflow detection (32-bit mode only) */
2010
232M
        if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
2011
0
            return rvl_error;
2012
0
        }
2013
232M
    } while (s == 255);
2014
2015
1.49M
    return length;
2016
1.49M
}
2017
2018
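For reference, the encoder side of the byte-sum format this helper parses is a short loop; the sketch below assumes the 15 stored in the token nibble has already been accounted for, and the function name is illustrative.

/* Sketch : emit the extension bytes for a length >= 15. Each byte adds
 * 0..255; the first byte below 255 terminates the run, which is exactly
 * the stop condition read_variable_length() relies on.
 * Example : length 300 => nibble 15, then bytes 255, 30 (15+255+30 = 300). */
static unsigned char* write_variable_length_sketch(unsigned char* op, size_t length)
{
    length -= 15;                                  /* already stored in the token */
    while (length >= 255) { *op++ = 255; length -= 255; }
    *op++ = (unsigned char)length;                 /* terminator, < 255 */
    return op;
}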
/*! LZ4_decompress_generic() :
2019
 *  This generic decompression function covers all use cases.
2020
 *  It shall be instantiated several times, using different sets of directives.
2021
 *  Note that it is important for performance that this function really get inlined,
2022
 *  in order to remove useless branches during compilation optimization.
2023
 */
2024
LZ4_FORCE_INLINE int
2025
LZ4_decompress_generic(
2026
                 const char* const src,
2027
                 char* const dst,
2028
                 int srcSize,
2029
                 int outputSize,         /* this value is `dstCapacity` */
2030
2031
                 earlyEnd_directive partialDecoding,  /* full, partial */
2032
                 dict_directive dict,                 /* noDict, withPrefix64k, usingExtDict */
2033
                 const BYTE* const lowPrefix,  /* always <= dst, == dst when no prefix */
2034
                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
2035
                 const size_t dictSize         /* note : = 0 if noDict */
2036
                 )
2037
746k
{
2038
746k
    if ((src == NULL) || (outputSize < 0)) { return -1; }
2039
2040
746k
    {   const BYTE* ip = (const BYTE*) src;
2041
746k
        const BYTE* const iend = ip + srcSize;
2042
2043
746k
        BYTE* op = (BYTE*) dst;
2044
746k
        BYTE* const oend = op + outputSize;
2045
746k
        BYTE* cpy;
2046
2047
746k
        const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
2048
2049
746k
        const int checkOffset = (dictSize < (int)(64 KB));
2050
2051
2052
        /* Set up the "end" pointers for the shortcut. */
2053
746k
        const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;
2054
746k
        const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;
2055
2056
746k
        const BYTE* match;
2057
746k
        size_t offset;
2058
746k
        unsigned token;
2059
746k
        size_t length;
2060
2061
2062
746k
        DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
2063
2064
        /* Special cases */
2065
746k
        assert(lowPrefix <= op);
2066
746k
        if (unlikely(outputSize==0)) {
2067
            /* Empty output buffer */
2068
1.75k
            if (partialDecoding) return 0;
2069
206
            return ((srcSize==1) && (*ip==0)) ? 0 : -1;
2070
1.75k
        }
2071
744k
        if (unlikely(srcSize==0)) { return -1; }
2072
2073
    /* LZ4_FAST_DEC_LOOP:
2074
     * designed for modern OoO performance cpus,
2075
     * where reliably copying 32 bytes is preferable to an unpredictable branch.
2076
     * note : fast loop may show a regression for some client arm chips. */
2077
744k
#if LZ4_FAST_DEC_LOOP
2078
744k
        if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
2079
462k
            DEBUGLOG(6, "move to safe decode loop");
2080
462k
            goto safe_decode;
2081
462k
        }
2082
2083
        /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
2084
282k
        DEBUGLOG(6, "using fast decode loop");
2085
65.5M
        while (1) {
2086
            /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
2087
65.5M
            assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
2088
65.5M
            assert(ip < iend);
2089
65.5M
            token = *ip++;
2090
65.5M
            length = token >> ML_BITS;  /* literal length */
2091
65.5M
            DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
2092
2093
            /* decode literal length */
2094
65.5M
            if (length == RUN_MASK) {
2095
3.61M
                size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
2096
3.61M
                if (addl == rvl_error) {
2097
721
                    DEBUGLOG(6, "error reading long literal length");
2098
721
                    goto _output_error;
2099
721
                }
2100
3.60M
                length += addl;
2101
3.60M
                if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
2102
3.60M
                if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
2103
2104
                /* copy literals */
2105
3.60M
                LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
2106
3.60M
                if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
2107
3.55M
                LZ4_wildCopy32(op, ip, op+length);
2108
3.55M
                ip += length; op += length;
2109
61.9M
            } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {
2110
                /* We don't need to check oend, since we check it once for each loop below */
2111
61.7M
                DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
2112
                /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
2113
61.7M
                LZ4_memcpy(op, ip, 16);
2114
61.7M
                ip += length; op += length;
2115
61.7M
            } else {
2116
182k
                goto safe_literal_copy;
2117
182k
            }
2118
2119
            /* get offset */
2120
65.3M
            offset = LZ4_readLE16(ip); ip+=2;
2121
65.3M
            DEBUGLOG(6, "blockPos%6u: offset = %u", (unsigned)(op-(BYTE*)dst), (unsigned)offset);
2122
65.3M
            match = op - offset;
2123
65.3M
            assert(match <= op);  /* overflow check */
2124
2125
            /* get matchlength */
2126
65.3M
            length = token & ML_MASK;
2127
65.3M
            DEBUGLOG(7, "  match length token = %u (len==%u)", (unsigned)length, (unsigned)length+MINMATCH);
2128
2129
65.3M
            if (length == ML_MASK) {
2130
18.1M
                size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
2131
18.1M
                if (addl == rvl_error) {
2132
142
                    DEBUGLOG(5, "error reading long match length");
2133
142
                    goto _output_error;
2134
142
                }
2135
18.1M
                length += addl;
2136
18.1M
                length += MINMATCH;
2137
18.1M
                DEBUGLOG(7, "  long match length == %u", (unsigned)length);
2138
18.1M
                if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
2139
18.1M
                if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
2140
17.1k
                    goto safe_match_copy;
2141
17.1k
                }
2142
47.2M
            } else {
2143
47.2M
                length += MINMATCH;
2144
47.2M
                if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
2145
22.1k
                    DEBUGLOG(7, "moving to safe_match_copy (ml==%u)", (unsigned)length);
2146
22.1k
                    goto safe_match_copy;
2147
22.1k
                }
2148
2149
                /* Fastpath check: skip LZ4_wildCopy32 when true */
2150
47.1M
                if ((dict == withPrefix64k) || (match >= lowPrefix)) {
2151
46.8M
                    if (offset >= 8) {
2152
40.8M
                        assert(match >= lowPrefix);
2153
40.8M
                        assert(match <= op);
2154
40.8M
                        assert(op + 18 <= oend);
2155
2156
40.8M
                        LZ4_memcpy(op, match, 8);
2157
40.8M
                        LZ4_memcpy(op+8, match+8, 8);
2158
40.8M
                        LZ4_memcpy(op+16, match+16, 2);
2159
40.8M
                        op += length;
2160
40.8M
                        continue;
2161
40.8M
            }   }   }
2162
2163
24.4M
            if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {
2164
1.21k
                DEBUGLOG(5, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match);
2165
1.21k
                goto _output_error;
2166
1.21k
            }
2167
            /* match starting within external dictionary */
2168
24.4M
            if ((dict==usingExtDict) && (match < lowPrefix)) {
2169
438k
                assert(dictEnd != NULL);
2170
438k
                if (unlikely(op+length > oend-LASTLITERALS)) {
2171
0
                    if (partialDecoding) {
2172
0
                        DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
2173
0
                        length = MIN(length, (size_t)(oend-op));
2174
0
                    } else {
2175
0
                        DEBUGLOG(6, "end-of-block condition violated")
2176
0
                        goto _output_error;
2177
0
                }   }
2178
2179
438k
                if (length <= (size_t)(lowPrefix-match)) {
2180
                    /* match fits entirely within external dictionary : just copy */
2181
425k
                    LZ4_memmove(op, dictEnd - (lowPrefix-match), length);
2182
425k
                    op += length;
2183
425k
                } else {
2184
                    /* match stretches into both external dictionary and current block */
2185
13.0k
                    size_t const copySize = (size_t)(lowPrefix - match);
2186
13.0k
                    size_t const restSize = length - copySize;
2187
13.0k
                    LZ4_memcpy(op, dictEnd - copySize, copySize);
2188
13.0k
                    op += copySize;
2189
13.0k
                    if (restSize > (size_t)(op - lowPrefix)) {  /* overlap copy */
2190
3.43k
                        BYTE* const endOfMatch = op + restSize;
2191
3.43k
                        const BYTE* copyFrom = lowPrefix;
2192
136M
                        while (op < endOfMatch) { *op++ = *copyFrom++; }
2193
9.63k
                    } else {
2194
9.63k
                        LZ4_memcpy(op, lowPrefix, restSize);
2195
9.63k
                        op += restSize;
2196
9.63k
                }   }
2197
438k
                continue;
2198
438k
            }
2199
2200
            /* copy match within block */
2201
23.9M
            cpy = op + length;
2202
2203
23.9M
            assert((op <= oend) && (oend-op >= 32));
2204
23.9M
            if (unlikely(offset<16)) {
2205
7.62M
                LZ4_memcpy_using_offset(op, match, cpy, offset);
2206
16.3M
            } else {
2207
16.3M
                LZ4_wildCopy32(op, match, cpy);
2208
16.3M
            }
2209
2210
23.9M
            op = cpy;   /* wildcopy correction */
2211
23.9M
        }
2212
462k
    safe_decode:
2213
462k
#endif
2214
2215
        /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
2216
462k
        DEBUGLOG(6, "using safe decode loop");
2217
1.04M
        while (1) {
2218
1.04M
            assert(ip < iend);
2219
1.04M
            token = *ip++;
2220
1.04M
            length = token >> ML_BITS;  /* literal length */
2221
1.04M
            DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
2222
2223
            /* A two-stage shortcut for the most common case:
2224
             * 1) If the literal length is 0..14, and there is enough space,
2225
             * enter the shortcut and copy 16 bytes on behalf of the literals
2226
             * (in the fast mode, only 8 bytes can be safely copied this way).
2227
             * 2) Further if the match length is 4..18, copy 18 bytes in a similar
2228
             * manner; but we ensure that there's enough space in the output for
2229
             * those 18 bytes earlier, upon entering the shortcut (in other words,
2230
             * there is a combined check for both stages).
2231
             */
2232
1.04M
            if ( (length != RUN_MASK)
2233
                /* strictly "less than" on input, to re-enter the loop with at least one byte */
2234
1.04M
              && likely((ip < shortiend) & (op <= shortoend)) ) {
2235
                /* Copy the literals */
2236
97.9k
                LZ4_memcpy(op, ip, 16);
2237
97.9k
                op += length; ip += length;
2238
2239
                /* The second stage: prepare for match copying, decode full info.
2240
                 * If it doesn't work out, the info won't be wasted. */
2241
97.9k
                length = token & ML_MASK; /* match length */
2242
97.9k
                DEBUGLOG(7, "blockPos%6u: matchLength token = %u (len=%u)", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);
2243
97.9k
                offset = LZ4_readLE16(ip); ip += 2;
2244
97.9k
                match = op - offset;
2245
97.9k
                assert(match <= op); /* check overflow */
2246
2247
                /* Do not deal with overlapping matches. */
2248
97.9k
                if ( (length != ML_MASK)
2249
97.9k
                  && (offset >= 8)
2250
97.9k
                  && (dict==withPrefix64k || match >= lowPrefix) ) {
2251
                    /* Copy the match. */
2252
46.5k
                    LZ4_memcpy(op + 0, match + 0, 8);
2253
46.5k
                    LZ4_memcpy(op + 8, match + 8, 8);
2254
46.5k
                    LZ4_memcpy(op +16, match +16, 2);
2255
46.5k
                    op += length + MINMATCH;
2256
                    /* Both stages worked, load the next token. */
2257
46.5k
                    continue;
2258
46.5k
                }
2259
2260
                /* The second stage didn't work out, but the info is ready.
2261
                 * Propel it right to the point of match copying. */
2262
51.4k
                goto _copy_match;
2263
97.9k
            }
2264
2265
            /* decode literal length */
2266
942k
            if (length == RUN_MASK) {
2267
33.7k
                size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
2268
33.7k
                if (addl == rvl_error) { goto _output_error; }
2269
32.8k
                length += addl;
2270
32.8k
                if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
2271
32.8k
                if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
2272
32.8k
            }
2273
2274
941k
#if LZ4_FAST_DEC_LOOP
2275
1.18M
        safe_literal_copy:
2276
1.18M
#endif
2277
            /* copy literals */
2278
1.18M
            cpy = op+length;
2279
2280
1.18M
            LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
2281
1.18M
            if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {
2282
                /* We've either hit the input parsing restriction or the output parsing restriction.
2283
                 * In the normal scenario, decoding a full block, it must be the last sequence,
2284
                 * otherwise it's an error (invalid input or dimensions).
2285
                 * In the partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
2286
                 */
2287
738k
                if (partialDecoding) {
2288
                    /* Since we are partially decoding, we may be in this block because of the output parsing
2289
                     * restriction, which does not apply here since the output buffer is allowed to be undersized.
2290
                     */
2291
13.0k
                    DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
2292
13.0k
                    DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
2293
13.0k
                    DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
2294
13.0k
                    DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
2295
                    /* Finishing in the middle of a literals segment,
2296
                     * due to lack of input.
2297
                     */
2298
13.0k
                    if (ip+length > iend) {
2299
1.64k
                        length = (size_t)(iend-ip);
2300
1.64k
                        cpy = op + length;
2301
1.64k
                    }
2302
                    /* Finishing in the middle of a literals segment,
2303
                     * due to lack of output space.
2304
                     */
2305
13.0k
                    if (cpy > oend) {
2306
4.63k
                        cpy = oend;
2307
4.63k
                        assert(op<=oend);
2308
4.63k
                        length = (size_t)(oend-op);
2309
4.63k
                    }
2310
725k
                } else {
2311
                     /* We must be on the last sequence (or invalid) because of the parsing limitations
2312
                      * so check that we exactly consume the input and don't overrun the output buffer.
2313
                      */
2314
725k
                    if ((ip+length != iend) || (cpy > oend)) {
2315
5.08k
                        DEBUGLOG(5, "should have been last run of literals")
2316
5.08k
                        DEBUGLOG(5, "ip(%p) + length(%i) = %p != iend (%p)", (void*)ip, (int)length, (void*)(ip+length), (void*)iend);
2317
5.08k
                        DEBUGLOG(5, "or cpy(%p) > (oend-MFLIMIT)(%p)", (void*)cpy, (void*)(oend-MFLIMIT));
2318
5.08k
                        DEBUGLOG(5, "after writing %u bytes / %i bytes available", (unsigned)(op-(BYTE*)dst), outputSize);
2319
5.08k
                        goto _output_error;
2320
5.08k
                    }
2321
725k
                }
2322
733k
                LZ4_memmove(op, ip, length);  /* supports overlapping memory regions, for in-place decompression scenarios */
2323
733k
                ip += length;
2324
733k
                op += length;
2325
                /* Necessarily EOF when !partialDecoding.
2326
                 * When partialDecoding, it is EOF if we've either
2327
                 * filled the output buffer or
2328
                 * can't proceed with reading an offset for following match.
2329
                 */
2330
733k
                if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
2331
727k
                    break;
2332
727k
                }
2333
733k
            } else {
2334
444k
                LZ4_wildCopy8(op, ip, cpy);   /* can overwrite up to 8 bytes beyond cpy */
2335
444k
                ip += length; op = cpy;
2336
444k
            }
2337
2338
            /* get offset */
2339
449k
            offset = LZ4_readLE16(ip); ip+=2;
2340
449k
            match = op - offset;
2341
2342
            /* get matchlength */
2343
449k
            length = token & ML_MASK;
2344
449k
            DEBUGLOG(7, "blockPos%6u: matchLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
2345
2346
500k
    _copy_match:
2347
500k
            if (length == ML_MASK) {
2348
158k
                size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
2349
158k
                if (addl == rvl_error) { goto _output_error; }
2350
158k
                length += addl;
2351
158k
                if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error;   /* overflow detection */
2352
158k
            }
2353
500k
            length += MINMATCH;
2354
2355
500k
#if LZ4_FAST_DEC_LOOP
2356
539k
        safe_match_copy:
2357
539k
#endif
2358
539k
            if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error;   /* Error : offset outside buffers */
2359
            /* match starting within external dictionary */
2360
536k
            if ((dict==usingExtDict) && (match < lowPrefix)) {
2361
33.3k
                assert(dictEnd != NULL);
2362
33.3k
                if (unlikely(op+length > oend-LASTLITERALS)) {
2363
1.05k
                    if (partialDecoding) length = MIN(length, (size_t)(oend-op));
2364
404
                    else goto _output_error;   /* doesn't respect parsing restriction */
2365
1.05k
                }
2366
2367
32.9k
                if (length <= (size_t)(lowPrefix-match)) {
2368
                    /* match fits entirely within external dictionary : just copy */
2369
25.7k
                    LZ4_memmove(op, dictEnd - (lowPrefix-match), length);
2370
25.7k
                    op += length;
2371
25.7k
                } else {
2372
                    /* match stretches into both external dictionary and current block */
2373
7.25k
                    size_t const copySize = (size_t)(lowPrefix - match);
2374
7.25k
                    size_t const restSize = length - copySize;
2375
7.25k
                    LZ4_memcpy(op, dictEnd - copySize, copySize);
2376
7.25k
                    op += copySize;
2377
7.25k
                    if (restSize > (size_t)(op - lowPrefix)) {  /* overlap copy */
2378
3.18k
                        BYTE* const endOfMatch = op + restSize;
2379
3.18k
                        const BYTE* copyFrom = lowPrefix;
2380
61.3M
                        while (op < endOfMatch) *op++ = *copyFrom++;
2381
4.07k
                    } else {
2382
4.07k
                        LZ4_memcpy(op, lowPrefix, restSize);
2383
4.07k
                        op += restSize;
2384
4.07k
                }   }
2385
32.9k
                continue;
2386
33.3k
            }
2387
503k
            assert(match >= lowPrefix);
2388
2389
            /* copy match within block */
2390
503k
            cpy = op + length;
2391
2392
            /* partialDecoding : may end anywhere within the block */
2393
503k
            assert(op<=oend);
2394
503k
            if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
2395
7.82k
                size_t const mlen = MIN(length, (size_t)(oend-op));
2396
7.82k
                const BYTE* const matchEnd = match + mlen;
2397
7.82k
                BYTE* const copyEnd = op + mlen;
2398
7.82k
                if (matchEnd > op) {   /* overlap copy */
2399
209k
                    while (op < copyEnd) { *op++ = *match++; }
2400
4.77k
                } else {
2401
3.05k
                    LZ4_memcpy(op, match, mlen);
2402
3.05k
                }
2403
7.82k
                op = copyEnd;
2404
7.82k
                if (op == oend) { break; }
2405
3.50k
                continue;
2406
7.82k
            }
2407
2408
495k
            if (unlikely(offset<8)) {
2409
232k
                LZ4_write32(op, 0);   /* silence msan warning when offset==0 */
2410
232k
                op[0] = match[0];
2411
232k
                op[1] = match[1];
2412
232k
                op[2] = match[2];
2413
232k
                op[3] = match[3];
2414
232k
                match += inc32table[offset];
2415
232k
                LZ4_memcpy(op+4, match, 4);
2416
232k
                match -= dec64table[offset];
2417
262k
            } else {
2418
262k
                LZ4_memcpy(op, match, 8);
2419
262k
                match += 8;
2420
262k
            }
2421
495k
            op += 8;
2422
2423
495k
            if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
2424
66.4k
                BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
2425
66.4k
                if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
2426
65.9k
                if (op < oCopyLimit) {
2427
38.7k
                    LZ4_wildCopy8(op, match, oCopyLimit);
2428
38.7k
                    match += oCopyLimit - op;
2429
38.7k
                    op = oCopyLimit;
2430
38.7k
                }
2431
100k
                while (op < cpy) { *op++ = *match++; }
2432
428k
            } else {
2433
428k
                LZ4_memcpy(op, match, 8);
2434
428k
                if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
2435
428k
            }
2436
494k
            op = cpy;   /* wildcopy correction */
2437
494k
        }
2438
2439
        /* end of decoding */
2440
732k
        DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
2441
732k
        return (int) (((char*)op)-dst);     /* Nb of output bytes decoded */
2442
2443
        /* Overflow error detected */
2444
12.8k
    _output_error:
2445
12.8k
        return (int) (-(((const char*)ip)-src))-1;
2446
462k
    }
2447
462k
}
2448
2449
2450
/*===== Instantiate the API decoding functions. =====*/
2451
2452
LZ4_FORCE_O2
2453
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
2454
138k
{
2455
138k
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
2456
138k
                                  decode_full_block, noDict,
2457
138k
                                  (BYTE*)dest, NULL, 0);
2458
138k
}
2459
2460
LZ4_FORCE_O2
2461
int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
2462
5.40k
{
2463
5.40k
    dstCapacity = MIN(targetOutputSize, dstCapacity);
2464
5.40k
    return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
2465
5.40k
                                  partial_decode,
2466
5.40k
                                  noDict, (BYTE*)dst, NULL, 0);
2467
5.40k
}
2468
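A minimal usage sketch for the partial entry point (src/srcSize are assumed to hold one complete compressed block; names are illustrative):

static int peek_sketch(const char* src, int srcSize, char* peek /* >= 256 bytes */)
{
    /* returns < 0 on invalid input; otherwise the number of bytes produced,
     * at most 256, even if the block decompresses to much more */
    return LZ4_decompress_safe_partial(src, peek, srcSize,
                                       256,    /* targetOutputSize */
                                       256);   /* dstCapacity */
}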
2469
LZ4_FORCE_O2
2470
int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
2471
0
{
2472
0
    DEBUGLOG(5, "LZ4_decompress_fast");
2473
0
    return LZ4_decompress_unsafe_generic(
2474
0
                (const BYTE*)source, (BYTE*)dest, originalSize,
2475
0
                0, NULL, 0);
2476
0
}
2477
2478
/*===== Instantiate a few more decoding cases, used more than once. =====*/
2479
2480
LZ4_FORCE_O2 /* Exported, an obsolete API function. */
2481
int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
2482
95.3k
{
2483
95.3k
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2484
95.3k
                                  decode_full_block, withPrefix64k,
2485
95.3k
                                  (BYTE*)dest - 64 KB, NULL, 0);
2486
95.3k
}
2487
2488
LZ4_FORCE_O2
2489
static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)
2490
0
{
2491
0
    dstCapacity = MIN(targetOutputSize, dstCapacity);
2492
0
    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
2493
0
                                  partial_decode, withPrefix64k,
2494
0
                                  (BYTE*)dest - 64 KB, NULL, 0);
2495
0
}
2496
2497
/* Another obsolete API function, paired with the previous one. */
2498
int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
2499
0
{
2500
0
    return LZ4_decompress_unsafe_generic(
2501
0
                (const BYTE*)source, (BYTE*)dest, originalSize,
2502
0
                64 KB, NULL, 0);
2503
0
}
2504
2505
LZ4_FORCE_O2
2506
static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
2507
                                               size_t prefixSize)
2508
250k
{
2509
250k
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2510
250k
                                  decode_full_block, noDict,
2511
250k
                                  (BYTE*)dest-prefixSize, NULL, 0);
2512
250k
}
2513
2514
LZ4_FORCE_O2
2515
static int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,
2516
                                               size_t prefixSize)
2517
0
{
2518
0
    dstCapacity = MIN(targetOutputSize, dstCapacity);
2519
0
    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
2520
0
                                  partial_decode, noDict,
2521
0
                                  (BYTE*)dest-prefixSize, NULL, 0);
2522
0
}
2523
2524
LZ4_FORCE_O2
2525
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
2526
                                     int compressedSize, int maxOutputSize,
2527
                                     const void* dictStart, size_t dictSize)
2528
64.1k
{
2529
64.1k
    DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict");
2530
64.1k
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2531
64.1k
                                  decode_full_block, usingExtDict,
2532
64.1k
                                  (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2533
64.1k
}
2534
2535
LZ4_FORCE_O2
2536
int LZ4_decompress_safe_partial_forceExtDict(const char* source, char* dest,
2537
                                     int compressedSize, int targetOutputSize, int dstCapacity,
2538
                                     const void* dictStart, size_t dictSize)
2539
10.8k
{
2540
10.8k
    dstCapacity = MIN(targetOutputSize, dstCapacity);
2541
10.8k
    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
2542
10.8k
                                  partial_decode, usingExtDict,
2543
10.8k
                                  (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2544
10.8k
}
2545
2546
LZ4_FORCE_O2
2547
static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
2548
                                       const void* dictStart, size_t dictSize)
2549
0
{
2550
0
    return LZ4_decompress_unsafe_generic(
2551
0
                (const BYTE*)source, (BYTE*)dest, originalSize,
2552
0
                0, (const BYTE*)dictStart, dictSize);
2553
0
}
2554
2555
/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
2556
 * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
2557
 * These routines are used only once, in LZ4_decompress_*_continue().
2558
 */
2559
LZ4_FORCE_INLINE
2560
int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
2561
                                   size_t prefixSize, const void* dictStart, size_t dictSize)
2562
181k
{
2563
181k
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
2564
181k
                                  decode_full_block, usingExtDict,
2565
181k
                                  (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
2566
181k
}
2567
2568
/*===== streaming decompression functions =====*/
2569
2570
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
2571
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
2572
15.5k
{
2573
15.5k
    LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >= sizeof(LZ4_streamDecode_t_internal));
2574
15.5k
    return (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
2575
15.5k
}
2576
2577
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
2578
15.5k
{
2579
15.5k
    if (LZ4_stream == NULL) { return 0; }  /* support free on NULL */
2580
15.5k
    FREEMEM(LZ4_stream);
2581
15.5k
    return 0;
2582
15.5k
}
2583
#endif
2584
2585
/*! LZ4_setStreamDecode() :
2586
 *  Use this function to tell the decoder where to find the dictionary.
2587
 *  This function is not necessary if previous data is still available where it was decoded.
2588
 *  Loading a size of 0 is allowed (same effect as no dictionary).
2589
 * @return : 1 if OK, 0 if error
2590
 */
2591
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
2592
186k
{
2593
186k
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2594
186k
    lz4sd->prefixSize = (size_t)dictSize;
2595
186k
    if (dictSize) {
2596
55.4k
        assert(dictionary != NULL);
2597
55.4k
        lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
2598
130k
    } else {
2599
130k
        lz4sd->prefixEnd = (const BYTE*) dictionary;
2600
130k
    }
2601
186k
    lz4sd->externalDict = NULL;
2602
186k
    lz4sd->extDictSize  = 0;
2603
186k
    return 1;
2604
186k
}
2605
2606
/*! LZ4_decoderRingBufferSize() :
2607
 *  when setting a ring buffer for streaming decompression (optional scenario),
2608
 *  provides the minimum size of this ring buffer
2609
 *  to be compatible with any source respecting maxBlockSize condition.
2610
 *  Note : in a ring buffer scenario,
2611
 *  blocks are presumed decompressed next to each other.
2612
 *  When not enough space remains for next block (remainingSize < maxBlockSize),
2613
 *  decoding resumes from beginning of ring buffer.
2614
 * @return : minimum ring buffer size,
2615
 *           or 0 if there is an error (invalid maxBlockSize).
2616
 */
2617
int LZ4_decoderRingBufferSize(int maxBlockSize)
2618
0
{
2619
0
    if (maxBlockSize < 0) return 0;
2620
0
    if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
2621
0
    if (maxBlockSize < 16) maxBlockSize = 16;
2622
0
    return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
2623
0
}
2624
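A hedged sketch of the ring-buffer scenario described above. read_block (returning one framed compressed block) and consume are hypothetical helpers; everything else is the public API, and error checks are mostly elided.

#include <stdlib.h>

#define MAX_BLOCK (4 * 1024)

extern void consume(const char* data, int size);   /* hypothetical sink */

static void ring_decode_sketch(void)
{
    int const ringSize = LZ4_decoderRingBufferSize(MAX_BLOCK);
    char* const ring = (char*)malloc((size_t)ringSize);
    LZ4_streamDecode_t* const sd = LZ4_createStreamDecode();
    static char srcBuf[LZ4_COMPRESSBOUND(MAX_BLOCK)];
    int pos = 0, cSize;

    if (ring == NULL || sd == NULL) goto cleanup;
    while ((cSize = read_block(srcBuf, (int)sizeof(srcBuf))) > 0) {
        int dSize;
        if (ringSize - pos < MAX_BLOCK) pos = 0;   /* wrap, per the contract above */
        dSize = LZ4_decompress_safe_continue(sd, srcBuf, ring + pos, cSize, MAX_BLOCK);
        if (dSize <= 0) break;
        consume(ring + pos, dSize);
        pos += dSize;
    }
cleanup:
    LZ4_freeStreamDecode(sd);   /* free on NULL is supported */
    free(ring);
}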
2625
/*
2626
*_continue() :
2627
    These decoding functions allow decompression of multiple blocks in "streaming" mode.
2628
    Previously decoded blocks must still be available at the memory position where they were decoded.
2629
    If it's not possible, save the relevant part of decoded data into a safe buffer,
2630
    and indicate where it stands using LZ4_setStreamDecode()
2631
*/
2632
LZ4_FORCE_O2
2633
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
2634
655k
{
2635
655k
    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2636
655k
    int result;
2637
2638
655k
    if (lz4sd->prefixSize == 0) {
2639
        /* The first call, no dictionary yet. */
2640
76.6k
        assert(lz4sd->extDictSize == 0);
2641
76.6k
        result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
2642
76.6k
        if (result <= 0) return result;
2643
68.5k
        lz4sd->prefixSize = (size_t)result;
2644
68.5k
        lz4sd->prefixEnd = (BYTE*)dest + result;
2645
578k
    } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2646
        /* They're rolling the current segment. */
2647
523k
        if (lz4sd->prefixSize >= 64 KB - 1)
2648
92.8k
            result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
2649
430k
        else if (lz4sd->extDictSize == 0)
2650
248k
            result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
2651
248k
                                                         lz4sd->prefixSize);
2652
181k
        else
2653
181k
            result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
2654
181k
                                                    lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
2655
523k
        if (result <= 0) return result;
2656
443k
        lz4sd->prefixSize += (size_t)result;
2657
443k
        lz4sd->prefixEnd  += result;
2658
443k
    } else {
2659
        /* The buffer wraps around, or they're switching to another buffer. */
2660
54.8k
        lz4sd->extDictSize = lz4sd->prefixSize;
2661
54.8k
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2662
54.8k
        result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
2663
54.8k
                                                  lz4sd->externalDict, lz4sd->extDictSize);
2664
54.8k
        if (result <= 0) return result;
2665
52.5k
        lz4sd->prefixSize = (size_t)result;
2666
52.5k
        lz4sd->prefixEnd  = (BYTE*)dest + result;
2667
52.5k
    }
2668
2669
564k
    return result;
2670
655k
}
2671
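The three branches above map directly onto the double-buffer pattern; a sketch mirroring the compression loop shown earlier (same hypothetical I/O helpers, with read_block returning one framed compressed block at a time):

/* Sketch : decode counterpart of the double-buffer compression loop.
 * Alternating destinations exercise the "prefixEnd == dest" and
 * "switching buffers" branches above; the previous output buffer must stay
 * untouched, since it serves as the next block's dictionary. */
static void stream_decompress_sketch(void)
{
    static char decBuf[2][BLOCK_SIZE];
    static char srcBuf[LZ4_COMPRESSBOUND(BLOCK_SIZE)];
    LZ4_streamDecode_t* const sd = LZ4_createStreamDecode();
    int idx = 0, cSize;

    if (sd == NULL) return;
    while ((cSize = read_block(srcBuf, (int)sizeof(srcBuf))) > 0) {
        int const dSize = LZ4_decompress_safe_continue(sd, srcBuf, decBuf[idx],
                                                       cSize, BLOCK_SIZE);
        if (dSize <= 0) break;
        consume(decBuf[idx], dSize);
        idx ^= 1;
    }
    LZ4_freeStreamDecode(sd);
}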
2672
LZ4_FORCE_O2 int
2673
LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode,
2674
                        const char* source, char* dest, int originalSize)
2675
0
{
2676
0
    LZ4_streamDecode_t_internal* const lz4sd =
2677
0
        (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);
2678
0
    int result;
2679
2680
0
    DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize);
2681
0
    assert(originalSize >= 0);
2682
2683
0
    if (lz4sd->prefixSize == 0) {
2684
0
        DEBUGLOG(5, "first invocation : no prefix nor extDict");
2685
0
        assert(lz4sd->extDictSize == 0);
2686
0
        result = LZ4_decompress_fast(source, dest, originalSize);
2687
0
        if (result <= 0) return result;
2688
0
        lz4sd->prefixSize = (size_t)originalSize;
2689
0
        lz4sd->prefixEnd = (BYTE*)dest + originalSize;
2690
0
    } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2691
0
        DEBUGLOG(5, "continue using existing prefix");
2692
0
        result = LZ4_decompress_unsafe_generic(
2693
0
                        (const BYTE*)source, (BYTE*)dest, originalSize,
2694
0
                        lz4sd->prefixSize,
2695
0
                        lz4sd->externalDict, lz4sd->extDictSize);
2696
0
        if (result <= 0) return result;
2697
0
        lz4sd->prefixSize += (size_t)originalSize;
2698
0
        lz4sd->prefixEnd  += originalSize;
2699
0
    } else {
2700
0
        DEBUGLOG(5, "prefix becomes extDict");
2701
0
        lz4sd->extDictSize = lz4sd->prefixSize;
2702
0
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2703
0
        result = LZ4_decompress_fast_extDict(source, dest, originalSize,
2704
0
                                             lz4sd->externalDict, lz4sd->extDictSize);
2705
0
        if (result <= 0) return result;
2706
0
        lz4sd->prefixSize = (size_t)originalSize;
2707
0
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
2708
0
    }
2709
2710
0
    return result;
2711
0
}
2712
2713
2714
/*
2715
Advanced decoding functions :
2716
*_usingDict() :
2717
    These decoding functions work the same as the "_continue" ones,
2718
    except that the dictionary must be explicitly provided as a parameter.
2719
*/
2720
2721
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
2722
49.5k
{
2723
49.5k
    if (dictSize==0)
2724
36.0k
        return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
2725
13.5k
    if (dictStart+dictSize == dest) {
2726
4.19k
        if (dictSize >= 64 KB - 1) {
2727
2.44k
            return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
2728
2.44k
        }
2729
1.75k
        assert(dictSize >= 0);
2730
1.75k
        return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
2731
1.75k
    }
2732
9.32k
    assert(dictSize >= 0);
2733
9.32k
    return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
2734
9.32k
}
2735
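A one-shot sketch for the explicit-dictionary path (dictBuf / dictSize are illustrative, e.g. loaded from a file):

static int decode_with_dict_sketch(const char* src, int srcSize,
                                   char* dst, int dstCapacity,
                                   const char* dictBuf, int dictSize)
{
    /* When dictBuf + dictSize == dst, the prefix fast paths above are taken;
     * any other placement routes to the external-dictionary decoder. */
    return LZ4_decompress_safe_usingDict(src, dst, srcSize, dstCapacity,
                                         dictBuf, dictSize);
}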
2736
int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)
2737
13.5k
{
2738
13.5k
    if (dictSize==0)
2739
2.70k
        return LZ4_decompress_safe_partial(source, dest, compressedSize, targetOutputSize, dstCapacity);
2740
10.8k
    if (dictStart+dictSize == dest) {
2741
0
        if (dictSize >= 64 KB - 1) {
2742
0
            return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);
2743
0
        }
2744
0
        assert(dictSize >= 0);
2745
0
        return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize);
2746
0
    }
2747
10.8k
    assert(dictSize >= 0);
2748
10.8k
    return LZ4_decompress_safe_partial_forceExtDict(source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart, (size_t)dictSize);
2749
10.8k
}
2750
2751
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
2752
0
{
2753
0
    if (dictSize==0 || dictStart+dictSize == dest)
2754
0
        return LZ4_decompress_unsafe_generic(
2755
0
                        (const BYTE*)source, (BYTE*)dest, originalSize,
2756
0
                        (size_t)dictSize, NULL, 0);
2757
0
    assert(dictSize >= 0);
2758
0
    return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
2759
0
}
2760
2761
2762
/*=*************************************************
2763
*  Obsolete Functions
2764
***************************************************/
2765
/* obsolete compression functions */
2766
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
2767
0
{
2768
0
    return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
2769
0
}
2770
int LZ4_compress(const char* src, char* dest, int srcSize)
2771
0
{
2772
0
    return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
2773
0
}
2774
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
2775
0
{
2776
0
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
2777
0
}
2778
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
2779
0
{
2780
0
    return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
2781
0
}
2782
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
2783
0
{
2784
0
    return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
2785
0
}
2786
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
2787
0
{
2788
0
    return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
2789
0
}
2790
2791
/*
2792
These decompression functions are deprecated and should no longer be used.
2793
They are only provided here for compatibility with older user programs.
2794
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
2795
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
2796
*/
2797
int LZ4_uncompress (const char* source, char* dest, int outputSize)
2798
0
{
2799
0
    return LZ4_decompress_fast(source, dest, outputSize);
2800
0
}
2801
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
2802
0
{
2803
0
    return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
2804
0
}
2805
2806
/* Obsolete Streaming functions */
2807
2808
0
int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }
2809
2810
int LZ4_resetStreamState(void* state, char* inputBuffer)
2811
0
{
2812
0
    (void)inputBuffer;
2813
0
    LZ4_resetStream((LZ4_stream_t*)state);
2814
0
    return 0;
2815
0
}
2816
2817
#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
2818
void* LZ4_create (char* inputBuffer)
2819
0
{
2820
0
    (void)inputBuffer;
2821
0
    return LZ4_createStream();
2822
0
}
2823
#endif
2824
2825
char* LZ4_slideInputBuffer (void* state)
2826
0
{
2827
    /* avoid const char * -> char * conversion warning */
2828
0
    return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
2829
0
}
2830
2831
#endif   /* LZ4_COMMONDEFS_ONLY */