/work/_deps/lz4-src/lib/lz4frame.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * LZ4 auto-framing library |
3 | | * Copyright (C) 2011-2016, Yann Collet. |
4 | | * |
5 | | * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
6 | | * |
7 | | * Redistribution and use in source and binary forms, with or without |
8 | | * modification, are permitted provided that the following conditions are |
9 | | * met: |
10 | | * |
11 | | * - Redistributions of source code must retain the above copyright |
12 | | * notice, this list of conditions and the following disclaimer. |
13 | | * - Redistributions in binary form must reproduce the above |
14 | | * copyright notice, this list of conditions and the following disclaimer |
15 | | * in the documentation and/or other materials provided with the |
16 | | * distribution. |
17 | | * |
18 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
19 | | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
20 | | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
21 | | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
22 | | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
23 | | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
24 | | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
25 | | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
26 | | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
27 | | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
28 | | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
29 | | * |
30 | | * You can contact the author at : |
31 | | * - LZ4 homepage : http://www.lz4.org |
32 | | * - LZ4 source repository : https://github.com/lz4/lz4 |
33 | | */ |
34 | | |
35 | | /* LZ4F is a stand-alone API to create LZ4-compressed Frames |
36 | | * in full conformance with specification v1.6.1 . |
37 | | * This library rely upon memory management capabilities (malloc, free) |
38 | | * provided either by <stdlib.h>, |
39 | | * or redirected towards another library of user's choice |
40 | | * (see Memory Routines below). |
41 | | */ |
42 | | |
43 | | |
44 | | /*-************************************ |
45 | | * Compiler Options |
46 | | **************************************/ |
47 | | #include <limits.h> |
48 | | #ifdef _MSC_VER /* Visual Studio */ |
49 | | # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ |
50 | | #endif |
51 | | |
52 | | |
53 | | /*-************************************ |
54 | | * Tuning parameters |
55 | | **************************************/ |
56 | | /* |
57 | | * LZ4F_HEAPMODE : |
58 | | * Control how LZ4F_compressFrame allocates the Compression State, |
59 | | * either on stack (0:default, fastest), or in memory heap (1:requires malloc()). |
60 | | */ |
61 | | #ifndef LZ4F_HEAPMODE |
62 | | # define LZ4F_HEAPMODE 0 |
63 | | #endif |
64 | | |
65 | | |
66 | | /*-************************************ |
67 | | * Library declarations |
68 | | **************************************/ |
69 | | #define LZ4F_STATIC_LINKING_ONLY |
70 | | #include "lz4frame.h" |
71 | | #define LZ4_STATIC_LINKING_ONLY |
72 | | #include "lz4.h" |
73 | | #define LZ4_HC_STATIC_LINKING_ONLY |
74 | | #include "lz4hc.h" |
75 | | #define XXH_STATIC_LINKING_ONLY |
76 | | #include "xxhash.h" |
77 | | |
78 | | |
79 | | /*-************************************ |
80 | | * Memory routines |
81 | | **************************************/ |
82 | | /* |
83 | | * User may redirect invocations of |
84 | | * malloc(), calloc() and free() |
85 | | * towards another library or solution of their choice |
86 | | * by modifying below section. |
87 | | **/ |
88 | | |
89 | | #include <string.h> /* memset, memcpy, memmove */ |
90 | | #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */ |
91 | 19.8k | # define MEM_INIT(p,v,s) memset((p),(v),(s)) |
92 | | #endif |
93 | | |
94 | | #ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */ |
95 | | # include <stdlib.h> /* malloc, calloc, free */ |
96 | 18.0k | # define ALLOC(s) malloc(s) |
97 | 9.66k | # define ALLOC_AND_ZERO(s) calloc(1,(s)) |
98 | 27.7k | # define FREEMEM(p) free(p) |
99 | | #endif |
100 | | |
101 | | static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem) |
102 | 9.67k | { |
103 | | /* custom calloc defined : use it */ |
104 | 9.67k | if (cmem.customCalloc != NULL) { |
105 | 0 | return cmem.customCalloc(cmem.opaqueState, s); |
106 | 0 | } |
107 | | /* nothing defined : use default <stdlib.h>'s calloc() */ |
108 | 9.67k | if (cmem.customAlloc == NULL) { |
109 | 9.66k | return ALLOC_AND_ZERO(s); |
110 | 9.66k | } |
111 | | /* only custom alloc defined : use it, and combine it with memset() */ |
112 | 5 | { void* const p = cmem.customAlloc(cmem.opaqueState, s); |
113 | 5 | if (p != NULL) MEM_INIT(p, 0, s); |
114 | 5 | return p; |
115 | 9.67k | } } |
116 | | |
117 | | static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem) |
118 | 18.0k | { |
119 | | /* custom malloc defined : use it */ |
120 | 18.0k | if (cmem.customAlloc != NULL) { |
121 | 0 | return cmem.customAlloc(cmem.opaqueState, s); |
122 | 0 | } |
123 | | /* nothing defined : use default <stdlib.h>'s malloc() */ |
124 | 18.0k | return ALLOC(s); |
125 | 18.0k | } |
126 | | |
127 | | static void LZ4F_free(void* p, LZ4F_CustomMem cmem) |
128 | 47.0k | { |
129 | 47.0k | if (p == NULL) return; |
130 | 27.7k | if (cmem.customFree != NULL) { |
131 | | /* custom allocation defined : use it */ |
132 | 0 | cmem.customFree(cmem.opaqueState, p); |
133 | 0 | return; |
134 | 0 | } |
135 | | /* nothing defined : use default <stdlib.h>'s free() */ |
136 | 27.7k | FREEMEM(p); |
137 | 27.7k | } |
138 | | |
139 | | |
140 | | /*-************************************ |
141 | | * Debug |
142 | | **************************************/ |
143 | | #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1) |
144 | | # include <assert.h> |
145 | | #else |
146 | | # ifndef assert |
147 | 59.2k | # define assert(condition) ((void)0) |
148 | | # endif |
149 | | #endif |
150 | | |
151 | 10.3k | #define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ |
152 | | |
153 | | #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG) |
154 | | # include <stdio.h> |
155 | | static int g_debuglog_enable = 1; |
156 | | # define DEBUGLOG(l, ...) { \ |
157 | | if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ |
158 | | fprintf(stderr, __FILE__ " (%i): ", __LINE__ ); \ |
159 | | fprintf(stderr, __VA_ARGS__); \ |
160 | | fprintf(stderr, " \n"); \ |
161 | | } } |
162 | | #else |
163 | 86.7k | # define DEBUGLOG(l, ...) {} /* disabled */ |
164 | | #endif |
165 | | |
166 | | |
167 | | /*-************************************ |
168 | | * Basic Types |
169 | | **************************************/ |
170 | | #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) |
171 | | # include <stdint.h> |
172 | | typedef uint8_t BYTE; |
173 | | typedef uint16_t U16; |
174 | | typedef uint32_t U32; |
175 | | typedef int32_t S32; |
176 | | typedef uint64_t U64; |
177 | | #else |
178 | | typedef unsigned char BYTE; |
179 | | typedef unsigned short U16; |
180 | | typedef unsigned int U32; |
181 | | typedef signed int S32; |
182 | | typedef unsigned long long U64; |
183 | | #endif |
184 | | |
185 | | |
186 | | /* unoptimized version; solves endianness & alignment issues */ |
187 | | static U32 LZ4F_readLE32 (const void* src) |
188 | 38.9k | { |
189 | 38.9k | const BYTE* const srcPtr = (const BYTE*)src; |
190 | 38.9k | U32 value32 = srcPtr[0]; |
191 | 38.9k | value32 |= ((U32)srcPtr[1])<< 8; |
192 | 38.9k | value32 |= ((U32)srcPtr[2])<<16; |
193 | 38.9k | value32 |= ((U32)srcPtr[3])<<24; |
194 | 38.9k | return value32; |
195 | 38.9k | } |
196 | | |
197 | | static void LZ4F_writeLE32 (void* dst, U32 value32) |
198 | 0 | { |
199 | 0 | BYTE* const dstPtr = (BYTE*)dst; |
200 | 0 | dstPtr[0] = (BYTE)value32; |
201 | 0 | dstPtr[1] = (BYTE)(value32 >> 8); |
202 | 0 | dstPtr[2] = (BYTE)(value32 >> 16); |
203 | 0 | dstPtr[3] = (BYTE)(value32 >> 24); |
204 | 0 | } |
205 | | |
206 | | static U64 LZ4F_readLE64 (const void* src) |
207 | 179 | { |
208 | 179 | const BYTE* const srcPtr = (const BYTE*)src; |
209 | 179 | U64 value64 = srcPtr[0]; |
210 | 179 | value64 |= ((U64)srcPtr[1]<<8); |
211 | 179 | value64 |= ((U64)srcPtr[2]<<16); |
212 | 179 | value64 |= ((U64)srcPtr[3]<<24); |
213 | 179 | value64 |= ((U64)srcPtr[4]<<32); |
214 | 179 | value64 |= ((U64)srcPtr[5]<<40); |
215 | 179 | value64 |= ((U64)srcPtr[6]<<48); |
216 | 179 | value64 |= ((U64)srcPtr[7]<<56); |
217 | 179 | return value64; |
218 | 179 | } |
219 | | |
220 | | static void LZ4F_writeLE64 (void* dst, U64 value64) |
221 | 0 | { |
222 | 0 | BYTE* const dstPtr = (BYTE*)dst; |
223 | 0 | dstPtr[0] = (BYTE)value64; |
224 | 0 | dstPtr[1] = (BYTE)(value64 >> 8); |
225 | 0 | dstPtr[2] = (BYTE)(value64 >> 16); |
226 | 0 | dstPtr[3] = (BYTE)(value64 >> 24); |
227 | 0 | dstPtr[4] = (BYTE)(value64 >> 32); |
228 | 0 | dstPtr[5] = (BYTE)(value64 >> 40); |
229 | 0 | dstPtr[6] = (BYTE)(value64 >> 48); |
230 | 0 | dstPtr[7] = (BYTE)(value64 >> 56); |
231 | 0 | } |
232 | | |
233 | | |
234 | | /*-************************************ |
235 | | * Constants |
236 | | **************************************/ |
237 | | #ifndef LZ4_SRC_INCLUDED /* avoid double definition */ |
238 | 24.0k | # define KB *(1<<10) |
239 | 17.9k | # define MB *(1<<20) |
240 | 4.99k | # define GB *(1<<30) |
241 | | #endif |
242 | | |
243 | 66.8k | #define _1BIT 0x01 |
244 | 9.61k | #define _2BITS 0x03 |
245 | 9.15k | #define _3BITS 0x07 |
246 | 9.05k | #define _4BITS 0x0F |
247 | | #define _8BITS 0xFF |
248 | | |
249 | 14.7k | #define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U |
250 | 0 | #define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB |
251 | | |
252 | | static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */ |
253 | | static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */ |
254 | | static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */ |
255 | | static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */ |
256 | | |
257 | | |
258 | | /*-************************************ |
259 | | * Structures and local types |
260 | | **************************************/ |
261 | | |
262 | | typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_BlockCompressMode_e; |
263 | | typedef enum { ctxNone, ctxFast, ctxHC } LZ4F_CtxType_e; |
264 | | |
265 | | typedef struct LZ4F_cctx_s |
266 | | { |
267 | | LZ4F_CustomMem cmem; |
268 | | LZ4F_preferences_t prefs; |
269 | | U32 version; |
270 | | U32 cStage; /* 0 : compression uninitialized ; 1 : initialized, can compress */ |
271 | | const LZ4F_CDict* cdict; |
272 | | size_t maxBlockSize; |
273 | | size_t maxBufferSize; |
274 | | BYTE* tmpBuff; /* internal buffer, for streaming */ |
275 | | BYTE* tmpIn; /* starting position of data compress within internal buffer (>= tmpBuff) */ |
276 | | size_t tmpInSize; /* amount of data to compress after tmpIn */ |
277 | | U64 totalInSize; |
278 | | XXH32_state_t xxh; |
279 | | void* lz4CtxPtr; |
280 | | U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ |
281 | | U16 lz4CtxType; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ |
282 | | LZ4F_BlockCompressMode_e blockCompressMode; |
283 | | } LZ4F_cctx_t; |
284 | | |
285 | | |
286 | | /*-************************************ |
287 | | * Error management |
288 | | **************************************/ |
289 | | #define LZ4F_GENERATE_STRING(STRING) #STRING, |
290 | | static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) }; |
291 | | |
292 | | |
293 | | unsigned LZ4F_isError(LZ4F_errorCode_t code) |
294 | 35.0k | { |
295 | 35.0k | return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode)); |
296 | 35.0k | } |
297 | | |
298 | | const char* LZ4F_getErrorName(LZ4F_errorCode_t code) |
299 | 5.68k | { |
300 | 5.68k | static const char* codeError = "Unspecified error code"; |
301 | 5.68k | if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)]; |
302 | 18.4E | return codeError; |
303 | 5.68k | } |
304 | | |
305 | | LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult) |
306 | 0 | { |
307 | 0 | if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError; |
308 | 0 | return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult); |
309 | 0 | } |
310 | | |
/* LZ4F_returnErrorCode() :
 * Encode an LZ4F_errorCodes enum value as a size_t error code
 * (negated, so it lands in the topmost size_t range tested by LZ4F_isError()). */
static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
{
    /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
    LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
    return (LZ4F_errorCode_t)-(ptrdiff_t)code;
}
317 | | |
318 | 5.75k | #define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e) |
319 | | |
320 | 47.3k | #define RETURN_ERROR_IF(c,e) do { \ |
321 | 47.3k | if (c) { \ |
322 | 4.80k | DEBUGLOG(3, "Error: " #c); \ |
323 | 4.80k | RETURN_ERROR(e); \ |
324 | 4.80k | } \ |
325 | 47.3k | } while (0) |
326 | | |
327 | 9.63k | #define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0) |
328 | | |
329 | 0 | unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; } |
330 | | |
331 | 0 | int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; } |
332 | | |
333 | | size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID) |
334 | 8.97k | { |
335 | 8.97k | static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB }; |
336 | | |
337 | 8.97k | if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT; |
338 | 8.97k | if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB) |
339 | 0 | RETURN_ERROR(maxBlockSize_invalid); |
340 | 8.97k | { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB; |
341 | 8.97k | return blockSizes[blockSizeIdx]; |
342 | 8.97k | } } |
343 | | |
344 | | /*-************************************ |
345 | | * Private functions |
346 | | **************************************/ |
347 | 19.9k | #define MIN(a,b) ( (a) < (b) ? (a) : (b) ) |
348 | | |
349 | | static BYTE LZ4F_headerChecksum (const void* header, size_t length) |
350 | 0 | { |
351 | 0 | U32 const xxh = XXH32(header, length, 0); |
352 | 0 | return (BYTE)(xxh >> 8); |
353 | 0 | } |
354 | | |
355 | | |
356 | | /*-************************************ |
357 | | * Simple-pass compression functions |
358 | | **************************************/ |
359 | | static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID, |
360 | | const size_t srcSize) |
361 | 0 | { |
362 | 0 | LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB; |
363 | 0 | size_t maxBlockSize = 64 KB; |
364 | 0 | while (requestedBSID > proposedBSID) { |
365 | 0 | if (srcSize <= maxBlockSize) |
366 | 0 | return proposedBSID; |
367 | 0 | proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1); |
368 | 0 | maxBlockSize <<= 2; |
369 | 0 | } |
370 | 0 | return requestedBSID; |
371 | 0 | } |
372 | | |
373 | | /*! LZ4F_compressBound_internal() : |
374 | | * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations. |
375 | | * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario. |
376 | | * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers. |
377 | | * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. |
378 | | */ |
static size_t LZ4F_compressBound_internal(size_t srcSize,
                                    const LZ4F_preferences_t* preferencesPtr,
                                          size_t alreadyBuffered)
{
    LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
    /* when no preferences are provided, assume the most expensive frame layout */
    prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled;   /* worst case */
    prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled;   /* worst case */
    {   const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
        /* srcSize==0 signals a flush/end operation : pending data will be emitted */
        U32 const flush = prefsPtr->autoFlush | (srcSize==0);
        LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
        size_t const blockSize = LZ4F_getBlockSize(blockID);
        size_t const maxBuffered = blockSize - 1;
        size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
        /* worst case also accounts for data already held in the internal buffer */
        size_t const maxSrcSize = srcSize + bufferedSize;
        unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
        /* blockSize is a power of 2, so the mask extracts the remainder */
        size_t const partialBlockSize = maxSrcSize & (blockSize-1);
        size_t const lastBlockSize = flush ? partialBlockSize : 0;
        unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);

        /* per-block checksum footer, only when block checksums are enabled */
        size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
        /* end mark (a block header of 0) + optional content checksum */
        size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);

        /* each block carries a header (and optional checksum); data itself is
         * bounded by blockSize per full block, plus the trailing partial block */
        return ((BHSize + blockCRCSize) * nbBlocks) +
               (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
    }
}
405 | | |
406 | | size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) |
407 | 0 | { |
408 | 0 | LZ4F_preferences_t prefs; |
409 | 0 | size_t const headerSize = maxFHSize; /* max header size, including optional fields */ |
410 | |
|
411 | 0 | if (preferencesPtr!=NULL) prefs = *preferencesPtr; |
412 | 0 | else MEM_INIT(&prefs, 0, sizeof(prefs)); |
413 | 0 | prefs.autoFlush = 1; |
414 | |
|
415 | 0 | return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);; |
416 | 0 | } |
417 | | |
418 | | |
419 | | /*! LZ4F_compressFrame_usingCDict() : |
420 | | * Compress srcBuffer using a dictionary, in a single step. |
421 | | * cdict can be NULL, in which case, no dictionary is used. |
422 | | * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). |
423 | | * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, |
424 | | * however, it's the only way to provide a dictID, so it's not recommended. |
425 | | * @return : number of bytes written into dstBuffer, |
426 | | * or an error code if it fails (can be tested using LZ4F_isError()) |
427 | | */ |
size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
                                     void* dstBuffer, size_t dstCapacity,
                               const void* srcBuffer, size_t srcSize,
                               const LZ4F_CDict* cdict,
                               const LZ4F_preferences_t* preferencesPtr)
{
    LZ4F_preferences_t prefs;
    LZ4F_compressOptions_t options;
    BYTE* const dstStart = (BYTE*) dstBuffer;
    BYTE* dstPtr = dstStart;
    BYTE* const dstEnd = dstStart + dstCapacity;

    DEBUGLOG(4, "LZ4F_compressFrame_usingCDict (srcSize=%u)", (unsigned)srcSize);
    /* take a local, mutable copy of preferences (defaults when NULL) */
    if (preferencesPtr!=NULL)
        prefs = *preferencesPtr;
    else
        MEM_INIT(&prefs, 0, sizeof(prefs));
    if (prefs.frameInfo.contentSize != 0)
        prefs.frameInfo.contentSize = (U64)srcSize;   /* auto-correct content size if selected (!=0) */

    /* shrink block size to the smallest one able to hold srcSize */
    prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
    prefs.autoFlush = 1;
    if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
        prefs.frameInfo.blockMode = LZ4F_blockIndependent;   /* only one block => no need for inter-block link */

    MEM_INIT(&options, 0, sizeof(options));
    options.stableSrc = 1;   /* src buffer won't move during compression */

    /* single-step API requires the full worst-case capacity up front */
    RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall);

    /* 1. frame header */
    { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs);  /* write header */
      FORWARD_IF_ERROR(headerSize);
      dstPtr += headerSize;   /* header size */ }

    /* 2. compressed blocks */
    assert(dstEnd >= dstPtr);
    { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
      FORWARD_IF_ERROR(cSize);
      dstPtr += cSize; }

    /* 3. frame epilogue : flush last block, end mark, optional content checksum */
    assert(dstEnd >= dstPtr);
    { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options);   /* flush last block, and generate suffix */
      FORWARD_IF_ERROR(tailSize);
      dstPtr += tailSize; }

    assert(dstEnd >= dstStart);
    return (size_t)(dstPtr - dstStart);
}
475 | | |
476 | | |
477 | | /*! LZ4F_compressFrame() : |
478 | | * Compress an entire srcBuffer into a valid LZ4 frame, in a single step. |
479 | | * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). |
480 | | * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default. |
481 | | * @return : number of bytes written into dstBuffer. |
482 | | * or an error code if it fails (can be tested using LZ4F_isError()) |
483 | | */ |
size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
                    const void* srcBuffer, size_t srcSize,
                    const LZ4F_preferences_t* preferencesPtr)
{
    size_t result;
#if (LZ4F_HEAPMODE)
    /* heap mode : allocate the compression context dynamically */
    LZ4F_cctx_t* cctxPtr;
    result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION);
    FORWARD_IF_ERROR(result);
#else
    /* stack mode : the cctx and (fast-level) lz4 state live on the stack */
    LZ4F_cctx_t cctx;
    LZ4_stream_t lz4ctx;
    LZ4F_cctx_t* const cctxPtr = &cctx;

    MEM_INIT(&cctx, 0, sizeof(cctx));
    cctx.version = LZ4F_VERSION;
    cctx.maxBufferSize = 5 MB;   /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */
    if ( preferencesPtr == NULL
      || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) {
        /* fast path : reuse the stack-allocated LZ4_stream_t.
         * HC levels fall through with no ctx; it will be heap-allocated later. */
        LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
        cctxPtr->lz4CtxPtr = &lz4ctx;
        cctxPtr->lz4CtxAlloc = 1;
        cctxPtr->lz4CtxType = ctxFast;
    }
#endif
    DEBUGLOG(4, "LZ4F_compressFrame");

    result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity,
                                           srcBuffer, srcSize,
                                           NULL, preferencesPtr);

#if (LZ4F_HEAPMODE)
    LZ4F_freeCompressionContext(cctxPtr);
#else
    if ( preferencesPtr != NULL
      && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) {
        /* only the HC context was heap-allocated (inside compressBegin);
         * the fast context above is on the stack and must not be freed */
        LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
    }
#endif
    return result;
}
525 | | |
526 | | |
527 | | /*-*************************************************** |
528 | | * Dictionary compression |
529 | | *****************************************************/ |
530 | | |
531 | | struct LZ4F_CDict_s { |
532 | | LZ4F_CustomMem cmem; |
533 | | void* dictContent; |
534 | | LZ4_stream_t* fastCtx; |
535 | | LZ4_streamHC_t* HCCtx; |
536 | | }; /* typedef'd to LZ4F_CDict within lz4frame_static.h */ |
537 | | |
/* LZ4F_createCDict_advanced() :
 * Same as LZ4F_createCDict(), but with a user-provided allocator @cmem.
 * Only the last 64 KB of @dictBuffer are retained (LZ4 window size).
 * @return : digested dictionary, or NULL on allocation failure. */
LZ4F_CDict*
LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize)
{
    const char* dictStart = (const char*)dictBuffer;
    LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem);
    DEBUGLOG(4, "LZ4F_createCDict_advanced");
    if (!cdict) return NULL;
    cdict->cmem = cmem;
    if (dictSize > 64 KB) {
        /* keep only the tail : LZ4 can reference at most 64 KB of history */
        dictStart += dictSize - 64 KB;
        dictSize = 64 KB;
    }
    /* NOTE(review): dictSize==0 passes 0 to LZ4F_malloc; malloc(0) may return
     * NULL, which is then treated as an allocation failure — confirm intended */
    cdict->dictContent = LZ4F_malloc(dictSize, cmem);
    /* note: using @cmem to allocate => can't use default create */
    cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
    cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
    if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
        LZ4F_freeCDict(cdict);   /* releases whichever allocations succeeded */
        return NULL;
    }
    memcpy(cdict->dictContent, dictStart, dictSize);
    LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
    LZ4_loadDictSlow(cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
    LZ4_initStreamHC(cdict->HCCtx, sizeof(LZ4_streamHC_t));
    /* note: we don't know at this point which compression level is going to be used
     * as a consequence, HCCtx is created for the more common HC mode */
    LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
    LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
    return cdict;
}
568 | | |
569 | | /*! LZ4F_createCDict() : |
570 | | * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. |
571 | | * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. |
572 | | * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. |
573 | | * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict |
574 | | * @return : digested dictionary for compression, or NULL if failed */ |
LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
{
    DEBUGLOG(4, "LZ4F_createCDict");
    /* delegate to the advanced variant, using the default allocator */
    return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize);
}
580 | | |
581 | | void LZ4F_freeCDict(LZ4F_CDict* cdict) |
582 | 0 | { |
583 | 0 | if (cdict==NULL) return; /* support free on NULL */ |
584 | 0 | LZ4F_free(cdict->dictContent, cdict->cmem); |
585 | 0 | LZ4F_free(cdict->fastCtx, cdict->cmem); |
586 | 0 | LZ4F_free(cdict->HCCtx, cdict->cmem); |
587 | 0 | LZ4F_free(cdict, cdict->cmem); |
588 | 0 | } |
589 | | |
590 | | |
591 | | /*-********************************* |
592 | | * Advanced compression functions |
593 | | ***********************************/ |
594 | | |
595 | | LZ4F_cctx* |
596 | | LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version) |
597 | 0 | { |
598 | 0 | LZ4F_cctx* const cctxPtr = |
599 | 0 | (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem); |
600 | 0 | if (cctxPtr==NULL) return NULL; |
601 | | |
602 | 0 | cctxPtr->cmem = customMem; |
603 | 0 | cctxPtr->version = version; |
604 | 0 | cctxPtr->cStage = 0; /* Uninitialized. Next stage : init cctx */ |
605 | |
|
606 | 0 | return cctxPtr; |
607 | 0 | } |
608 | | |
609 | | /*! LZ4F_createCompressionContext() : |
610 | | * The first thing to do is to create a compressionContext object, which will be used in all compression operations. |
611 | | * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure. |
612 | | * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries. |
613 | | * The function will provide a pointer to an allocated LZ4F_compressionContext_t object. |
614 | | * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation. |
615 | | * Object can release its memory using LZ4F_freeCompressionContext(); |
616 | | **/ |
LZ4F_errorCode_t
LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
{
    assert(LZ4F_compressionContextPtr != NULL);   /* considered a violation of narrow contract */
    /* in case it nonetheless happen in production */
    RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null);

    /* allocate with the default allocator; advanced variant does the real work */
    *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version);
    RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed);
    return LZ4F_OK_NoError;
}
628 | | |
629 | | LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr) |
630 | 0 | { |
631 | 0 | if (cctxPtr != NULL) { /* support free on NULL */ |
632 | 0 | LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */ |
633 | 0 | LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem); |
634 | 0 | LZ4F_free(cctxPtr, cctxPtr->cmem); |
635 | 0 | } |
636 | 0 | return LZ4F_OK_NoError; |
637 | 0 | } |
638 | | |
639 | | |
640 | | /** |
641 | | * This function prepares the internal LZ4(HC) stream for a new compression, |
642 | | * resetting the context and attaching the dictionary, if there is one. |
643 | | * |
644 | | * It needs to be called at the beginning of each independent compression |
645 | | * stream (i.e., at the beginning of a frame in blockLinked mode, or at the |
646 | | * beginning of each block in blockIndependent mode). |
647 | | */ |
/* LZ4F_initStream() :
 * Prepare the internal LZ4 (fast) or LZ4HC stream for a new compression,
 * resetting the context and attaching @cdict when provided.
 * Must be called at the start of each independent compression stream
 * (per frame in blockLinked mode, per block in blockIndependent mode).
 * @level selects fast vs HC : levels below LZ4HC_CLEVEL_MIN use the fast path. */
static void LZ4F_initStream(void* ctx,
                            const LZ4F_CDict* cdict,
                            int level,
                            LZ4F_blockMode_t blockMode) {
    if (level < LZ4HC_CLEVEL_MIN) {
        if (cdict || blockMode == LZ4F_blockLinked) {
            /* In these cases, we will call LZ4_compress_fast_continue(),
             * which needs an already reset context. Otherwise, we'll call a
             * one-shot API. The non-continued APIs internally perform their own
             * resets at the beginning of their calls, where they know what
             * tableType they need the context to be in. So in that case this
             * would be misguided / wasted work. */
            LZ4_resetStream_fast((LZ4_stream_t*)ctx);
            if (cdict)
                LZ4_attach_dictionary((LZ4_stream_t*)ctx, cdict->fastCtx);
        }
        /* In these cases, we'll call a one-shot API.
         * The non-continued APIs internally perform their own resets
         * at the beginning of their calls, where they know
         * which tableType they need the context to be in.
         * Therefore, a reset here would be wasted work. */
    } else {
        /* HC path : always reset, and attach the HC dictionary context if any */
        LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
        if (cdict)
            LZ4_attach_HC_dictionary((LZ4_streamHC_t*)ctx, cdict->HCCtx);
    }
}
675 | | |
/* ctxTypeID_to_size() :
 * Map a context type ID (1 = lz4 fast, 2 = lz4hc) to the byte size
 * of the corresponding state; unknown IDs map to 0. */
static int ctxTypeID_to_size(int ctxTypeID) {
    if (ctxTypeID == 1) return LZ4_sizeofState();
    if (ctxTypeID == 2) return LZ4_sizeofStateHC();
    return 0;
}
686 | | |
/* LZ4F_compressBegin_internal()
 * Write the frame header into @dstBuffer and prepare @cctx for block compression.
 * Note: only accepts @cdict _or_ @dictBuffer as non NULL (never both).
 * @dstCapacity must be >= maxFHSize.
 * @return : number of header bytes written into @dstBuffer,
 *           or an error code (testable with LZ4F_isError())
 */
size_t LZ4F_compressBegin_internal(LZ4F_cctx* cctx,
                          void* dstBuffer, size_t dstCapacity,
                          const void* dictBuffer, size_t dictSize,
                          const LZ4F_CDict* cdict,
                          const LZ4F_preferences_t* preferencesPtr)
{
    LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;

    RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
    if (preferencesPtr == NULL) preferencesPtr = &prefNull;
    cctx->prefs = *preferencesPtr;

    /* cctx Management :
     * make sure the allocated lz4 state is large enough for the requested
     * compression mode (fast vs HC), (re)allocating or re-initializing it as needed */
    {   U16 const ctxTypeID = (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
        int requiredSize = ctxTypeID_to_size(ctxTypeID);
        int allocatedSize = ctxTypeID_to_size(cctx->lz4CtxAlloc);
        if (allocatedSize < requiredSize) {
            /* not enough space allocated */
            LZ4F_free(cctx->lz4CtxPtr, cctx->cmem);
            if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
                /* must take ownership of memory allocation,
                 * in order to respect custom allocator contract */
                cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctx->cmem);
                if (cctx->lz4CtxPtr)
                    LZ4_initStream(cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
            } else {
                cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctx->cmem);
                if (cctx->lz4CtxPtr)
                    LZ4_initStreamHC(cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
            }
            RETURN_ERROR_IF(cctx->lz4CtxPtr == NULL, allocation_failed);
            cctx->lz4CtxAlloc = ctxTypeID;
            cctx->lz4CtxType = ctxTypeID;
        } else if (cctx->lz4CtxType != ctxTypeID) {
            /* otherwise, a sufficient buffer is already allocated,
             * but we need to reset it to the correct context type */
            if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
                LZ4_initStream((LZ4_stream_t*)cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
            } else {
                LZ4_initStreamHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
                LZ4_setCompressionLevel((LZ4_streamHC_t*)cctx->lz4CtxPtr, cctx->prefs.compressionLevel);
            }
            cctx->lz4CtxType = ctxTypeID;
    }   }

    /* Buffer Management */
    if (cctx->prefs.frameInfo.blockSizeID == 0)
        cctx->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
    cctx->maxBlockSize = LZ4F_getBlockSize(cctx->prefs.frameInfo.blockSizeID);

    /* autoFlush only needs past data up to window size (64 KB, linked mode only);
     * otherwise, room for one full block plus the 128 KB linked-mode history is needed */
    {   size_t const requiredBuffSize = preferencesPtr->autoFlush ?
                ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) :  /* only needs past data up to window size */
                cctx->maxBlockSize + ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);

        if (cctx->maxBufferSize < requiredBuffSize) {
            cctx->maxBufferSize = 0;
            LZ4F_free(cctx->tmpBuff, cctx->cmem);
            cctx->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctx->cmem);
            RETURN_ERROR_IF(cctx->tmpBuff == NULL, allocation_failed);
            cctx->maxBufferSize = requiredBuffSize;
    }   }
    cctx->tmpIn = cctx->tmpBuff;
    cctx->tmpInSize = 0;
    (void)XXH32_reset(&(cctx->xxh), 0);

    /* context init */
    cctx->cdict = cdict;
    if (cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
        /* frame init only for blockLinked : blockIndependent will be init at each block */
        LZ4F_initStream(cctx->lz4CtxPtr, cdict, cctx->prefs.compressionLevel, LZ4F_blockLinked);
    }
    if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
        LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctx->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
    }
    if (dictBuffer) {
        assert(cdict == NULL);  /* only one dictionary source is accepted */
        RETURN_ERROR_IF(dictSize > INT_MAX, parameter_invalid);
        if (cctx->lz4CtxType == ctxFast) {
            /* lz4 fast*/
            LZ4_loadDict((LZ4_stream_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
        } else {
            /* lz4hc */
            assert(cctx->lz4CtxType == ctxHC);
            LZ4_loadDictHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
        }
    }

    /* Stage 2 : Write Frame Header */

    /* Magic Number */
    LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
    dstPtr += 4;
    {   BYTE* const headerStart = dstPtr;  /* header checksum covers FLG..dictID, not magic */

        /* FLG Byte */
        *dstPtr++ = (BYTE)(((1 & _2BITS) << 6)    /* Version('01') */
            + ((cctx->prefs.frameInfo.blockMode & _1BIT ) << 5)
            + ((cctx->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
            + ((unsigned)(cctx->prefs.frameInfo.contentSize > 0) << 3)
            + ((cctx->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
            +  (cctx->prefs.frameInfo.dictID > 0) );
        /* BD Byte */
        *dstPtr++ = (BYTE)((cctx->prefs.frameInfo.blockSizeID & _3BITS) << 4);
        /* Optional Frame content size field */
        if (cctx->prefs.frameInfo.contentSize) {
            LZ4F_writeLE64(dstPtr, cctx->prefs.frameInfo.contentSize);
            dstPtr += 8;
            cctx->totalInSize = 0;  /* will be checked against contentSize at LZ4F_compressEnd() */
        }
        /* Optional dictionary ID field */
        if (cctx->prefs.frameInfo.dictID) {
            LZ4F_writeLE32(dstPtr, cctx->prefs.frameInfo.dictID);
            dstPtr += 4;
        }
        /* Header CRC Byte */
        *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
        dstPtr++;
    }

    cctx->cStage = 1;   /* header written, now request input data block */
    return (size_t)(dstPtr - dstStart);
}
814 | | |
815 | | size_t LZ4F_compressBegin(LZ4F_cctx* cctx, |
816 | | void* dstBuffer, size_t dstCapacity, |
817 | | const LZ4F_preferences_t* preferencesPtr) |
818 | 0 | { |
819 | 0 | return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity, |
820 | 0 | NULL, 0, |
821 | 0 | NULL, preferencesPtr); |
822 | 0 | } |
823 | | |
824 | | /* LZ4F_compressBegin_usingDictOnce: |
825 | | * Hidden implementation, |
826 | | * employed for multi-threaded compression |
827 | | * when frame defines linked blocks */ |
828 | | size_t LZ4F_compressBegin_usingDictOnce(LZ4F_cctx* cctx, |
829 | | void* dstBuffer, size_t dstCapacity, |
830 | | const void* dict, size_t dictSize, |
831 | | const LZ4F_preferences_t* preferencesPtr) |
832 | 0 | { |
833 | 0 | return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity, |
834 | 0 | dict, dictSize, |
835 | 0 | NULL, preferencesPtr); |
836 | 0 | } |
837 | | |
838 | | size_t LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx, |
839 | | void* dstBuffer, size_t dstCapacity, |
840 | | const void* dict, size_t dictSize, |
841 | | const LZ4F_preferences_t* preferencesPtr) |
842 | 0 | { |
843 | | /* note : incorrect implementation : |
844 | | * this will only use the dictionary once, |
845 | | * instead of once *per* block when frames defines independent blocks */ |
846 | 0 | return LZ4F_compressBegin_usingDictOnce(cctx, dstBuffer, dstCapacity, |
847 | 0 | dict, dictSize, |
848 | 0 | preferencesPtr); |
849 | 0 | } |
850 | | |
851 | | size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx, |
852 | | void* dstBuffer, size_t dstCapacity, |
853 | | const LZ4F_CDict* cdict, |
854 | | const LZ4F_preferences_t* preferencesPtr) |
855 | 0 | { |
856 | 0 | return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity, |
857 | 0 | NULL, 0, |
858 | 0 | cdict, preferencesPtr); |
859 | 0 | } |
860 | | |
861 | | |
862 | | /* LZ4F_compressBound() : |
863 | | * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario. |
864 | | * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario. |
865 | | * This function cannot fail. |
866 | | */ |
867 | | size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr) |
868 | 0 | { |
869 | 0 | if (preferencesPtr && preferencesPtr->autoFlush) { |
870 | 0 | return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0); |
871 | 0 | } |
872 | 0 | return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1); |
873 | 0 | } |
874 | | |
875 | | |
876 | | typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict); |
877 | | |
878 | | |
/*! LZ4F_makeBlock():
 *  compress a single block, add header and optional checksum.
 *  assumption : dst buffer capacity is >= BHSize + srcSize + crcSize
 * @return : total bytes written into @dst :
 *           4-byte block header + payload + optional 4-byte block checksum
 */
static size_t LZ4F_makeBlock(void* dst,
                       const void* src, size_t srcSize,
                       compressFunc_t compress, void* lz4ctx, int level,
                       const LZ4F_CDict* cdict,
                       LZ4F_blockChecksum_t crcFlag)
{
    BYTE* const cSizePtr = (BYTE*)dst;
    U32 cSize;
    assert(compress != NULL);
    /* dst capacity is srcSize-1 : compression must save at least one byte,
     * otherwise the block will be stored uncompressed instead */
    cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
                          (int)(srcSize), (int)(srcSize-1),
                          level, cdict);

    if (cSize == 0 || cSize >= srcSize) {
        /* compression failed or did not gain anything :
         * store block uncompressed, and flag it as such in the block header */
        cSize = (U32)srcSize;
        LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
        memcpy(cSizePtr+BHSize, src, srcSize);
    } else {
        LZ4F_writeLE32(cSizePtr, cSize);
    }
    if (crcFlag) {
        U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0);  /* checksum of compressed data */
        LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32);
    }
    return BHSize + cSize + ((U32)crcFlag)*BFSize;
}
909 | | |
910 | | |
911 | | static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) |
912 | 0 | { |
913 | 0 | int const acceleration = (level < 0) ? -level + 1 : 1; |
914 | 0 | DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize); |
915 | 0 | LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); |
916 | 0 | if (cdict) { |
917 | 0 | return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); |
918 | 0 | } else { |
919 | 0 | return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration); |
920 | 0 | } |
921 | 0 | } |
922 | | |
923 | | static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) |
924 | 0 | { |
925 | 0 | int const acceleration = (level < 0) ? -level + 1 : 1; |
926 | 0 | (void)cdict; /* init once at beginning of frame */ |
927 | 0 | DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize); |
928 | 0 | return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); |
929 | 0 | } |
930 | | |
931 | | static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) |
932 | 0 | { |
933 | 0 | LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); |
934 | 0 | if (cdict) { |
935 | 0 | return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); |
936 | 0 | } |
937 | 0 | return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level); |
938 | 0 | } |
939 | | |
940 | | static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) |
941 | 0 | { |
942 | 0 | (void)level; (void)cdict; /* init once at beginning of frame */ |
943 | 0 | return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); |
944 | 0 | } |
945 | | |
946 | | static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) |
947 | 0 | { |
948 | 0 | (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict; |
949 | 0 | return 0; |
950 | 0 | } |
951 | | |
952 | | static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_BlockCompressMode_e compressMode) |
953 | 0 | { |
954 | 0 | if (compressMode == LZ4B_UNCOMPRESSED) |
955 | 0 | return LZ4F_doNotCompressBlock; |
956 | 0 | if (level < LZ4HC_CLEVEL_MIN) { |
957 | 0 | if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock; |
958 | 0 | return LZ4F_compressBlock_continue; |
959 | 0 | } |
960 | 0 | if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC; |
961 | 0 | return LZ4F_compressBlockHC_continue; |
962 | 0 | } |
963 | | |
964 | | /* Save history (up to 64KB) into @tmpBuff */ |
965 | | static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr) |
966 | 0 | { |
967 | 0 | if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) |
968 | 0 | return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB); |
969 | 0 | return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB); |
970 | 0 | } |
971 | | |
/* Tracks where the most recent block's source data came from,
 * which decides whether the history window must be saved into tmpBuff. */
typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;

/* default compress options, used when caller passes NULL */
static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } };
975 | | |
976 | | |
977 | | /*! LZ4F_compressUpdateImpl() : |
978 | | * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. |
979 | | * When successful, the function always entirely consumes @srcBuffer. |
980 | | * src data is either buffered or compressed into @dstBuffer. |
981 | | * If the block compression does not match the compression of the previous block, the old data is flushed |
982 | | * and operations continue with the new compression mode. |
983 | | * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on. |
984 | | * @compressOptionsPtr is optional : provide NULL to mean "default". |
985 | | * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. |
986 | | * or an error code if it fails (which can be tested using LZ4F_isError()) |
987 | | * After an error, the state is left in a UB state, and must be re-initialized. |
988 | | */ |
/* Core incremental-compression step : buffers and/or compresses @srcBuffer,
 * writing any produced blocks into @dstBuffer.
 * @blockCompression selects compressed vs stored (uncompressed) blocks.
 * @return : bytes written into @dstBuffer (can be 0 if input was only buffered),
 *           or an error code (testable with LZ4F_isError()). */
static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
                            void* dstBuffer, size_t dstCapacity,
                            const void* srcBuffer, size_t srcSize,
                            const LZ4F_compressOptions_t* compressOptionsPtr,
                            LZ4F_BlockCompressMode_e blockCompression)
{
    size_t const blockSize = cctxPtr->maxBlockSize;
    const BYTE* srcPtr = (const BYTE*)srcBuffer;
    const BYTE* const srcEnd = srcPtr + srcSize;
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    LZ4F_lastBlockStatus lastBlockCompressed = notDone;
    compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression);
    size_t bytesWritten;
    DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);

    RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);   /* state must be initialized and waiting for next block */
    if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
        RETURN_ERROR(dstMaxSize_tooSmall);

    if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize)
        RETURN_ERROR(dstMaxSize_tooSmall);

    /* flush currently written block, to continue with new block compression,
     * whenever the block mode changes (compressed <-> uncompressed) */
    if (cctxPtr->blockCompressMode != blockCompression) {
        bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
        dstPtr += bytesWritten;
        cctxPtr->blockCompressMode = blockCompression;
    }

    if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;

    /* complete tmp buffer : previously buffered data is topped up to a full block first */
    if (cctxPtr->tmpInSize > 0) {   /* some data already within tmp buffer */
        size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
        assert(blockSize > cctxPtr->tmpInSize);
        if (sizeToCopy > srcSize) {
            /* add src to tmpIn buffer : not enough input to complete a full block */
            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
            srcPtr = srcEnd;
            cctxPtr->tmpInSize += srcSize;
            /* still needs some CRC */
        } else {
            /* complete tmpIn block and then compress it */
            lastBlockCompressed = fromTmpBuffer;
            memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
            srcPtr += sizeToCopy;

            dstPtr += LZ4F_makeBlock(dstPtr,
                                   cctxPtr->tmpIn, blockSize,
                                   compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                                   cctxPtr->cdict,
                                   cctxPtr->prefs.frameInfo.blockChecksumFlag);
            /* linked mode keeps the just-compressed block in tmpBuff as history */
            if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
            cctxPtr->tmpInSize = 0;
    }   }

    while ((size_t)(srcEnd - srcPtr) >= blockSize) {
        /* compress full blocks directly from source, bypassing tmpBuff */
        lastBlockCompressed = fromSrcBuffer;
        dstPtr += LZ4F_makeBlock(dstPtr,
                               srcPtr, blockSize,
                               compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                               cctxPtr->cdict,
                               cctxPtr->prefs.frameInfo.blockChecksumFlag);
        srcPtr += blockSize;
    }

    if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
        /* autoFlush : remaining input (< blockSize) is compressed */
        lastBlockCompressed = fromSrcBuffer;
        dstPtr += LZ4F_makeBlock(dstPtr,
                               srcPtr, (size_t)(srcEnd - srcPtr),
                               compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                               cctxPtr->cdict,
                               cctxPtr->prefs.frameInfo.blockChecksumFlag);
        srcPtr = srcEnd;
    }

    /* preserve dictionary within @tmpBuff whenever necessary :
     * linked mode needs the previous 64 KB, but @srcBuffer may not survive this call */
    if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
        /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */
        assert(blockCompression == LZ4B_COMPRESSED);
        if (compressOptionsPtr->stableSrc) {
            cctxPtr->tmpIn = cctxPtr->tmpBuff;  /* src is stable : dictionary remains in src across invocations */
        } else {
            int const realDictSize = LZ4F_localSaveDict(cctxPtr);
            assert(0 <= realDictSize && realDictSize <= 64 KB);
            cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
        }
    }

    /* keep tmpIn within limits */
    if (!(cctxPtr->prefs.autoFlush)  /* no autoflush : there may be some data left within internal buffer */
      && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) )  /* not enough room to store next block */
    {
        /* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
         * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
        assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize));
    }

    /* some input data left, necessarily < blockSize */
    if (srcPtr < srcEnd) {
        /* fill tmp buffer */
        size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
        memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
        cctxPtr->tmpInSize = sizeToCopy;
    }

    /* content checksum is computed on the raw (uncompressed) input */
    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
        (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);

    cctxPtr->totalInSize += srcSize;
    return (size_t)(dstPtr - dstStart);
}
1106 | | |
1107 | | /*! LZ4F_compressUpdate() : |
1108 | | * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. |
1109 | | * When successful, the function always entirely consumes @srcBuffer. |
1110 | | * src data is either buffered or compressed into @dstBuffer. |
1111 | | * If previously an uncompressed block was written, buffered data is flushed |
1112 | | * before appending compressed data is continued. |
1113 | | * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr). |
1114 | | * @compressOptionsPtr is optional : provide NULL to mean "default". |
1115 | | * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. |
1116 | | * or an error code if it fails (which can be tested using LZ4F_isError()) |
1117 | | * After an error, the state is left in a UB state, and must be re-initialized. |
1118 | | */ |
1119 | | size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, |
1120 | | void* dstBuffer, size_t dstCapacity, |
1121 | | const void* srcBuffer, size_t srcSize, |
1122 | | const LZ4F_compressOptions_t* compressOptionsPtr) |
1123 | 0 | { |
1124 | 0 | return LZ4F_compressUpdateImpl(cctxPtr, |
1125 | 0 | dstBuffer, dstCapacity, |
1126 | 0 | srcBuffer, srcSize, |
1127 | 0 | compressOptionsPtr, LZ4B_COMPRESSED); |
1128 | 0 | } |
1129 | | |
1130 | | /*! LZ4F_uncompressedUpdate() : |
1131 | | * Same as LZ4F_compressUpdate(), but requests blocks to be sent uncompressed. |
1132 | | * This symbol is only supported when LZ4F_blockIndependent is used |
1133 | | * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr). |
1134 | | * @compressOptionsPtr is optional : provide NULL to mean "default". |
1135 | | * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered. |
1136 | | * or an error code if it fails (which can be tested using LZ4F_isError()) |
1137 | | * After an error, the state is left in a UB state, and must be re-initialized. |
1138 | | */ |
1139 | | size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr, |
1140 | | void* dstBuffer, size_t dstCapacity, |
1141 | | const void* srcBuffer, size_t srcSize, |
1142 | | const LZ4F_compressOptions_t* compressOptionsPtr) |
1143 | 0 | { |
1144 | 0 | return LZ4F_compressUpdateImpl(cctxPtr, |
1145 | 0 | dstBuffer, dstCapacity, |
1146 | 0 | srcBuffer, srcSize, |
1147 | 0 | compressOptionsPtr, LZ4B_UNCOMPRESSED); |
1148 | 0 | } |
1149 | | |
1150 | | |
1151 | | /*! LZ4F_flush() : |
1152 | | * When compressed data must be sent immediately, without waiting for a block to be filled, |
1153 | | * invoke LZ4_flush(), which will immediately compress any remaining data stored within LZ4F_cctx. |
1154 | | * The result of the function is the number of bytes written into dstBuffer. |
1155 | | * It can be zero, this means there was no data left within LZ4F_cctx. |
1156 | | * The function outputs an error code if it fails (can be tested using LZ4F_isError()) |
1157 | | * LZ4F_compressOptions_t* is optional. NULL is a valid argument. |
1158 | | */ |
/* Immediately compress whatever data remains buffered within @cctxPtr
 * into @dstBuffer, without waiting for a full block.
 * @return : bytes written into @dstBuffer (0 means nothing was buffered),
 *           or an error code (testable with LZ4F_isError()). */
size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
                  void* dstBuffer, size_t dstCapacity,
                  const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;
    compressFunc_t compress;

    if (cctxPtr->tmpInSize == 0) return 0;   /* nothing to flush */
    RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
    /* worst case : block header + stored payload + block checksum */
    RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
    (void)compressOptionsPtr;   /* not useful (yet) */

    /* select compression function */
    compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompressMode);

    /* compress tmp buffer */
    dstPtr += LZ4F_makeBlock(dstPtr,
                           cctxPtr->tmpIn, cctxPtr->tmpInSize,
                           compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
                           cctxPtr->cdict,
                           cctxPtr->prefs.frameInfo.blockChecksumFlag);
    assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity));

    /* linked mode keeps the flushed bytes in tmpBuff as history for next block */
    if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
        cctxPtr->tmpIn += cctxPtr->tmpInSize;
    cctxPtr->tmpInSize = 0;

    /* keep tmpIn within limits */
    if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) {   /* necessarily LZ4F_blockLinked */
        /* compact : save 64KB of history at the start of tmpBuff to make room */
        int const realDictSize = LZ4F_localSaveDict(cctxPtr);
        cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
    }

    return (size_t)(dstPtr - dstStart);
}
1195 | | |
1196 | | |
1197 | | /*! LZ4F_compressEnd() : |
1198 | | * When you want to properly finish the compressed frame, just call LZ4F_compressEnd(). |
1199 | | * It will flush whatever data remained within compressionContext (like LZ4_flush()) |
1200 | | * but also properly finalize the frame, with an endMark and an (optional) checksum. |
1201 | | * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument. |
1202 | | * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size)) |
1203 | | * or an error code if it fails (can be tested using LZ4F_isError()) |
1204 | | * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin(). |
1205 | | */ |
/* Finalize the frame : flush buffered data, write the endMark,
 * and the optional content checksum.
 * @return : bytes written into @dstBuffer (necessarily >= 4, the endMark size),
 *           or an error code (testable with LZ4F_isError()).
 * On success the context becomes re-usable, starting with LZ4F_compressBegin(). */
size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
                        void* dstBuffer, size_t dstCapacity,
                        const LZ4F_compressOptions_t* compressOptionsPtr)
{
    BYTE* const dstStart = (BYTE*)dstBuffer;
    BYTE* dstPtr = dstStart;

    /* first, flush whatever is still buffered within the context */
    size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
    DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
    FORWARD_IF_ERROR(flushSize);
    dstPtr += flushSize;

    /* remaining capacity is accounted after the flush */
    assert(flushSize <= dstCapacity);
    dstCapacity -= flushSize;

    RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall);
    LZ4F_writeLE32(dstPtr, 0);   /* a zero block-size field marks end of frame */
    dstPtr += 4;   /* endMark */

    if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
        U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
        RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);   /* endMark + checksum */
        DEBUGLOG(5,"Writing 32-bit content checksum (0x%0X)", xxh);
        LZ4F_writeLE32(dstPtr, xxh);
        dstPtr+=4;   /* content Checksum */
    }

    cctxPtr->cStage = 0;   /* state is now re-usable (with identical preferences) */

    /* when the header announced a content size, verify the caller actually
     * supplied exactly that many input bytes */
    if (cctxPtr->prefs.frameInfo.contentSize) {
        if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
            RETURN_ERROR(frameSize_wrong);
    }

    return (size_t)(dstPtr - dstStart);
}
1242 | | |
1243 | | |
1244 | | /*-*************************************************** |
1245 | | * Frame Decompression |
1246 | | *****************************************************/ |
1247 | | |
/* Stages of the decompression state machine.
 * dctx->dStage records where decoding stands, so that work can resume
 * at the right point across successive calls (see LZ4F_decodeHeader,
 * which transitions between several of these stages). */
typedef enum {
    dstage_getFrameHeader=0, dstage_storeFrameHeader,
    dstage_init,
    dstage_getBlockHeader, dstage_storeBlockHeader,
    dstage_copyDirect, dstage_getBlockChecksum,
    dstage_getCBlock, dstage_storeCBlock,
    dstage_flushOut,
    dstage_getSuffix, dstage_storeSuffix,
    dstage_getSFrameSize, dstage_storeSFrameSize,
    dstage_skipSkippable
} dStage_t;
1259 | | |
struct LZ4F_dctx_s {
    LZ4F_CustomMem cmem;            /* custom allocator, used for all internal allocations */
    LZ4F_frameInfo_t frameInfo;     /* parameters decoded from the frame header */
    U32 version;                    /* API version provided at context creation */
    dStage_t dStage;                /* current stage of the decoding state machine */
    U64 frameRemainingSize;
    size_t maxBlockSize;
    size_t maxBufferSize;
    BYTE* tmpIn;                    /* staging buffer for partial input (freed with cmem) */
    size_t tmpInSize;               /* number of bytes currently stored in tmpIn */
    size_t tmpInTarget;             /* number of bytes tmpIn must reach before proceeding */
    BYTE* tmpOutBuffer;             /* staging buffer for output (freed with cmem) */
    const BYTE* dict;               /* current dictionary / history window */
    size_t dictSize;
    BYTE* tmpOut;
    size_t tmpOutSize;
    size_t tmpOutStart;
    XXH32_state_t xxh;              /* presumably the running content checksum — confirm in decompress loop */
    XXH32_state_t blockChecksum;    /* presumably the per-block checksum state — confirm in decompress loop */
    int skipChecksum;               /* when set, checksum verification is bypassed */
    BYTE header[LZ4F_HEADER_SIZE_MAX];  /* holds partially-received frame header bytes */
};  /* typedef'd to LZ4F_dctx in lz4frame.h */
1282 | | |
1283 | | |
1284 | | LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version) |
1285 | 9.67k | { |
1286 | 9.67k | LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem); |
1287 | 9.67k | if (dctx == NULL) return NULL; |
1288 | | |
1289 | 9.67k | dctx->cmem = customMem; |
1290 | 9.67k | dctx->version = version; |
1291 | 9.67k | return dctx; |
1292 | 9.67k | } |
1293 | | |
1294 | | /*! LZ4F_createDecompressionContext() : |
1295 | | * Create a decompressionContext object, which will track all decompression operations. |
1296 | | * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object. |
1297 | | * Object can later be released using LZ4F_freeDecompressionContext(). |
1298 | | * @return : if != 0, there was an error during context creation. |
1299 | | */ |
1300 | | LZ4F_errorCode_t |
1301 | | LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber) |
1302 | 9.67k | { |
1303 | 9.67k | assert(LZ4F_decompressionContextPtr != NULL); /* violation of narrow contract */ |
1304 | 9.67k | RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null); /* in case it nonetheless happen in production */ |
1305 | | |
1306 | 9.67k | *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber); |
1307 | 9.67k | if (*LZ4F_decompressionContextPtr == NULL) { /* failed allocation */ |
1308 | 0 | RETURN_ERROR(allocation_failed); |
1309 | 0 | } |
1310 | 9.67k | return LZ4F_OK_NoError; |
1311 | 9.67k | } |
1312 | | |
1313 | | LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx) |
1314 | 9.67k | { |
1315 | 9.67k | LZ4F_errorCode_t result = LZ4F_OK_NoError; |
1316 | 9.67k | if (dctx != NULL) { /* can accept NULL input, like free() */ |
1317 | 9.67k | result = (LZ4F_errorCode_t)dctx->dStage; |
1318 | 9.67k | LZ4F_free(dctx->tmpIn, dctx->cmem); |
1319 | 9.67k | LZ4F_free(dctx->tmpOutBuffer, dctx->cmem); |
1320 | 9.67k | LZ4F_free(dctx, dctx->cmem); |
1321 | 9.67k | } |
1322 | 9.67k | return result; |
1323 | 9.67k | } |
1324 | | |
1325 | | |
1326 | | /*==--- Streaming Decompression operations ---==*/ |
1327 | | void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx) |
1328 | 2.90k | { |
1329 | 2.90k | DEBUGLOG(5, "LZ4F_resetDecompressionContext"); |
1330 | 2.90k | dctx->dStage = dstage_getFrameHeader; |
1331 | 2.90k | dctx->dict = NULL; |
1332 | 2.90k | dctx->dictSize = 0; |
1333 | 2.90k | dctx->skipChecksum = 0; |
1334 | 2.90k | dctx->frameRemainingSize = 0; |
1335 | 2.90k | } |
1336 | | |
1337 | | |
1338 | | /*! LZ4F_decodeHeader() : |
1339 | | * input : `src` points at the **beginning of the frame** |
1340 | | * output : set internal values of dctx, such as |
1341 | | * dctx->frameInfo and dctx->dStage. |
1342 | | * Also allocates internal buffers. |
1343 | | * @return : nb Bytes read from src (necessarily <= srcSize) |
1344 | | * or an error code (testable with LZ4F_isError()) |
1345 | | */ |
static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
{
    unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
    size_t frameHeaderSize;
    const BYTE* srcPtr = (const BYTE*)src;

    DEBUGLOG(5, "LZ4F_decodeHeader");
    /* need to decode header to get frameInfo */
    RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete);   /* minimal frame header size */
    MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));

    /* special case : skippable frames
     * (magic number is LZ4F_MAGIC_SKIPPABLE_START + a 4-bit user value, hence the mask) */
    if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
        dctx->frameInfo.frameType = LZ4F_skippableFrame;
        if (src == (void*)(dctx->header)) {
            /* header bytes were already accumulated into dctx->header :
             * keep them there, and wait for the remaining bytes of the 8-byte skippable header */
            dctx->tmpInSize = srcSize;
            dctx->tmpInTarget = 8;
            dctx->dStage = dstage_storeSFrameSize;
            return srcSize;
        } else {
            /* only the 4 magic bytes are consumed; the frame size field is read in a later stage */
            dctx->dStage = dstage_getSFrameSize;
            return 4;
    }   }

    /* control magic number */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
        DEBUGLOG(4, "frame header error : unknown magic number");
        RETURN_ERROR(frameType_unknown);
    }
#endif
    dctx->frameInfo.frameType = LZ4F_frame;

    /* Flags : decode the FLG byte (5th byte of the header) bit by bit */
    {   U32 const FLG = srcPtr[4];
        U32 const version = (FLG>>6) & _2BITS;
        blockChecksumFlag = (FLG>>4) & _1BIT;
        blockMode = (FLG>>5) & _1BIT;
        contentSizeFlag = (FLG>>3) & _1BIT;
        contentChecksumFlag = (FLG>>2) & _1BIT;
        dictIDFlag = FLG & _1BIT;
        /* validate */
        if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
        if (version != 1) RETURN_ERROR(headerVersion_wrong);       /* Version Number, only supported value */
    }
    DEBUGLOG(6, "contentSizeFlag: %u", contentSizeFlag);

    /* Frame Header Size : optional fields (8-byte content size, 4-byte dictID) extend the minimum */
    frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);

    if (srcSize < frameHeaderSize) {
        /* not enough input to fully decode frame header :
         * save what we have into dctx->header and resume on next call */
        if (srcPtr != dctx->header)
            memcpy(dctx->header, srcPtr, srcSize);
        dctx->tmpInSize = srcSize;
        dctx->tmpInTarget = frameHeaderSize;
        dctx->dStage = dstage_storeFrameHeader;
        return srcSize;
    }

    /* BD byte (6th byte) : block size descriptor */
    {   U32 const BD = srcPtr[5];
        blockSizeID = (BD>>4) & _3BITS;
        /* validate */
        if (((BD>>7)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set);   /* Reserved bit */
        if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid);    /* 4-7 only supported values for the time being */
        if (((BD>>0)&_4BITS) != 0) RETURN_ERROR(reservedFlag_set);  /* Reserved bits */
    }

    /* check header : 1-byte checksum of bytes [4 .. frameHeaderSize-2] stored as last header byte */
    assert(frameHeaderSize > 5);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    {   BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
        RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid);
    }
#endif

    /* save : publish decoded parameters into dctx->frameInfo for callers (LZ4F_getFrameInfo) */
    dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
    dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
    dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
    dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
    dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
    if (contentSizeFlag) {
        /* frameRemainingSize counts down during decoding; it starts at the declared content size */
        dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
    }
    if (dictIDFlag)
        /* dictID is the 4 bytes just before the header checksum byte */
        dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);

    dctx->dStage = dstage_init;

    return frameHeaderSize;
}
1438 | | |
1439 | | |
1440 | | /*! LZ4F_headerSize() : |
1441 | | * @return : size of frame header |
1442 | | * or an error code, which can be tested using LZ4F_isError() |
1443 | | */ |
1444 | | size_t LZ4F_headerSize(const void* src, size_t srcSize) |
1445 | 0 | { |
1446 | 0 | RETURN_ERROR_IF(src == NULL, srcPtr_wrong); |
1447 | | |
1448 | | /* minimal srcSize to determine header size */ |
1449 | 0 | if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH) |
1450 | 0 | RETURN_ERROR(frameHeader_incomplete); |
1451 | | |
1452 | | /* special case : skippable frames */ |
1453 | 0 | if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) |
1454 | 0 | return 8; |
1455 | | |
1456 | | /* control magic number */ |
1457 | | #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION |
1458 | | if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER) |
1459 | | RETURN_ERROR(frameType_unknown); |
1460 | | #endif |
1461 | | |
1462 | | /* Frame Header Size */ |
1463 | 0 | { BYTE const FLG = ((const BYTE*)src)[4]; |
1464 | 0 | U32 const contentSizeFlag = (FLG>>3) & _1BIT; |
1465 | 0 | U32 const dictIDFlag = FLG & _1BIT; |
1466 | 0 | return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0); |
1467 | 0 | } |
1468 | 0 | } |
1469 | | |
1470 | | /*! LZ4F_getFrameInfo() : |
1471 | | * This function extracts frame parameters (max blockSize, frame checksum, etc.). |
1472 | | * Usage is optional. Objective is to provide relevant information for allocation purposes. |
1473 | | * This function works in 2 situations : |
1474 | | * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process. |
1475 | | * Amount of input data provided must be large enough to successfully decode the frame header. |
1476 | | * A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum. |
1477 | | * - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx. |
1478 | | * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value). |
1479 | | * Decompression must resume from (srcBuffer + *srcSizePtr). |
1480 | | * @return : an hint about how many srcSize bytes LZ4F_decompress() expects for next call, |
1481 | | * or an error code which can be tested using LZ4F_isError() |
1482 | | * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped. |
1483 | | * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure. |
1484 | | */ |
1485 | | LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, |
1486 | | LZ4F_frameInfo_t* frameInfoPtr, |
1487 | | const void* srcBuffer, size_t* srcSizePtr) |
1488 | 0 | { |
1489 | 0 | LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader); |
1490 | 0 | if (dctx->dStage > dstage_storeFrameHeader) { |
1491 | | /* frameInfo already decoded */ |
1492 | 0 | size_t o=0, i=0; |
1493 | 0 | *srcSizePtr = 0; |
1494 | 0 | *frameInfoPtr = dctx->frameInfo; |
1495 | | /* returns : recommended nb of bytes for LZ4F_decompress() */ |
1496 | 0 | return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL); |
1497 | 0 | } else { |
1498 | 0 | if (dctx->dStage == dstage_storeFrameHeader) { |
1499 | | /* frame decoding already started, in the middle of header => automatic fail */ |
1500 | 0 | *srcSizePtr = 0; |
1501 | 0 | RETURN_ERROR(frameDecoding_alreadyStarted); |
1502 | 0 | } else { |
1503 | 0 | size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr); |
1504 | 0 | if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; } |
1505 | 0 | if (*srcSizePtr < hSize) { |
1506 | 0 | *srcSizePtr=0; |
1507 | 0 | RETURN_ERROR(frameHeader_incomplete); |
1508 | 0 | } |
1509 | | |
1510 | 0 | { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize); |
1511 | 0 | if (LZ4F_isError(decodeResult)) { |
1512 | 0 | *srcSizePtr = 0; |
1513 | 0 | } else { |
1514 | 0 | *srcSizePtr = decodeResult; |
1515 | 0 | decodeResult = BHSize; /* block header size */ |
1516 | 0 | } |
1517 | 0 | *frameInfoPtr = dctx->frameInfo; |
1518 | 0 | return decodeResult; |
1519 | 0 | } } } |
1520 | 0 | } |
1521 | | |
1522 | | |
1523 | | /* LZ4F_updateDict() : |
1524 | | * only used for LZ4F_blockLinked mode |
1525 | | * Condition : @dstPtr != NULL |
1526 | | */ |
static void LZ4F_updateDict(LZ4F_dctx* dctx,
                      const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
                      unsigned withinTmp)
{
    /* Extend or relocate the rolling history ("dictionary") after @dstSize bytes
     * were just written at @dstPtr, so linked blocks can reference up to 64 KB
     * of previously decoded data.
     * @withinTmp != 0 means @dstPtr points inside dctx->tmpOut (flush path).
     * NOTE(review): this function assumes [dstPtr, dstPtr+dstSize) has already
     * been filled with decoded bytes by the caller. */
    assert(dstPtr != NULL);
    if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr;   /* will lead to prefix mode */
    assert(dctx->dict != NULL);

    if (dctx->dict + dctx->dictSize == dstPtr) {  /* prefix mode, everything within dstBuffer */
        /* new data is contiguous with existing history : just grow it */
        dctx->dictSize += dstSize;
        return;
    }

    assert(dstPtr >= dstBufferStart);
    if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) {  /* history in dstBuffer becomes large enough to become dictionary */
        dctx->dict = (const BYTE*)dstBufferStart;
        dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
        return;
    }

    assert(dstSize < 64 KB);   /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */

    /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
    assert(dctx->tmpOutBuffer != NULL);

    if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) {   /* continue history within tmpOutBuffer */
        /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
        assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
        dctx->dictSize += dstSize;
        return;
    }

    if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
        /* dict currently lives elsewhere (e.g. user dstBuffer); migrate the useful
         * tail of it into tmpOutBuffer, just before the not-yet-flushed tmpOut data,
         * so the whole history becomes one contiguous region in tmpOutBuffer */
        size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
        size_t copySize = 64 KB - dctx->tmpOutSize;
        const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
        /* tmpOut already holds >= 64 KB : old dict bytes are no longer reachable */
        if (dctx->tmpOutSize > 64 KB) copySize = 0;
        /* cannot copy more old-dict bytes than the space available before tmpOut */
        if (copySize > preserveSize) copySize = preserveSize;

        memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);

        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
        return;
    }

    if (dctx->dict == dctx->tmpOutBuffer) {    /* copy dst into tmp to complete dict */
        if (dctx->dictSize + dstSize > dctx->maxBufferSize) {  /* tmp buffer not large enough */
            /* keep only the most recent bytes so that, after appending dstSize,
             * exactly 64 KB of history remains */
            size_t const preserveSize = 64 KB - dstSize;
            memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
            dctx->dictSize = preserveSize;
        }
        memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
        dctx->dictSize += dstSize;
        return;
    }

    /* join dict & dest into tmp :
     * history is split between an external dict and the fresh dst bytes;
     * concatenate the useful tail of both into tmpOutBuffer (<= 64 KB total) */
    {   size_t preserveSize = 64 KB - dstSize;
        if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
        memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
        memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
        dctx->dict = dctx->tmpOutBuffer;
        dctx->dictSize = preserveSize + dstSize;
    }
}
1593 | | |
1594 | | |
1595 | | /*! LZ4F_decompress() : |
 *  Call this function repetitively to regenerate decompressed data from the compressed data in srcBuffer.
1597 | | * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer |
1598 | | * into dstBuffer of capacity *dstSizePtr. |
1599 | | * |
1600 | | * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value). |
1601 | | * |
1602 | | * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value). |
1603 | | * If number of bytes read is < number of bytes provided, then decompression operation is not complete. |
1604 | | * Remaining data will have to be presented again in a subsequent invocation. |
1605 | | * |
1606 | | * The function result is an hint of the better srcSize to use for next call to LZ4F_decompress. |
1607 | | * Schematically, it's the size of the current (or remaining) compressed block + header of next block. |
1608 | | * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling. |
 *  Note that this is just a hint, and it's always possible to provide any srcSize value.
1610 | | * When a frame is fully decoded, @return will be 0. |
1611 | | * If decompression failed, @return is an error code which can be tested using LZ4F_isError(). |
1612 | | */ |
1613 | | size_t LZ4F_decompress(LZ4F_dctx* dctx, |
1614 | | void* dstBuffer, size_t* dstSizePtr, |
1615 | | const void* srcBuffer, size_t* srcSizePtr, |
1616 | | const LZ4F_decompressOptions_t* decompressOptionsPtr) |
1617 | 10.1k | { |
1618 | 10.1k | LZ4F_decompressOptions_t optionsNull; |
1619 | 10.1k | const BYTE* const srcStart = (const BYTE*)srcBuffer; |
1620 | 10.1k | const BYTE* const srcEnd = srcStart + *srcSizePtr; |
1621 | 10.1k | const BYTE* srcPtr = srcStart; |
1622 | 10.1k | BYTE* const dstStart = (BYTE*)dstBuffer; |
1623 | 10.1k | BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL; |
1624 | 10.1k | BYTE* dstPtr = dstStart; |
1625 | 10.1k | const BYTE* selectedIn = NULL; |
1626 | 10.1k | unsigned doAnotherStage = 1; |
1627 | 10.1k | size_t nextSrcSizeHint = 1; |
1628 | | |
1629 | | |
1630 | 10.1k | DEBUGLOG(5, "LZ4F_decompress: src[%p](%u) => dst[%p](%u)", |
1631 | 10.1k | srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr); |
1632 | 10.1k | if (dstBuffer == NULL) assert(*dstSizePtr == 0); |
1633 | 10.1k | MEM_INIT(&optionsNull, 0, sizeof(optionsNull)); |
1634 | 10.1k | if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull; |
1635 | 10.1k | *srcSizePtr = 0; |
1636 | 10.1k | *dstSizePtr = 0; |
1637 | 10.1k | assert(dctx != NULL); |
1638 | 10.1k | dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */ |
1639 | | |
1640 | | /* behaves as a state machine */ |
1641 | | |
1642 | 55.0k | while (doAnotherStage) { |
1643 | | |
1644 | 50.4k | switch(dctx->dStage) |
1645 | 50.4k | { |
1646 | | |
1647 | 9.64k | case dstage_getFrameHeader: |
1648 | 9.64k | DEBUGLOG(6, "dstage_getFrameHeader"); |
1649 | 9.64k | if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */ |
1650 | 9.29k | size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */ |
1651 | 9.29k | FORWARD_IF_ERROR(hSize); |
1652 | 8.85k | srcPtr += hSize; |
1653 | 8.85k | break; |
1654 | 9.29k | } |
1655 | 350 | dctx->tmpInSize = 0; |
1656 | 350 | if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */ |
1657 | 350 | dctx->tmpInTarget = minFHSize; /* minimum size to decode header */ |
1658 | 350 | dctx->dStage = dstage_storeFrameHeader; |
1659 | | /* fall-through */ |
1660 | | |
1661 | 373 | case dstage_storeFrameHeader: |
1662 | 373 | DEBUGLOG(6, "dstage_storeFrameHeader"); |
1663 | 373 | { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr)); |
1664 | 373 | memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); |
1665 | 373 | dctx->tmpInSize += sizeToCopy; |
1666 | 373 | srcPtr += sizeToCopy; |
1667 | 373 | } |
1668 | 373 | if (dctx->tmpInSize < dctx->tmpInTarget) { |
1669 | 31 | nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */ |
1670 | 31 | doAnotherStage = 0; /* not enough src data, ask for some more */ |
1671 | 31 | break; |
1672 | 31 | } |
1673 | 342 | FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) ); /* will update dStage appropriately */ |
1674 | 277 | break; |
1675 | | |
1676 | 8.99k | case dstage_init: |
1677 | 8.99k | DEBUGLOG(6, "dstage_init"); |
1678 | 8.99k | if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0); |
1679 | | /* internal buffers allocation */ |
1680 | 8.99k | { size_t const bufferNeeded = dctx->maxBlockSize |
1681 | 8.99k | + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0); |
1682 | 9.00k | if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */ |
1683 | 9.00k | dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/ |
1684 | 9.00k | LZ4F_free(dctx->tmpIn, dctx->cmem); |
1685 | 9.00k | dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem); |
1686 | 9.00k | RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed); |
1687 | 9.00k | LZ4F_free(dctx->tmpOutBuffer, dctx->cmem); |
1688 | 9.00k | dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem); |
1689 | 9.00k | RETURN_ERROR_IF(dctx->tmpOutBuffer== NULL, allocation_failed); |
1690 | 9.00k | dctx->maxBufferSize = bufferNeeded; |
1691 | 9.00k | } } |
1692 | 8.99k | dctx->tmpInSize = 0; |
1693 | 8.99k | dctx->tmpInTarget = 0; |
1694 | 8.99k | dctx->tmpOut = dctx->tmpOutBuffer; |
1695 | 8.99k | dctx->tmpOutStart = 0; |
1696 | 8.99k | dctx->tmpOutSize = 0; |
1697 | | |
1698 | 8.99k | dctx->dStage = dstage_getBlockHeader; |
1699 | | /* fall-through */ |
1700 | | |
1701 | 18.0k | case dstage_getBlockHeader: |
1702 | 18.0k | if ((size_t)(srcEnd - srcPtr) >= BHSize) { |
1703 | 18.0k | selectedIn = srcPtr; |
1704 | 18.0k | srcPtr += BHSize; |
1705 | 18.4E | } else { |
1706 | | /* not enough input to read cBlockSize field */ |
1707 | 18.4E | dctx->tmpInSize = 0; |
1708 | 18.4E | dctx->dStage = dstage_storeBlockHeader; |
1709 | 18.4E | } |
1710 | | |
1711 | 18.0k | if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */ |
1712 | 26 | case dstage_storeBlockHeader: |
1713 | 26 | { size_t const remainingInput = (size_t)(srcEnd - srcPtr); |
1714 | 26 | size_t const wantedData = BHSize - dctx->tmpInSize; |
1715 | 26 | size_t const sizeToCopy = MIN(wantedData, remainingInput); |
1716 | 26 | memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); |
1717 | 26 | srcPtr += sizeToCopy; |
1718 | 26 | dctx->tmpInSize += sizeToCopy; |
1719 | | |
1720 | 26 | if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */ |
1721 | 26 | nextSrcSizeHint = BHSize - dctx->tmpInSize; |
1722 | 26 | doAnotherStage = 0; |
1723 | 26 | break; |
1724 | 26 | } |
1725 | 0 | selectedIn = dctx->tmpIn; |
1726 | 0 | } /* if (dctx->dStage == dstage_storeBlockHeader) */ |
1727 | | |
1728 | | /* decode block header */ |
1729 | 18.0k | { U32 const blockHeader = LZ4F_readLE32(selectedIn); |
1730 | 18.0k | size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU; |
1731 | 18.0k | size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize; |
1732 | 18.0k | if (blockHeader==0) { /* frameEnd signal, no more block */ |
1733 | 2.92k | DEBUGLOG(5, "end of frame"); |
1734 | 2.92k | dctx->dStage = dstage_getSuffix; |
1735 | 2.92k | break; |
1736 | 2.92k | } |
1737 | 15.1k | if (nextCBlockSize > dctx->maxBlockSize) { |
1738 | 379 | RETURN_ERROR(maxBlockSize_invalid); |
1739 | 379 | } |
1740 | 14.7k | if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) { |
1741 | | /* next block is uncompressed */ |
1742 | 7.33k | dctx->tmpInTarget = nextCBlockSize; |
1743 | 7.33k | DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize); |
1744 | 7.33k | if (dctx->frameInfo.blockChecksumFlag) { |
1745 | 4.97k | (void)XXH32_reset(&dctx->blockChecksum, 0); |
1746 | 4.97k | } |
1747 | 7.33k | dctx->dStage = dstage_copyDirect; |
1748 | 7.33k | break; |
1749 | 7.33k | } |
1750 | | /* next block is a compressed block */ |
1751 | 7.40k | dctx->tmpInTarget = nextCBlockSize + crcSize; |
1752 | 7.40k | dctx->dStage = dstage_getCBlock; |
1753 | 7.40k | if (dstPtr==dstEnd || srcPtr==srcEnd) { |
1754 | 311 | nextSrcSizeHint = BHSize + nextCBlockSize + crcSize; |
1755 | 311 | doAnotherStage = 0; |
1756 | 311 | } |
1757 | 7.40k | break; |
1758 | 14.7k | } |
1759 | | |
1760 | 7.42k | case dstage_copyDirect: /* uncompressed block */ |
1761 | 7.42k | DEBUGLOG(6, "dstage_copyDirect"); |
1762 | 7.42k | { size_t sizeToCopy; |
1763 | 7.42k | if (dstPtr == NULL) { |
1764 | 0 | sizeToCopy = 0; |
1765 | 7.42k | } else { |
1766 | 7.42k | size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr)); |
1767 | 7.42k | sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize); |
1768 | 7.42k | memcpy(dstPtr, srcPtr, sizeToCopy); |
1769 | 7.42k | if (!dctx->skipChecksum) { |
1770 | 7.42k | if (dctx->frameInfo.blockChecksumFlag) { |
1771 | 5.05k | (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy); |
1772 | 5.05k | } |
1773 | 7.42k | if (dctx->frameInfo.contentChecksumFlag) |
1774 | 957 | (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy); |
1775 | 7.42k | } |
1776 | 7.42k | if (dctx->frameInfo.contentSize) |
1777 | 46 | dctx->frameRemainingSize -= sizeToCopy; |
1778 | | |
1779 | | /* history management (linked blocks only)*/ |
1780 | 7.42k | if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { |
1781 | 4.84k | LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0); |
1782 | 4.84k | } |
1783 | 7.42k | srcPtr += sizeToCopy; |
1784 | 7.42k | dstPtr += sizeToCopy; |
1785 | 7.42k | } |
1786 | 7.42k | if (sizeToCopy == dctx->tmpInTarget) { /* all done */ |
1787 | 6.86k | if (dctx->frameInfo.blockChecksumFlag) { |
1788 | 4.54k | dctx->tmpInSize = 0; |
1789 | 4.54k | dctx->dStage = dstage_getBlockChecksum; |
1790 | 4.54k | } else |
1791 | 2.32k | dctx->dStage = dstage_getBlockHeader; /* new block */ |
1792 | 6.86k | break; |
1793 | 6.86k | } |
1794 | 562 | dctx->tmpInTarget -= sizeToCopy; /* need to copy more */ |
1795 | 562 | } |
1796 | 0 | nextSrcSizeHint = dctx->tmpInTarget + |
1797 | 562 | +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0) |
1798 | 562 | + BHSize /* next header size */; |
1799 | 562 | doAnotherStage = 0; |
1800 | 562 | break; |
1801 | | |
1802 | | /* check block checksum for recently transferred uncompressed block */ |
1803 | 4.55k | case dstage_getBlockChecksum: |
1804 | 4.55k | DEBUGLOG(6, "dstage_getBlockChecksum"); |
1805 | 4.55k | { const void* crcSrc; |
1806 | 4.55k | if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) { |
1807 | 4.53k | crcSrc = srcPtr; |
1808 | 4.53k | srcPtr += 4; |
1809 | 4.53k | } else { |
1810 | 13 | size_t const stillToCopy = 4 - dctx->tmpInSize; |
1811 | 13 | size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr)); |
1812 | 13 | memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); |
1813 | 13 | dctx->tmpInSize += sizeToCopy; |
1814 | 13 | srcPtr += sizeToCopy; |
1815 | 13 | if (dctx->tmpInSize < 4) { /* all input consumed */ |
1816 | 10 | doAnotherStage = 0; |
1817 | 10 | break; |
1818 | 10 | } |
1819 | 3 | crcSrc = dctx->header; |
1820 | 3 | } |
1821 | 4.54k | if (!dctx->skipChecksum) { |
1822 | 4.53k | U32 const readCRC = LZ4F_readLE32(crcSrc); |
1823 | 4.53k | U32 const calcCRC = XXH32_digest(&dctx->blockChecksum); |
1824 | | #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION |
1825 | | DEBUGLOG(6, "compare block checksum"); |
1826 | | if (readCRC != calcCRC) { |
1827 | | DEBUGLOG(4, "incorrect block checksum: %08X != %08X", |
1828 | | readCRC, calcCRC); |
1829 | | RETURN_ERROR(blockChecksum_invalid); |
1830 | | } |
1831 | | #else |
1832 | 4.53k | (void)readCRC; |
1833 | 4.53k | (void)calcCRC; |
1834 | 4.53k | #endif |
1835 | 4.53k | } } |
1836 | 0 | dctx->dStage = dstage_getBlockHeader; /* new block */ |
1837 | 4.54k | break; |
1838 | | |
1839 | 7.44k | case dstage_getCBlock: |
1840 | 7.44k | DEBUGLOG(6, "dstage_getCBlock"); |
1841 | 7.44k | if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) { |
1842 | 377 | dctx->tmpInSize = 0; |
1843 | 377 | dctx->dStage = dstage_storeCBlock; |
1844 | 377 | break; |
1845 | 377 | } |
1846 | | /* input large enough to read full block directly */ |
1847 | 7.06k | selectedIn = srcPtr; |
1848 | 7.06k | srcPtr += dctx->tmpInTarget; |
1849 | | |
1850 | 7.06k | if (0) /* always jump over next block */ |
1851 | 377 | case dstage_storeCBlock: |
1852 | 377 | { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize; |
1853 | 377 | size_t const inputLeft = (size_t)(srcEnd-srcPtr); |
1854 | 377 | size_t const sizeToCopy = MIN(wantedData, inputLeft); |
1855 | 377 | memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); |
1856 | 377 | dctx->tmpInSize += sizeToCopy; |
1857 | 377 | srcPtr += sizeToCopy; |
1858 | 377 | if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */ |
1859 | 376 | nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) |
1860 | 376 | + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0) |
1861 | 376 | + BHSize /* next header size */; |
1862 | 376 | doAnotherStage = 0; |
1863 | 376 | break; |
1864 | 376 | } |
1865 | 1 | selectedIn = dctx->tmpIn; |
1866 | 1 | } |
1867 | | |
1868 | | /* At this stage, input is large enough to decode a block */ |
1869 | | |
1870 | | /* First, decode and control block checksum if it exists */ |
1871 | 7.06k | if (dctx->frameInfo.blockChecksumFlag) { |
1872 | 6.30k | assert(dctx->tmpInTarget >= 4); |
1873 | 6.30k | dctx->tmpInTarget -= 4; |
1874 | 6.30k | assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */ |
1875 | 6.30k | { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget); |
1876 | 6.30k | U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0); |
1877 | | #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION |
1878 | | RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid); |
1879 | | #else |
1880 | 6.30k | (void)readBlockCrc; |
1881 | 6.30k | (void)calcBlockCrc; |
1882 | 6.30k | #endif |
1883 | 6.30k | } } |
1884 | | |
1885 | | /* decode directly into destination buffer if there is enough room */ |
1886 | 7.06k | if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) |
1887 | | /* unless the dictionary is stored in tmpOut: |
1888 | | * in which case it's faster to decode within tmpOut |
1889 | | * to benefit from prefix speedup */ |
1890 | 7.06k | && !(dctx->dict!= NULL && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) ) |
1891 | 3.81k | { |
1892 | 3.81k | const char* dict = (const char*)dctx->dict; |
1893 | 3.81k | size_t dictSize = dctx->dictSize; |
1894 | 3.81k | int decodedSize; |
1895 | 3.81k | assert(dstPtr != NULL); |
1896 | 3.81k | if (dict && dictSize > 1 GB) { |
1897 | | /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */ |
1898 | 0 | dict += dictSize - 64 KB; |
1899 | 0 | dictSize = 64 KB; |
1900 | 0 | } |
1901 | 3.81k | decodedSize = LZ4_decompress_safe_usingDict( |
1902 | 3.81k | (const char*)selectedIn, (char*)dstPtr, |
1903 | 3.81k | (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, |
1904 | 3.81k | dict, (int)dictSize); |
1905 | 3.81k | RETURN_ERROR_IF(decodedSize < 0, decompressionFailed); |
1906 | 1.34k | if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum)) |
1907 | 533 | XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize); |
1908 | 1.34k | if (dctx->frameInfo.contentSize) |
1909 | 41 | dctx->frameRemainingSize -= (size_t)decodedSize; |
1910 | | |
1911 | | /* dictionary management */ |
1912 | 1.34k | if (dctx->frameInfo.blockMode==LZ4F_blockLinked) { |
1913 | 1.21k | LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0); |
1914 | 1.21k | } |
1915 | | |
1916 | 1.34k | dstPtr += decodedSize; |
1917 | 1.34k | dctx->dStage = dstage_getBlockHeader; /* end of block, let's get another one */ |
1918 | 1.34k | break; |
1919 | 3.81k | } |
1920 | | |
1921 | | /* not enough place into dst : decode into tmpOut */ |
1922 | | |
1923 | | /* manage dictionary */ |
1924 | 3.25k | if (dctx->frameInfo.blockMode == LZ4F_blockLinked) { |
1925 | 2.38k | if (dctx->dict == dctx->tmpOutBuffer) { |
1926 | | /* truncate dictionary to 64 KB if too big */ |
1927 | 68 | if (dctx->dictSize > 128 KB) { |
1928 | 0 | memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB); |
1929 | 0 | dctx->dictSize = 64 KB; |
1930 | 0 | } |
1931 | 68 | dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize; |
1932 | 2.31k | } else { /* dict not within tmpOut */ |
1933 | 2.31k | size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB); |
1934 | 2.31k | dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace; |
1935 | 2.31k | } } |
1936 | | |
1937 | | /* Decode block into tmpOut */ |
1938 | 3.25k | { const char* dict = (const char*)dctx->dict; |
1939 | 3.25k | size_t dictSize = dctx->dictSize; |
1940 | 3.25k | int decodedSize; |
1941 | 3.25k | if (dict && dictSize > 1 GB) { |
1942 | | /* the dictSize param is an int, avoid truncation / sign issues */ |
1943 | 0 | dict += dictSize - 64 KB; |
1944 | 0 | dictSize = 64 KB; |
1945 | 0 | } |
1946 | 3.25k | decodedSize = LZ4_decompress_safe_usingDict( |
1947 | 3.25k | (const char*)selectedIn, (char*)dctx->tmpOut, |
1948 | 3.25k | (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, |
1949 | 3.25k | dict, (int)dictSize); |
1950 | 3.25k | RETURN_ERROR_IF(decodedSize < 0, decompressionFailed); |
1951 | 957 | if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum) |
1952 | 207 | XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize); |
1953 | 957 | if (dctx->frameInfo.contentSize) |
1954 | 56 | dctx->frameRemainingSize -= (size_t)decodedSize; |
1955 | 957 | dctx->tmpOutSize = (size_t)decodedSize; |
1956 | 957 | dctx->tmpOutStart = 0; |
1957 | 957 | dctx->dStage = dstage_flushOut; |
1958 | 957 | } |
1959 | | /* fall-through */ |
1960 | | |
1961 | 1.06k | case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */ |
1962 | 1.06k | DEBUGLOG(6, "dstage_flushOut"); |
1963 | 1.06k | if (dstPtr != NULL) { |
1964 | 1.06k | size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr)); |
1965 | 1.06k | memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy); |
1966 | | |
1967 | | /* dictionary management */ |
1968 | 1.06k | if (dctx->frameInfo.blockMode == LZ4F_blockLinked) |
1969 | 556 | LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/); |
1970 | | |
1971 | 1.06k | dctx->tmpOutStart += sizeToCopy; |
1972 | 1.06k | dstPtr += sizeToCopy; |
1973 | 1.06k | } |
1974 | 1.06k | if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */ |
1975 | 835 | dctx->dStage = dstage_getBlockHeader; /* get next block */ |
1976 | 835 | break; |
1977 | 835 | } |
1978 | | /* could not flush everything : stop there, just request a block header */ |
1979 | 234 | doAnotherStage = 0; |
1980 | 234 | nextSrcSizeHint = BHSize; |
1981 | 234 | break; |
1982 | | |
1983 | 2.92k | case dstage_getSuffix: |
1984 | 2.92k | RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong); /* incorrect frame size decoded */ |
1985 | 2.89k | if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */ |
1986 | 2.67k | nextSrcSizeHint = 0; |
1987 | 2.67k | LZ4F_resetDecompressionContext(dctx); |
1988 | 2.67k | doAnotherStage = 0; |
1989 | 2.67k | break; |
1990 | 2.67k | } |
1991 | 223 | if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */ |
1992 | 13 | dctx->tmpInSize = 0; |
1993 | 13 | dctx->dStage = dstage_storeSuffix; |
1994 | 210 | } else { |
1995 | 210 | selectedIn = srcPtr; |
1996 | 210 | srcPtr += 4; |
1997 | 210 | } |
1998 | | |
1999 | 223 | if (dctx->dStage == dstage_storeSuffix) /* can be skipped */ |
2000 | 13 | case dstage_storeSuffix: |
2001 | 13 | { size_t const remainingInput = (size_t)(srcEnd - srcPtr); |
2002 | 13 | size_t const wantedData = 4 - dctx->tmpInSize; |
2003 | 13 | size_t const sizeToCopy = MIN(wantedData, remainingInput); |
2004 | 13 | memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy); |
2005 | 13 | srcPtr += sizeToCopy; |
2006 | 13 | dctx->tmpInSize += sizeToCopy; |
2007 | 13 | if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */ |
2008 | 13 | nextSrcSizeHint = 4 - dctx->tmpInSize; |
2009 | 13 | doAnotherStage=0; |
2010 | 13 | break; |
2011 | 13 | } |
2012 | 0 | selectedIn = dctx->tmpIn; |
2013 | 0 | } /* if (dctx->dStage == dstage_storeSuffix) */ |
2014 | | |
2015 | | /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */ |
2016 | 211 | if (!dctx->skipChecksum) { |
2017 | 211 | U32 const readCRC = LZ4F_readLE32(selectedIn); |
2018 | 211 | U32 const resultCRC = XXH32_digest(&(dctx->xxh)); |
2019 | 211 | DEBUGLOG(4, "frame checksum: stored 0x%0X vs 0x%0X processed", readCRC, resultCRC); |
2020 | | #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION |
2021 | | RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid); |
2022 | | #else |
2023 | 211 | (void)readCRC; |
2024 | 211 | (void)resultCRC; |
2025 | 211 | #endif |
2026 | 211 | } |
2027 | 210 | nextSrcSizeHint = 0; |
2028 | 210 | LZ4F_resetDecompressionContext(dctx); |
2029 | 210 | doAnotherStage = 0; |
2030 | 210 | break; |
2031 | | |
2032 | 58 | case dstage_getSFrameSize: |
2033 | 58 | if ((srcEnd - srcPtr) >= 4) { |
2034 | 58 | selectedIn = srcPtr; |
2035 | 58 | srcPtr += 4; |
2036 | 58 | } else { |
2037 | | /* not enough input to read cBlockSize field */ |
2038 | 0 | dctx->tmpInSize = 4; |
2039 | 0 | dctx->tmpInTarget = 8; |
2040 | 0 | dctx->dStage = dstage_storeSFrameSize; |
2041 | 0 | } |
2042 | | |
2043 | 58 | if (dctx->dStage == dstage_storeSFrameSize) |
2044 | 40 | case dstage_storeSFrameSize: |
2045 | 40 | { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, |
2046 | 40 | (size_t)(srcEnd - srcPtr) ); |
2047 | 40 | memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); |
2048 | 40 | srcPtr += sizeToCopy; |
2049 | 40 | dctx->tmpInSize += sizeToCopy; |
2050 | 40 | if (dctx->tmpInSize < dctx->tmpInTarget) { |
2051 | | /* not enough input to get full sBlockSize; wait for more */ |
2052 | 11 | nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize; |
2053 | 11 | doAnotherStage = 0; |
2054 | 11 | break; |
2055 | 11 | } |
2056 | 29 | selectedIn = dctx->header + 4; |
2057 | 29 | } /* if (dctx->dStage == dstage_storeSFrameSize) */ |
2058 | | |
2059 | | /* case dstage_decodeSFrameSize: */ /* no direct entry */ |
2060 | 87 | { size_t const SFrameSize = LZ4F_readLE32(selectedIn); |
2061 | 87 | dctx->frameInfo.contentSize = SFrameSize; |
2062 | 87 | dctx->tmpInTarget = SFrameSize; |
2063 | 87 | dctx->dStage = dstage_skipSkippable; |
2064 | 87 | break; |
2065 | 58 | } |
2066 | | |
2067 | 88 | case dstage_skipSkippable: |
2068 | 88 | { size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr)); |
2069 | 88 | srcPtr += skipSize; |
2070 | 88 | dctx->tmpInTarget -= skipSize; |
2071 | 88 | doAnotherStage = 0; |
2072 | 88 | nextSrcSizeHint = dctx->tmpInTarget; |
2073 | 88 | if (nextSrcSizeHint) break; /* still more to skip */ |
2074 | | /* frame fully skipped : prepare context for a new frame */ |
2075 | 23 | LZ4F_resetDecompressionContext(dctx); |
2076 | 23 | break; |
2077 | 88 | } |
2078 | 50.4k | } /* switch (dctx->dStage) */ |
2079 | 50.4k | } /* while (doAnotherStage) */ |
2080 | | |
2081 | | /* preserve history within tmpOut whenever necessary */ |
2082 | 4.56k | LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2); |
2083 | 4.56k | if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */ |
2084 | 4.56k | && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */ |
2085 | 4.56k | && (dctx->dict != NULL) /* dictionary exists */ |
2086 | 4.56k | && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */ |
2087 | 4.56k | && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */ |
2088 | 893 | { |
2089 | 893 | if (dctx->dStage == dstage_flushOut) { |
2090 | 112 | size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer); |
2091 | 112 | size_t copySize = 64 KB - dctx->tmpOutSize; |
2092 | 112 | const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart; |
2093 | 112 | if (dctx->tmpOutSize > 64 KB) copySize = 0; |
2094 | 112 | if (copySize > preserveSize) copySize = preserveSize; |
2095 | 112 | assert(dctx->tmpOutBuffer != NULL); |
2096 | | |
2097 | 112 | memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize); |
2098 | | |
2099 | 112 | dctx->dict = dctx->tmpOutBuffer; |
2100 | 112 | dctx->dictSize = preserveSize + dctx->tmpOutStart; |
2101 | 781 | } else { |
2102 | 781 | const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize; |
2103 | 781 | size_t const newDictSize = MIN(dctx->dictSize, 64 KB); |
2104 | | |
2105 | 781 | memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize); |
2106 | | |
2107 | 781 | dctx->dict = dctx->tmpOutBuffer; |
2108 | 781 | dctx->dictSize = newDictSize; |
2109 | 781 | dctx->tmpOut = dctx->tmpOutBuffer + newDictSize; |
2110 | 781 | } |
2111 | 893 | } |
2112 | | |
2113 | 4.56k | *srcSizePtr = (size_t)(srcPtr - srcStart); |
2114 | 4.56k | *dstSizePtr = (size_t)(dstPtr - dstStart); |
2115 | 4.56k | return nextSrcSizeHint; |
2116 | 10.1k | } |
2117 | | |
2118 | | /*! LZ4F_decompress_usingDict() : |
2119 | |  * Same as LZ4F_decompress(), using a predefined dictionary. |
2120 | |  * Dictionary is used "in place", without any preprocessing. |
2121 | |  * It must remain accessible throughout the entire frame decoding. |
2122 | |  */ |
2123 | | size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx, |
2124 | | void* dstBuffer, size_t* dstSizePtr, |
2125 | | const void* srcBuffer, size_t* srcSizePtr, |
2126 | | const void* dict, size_t dictSize, |
2127 | | const LZ4F_decompressOptions_t* decompressOptionsPtr) |
2128 | 0 | { |
     |   | /* Attach the external dictionary only while the context is still in |
     |   |  * an init stage (dStage <= dstage_init), i.e. before any frame data |
     |   |  * has been consumed; on subsequent calls of a multi-call decode, the |
     |   |  * context's existing dict/dictSize are left untouched. */ |
2129 | 0 | if (dctx->dStage <= dstage_init) { |
2130 | 0 | dctx->dict = (const BYTE*)dict; |
2131 | 0 | dctx->dictSize = dictSize; |
2132 | 0 | } |
     |   | /* Delegate all actual decoding work to the regular entry point. */ |
2133 | 0 | return LZ4F_decompress(dctx, dstBuffer, dstSizePtr, |
2134 | 0 | srcBuffer, srcSizePtr, |
2135 | 0 | decompressOptionsPtr); |
2136 | 0 | } |