/src/zstd/lib/decompress/zstd_decompress_block.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
3 | | * All rights reserved. |
4 | | * |
5 | | * This source code is licensed under both the BSD-style license (found in the |
6 | | * LICENSE file in the root directory of this source tree) and the GPLv2 (found |
7 | | * in the COPYING file in the root directory of this source tree). |
8 | | * You may select, at your option, one of the above-listed licenses. |
9 | | */ |
10 | | |
11 | | /* zstd_decompress_block : |
12 | | * this module takes care of decompressing _compressed_ blocks */ |
13 | | |
14 | | /*-******************************************************* |
15 | | * Dependencies |
16 | | *********************************************************/ |
17 | | #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ |
18 | | #include "../common/compiler.h" /* prefetch */ |
19 | | #include "../common/cpu.h" /* bmi2 */ |
20 | | #include "../common/mem.h" /* low level memory routines */ |
21 | | #define FSE_STATIC_LINKING_ONLY |
22 | | #include "../common/fse.h" |
23 | | #include "../common/huf.h" |
24 | | #include "../common/zstd_internal.h" |
25 | | #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ |
26 | | #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ |
27 | | #include "zstd_decompress_block.h" |
28 | | #include "../common/bits.h" /* ZSTD_highbit32 */ |
29 | | |
30 | | /*_******************************************************* |
31 | | * Macros |
32 | | **********************************************************/ |
33 | | |
34 | | /* These two optional macros force the use of one or the other of the two |
35 | | * ZSTD_decompressSequences implementations. You can't force both directions |
36 | | * at the same time. |
37 | | */ |
38 | | #if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ |
39 | | defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) |
40 | | #error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!" |
41 | | #endif |
42 | | |
43 | | |
44 | | /*_******************************************************* |
45 | | * Memory operations |
46 | | **********************************************************/ |
47 | 53.0M | static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); } |
48 | | |
49 | | |
50 | | /*-************************************************************* |
51 | | * Block decoding |
52 | | ***************************************************************/ |
53 | | |
54 | | static size_t ZSTD_blockSizeMax(ZSTD_DCtx const* dctx) |
55 | 261M | { |
56 | 261M | size_t const blockSizeMax = dctx->isFrameDecompression ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX; |
57 | 261M | assert(blockSizeMax <= ZSTD_BLOCKSIZE_MAX); |
58 | 261M | return blockSizeMax; |
59 | 261M | } |
60 | | |
61 | | /*! ZSTD_getcBlockSize() : |
62 | | * Provides the size of the compressed block from the block header `src` */ |
63 | | size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, |
64 | | blockProperties_t* bpPtr) |
65 | 60.2M | { |
66 | 60.2M | RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, ""); |
67 | | |
68 | 60.2M | { U32 const cBlockHeader = MEM_readLE24(src); |
69 | 60.2M | U32 const cSize = cBlockHeader >> 3; |
70 | 60.2M | bpPtr->lastBlock = cBlockHeader & 1; |
71 | 60.2M | bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); |
72 | 60.2M | bpPtr->origSize = cSize; /* only useful for RLE */ |
73 | 60.2M | if (bpPtr->blockType == bt_rle) return 1; |
74 | 59.3M | RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, ""); |
75 | 59.3M | return cSize; |
76 | 59.3M | } |
77 | 59.3M | } |
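 | | /* Illustration (annotation, not upstream code) : the 3-byte little-endian |
 | |  * header read above packs, from bit 0 upward, lastBlock (1 bit), |
 | |  * blockType (2 bits) and a 21-bit size field, i.e. roughly : |
 | |  * cBlockHeader == (cSize << 3) | (blockType << 1) | lastBlock */ |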
78 | | |
79 | | /* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */ |
80 | | static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize, |
81 | | const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately) |
82 | 4.66M | { |
83 | 4.66M | size_t const blockSizeMax = ZSTD_blockSizeMax(dctx); |
84 | 4.66M | assert(litSize <= blockSizeMax); |
85 | 4.66M | assert(dctx->isFrameDecompression || streaming == not_streaming); |
86 | 4.66M | assert(expectedWriteSize <= blockSizeMax); |
87 | 4.66M | if (streaming == not_streaming && dstCapacity > blockSizeMax + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) { |
88 | | /* If we aren't streaming, we can just put the literals after the output |
89 | | * of the current block. We don't need to worry about overwriting the |
90 | | * extDict of our window, because it doesn't exist. |
91 | | * So if we have space after the end of the block, just put it there. |
92 | | */ |
93 | 3.68M | dctx->litBuffer = (BYTE*)dst + blockSizeMax + WILDCOPY_OVERLENGTH; |
94 | 3.68M | dctx->litBufferEnd = dctx->litBuffer + litSize; |
95 | 3.68M | dctx->litBufferLocation = ZSTD_in_dst; |
96 | 3.68M | } else if (litSize <= ZSTD_LITBUFFEREXTRASIZE) { |
97 | | /* Literals fit entirely within the extra buffer, put them there to avoid |
98 | | * having to split the literals. |
99 | | */ |
100 | 917k | dctx->litBuffer = dctx->litExtraBuffer; |
101 | 917k | dctx->litBufferEnd = dctx->litBuffer + litSize; |
102 | 917k | dctx->litBufferLocation = ZSTD_not_in_dst; |
103 | 917k | } else { |
104 | 63.5k | assert(blockSizeMax > ZSTD_LITBUFFEREXTRASIZE); |
105 | | /* Literals must be split between the output block and the extra lit |
106 | | * buffer. We fill the extra lit buffer with the tail of the literals, |
107 | | * and put the rest of the literals at the end of the block, with |
108 | | * WILDCOPY_OVERLENGTH of buffer room to allow for overreads. |
109 | | * This MUST not write more than our maxBlockSize beyond dst, because in |
110 | | * streaming mode, that could overwrite part of our extDict window. |
111 | | */ |
112 | 63.5k | if (splitImmediately) { |
113 | | /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */ |
114 | 56.9k | dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; |
115 | 56.9k | dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE; |
116 | 56.9k | } else { |
117 | | /* initially this will be stored entirely in dst during huffman decoding; it will partially be shifted to litExtraBuffer afterwards */ |
118 | 6.56k | dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize; |
119 | 6.56k | dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize; |
120 | 6.56k | } |
121 | 63.5k | dctx->litBufferLocation = ZSTD_split; |
122 | 63.5k | assert(dctx->litBufferEnd <= (BYTE*)dst + expectedWriteSize); |
123 | 63.5k | } |
124 | 4.66M | } |
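 | | /* Summary (annotation) : the placement decision above is, in order : |
 | |  * 1) not streaming and enough dst capacity -> ZSTD_in_dst (after the block) |
 | |  * 2) litSize <= ZSTD_LITBUFFEREXTRASIZE -> ZSTD_not_in_dst (extra buffer) |
 | |  * 3) otherwise -> ZSTD_split (tail in extra buffer, rest at end of dst) */ |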
125 | | |
126 | | /*! ZSTD_decodeLiteralsBlock() : |
127 | | * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored |
128 | | * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current |
129 | | * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being |
130 | | * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write. |
131 | | * |
132 | | * @return : nb of bytes read from src (< srcSize) |
133 | | * note : symbol not declared but exposed for fullbench */ |
134 | | static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, |
135 | | const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */ |
136 | | void* dst, size_t dstCapacity, const streaming_operation streaming) |
137 | 4.67M | { |
138 | 4.67M | DEBUGLOG(5, "ZSTD_decodeLiteralsBlock"); |
139 | 4.67M | RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); |
140 | | |
141 | 4.67M | { const BYTE* const istart = (const BYTE*) src; |
142 | 4.67M | symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); |
143 | 4.67M | size_t const blockSizeMax = ZSTD_blockSizeMax(dctx); |
144 | | |
145 | 4.67M | switch(litEncType) |
146 | 4.67M | { |
147 | 569k | case set_repeat: |
148 | 569k | DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block"); |
149 | 569k | RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, ""); |
150 | 569k | ZSTD_FALLTHROUGH; |
151 | | |
152 | 2.25M | case set_compressed: |
153 | 2.25M | RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need up to 5 for case 3"); |
154 | 2.25M | { size_t lhSize, litSize, litCSize; |
155 | 2.25M | U32 singleStream=0; |
156 | 2.25M | U32 const lhlCode = (istart[0] >> 2) & 3; |
157 | 2.25M | U32 const lhc = MEM_readLE32(istart); |
158 | 2.25M | size_t hufSuccess; |
159 | 2.25M | size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); |
160 | 2.25M | int const flags = 0 |
161 | 2.25M | | (ZSTD_DCtx_get_bmi2(dctx) ? HUF_flags_bmi2 : 0) |
162 | 2.25M | | (dctx->disableHufAsm ? HUF_flags_disableAsm : 0); |
163 | 2.25M | switch(lhlCode) |
164 | 2.25M | { |
165 | 1.59M | case 0: case 1: default: /* note : default is impossible, since lhlCode is in [0..3] */ |
166 | | /* 2 - 2 - 10 - 10 */ |
167 | 1.59M | singleStream = !lhlCode; |
168 | 1.59M | lhSize = 3; |
169 | 1.59M | litSize = (lhc >> 4) & 0x3FF; |
170 | 1.59M | litCSize = (lhc >> 14) & 0x3FF; |
171 | 1.59M | break; |
172 | 630k | case 2: |
173 | | /* 2 - 2 - 14 - 14 */ |
174 | 630k | lhSize = 4; |
175 | 630k | litSize = (lhc >> 4) & 0x3FFF; |
176 | 630k | litCSize = lhc >> 18; |
177 | 630k | break; |
178 | 31.1k | case 3: |
179 | | /* 2 - 2 - 18 - 18 */ |
180 | 31.1k | lhSize = 5; |
181 | 31.1k | litSize = (lhc >> 4) & 0x3FFFF; |
182 | 31.1k | litCSize = (lhc >> 22) + ((size_t)istart[4] << 10); |
183 | 31.1k | break; |
184 | 2.25M | } |
185 | 2.25M | RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); |
186 | 2.25M | RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); |
187 | 2.25M | if (!singleStream) |
188 | 1.08M | RETURN_ERROR_IF(litSize < MIN_LITERALS_FOR_4_STREAMS, literals_headerWrong, |
189 | 2.25M | "Not enough literals (%zu) for the 4-streams mode (min %u)", |
190 | 2.25M | litSize, MIN_LITERALS_FOR_4_STREAMS); |
191 | 2.25M | RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); |
192 | 2.25M | RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, ""); |
193 | 2.24M | ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); |
194 | | |
195 | | /* prefetch huffman table if cold */ |
196 | 2.24M | if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) { |
197 | 11.1k | PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable)); |
198 | 11.1k | } |
199 | | |
200 | 2.24M | if (litEncType==set_repeat) { |
201 | 569k | if (singleStream) { |
202 | 213k | hufSuccess = HUF_decompress1X_usingDTable( |
203 | 213k | dctx->litBuffer, litSize, istart+lhSize, litCSize, |
204 | 213k | dctx->HUFptr, flags); |
205 | 355k | } else { |
206 | 355k | assert(litSize >= MIN_LITERALS_FOR_4_STREAMS); |
207 | 355k | hufSuccess = HUF_decompress4X_usingDTable( |
208 | 355k | dctx->litBuffer, litSize, istart+lhSize, litCSize, |
209 | 355k | dctx->HUFptr, flags); |
210 | 355k | } |
211 | 1.68M | } else { |
212 | 1.68M | if (singleStream) { |
213 | | #if defined(HUF_FORCE_DECOMPRESS_X2) |
214 | | hufSuccess = HUF_decompress1X_DCtx_wksp( |
215 | | dctx->entropy.hufTable, dctx->litBuffer, litSize, |
216 | | istart+lhSize, litCSize, dctx->workspace, |
217 | | sizeof(dctx->workspace), flags); |
218 | | #else |
219 | 957k | hufSuccess = HUF_decompress1X1_DCtx_wksp( |
220 | 957k | dctx->entropy.hufTable, dctx->litBuffer, litSize, |
221 | 957k | istart+lhSize, litCSize, dctx->workspace, |
222 | 957k | sizeof(dctx->workspace), flags); |
223 | 957k | #endif |
224 | 957k | } else { |
225 | 722k | hufSuccess = HUF_decompress4X_hufOnly_wksp( |
226 | 722k | dctx->entropy.hufTable, dctx->litBuffer, litSize, |
227 | 722k | istart+lhSize, litCSize, dctx->workspace, |
228 | 722k | sizeof(dctx->workspace), flags); |
229 | 722k | } |
230 | 1.68M | } |
231 | 2.24M | if (dctx->litBufferLocation == ZSTD_split) |
232 | 6.56k | { |
233 | 6.56k | assert(litSize > ZSTD_LITBUFFEREXTRASIZE); |
234 | 6.56k | ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); |
235 | 6.56k | ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE); |
236 | 6.56k | dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; |
237 | 6.56k | dctx->litBufferEnd -= WILDCOPY_OVERLENGTH; |
238 | 6.56k | assert(dctx->litBufferEnd <= (BYTE*)dst + blockSizeMax); |
239 | 6.56k | } |
240 | | |
241 | 2.24M | RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, ""); |
242 | | |
243 | 2.24M | dctx->litPtr = dctx->litBuffer; |
244 | 2.24M | dctx->litSize = litSize; |
245 | 2.24M | dctx->litEntropy = 1; |
246 | 2.24M | if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; |
247 | 2.24M | return litCSize + lhSize; |
248 | 2.24M | } |
249 | | |
250 | 2.33M | case set_basic: |
251 | 2.33M | { size_t litSize, lhSize; |
252 | 2.33M | U32 const lhlCode = ((istart[0]) >> 2) & 3; |
253 | 2.33M | size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); |
254 | 2.33M | switch(lhlCode) |
255 | 2.33M | { |
256 | 1.50M | case 0: case 2: default: /* note : default is impossible, since lhlCode is in [0..3] */ |
257 | 1.50M | lhSize = 1; |
258 | 1.50M | litSize = istart[0] >> 3; |
259 | 1.50M | break; |
260 | 799k | case 1: |
261 | 799k | lhSize = 2; |
262 | 799k | litSize = MEM_readLE16(istart) >> 4; |
263 | 799k | break; |
264 | 22.4k | case 3: |
265 | 22.4k | lhSize = 3; |
266 | 22.4k | RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize = 3"); |
267 | 22.4k | litSize = MEM_readLE24(istart) >> 4; |
268 | 22.4k | break; |
269 | 2.33M | } |
270 | | |
271 | 2.33M | RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); |
272 | 2.33M | RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); |
273 | 2.33M | RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); |
274 | 2.33M | ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); |
275 | 2.33M | if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ |
276 | 1.45M | RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, ""); |
277 | 1.45M | if (dctx->litBufferLocation == ZSTD_split) |
278 | 576 | { |
279 | 576 | ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE); |
280 | 576 | ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); |
281 | 576 | } |
282 | 1.45M | else |
283 | 1.45M | { |
284 | 1.45M | ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize); |
285 | 1.45M | } |
286 | 1.45M | dctx->litPtr = dctx->litBuffer; |
287 | 1.45M | dctx->litSize = litSize; |
288 | 1.45M | return lhSize+litSize; |
289 | 1.45M | } |
290 | | /* direct reference into compressed stream */ |
291 | 873k | dctx->litPtr = istart+lhSize; |
292 | 873k | dctx->litSize = litSize; |
293 | 873k | dctx->litBufferEnd = dctx->litPtr + litSize; |
294 | 873k | dctx->litBufferLocation = ZSTD_not_in_dst; |
295 | 873k | return lhSize+litSize; |
296 | 2.33M | } |
297 | | |
298 | 85.1k | case set_rle: |
299 | 85.1k | { U32 const lhlCode = ((istart[0]) >> 2) & 3; |
300 | 85.1k | size_t litSize, lhSize; |
301 | 85.1k | size_t expectedWriteSize = MIN(blockSizeMax, dstCapacity); |
302 | 85.1k | switch(lhlCode) |
303 | 85.1k | { |
304 | 13.1k | case 0: case 2: default: /* note : default is impossible, since lhlCode is in [0..3] */ |
305 | 13.1k | lhSize = 1; |
306 | 13.1k | litSize = istart[0] >> 3; |
307 | 13.1k | break; |
308 | 11.8k | case 1: |
309 | 11.8k | lhSize = 2; |
310 | 11.8k | RETURN_ERROR_IF(srcSize<3, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 3"); |
311 | 11.7k | litSize = MEM_readLE16(istart) >> 4; |
312 | 11.7k | break; |
313 | 60.2k | case 3: |
314 | 60.2k | lhSize = 3; |
315 | 60.2k | RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 2; here we need lhSize+1 = 4"); |
316 | 60.1k | litSize = MEM_readLE24(istart) >> 4; |
317 | 60.1k | break; |
318 | 85.1k | } |
319 | 85.0k | RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); |
320 | 82.4k | RETURN_ERROR_IF(litSize > blockSizeMax, corruption_detected, ""); |
321 | 82.3k | RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); |
322 | 82.1k | ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); |
323 | 82.1k | if (dctx->litBufferLocation == ZSTD_split) |
324 | 55.7k | { |
325 | 55.7k | ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE); |
326 | 55.7k | ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE); |
327 | 55.7k | } |
328 | 26.3k | else |
329 | 26.3k | { |
330 | 26.3k | ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize); |
331 | 26.3k | } |
332 | 82.1k | dctx->litPtr = dctx->litBuffer; |
333 | 82.1k | dctx->litSize = litSize; |
334 | 82.1k | return lhSize+1; |
335 | 82.3k | } |
336 | 0 | default: |
337 | 0 | RETURN_ERROR(corruption_detected, "impossible"); |
338 | 4.67M | } |
339 | 4.67M | } |
340 | 4.67M | } |
341 | | |
342 | | /* Hidden declaration for fullbench */ |
343 | | size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx, |
344 | | const void* src, size_t srcSize, |
345 | | void* dst, size_t dstCapacity); |
346 | | size_t ZSTD_decodeLiteralsBlock_wrapper(ZSTD_DCtx* dctx, |
347 | | const void* src, size_t srcSize, |
348 | | void* dst, size_t dstCapacity) |
349 | 0 | { |
350 | 0 | dctx->isFrameDecompression = 0; |
351 | 0 | return ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, not_streaming); |
352 | 0 | } |
353 | | |
354 | | /* Default FSE distribution tables. |
355 | | * These are pre-calculated FSE decoding tables, using the default distributions defined in the specification : |
356 | | * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions |
357 | | * They were generated programmatically with the following method : |
358 | | * - start from the default distributions, present in /lib/common/zstd_internal.h |
359 | | * - generate the tables normally, using ZSTD_buildFSETable() |
360 | | * - print out the content of the tables |
361 | | * - prettify the output, reported below, and test with a fuzzer to ensure correctness */ |
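 | | /* Regeneration sketch (annotation, assuming the declarations from |
 | |  * zstd_internal.h and a workspace wksp of ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes) : |
 | |  * ZSTD_seqSymbol dt[(1<<LL_DEFAULTNORMLOG)+1]; |
 | |  * ZSTD_buildFSETable(dt, LL_defaultNorm, MaxLL, LL_base, LL_bits, |
 | |  * LL_DEFAULTNORMLOG, wksp, wkspSize, bmi2); |
 | |  * then print dt[] and paste the result below. */ |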
362 | | |
363 | | /* Default FSE distribution table for Literal Lengths */ |
364 | | static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = { |
365 | | { 1, 1, 1, LL_DEFAULTNORMLOG}, /* header : fastMode, tableLog */ |
366 | | /* nextState, nbAddBits, nbBits, baseVal */ |
367 | | { 0, 0, 4, 0}, { 16, 0, 4, 0}, |
368 | | { 32, 0, 5, 1}, { 0, 0, 5, 3}, |
369 | | { 0, 0, 5, 4}, { 0, 0, 5, 6}, |
370 | | { 0, 0, 5, 7}, { 0, 0, 5, 9}, |
371 | | { 0, 0, 5, 10}, { 0, 0, 5, 12}, |
372 | | { 0, 0, 6, 14}, { 0, 1, 5, 16}, |
373 | | { 0, 1, 5, 20}, { 0, 1, 5, 22}, |
374 | | { 0, 2, 5, 28}, { 0, 3, 5, 32}, |
375 | | { 0, 4, 5, 48}, { 32, 6, 5, 64}, |
376 | | { 0, 7, 5, 128}, { 0, 8, 6, 256}, |
377 | | { 0, 10, 6, 1024}, { 0, 12, 6, 4096}, |
378 | | { 32, 0, 4, 0}, { 0, 0, 4, 1}, |
379 | | { 0, 0, 5, 2}, { 32, 0, 5, 4}, |
380 | | { 0, 0, 5, 5}, { 32, 0, 5, 7}, |
381 | | { 0, 0, 5, 8}, { 32, 0, 5, 10}, |
382 | | { 0, 0, 5, 11}, { 0, 0, 6, 13}, |
383 | | { 32, 1, 5, 16}, { 0, 1, 5, 18}, |
384 | | { 32, 1, 5, 22}, { 0, 2, 5, 24}, |
385 | | { 32, 3, 5, 32}, { 0, 3, 5, 40}, |
386 | | { 0, 6, 4, 64}, { 16, 6, 4, 64}, |
387 | | { 32, 7, 5, 128}, { 0, 9, 6, 512}, |
388 | | { 0, 11, 6, 2048}, { 48, 0, 4, 0}, |
389 | | { 16, 0, 4, 1}, { 32, 0, 5, 2}, |
390 | | { 32, 0, 5, 3}, { 32, 0, 5, 5}, |
391 | | { 32, 0, 5, 6}, { 32, 0, 5, 8}, |
392 | | { 32, 0, 5, 9}, { 32, 0, 5, 11}, |
393 | | { 32, 0, 5, 12}, { 0, 0, 6, 15}, |
394 | | { 32, 1, 5, 18}, { 32, 1, 5, 20}, |
395 | | { 32, 2, 5, 24}, { 32, 2, 5, 28}, |
396 | | { 32, 3, 5, 40}, { 32, 4, 5, 48}, |
397 | | { 0, 16, 6,65536}, { 0, 15, 6,32768}, |
398 | | { 0, 14, 6,16384}, { 0, 13, 6, 8192}, |
399 | | }; /* LL_defaultDTable */ |
400 | | |
401 | | /* Default FSE distribution table for Offset Codes */ |
402 | | static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = { |
403 | | { 1, 1, 1, OF_DEFAULTNORMLOG}, /* header : fastMode, tableLog */ |
404 | | /* nextState, nbAddBits, nbBits, baseVal */ |
405 | | { 0, 0, 5, 0}, { 0, 6, 4, 61}, |
406 | | { 0, 9, 5, 509}, { 0, 15, 5,32765}, |
407 | | { 0, 21, 5,2097149}, { 0, 3, 5, 5}, |
408 | | { 0, 7, 4, 125}, { 0, 12, 5, 4093}, |
409 | | { 0, 18, 5,262141}, { 0, 23, 5,8388605}, |
410 | | { 0, 5, 5, 29}, { 0, 8, 4, 253}, |
411 | | { 0, 14, 5,16381}, { 0, 20, 5,1048573}, |
412 | | { 0, 2, 5, 1}, { 16, 7, 4, 125}, |
413 | | { 0, 11, 5, 2045}, { 0, 17, 5,131069}, |
414 | | { 0, 22, 5,4194301}, { 0, 4, 5, 13}, |
415 | | { 16, 8, 4, 253}, { 0, 13, 5, 8189}, |
416 | | { 0, 19, 5,524285}, { 0, 1, 5, 1}, |
417 | | { 16, 6, 4, 61}, { 0, 10, 5, 1021}, |
418 | | { 0, 16, 5,65533}, { 0, 28, 5,268435453}, |
419 | | { 0, 27, 5,134217725}, { 0, 26, 5,67108861}, |
420 | | { 0, 25, 5,33554429}, { 0, 24, 5,16777213}, |
421 | | }; /* OF_defaultDTable */ |
422 | | |
423 | | |
424 | | /* Default FSE distribution table for Match Lengths */ |
425 | | static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = { |
426 | | { 1, 1, 1, ML_DEFAULTNORMLOG}, /* header : fastMode, tableLog */ |
427 | | /* nextState, nbAddBits, nbBits, baseVal */ |
428 | | { 0, 0, 6, 3}, { 0, 0, 4, 4}, |
429 | | { 32, 0, 5, 5}, { 0, 0, 5, 6}, |
430 | | { 0, 0, 5, 8}, { 0, 0, 5, 9}, |
431 | | { 0, 0, 5, 11}, { 0, 0, 6, 13}, |
432 | | { 0, 0, 6, 16}, { 0, 0, 6, 19}, |
433 | | { 0, 0, 6, 22}, { 0, 0, 6, 25}, |
434 | | { 0, 0, 6, 28}, { 0, 0, 6, 31}, |
435 | | { 0, 0, 6, 34}, { 0, 1, 6, 37}, |
436 | | { 0, 1, 6, 41}, { 0, 2, 6, 47}, |
437 | | { 0, 3, 6, 59}, { 0, 4, 6, 83}, |
438 | | { 0, 7, 6, 131}, { 0, 9, 6, 515}, |
439 | | { 16, 0, 4, 4}, { 0, 0, 4, 5}, |
440 | | { 32, 0, 5, 6}, { 0, 0, 5, 7}, |
441 | | { 32, 0, 5, 9}, { 0, 0, 5, 10}, |
442 | | { 0, 0, 6, 12}, { 0, 0, 6, 15}, |
443 | | { 0, 0, 6, 18}, { 0, 0, 6, 21}, |
444 | | { 0, 0, 6, 24}, { 0, 0, 6, 27}, |
445 | | { 0, 0, 6, 30}, { 0, 0, 6, 33}, |
446 | | { 0, 1, 6, 35}, { 0, 1, 6, 39}, |
447 | | { 0, 2, 6, 43}, { 0, 3, 6, 51}, |
448 | | { 0, 4, 6, 67}, { 0, 5, 6, 99}, |
449 | | { 0, 8, 6, 259}, { 32, 0, 4, 4}, |
450 | | { 48, 0, 4, 4}, { 16, 0, 4, 5}, |
451 | | { 32, 0, 5, 7}, { 32, 0, 5, 8}, |
452 | | { 32, 0, 5, 10}, { 32, 0, 5, 11}, |
453 | | { 0, 0, 6, 14}, { 0, 0, 6, 17}, |
454 | | { 0, 0, 6, 20}, { 0, 0, 6, 23}, |
455 | | { 0, 0, 6, 26}, { 0, 0, 6, 29}, |
456 | | { 0, 0, 6, 32}, { 0, 16, 6,65539}, |
457 | | { 0, 15, 6,32771}, { 0, 14, 6,16387}, |
458 | | { 0, 13, 6, 8195}, { 0, 12, 6, 4099}, |
459 | | { 0, 11, 6, 2051}, { 0, 10, 6, 1027}, |
460 | | }; /* ML_defaultDTable */ |
461 | | |
462 | | |
463 | | static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U8 nbAddBits) |
464 | 415k | { |
465 | 415k | void* ptr = dt; |
466 | 415k | ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr; |
467 | 415k | ZSTD_seqSymbol* const cell = dt + 1; |
468 | | |
469 | 415k | DTableH->tableLog = 0; |
470 | 415k | DTableH->fastMode = 0; |
471 | | |
472 | 415k | cell->nbBits = 0; |
473 | 415k | cell->nextState = 0; |
474 | 415k | assert(nbAddBits < 255); |
475 | 415k | cell->nbAdditionalBits = nbAddBits; |
476 | 415k | cell->baseValue = baseValue; |
477 | 415k | } |
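 | | /* Annotation : an RLE "table" is a single-entry FSE table with nbBits==0, |
 | |  * so the decoder state never advances and every sequence decodes the same |
 | |  * baseValue / nbAdditionalBits pair. */ |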
478 | | |
479 | | |
480 | | /* ZSTD_buildFSETable() : |
481 | | * generate FSE decoding table for one symbol (ll, ml or off) |
482 | | * cannot fail if input is valid => |
483 | | * all inputs are presumed validated at this stage */ |
484 | | FORCE_INLINE_TEMPLATE |
485 | | void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, |
486 | | const short* normalizedCounter, unsigned maxSymbolValue, |
487 | | const U32* baseValue, const U8* nbAdditionalBits, |
488 | | unsigned tableLog, void* wksp, size_t wkspSize) |
489 | 3.28M | { |
490 | 3.28M | ZSTD_seqSymbol* const tableDecode = dt+1; |
491 | 3.28M | U32 const maxSV1 = maxSymbolValue + 1; |
492 | 3.28M | U32 const tableSize = 1 << tableLog; |
493 | | |
494 | 3.28M | U16* symbolNext = (U16*)wksp; |
495 | 3.28M | BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1); |
496 | 3.28M | U32 highThreshold = tableSize - 1; |
497 | | |
498 | | |
499 | | /* Sanity Checks */ |
500 | 3.28M | assert(maxSymbolValue <= MaxSeq); |
501 | 3.28M | assert(tableLog <= MaxFSELog); |
502 | 3.28M | assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE); |
503 | 3.28M | (void)wkspSize; |
504 | | /* Init, lay down lowprob symbols */ |
505 | 3.28M | { ZSTD_seqSymbol_header DTableH; |
506 | 3.28M | DTableH.tableLog = tableLog; |
507 | 3.28M | DTableH.fastMode = 1; |
508 | 3.28M | { S16 const largeLimit= (S16)(1 << (tableLog-1)); |
509 | 3.28M | U32 s; |
510 | 64.2M | for (s=0; s<maxSV1; s++) { |
511 | 61.0M | if (normalizedCounter[s]==-1) { |
512 | 581k | tableDecode[highThreshold--].baseValue = s; |
513 | 581k | symbolNext[s] = 1; |
514 | 60.4M | } else { |
515 | 60.4M | if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0; |
516 | 60.4M | assert(normalizedCounter[s]>=0); |
517 | 60.4M | symbolNext[s] = (U16)normalizedCounter[s]; |
518 | 60.4M | } } } |
519 | 3.28M | ZSTD_memcpy(dt, &DTableH, sizeof(DTableH)); |
520 | 3.28M | } |
521 | | |
522 | | /* Spread symbols */ |
523 | 0 | assert(tableSize <= 512); |
524 | | /* Specialized symbol spreading for the case when there are |
525 | | * no low probability (-1 count) symbols. When compressing |
526 | | * small blocks we avoid low probability symbols to hit this |
527 | | * case, since header decoding speed matters more. |
528 | | */ |
529 | 3.28M | if (highThreshold == tableSize - 1) { |
530 | 3.21M | size_t const tableMask = tableSize-1; |
531 | 3.21M | size_t const step = FSE_TABLESTEP(tableSize); |
532 | | /* First lay down the symbols in order. |
533 | | * We use a uint64_t to lay down 8 bytes at a time. This reduces branch |
534 | | * misses since small blocks generally have small table logs, so nearly |
535 | | * all symbols have counts <= 8. We ensure we have 8 bytes at the end of |
536 | | * our buffer to handle the over-write. |
537 | | */ |
538 | 3.21M | { |
539 | 3.21M | U64 const add = 0x0101010101010101ull; |
540 | 3.21M | size_t pos = 0; |
541 | 3.21M | U64 sv = 0; |
542 | 3.21M | U32 s; |
543 | 62.6M | for (s=0; s<maxSV1; ++s, sv += add) { |
544 | 59.3M | int i; |
545 | 59.3M | int const n = normalizedCounter[s]; |
546 | 59.3M | MEM_write64(spread + pos, sv); |
547 | 76.5M | for (i = 8; i < n; i += 8) { |
548 | 17.1M | MEM_write64(spread + pos + i, sv); |
549 | 17.1M | } |
550 | 59.3M | assert(n>=0); |
551 | 59.3M | pos += (size_t)n; |
552 | 59.3M | } |
553 | 3.21M | } |
554 | | /* Now we spread those positions across the table. |
555 | | * The benefit of doing it in two stages is that we avoid the |
556 | | * variable size inner loop, which caused lots of branch misses. |
557 | | * Now we can run through all the positions without any branch misses. |
558 | | * We unroll the loop twice, since that is what empirically worked best. |
559 | | */ |
560 | 3.21M | { |
561 | 3.21M | size_t position = 0; |
562 | 3.21M | size_t s; |
563 | 3.21M | size_t const unroll = 2; |
564 | 3.21M | assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */ |
565 | 125M | for (s = 0; s < (size_t)tableSize; s += unroll) { |
566 | 121M | size_t u; |
567 | 365M | for (u = 0; u < unroll; ++u) { |
568 | 243M | size_t const uPosition = (position + (u * step)) & tableMask; |
569 | 243M | tableDecode[uPosition].baseValue = spread[s + u]; |
570 | 243M | } |
571 | 121M | position = (position + (unroll * step)) & tableMask; |
572 | 121M | } |
573 | 3.21M | assert(position == 0); |
574 | 3.21M | } |
575 | 3.21M | } else { |
576 | 62.2k | U32 const tableMask = tableSize-1; |
577 | 62.2k | U32 const step = FSE_TABLESTEP(tableSize); |
578 | 62.2k | U32 s, position = 0; |
579 | 1.67M | for (s=0; s<maxSV1; s++) { |
580 | 1.61M | int i; |
581 | 1.61M | int const n = normalizedCounter[s]; |
582 | 20.9M | for (i=0; i<n; i++) { |
583 | 19.3M | tableDecode[position].baseValue = s; |
584 | 19.3M | position = (position + step) & tableMask; |
585 | 19.9M | while (UNLIKELY(position > highThreshold)) position = (position + step) & tableMask; /* lowprob area */ |
586 | 19.3M | } } |
587 | 62.2k | assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ |
588 | 62.2k | } |
589 | | |
590 | | /* Build Decoding table */ |
591 | 3.28M | { |
592 | 3.28M | U32 u; |
593 | 266M | for (u=0; u<tableSize; u++) { |
594 | 263M | U32 const symbol = tableDecode[u].baseValue; |
595 | 263M | U32 const nextState = symbolNext[symbol]++; |
596 | 263M | tableDecode[u].nbBits = (BYTE) (tableLog - ZSTD_highbit32(nextState) ); |
597 | 263M | tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize); |
598 | 263M | assert(nbAdditionalBits[symbol] < 255); |
599 | 263M | tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol]; |
600 | 263M | tableDecode[u].baseValue = baseValue[symbol]; |
601 | 263M | } |
602 | 3.28M | } |
603 | 3.28M | } |
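 | | /* Worked example (annotation) : with tableLog==6 and nextState==9, |
 | |  * nbBits = 6 - ZSTD_highbit32(9) = 3 and the stored nextState becomes |
 | |  * (9 << 3) - 64 = 8, so decoding reads 3 bits b and jumps to state 8+b. */ |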
604 | | |
605 | | /* Avoids the FORCE_INLINE of the _body() function. */ |
606 | | static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt, |
607 | | const short* normalizedCounter, unsigned maxSymbolValue, |
608 | | const U32* baseValue, const U8* nbAdditionalBits, |
609 | | unsigned tableLog, void* wksp, size_t wkspSize) |
610 | 195k | { |
611 | 195k | ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue, |
612 | 195k | baseValue, nbAdditionalBits, tableLog, wksp, wkspSize); |
613 | 195k | } |
614 | | |
615 | | #if DYNAMIC_BMI2 |
616 | | BMI2_TARGET_ATTRIBUTE static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt, |
617 | | const short* normalizedCounter, unsigned maxSymbolValue, |
618 | | const U32* baseValue, const U8* nbAdditionalBits, |
619 | | unsigned tableLog, void* wksp, size_t wkspSize) |
620 | 3.08M | { |
621 | 3.08M | ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue, |
622 | 3.08M | baseValue, nbAdditionalBits, tableLog, wksp, wkspSize); |
623 | 3.08M | } |
624 | | #endif |
625 | | |
626 | | void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, |
627 | | const short* normalizedCounter, unsigned maxSymbolValue, |
628 | | const U32* baseValue, const U8* nbAdditionalBits, |
629 | | unsigned tableLog, void* wksp, size_t wkspSize, int bmi2) |
630 | 3.28M | { |
631 | 3.28M | #if DYNAMIC_BMI2 |
632 | 3.28M | if (bmi2) { |
633 | 3.08M | ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue, |
634 | 3.08M | baseValue, nbAdditionalBits, tableLog, wksp, wkspSize); |
635 | 3.08M | return; |
636 | 3.08M | } |
637 | 195k | #endif |
638 | 195k | (void)bmi2; |
639 | 195k | ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue, |
640 | 195k | baseValue, nbAdditionalBits, tableLog, wksp, wkspSize); |
641 | 195k | } |
642 | | |
643 | | |
644 | | /*! ZSTD_buildSeqTable() : |
645 | | * @return : nb bytes read from src, |
646 | | * or an error code if it fails */ |
647 | | static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr, |
648 | | symbolEncodingType_e type, unsigned max, U32 maxLog, |
649 | | const void* src, size_t srcSize, |
650 | | const U32* baseValue, const U8* nbAdditionalBits, |
651 | | const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable, |
652 | | int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize, |
653 | | int bmi2) |
654 | 12.6M | { |
655 | 12.6M | switch(type) |
656 | 12.6M | { |
657 | 415k | case set_rle : |
658 | 415k | RETURN_ERROR_IF(!srcSize, srcSize_wrong, ""); |
659 | 415k | RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, ""); |
660 | 415k | { U32 const symbol = *(const BYTE*)src; |
661 | 415k | U32 const baseline = baseValue[symbol]; |
662 | 415k | U8 const nbBits = nbAdditionalBits[symbol]; |
663 | 415k | ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); |
664 | 415k | } |
665 | 415k | *DTablePtr = DTableSpace; |
666 | 415k | return 1; |
667 | 7.15M | case set_basic : |
668 | 7.15M | *DTablePtr = defaultTable; |
669 | 7.15M | return 0; |
670 | 1.98M | case set_repeat: |
671 | 1.98M | RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, ""); |
672 | | /* prefetch FSE table if used */ |
673 | 1.98M | if (ddictIsCold && (nbSeq > 24 /* heuristic */)) { |
674 | 35.3k | const void* const pStart = *DTablePtr; |
675 | 35.3k | size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog)); |
676 | 35.3k | PREFETCH_AREA(pStart, pSize); |
677 | 35.3k | } |
678 | 1.98M | return 0; |
679 | 3.08M | case set_compressed : |
680 | 3.08M | { unsigned tableLog; |
681 | 3.08M | S16 norm[MaxSeq+1]; |
682 | 3.08M | size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); |
683 | 3.08M | RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, ""); |
684 | 3.08M | RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, ""); |
685 | 3.08M | ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2); |
686 | 3.08M | *DTablePtr = DTableSpace; |
687 | 3.08M | return headerSize; |
688 | 3.08M | } |
689 | 0 | default : |
690 | 0 | assert(0); |
691 | 12.6M | RETURN_ERROR(GENERIC, "impossible"); |
692 | 12.6M | } |
693 | 12.6M | } |
694 | | |
695 | | size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, |
696 | | const void* src, size_t srcSize) |
697 | 4.65M | { |
698 | 4.65M | const BYTE* const istart = (const BYTE*)src; |
699 | 4.65M | const BYTE* const iend = istart + srcSize; |
700 | 4.65M | const BYTE* ip = istart; |
701 | 4.65M | int nbSeq; |
702 | 4.65M | DEBUGLOG(5, "ZSTD_decodeSeqHeaders"); |
703 | | |
704 | | /* check */ |
705 | 4.65M | RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, ""); |
706 | | |
707 | | /* SeqHead */ |
708 | 4.65M | nbSeq = *ip++; |
709 | 4.65M | if (nbSeq > 0x7F) { |
710 | 424k | if (nbSeq == 0xFF) { |
711 | 1.22k | RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, ""); |
712 | 1.11k | nbSeq = MEM_readLE16(ip) + LONGNBSEQ; |
713 | 1.11k | ip+=2; |
714 | 422k | } else { |
715 | 422k | RETURN_ERROR_IF(ip >= iend, srcSize_wrong, ""); |
716 | 422k | nbSeq = ((nbSeq-0x80)<<8) + *ip++; |
717 | 422k | } |
718 | 424k | } |
719 | 4.65M | *nbSeqPtr = nbSeq; |
720 | | |
721 | 4.65M | if (nbSeq == 0) { |
722 | | /* No sequence : section ends immediately */ |
723 | 436k | RETURN_ERROR_IF(ip != iend, corruption_detected, |
724 | 436k | "extraneous data present in the Sequences section"); |
725 | 436k | return (size_t)(ip - istart); |
726 | 436k | } |
727 | | |
728 | | /* FSE table descriptors */ |
729 | 4.21M | RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */ |
730 | 4.21M | RETURN_ERROR_IF(*ip & 3, corruption_detected, ""); /* The last field, Reserved, must be all-zeroes. */ |
731 | 4.21M | { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); |
732 | 4.21M | symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); |
733 | 4.21M | symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); |
734 | 4.21M | ip++; |
735 | | |
736 | | /* Build DTables */ |
737 | 4.21M | { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, |
738 | 4.21M | LLtype, MaxLL, LLFSELog, |
739 | 4.21M | ip, iend-ip, |
740 | 4.21M | LL_base, LL_bits, |
741 | 4.21M | LL_defaultDTable, dctx->fseEntropy, |
742 | 4.21M | dctx->ddictIsCold, nbSeq, |
743 | 4.21M | dctx->workspace, sizeof(dctx->workspace), |
744 | 4.21M | ZSTD_DCtx_get_bmi2(dctx)); |
745 | 4.21M | RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed"); |
746 | 4.21M | ip += llhSize; |
747 | 4.21M | } |
748 | | |
749 | 0 | { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, |
750 | 4.21M | OFtype, MaxOff, OffFSELog, |
751 | 4.21M | ip, iend-ip, |
752 | 4.21M | OF_base, OF_bits, |
753 | 4.21M | OF_defaultDTable, dctx->fseEntropy, |
754 | 4.21M | dctx->ddictIsCold, nbSeq, |
755 | 4.21M | dctx->workspace, sizeof(dctx->workspace), |
756 | 4.21M | ZSTD_DCtx_get_bmi2(dctx)); |
757 | 4.21M | RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed"); |
758 | 4.21M | ip += ofhSize; |
759 | 4.21M | } |
760 | | |
761 | 0 | { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, |
762 | 4.21M | MLtype, MaxML, MLFSELog, |
763 | 4.21M | ip, iend-ip, |
764 | 4.21M | ML_base, ML_bits, |
765 | 4.21M | ML_defaultDTable, dctx->fseEntropy, |
766 | 4.21M | dctx->ddictIsCold, nbSeq, |
767 | 4.21M | dctx->workspace, sizeof(dctx->workspace), |
768 | 4.21M | ZSTD_DCtx_get_bmi2(dctx)); |
769 | 4.21M | RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed"); |
770 | 4.21M | ip += mlhSize; |
771 | 4.21M | } |
772 | 4.21M | } |
773 | | |
774 | 0 | return ip-istart; |
775 | 4.21M | } |
776 | | |
777 | | |
778 | | typedef struct { |
779 | | size_t litLength; |
780 | | size_t matchLength; |
781 | | size_t offset; |
782 | | } seq_t; |
783 | | |
784 | | typedef struct { |
785 | | size_t state; |
786 | | const ZSTD_seqSymbol* table; |
787 | | } ZSTD_fseState; |
788 | | |
789 | | typedef struct { |
790 | | BIT_DStream_t DStream; |
791 | | ZSTD_fseState stateLL; |
792 | | ZSTD_fseState stateOffb; |
793 | | ZSTD_fseState stateML; |
794 | | size_t prevOffset[ZSTD_REP_NUM]; |
795 | | } seqState_t; |
796 | | |
797 | | /*! ZSTD_overlapCopy8() : |
798 | | * Copies 8 bytes from ip to op and updates op and ip where ip <= op. |
799 | | * If the offset is < 8 then the offset is spread to at least 8 bytes. |
800 | | * |
801 | | * Precondition: *ip <= *op |
802 | | * Postcondition: *op - *ip >= 8 |
803 | | */ |
804 | 72.0M | HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) { |
805 | 72.0M | assert(*ip <= *op); |
806 | 72.0M | if (offset < 8) { |
807 | | /* close range match, overlap */ |
808 | 53.0M | static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ |
809 | 53.0M | static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ |
810 | 53.0M | int const sub2 = dec64table[offset]; |
811 | 53.0M | (*op)[0] = (*ip)[0]; |
812 | 53.0M | (*op)[1] = (*ip)[1]; |
813 | 53.0M | (*op)[2] = (*ip)[2]; |
814 | 53.0M | (*op)[3] = (*ip)[3]; |
815 | 53.0M | *ip += dec32table[offset]; |
816 | 53.0M | ZSTD_copy4(*op+4, *ip); |
817 | 53.0M | *ip -= sub2; |
818 | 53.0M | } else { |
819 | 18.9M | ZSTD_copy8(*op, *ip); |
820 | 18.9M | } |
821 | 72.0M | *ip += 8; |
822 | 72.0M | *op += 8; |
823 | 72.0M | assert(*op - *ip >= 8); |
824 | 72.0M | } |
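 | | /* Worked example (annotation) : for offset==1 (a run of one repeated byte), |
 | |  * dec32table[1]==1 and dec64table[1]==8 : after the four byte-wise copies, |
 | |  * ip advances by 1, ZSTD_copy4 re-reads the freshly written bytes, and ip |
 | |  * is pulled back 8, leaving *op - *ip == 8 ready for a wildcopy. */ |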
825 | | |
826 | | /*! ZSTD_safecopy() : |
827 | | * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer |
828 | | * and write up to 16 bytes past oend_w (op >= oend_w is allowed). |
829 | | * This function is only called in the uncommon case where the sequence is near the end of the block. It |
830 | | * should be fast for a single long sequence, but can be slow for several short sequences. |
831 | | * |
832 | | * @param ovtype controls the overlap detection |
833 | | * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. |
834 | | * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. |
835 | | * The src buffer must be before the dst buffer. |
836 | | */ |
837 | 2.56M | static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { |
838 | 2.56M | ptrdiff_t const diff = op - ip; |
839 | 2.56M | BYTE* const oend = op + length; |
840 | | |
841 | 2.56M | assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) || |
842 | 2.56M | (ovtype == ZSTD_overlap_src_before_dst && diff >= 0)); |
843 | | |
844 | 2.56M | if (length < 8) { |
845 | | /* Handle short lengths. */ |
846 | 6.82M | while (op < oend) *op++ = *ip++; |
847 | 1.65M | return; |
848 | 1.65M | } |
849 | 913k | if (ovtype == ZSTD_overlap_src_before_dst) { |
850 | | /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */ |
851 | 871k | assert(length >= 8); |
852 | 871k | ZSTD_overlapCopy8(&op, &ip, diff); |
853 | 871k | length -= 8; |
854 | 871k | assert(op - ip >= 8); |
855 | 871k | assert(op <= oend); |
856 | 871k | } |
857 | | |
858 | 913k | if (oend <= oend_w) { |
859 | | /* No risk of overwrite. */ |
860 | 15.0k | ZSTD_wildcopy(op, ip, length, ovtype); |
861 | 15.0k | return; |
862 | 15.0k | } |
863 | 898k | if (op <= oend_w) { |
864 | | /* Wildcopy until we get close to the end. */ |
865 | 60.8k | assert(oend > oend_w); |
866 | 60.8k | ZSTD_wildcopy(op, ip, oend_w - op, ovtype); |
867 | 60.8k | ip += oend_w - op; |
868 | 60.8k | op += oend_w - op; |
869 | 60.8k | } |
870 | | /* Handle the leftovers. */ |
871 | 4.32G | while (op < oend) *op++ = *ip++; |
872 | 898k | } |
873 | | |
874 | | /* ZSTD_safecopyDstBeforeSrc(): |
875 | | * This version allows overlap with dst before src, or handles the non-overlap case with dst after src |
876 | | * Kept separate from the more common ZSTD_safecopy case to avoid a performance impact on the common path */ |
877 | 2.05M | static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, ptrdiff_t length) { |
878 | 2.05M | ptrdiff_t const diff = op - ip; |
879 | 2.05M | BYTE* const oend = op + length; |
880 | | |
881 | 2.05M | if (length < 8 || diff > -8) { |
882 | | /* Handle short lengths, close overlaps, and dst not before src. */ |
883 | 16.3M | while (op < oend) *op++ = *ip++; |
884 | 2.02M | return; |
885 | 2.02M | } |
886 | | |
887 | 37.3k | if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) { |
888 | 33.4k | ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap); |
889 | 33.4k | ip += oend - WILDCOPY_OVERLENGTH - op; |
890 | 33.4k | op += oend - WILDCOPY_OVERLENGTH - op; |
891 | 33.4k | } |
892 | | |
893 | | /* Handle the leftovers. */ |
894 | 1.95M | while (op < oend) *op++ = *ip++; |
895 | 37.3k | } |
896 | | |
897 | | /* ZSTD_execSequenceEnd(): |
898 | | * This version handles cases that are near the end of the output buffer. It requires |
899 | | * more careful checks to make sure there is no overflow. By separating out these hard |
900 | | * and unlikely cases, we can speed up the common cases. |
901 | | * |
902 | | * NOTE: This function needs to be fast for a single long sequence, but doesn't need |
903 | | * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). |
904 | | */ |
905 | | FORCE_NOINLINE |
906 | | ZSTD_ALLOW_POINTER_OVERFLOW_ATTR |
907 | | size_t ZSTD_execSequenceEnd(BYTE* op, |
908 | | BYTE* const oend, seq_t sequence, |
909 | | const BYTE** litPtr, const BYTE* const litLimit, |
910 | | const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) |
911 | 304k | { |
912 | 304k | BYTE* const oLitEnd = op + sequence.litLength; |
913 | 304k | size_t const sequenceLength = sequence.litLength + sequence.matchLength; |
914 | 304k | const BYTE* const iLitEnd = *litPtr + sequence.litLength; |
915 | 304k | const BYTE* match = oLitEnd - sequence.offset; |
916 | 304k | BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; |
917 | | |
918 | | /* bounds checks : careful of address space overflow in 32-bit mode */ |
919 | 304k | RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); |
920 | 299k | RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); |
921 | 298k | assert(op < op + sequenceLength); |
922 | 298k | assert(oLitEnd < op + sequenceLength); |
923 | | |
924 | | /* copy literals */ |
925 | 298k | ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap); |
926 | 298k | op = oLitEnd; |
927 | 298k | *litPtr = iLitEnd; |
928 | | |
929 | | /* copy Match */ |
930 | 298k | if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { |
931 | | /* offset beyond prefix */ |
932 | 27.3k | RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); |
933 | 26.7k | match = dictEnd - (prefixStart - match); |
934 | 26.7k | if (match + sequence.matchLength <= dictEnd) { |
935 | 24.9k | ZSTD_memmove(oLitEnd, match, sequence.matchLength); |
936 | 24.9k | return sequenceLength; |
937 | 24.9k | } |
938 | | /* span extDict & currentPrefixSegment */ |
939 | 1.74k | { size_t const length1 = dictEnd - match; |
940 | 1.74k | ZSTD_memmove(oLitEnd, match, length1); |
941 | 1.74k | op = oLitEnd + length1; |
942 | 1.74k | sequence.matchLength -= length1; |
943 | 1.74k | match = prefixStart; |
944 | 1.74k | } |
945 | 1.74k | } |
946 | 272k | ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); |
947 | 272k | return sequenceLength; |
948 | 298k | } |
949 | | |
950 | | /* ZSTD_execSequenceEndSplitLitBuffer(): |
951 | | * This version is intended for cases where the litBuffer is still split. It is kept separate to avoid a performance impact in the common case. |
952 | | */ |
953 | | FORCE_NOINLINE |
954 | | ZSTD_ALLOW_POINTER_OVERFLOW_ATTR |
955 | | size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op, |
956 | | BYTE* const oend, const BYTE* const oend_w, seq_t sequence, |
957 | | const BYTE** litPtr, const BYTE* const litLimit, |
958 | | const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) |
959 | 2.01M | { |
960 | 2.01M | BYTE* const oLitEnd = op + sequence.litLength; |
961 | 2.01M | size_t const sequenceLength = sequence.litLength + sequence.matchLength; |
962 | 2.01M | const BYTE* const iLitEnd = *litPtr + sequence.litLength; |
963 | 2.01M | const BYTE* match = oLitEnd - sequence.offset; |
964 | | |
965 | | |
966 | | /* bounds checks : careful of address space overflow in 32-bit mode */ |
967 | 2.01M | RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); |
968 | 2.01M | RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); |
969 | 2.01M | assert(op < op + sequenceLength); |
970 | 2.01M | assert(oLitEnd < op + sequenceLength); |
971 | | |
972 | | /* copy literals */ |
973 | 2.01M | RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer"); |
974 | 2.01M | ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength); |
975 | 2.01M | op = oLitEnd; |
976 | 2.01M | *litPtr = iLitEnd; |
977 | | |
978 | | /* copy Match */ |
979 | 2.01M | if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { |
980 | | /* offset beyond prefix */ |
981 | 20.9k | RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); |
982 | 20.4k | match = dictEnd - (prefixStart - match); |
983 | 20.4k | if (match + sequence.matchLength <= dictEnd) { |
984 | 20.4k | ZSTD_memmove(oLitEnd, match, sequence.matchLength); |
985 | 20.4k | return sequenceLength; |
986 | 20.4k | } |
987 | | /* span extDict & currentPrefixSegment */ |
988 | 78 | { size_t const length1 = dictEnd - match; |
989 | 78 | ZSTD_memmove(oLitEnd, match, length1); |
990 | 78 | op = oLitEnd + length1; |
991 | 78 | sequence.matchLength -= length1; |
992 | 78 | match = prefixStart; |
993 | 78 | } |
994 | 78 | } |
995 | 1.99M | ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); |
996 | 1.99M | return sequenceLength; |
997 | 2.01M | } |
998 | | |
999 | | HINT_INLINE |
1000 | | ZSTD_ALLOW_POINTER_OVERFLOW_ATTR |
1001 | | size_t ZSTD_execSequence(BYTE* op, |
1002 | | BYTE* const oend, seq_t sequence, |
1003 | | const BYTE** litPtr, const BYTE* const litLimit, |
1004 | | const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) |
1005 | 255M | { |
1006 | 255M | BYTE* const oLitEnd = op + sequence.litLength; |
1007 | 255M | size_t const sequenceLength = sequence.litLength + sequence.matchLength; |
1008 | 255M | BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ |
1009 | 255M | BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */ |
1010 | 255M | const BYTE* const iLitEnd = *litPtr + sequence.litLength; |
1011 | 255M | const BYTE* match = oLitEnd - sequence.offset; |
1012 | | |
1013 | 255M | assert(op != NULL /* Precondition */); |
1014 | 255M | assert(oend_w < oend /* No underflow */); |
1015 | | |
1016 | | #if defined(__aarch64__) |
1017 | | /* prefetch sequence starting from match that will be used for copy later */ |
1018 | | PREFETCH_L1(match); |
1019 | | #endif |
1020 | | /* Handle edge cases in a slow path: |
1021 | | * - Read beyond end of literals |
1022 | | * - Match end is within WILDCOPY_OVERLENGTH of oend |
1023 | | * - 32-bit mode and the match length overflows |
1024 | | */ |
1025 | 255M | if (UNLIKELY( |
1026 | 255M | iLitEnd > litLimit || |
1027 | 255M | oMatchEnd > oend_w || |
1028 | 255M | (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) |
1029 | 304k | return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); |
1030 | | |
1031 | | /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ |
1032 | 255M | assert(op <= oLitEnd /* No overflow */); |
1033 | 255M | assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); |
1034 | 255M | assert(oMatchEnd <= oend /* No underflow */); |
1035 | 255M | assert(iLitEnd <= litLimit /* Literal length is in bounds */); |
1036 | 255M | assert(oLitEnd <= oend_w /* Can wildcopy literals */); |
1037 | 255M | assert(oMatchEnd <= oend_w /* Can wildcopy matches */); |
1038 | | |
1039 | | /* Copy Literals: |
1040 | | * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. |
1041 | | * We likely don't need the full 32-byte wildcopy. |
1042 | | */ |
1043 | 255M | assert(WILDCOPY_OVERLENGTH >= 16); |
1044 | 255M | ZSTD_copy16(op, (*litPtr)); |
1045 | 255M | if (UNLIKELY(sequence.litLength > 16)) { |
1046 | 14.4M | ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap); |
1047 | 14.4M | } |
1048 | 255M | op = oLitEnd; |
1049 | 255M | *litPtr = iLitEnd; /* update for next sequence */ |
1050 | | |
1051 | | /* Copy Match */ |
1052 | 255M | if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { |
1053 | | /* offset beyond prefix -> go into extDict */ |
1054 | 3.66M | RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); |
1055 | 3.66M | match = dictEnd + (match - prefixStart); |
1056 | 3.66M | if (match + sequence.matchLength <= dictEnd) { |
1057 | 3.60M | ZSTD_memmove(oLitEnd, match, sequence.matchLength); |
1058 | 3.60M | return sequenceLength; |
1059 | 3.60M | } |
1060 | | /* span extDict & currentPrefixSegment */ |
1061 | 59.4k | { size_t const length1 = dictEnd - match; |
1062 | 59.4k | ZSTD_memmove(oLitEnd, match, length1); |
1063 | 59.4k | op = oLitEnd + length1; |
1064 | 59.4k | sequence.matchLength -= length1; |
1065 | 59.4k | match = prefixStart; |
1066 | 59.4k | } |
1067 | 59.4k | } |
1068 | | /* Match within prefix of 1 or more bytes */ |
1069 | 251M | assert(op <= oMatchEnd); |
1070 | 251M | assert(oMatchEnd <= oend_w); |
1071 | 251M | assert(match >= prefixStart); |
1072 | 251M | assert(sequence.matchLength >= 1); |
1073 | | |
1074 | | /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy |
1075 | | * without overlap checking. |
1076 | | */ |
1077 | 251M | if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { |
1078 | | /* We bet on a full wildcopy for matches, since we expect matches to be |
1079 | | * longer than literals (in general). In silesia, ~10% of matches are longer |
1080 | | * than 16 bytes. |
1081 | | */ |
1082 | 182M | ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); |
1083 | 182M | return sequenceLength; |
1084 | 182M | } |
1085 | 69.0M | assert(sequence.offset < WILDCOPY_VECLEN); |
1086 | | |
1087 | | /* Copy 8 bytes and spread the offset to be >= 8. */ |
1088 | 69.0M | ZSTD_overlapCopy8(&op, &match, sequence.offset); |
1089 | | |
1090 | | /* If the match length is > 8 bytes, then continue with the wildcopy. */ |
1091 | 69.0M | if (sequence.matchLength > 8) { |
1092 | 20.1M | assert(op < oMatchEnd); |
1093 | 20.1M | ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst); |
1094 | 20.1M | } |
1095 | 69.0M | return sequenceLength; |
1096 | 69.0M | } |
1097 | | |
1098 | | HINT_INLINE |
1099 | | ZSTD_ALLOW_POINTER_OVERFLOW_ATTR |
1100 | | size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, |
1101 | | BYTE* const oend, const BYTE* const oend_w, seq_t sequence, |
1102 | | const BYTE** litPtr, const BYTE* const litLimit, |
1103 | | const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) |
1104 | 6.13M | { |
1105 | 6.13M | BYTE* const oLitEnd = op + sequence.litLength; |
1106 | 6.13M | size_t const sequenceLength = sequence.litLength + sequence.matchLength; |
1107 | 6.13M | BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ |
1108 | 6.13M | const BYTE* const iLitEnd = *litPtr + sequence.litLength; |
1109 | 6.13M | const BYTE* match = oLitEnd - sequence.offset; |
1110 | | |
1111 | 6.13M | assert(op != NULL /* Precondition */); |
1112 | 6.13M | assert(oend_w < oend /* No underflow */); |
1113 | | /* Handle edge cases in a slow path: |
1114 | | * - Read beyond end of literals |
1115 | | * - Match end is within WILDCOPY_OVERLENGTH of oend |
1116 | | * - 32-bit mode and the match length overflows |
1117 | | */ |
1118 | 6.13M | if (UNLIKELY( |
1119 | 6.13M | iLitEnd > litLimit || |
1120 | 6.13M | oMatchEnd > oend_w || |
1121 | 6.13M | (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) |
1122 | 2.01M | return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); |
1123 | | |
1124 | | /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ |
1125 | 4.11M | assert(op <= oLitEnd /* No overflow */); |
1126 | 4.11M | assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); |
1127 | 4.11M | assert(oMatchEnd <= oend /* No underflow */); |
1128 | 4.11M | assert(iLitEnd <= litLimit /* Literal length is in bounds */); |
1129 | 4.11M | assert(oLitEnd <= oend_w /* Can wildcopy literals */); |
1130 | 4.11M | assert(oMatchEnd <= oend_w /* Can wildcopy matches */); |
1131 | | |
1132 | | /* Copy Literals: |
1133 | | * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. |
1134 | | * We likely don't need the full 32-byte wildcopy. |
1135 | | */ |
1136 | 4.11M | assert(WILDCOPY_OVERLENGTH >= 16); |
1137 | 4.11M | ZSTD_copy16(op, (*litPtr)); |
1138 | 4.11M | if (UNLIKELY(sequence.litLength > 16)) { |
1139 | 300k | ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap); |
1140 | 300k | } |
1141 | 4.11M | op = oLitEnd; |
1142 | 4.11M | *litPtr = iLitEnd; /* update for next sequence */ |
1143 | | |
1144 | | /* Copy Match */ |
1145 | 4.11M | if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { |
1146 | | /* offset beyond prefix -> go into extDict */ |
1147 | 128k | RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); |
1148 | 128k | match = dictEnd + (match - prefixStart); |
1149 | 128k | if (match + sequence.matchLength <= dictEnd) { |
1150 | 126k | ZSTD_memmove(oLitEnd, match, sequence.matchLength); |
1151 | 126k | return sequenceLength; |
1152 | 126k | } |
1153 | | /* span extDict & currentPrefixSegment */ |
1154 | 1.42k | { size_t const length1 = dictEnd - match; |
1155 | 1.42k | ZSTD_memmove(oLitEnd, match, length1); |
1156 | 1.42k | op = oLitEnd + length1; |
1157 | 1.42k | sequence.matchLength -= length1; |
1158 | 1.42k | match = prefixStart; |
1159 | 1.42k | } } |
1160 | | /* Match within prefix of 1 or more bytes */ |
1161 | 3.98M | assert(op <= oMatchEnd); |
1162 | 3.98M | assert(oMatchEnd <= oend_w); |
1163 | 3.98M | assert(match >= prefixStart); |
1164 | 3.98M | assert(sequence.matchLength >= 1); |
1165 | | |
1166 | | /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy |
1167 | | * without overlap checking. |
1168 | | */ |
1169 | 3.98M | if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { |
1170 | | /* We bet on a full wildcopy for matches, since we expect matches to be |
1171 | | * longer than literals (in general). In the Silesia corpus, ~10% of matches are longer
1172 | | * than 16 bytes. |
1173 | | */ |
1174 | 1.81M | ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); |
1175 | 1.81M | return sequenceLength; |
1176 | 1.81M | } |
1177 | 2.16M | assert(sequence.offset < WILDCOPY_VECLEN); |
1178 | | |
1179 | | /* Copy 8 bytes and spread the offset to be >= 8. */ |
1180 | 2.16M | ZSTD_overlapCopy8(&op, &match, sequence.offset); |
1181 | | |
1182 | | /* If the match length is > 8 bytes, then continue with the wildcopy. */ |
1183 | 2.16M | if (sequence.matchLength > 8) { |
1184 | 391k | assert(op < oMatchEnd); |
1185 | 391k | ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); |
1186 | 391k | } |
1187 | 2.16M | return sequenceLength; |
1188 | 2.16M | } |
1189 | | |
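A byte-level view of what the small-offset path above guarantees may help. When offset < 8 the source and destination of the match copy overlap, so ZSTD_overlapCopy8() must behave as if the copy proceeded strictly front-to-back, each byte potentially re-reading freshly written output. A minimal standalone equivalent (illustrative reference behavior only, not the library's optimized routine):

    /* Naive reference behavior for an overlapping match copy:
     * for offset==1 this replicates a single byte, i.e. run-length expansion. */
    static void overlapCopyReference(unsigned char* op, size_t offset, size_t matchLength)
    {
        const unsigned char* match = op - offset;
        size_t i;
        for (i = 0; i < matchLength; i++)
            op[i] = match[i];   /* may read bytes written earlier in this same loop */
    }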
1190 | | |
1191 | | static void |
1192 | | ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt) |
1193 | 12.6M | { |
1194 | 12.6M | const void* ptr = dt; |
1195 | 12.6M | const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr; |
1196 | 12.6M | DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); |
1197 | 12.6M | DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits", |
1198 | 12.6M | (U32)DStatePtr->state, DTableH->tableLog); |
1199 | 12.6M | BIT_reloadDStream(bitD); |
1200 | 12.6M | DStatePtr->table = dt + 1; |
1201 | 12.6M | } |
1202 | | |
1203 | | FORCE_INLINE_TEMPLATE void |
1204 | | ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 nextState, U32 nbBits) |
1205 | 772M | { |
1206 | 772M | size_t const lowBits = BIT_readBits(bitD, nbBits); |
1207 | 772M | DStatePtr->state = nextState + lowBits; |
1208 | 772M | } |
1209 | | |
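For intuition on the transition above: each FSE table entry carries a baseline state (nextState) and a bit count (nbBits), and the new state is simply the baseline plus the bits read. A minimal standalone sketch, with a hypothetical readBits() callback standing in for the BIT_DStream machinery:

    typedef struct { unsigned short nextState; unsigned char nbBits; } FseEntrySketch;

    /* e.g. nextState==32, nbBits==2, next stream bits 0b10 => new state 34 */
    static unsigned fseNextStateSketch(FseEntrySketch const* table, unsigned state,
                                       unsigned (*readBits)(unsigned nbBits))
    {
        FseEntrySketch const e = table[state];
        return e.nextState + readBits(e.nbBits);
    }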
1210 | | /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum |
1211 | | * offset bits. But we can only read at most STREAM_ACCUMULATOR_MIN_32 |
1212 | | * bits before reloading. This value is the maximum number of bits we read
1213 | | * after reloading when we are decoding long offsets. |
1214 | | */ |
1215 | | #define LONG_OFFSETS_MAX_EXTRA_BITS_32 \ |
1216 | 0 | (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \ |
1217 | 0 | ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \ |
1218 | 0 | : 0) |
1219 | | |
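Worked out with the usual 32-bit constants (assuming ZSTD_WINDOWLOG_MAX_32 == 30 and STREAM_ACCUMULATOR_MIN_32 == 25): since 30 > 25, the macro evaluates to 30 - 25 = 5 extra bits, which is exactly what the ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5) inside ZSTD_decodeSequence() below checks.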
1220 | | typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e; |
1221 | | |
1222 | | /** |
1223 | | * ZSTD_decodeSequence(): |
1224 | | * @p longOffsets : tells the decoder to reload more bits while decoding large offsets;
1225 | | * only used in 32-bit mode |
1226 | | * @return : Sequence (litL + matchL + offset) |
1227 | | */ |
1228 | | FORCE_INLINE_TEMPLATE seq_t |
1229 | | ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const int isLastSeq) |
1230 | 261M | { |
1231 | 261M | seq_t seq; |
1232 | | /* |
1233 | | * ZSTD_seqSymbol is a 64-bit wide structure.
1234 | | * It can be loaded in one operation
1235 | | * and its fields extracted by simply shifting or bit-extracting on aarch64.
1236 | | * GCC doesn't recognize this and generates unnecessary ldr/ldrb/ldrh
1237 | | * operations that cause a performance drop. This can be avoided with this
1238 | | * ZSTD_memcpy hack.
1239 | | */ |
1240 | | #if defined(__aarch64__) && (defined(__GNUC__) && !defined(__clang__)) |
1241 | | ZSTD_seqSymbol llDInfoS, mlDInfoS, ofDInfoS; |
1242 | | ZSTD_seqSymbol* const llDInfo = &llDInfoS; |
1243 | | ZSTD_seqSymbol* const mlDInfo = &mlDInfoS; |
1244 | | ZSTD_seqSymbol* const ofDInfo = &ofDInfoS; |
1245 | | ZSTD_memcpy(llDInfo, seqState->stateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol)); |
1246 | | ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol)); |
1247 | | ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol)); |
1248 | | #else |
1249 | 261M | const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; |
1250 | 261M | const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; |
1251 | 261M | const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; |
1252 | 261M | #endif |
1253 | 261M | seq.matchLength = mlDInfo->baseValue; |
1254 | 261M | seq.litLength = llDInfo->baseValue; |
1255 | 261M | { U32 const ofBase = ofDInfo->baseValue; |
1256 | 261M | BYTE const llBits = llDInfo->nbAdditionalBits; |
1257 | 261M | BYTE const mlBits = mlDInfo->nbAdditionalBits; |
1258 | 261M | BYTE const ofBits = ofDInfo->nbAdditionalBits; |
1259 | 261M | BYTE const totalBits = llBits+mlBits+ofBits; |
1260 | | |
1261 | 261M | U16 const llNext = llDInfo->nextState; |
1262 | 261M | U16 const mlNext = mlDInfo->nextState; |
1263 | 261M | U16 const ofNext = ofDInfo->nextState; |
1264 | 261M | U32 const llnbBits = llDInfo->nbBits; |
1265 | 261M | U32 const mlnbBits = mlDInfo->nbBits; |
1266 | 261M | U32 const ofnbBits = ofDInfo->nbBits; |
1267 | | |
1268 | 261M | assert(llBits <= MaxLLBits); |
1269 | 261M | assert(mlBits <= MaxMLBits); |
1270 | 261M | assert(ofBits <= MaxOff); |
1271 | | /* |
1272 | | * gcc has better branch and block analyzers, so likeliness hints are
1273 | | * sometimes only valuable for clang, where they yield around 3-4%
1274 | | * more performance.
1275 | | */ |
1276 | | |
1277 | | /* sequence */ |
1278 | 261M | { size_t offset; |
1279 | 261M | if (ofBits > 1) { |
1280 | 162M | ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); |
1281 | 162M | ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); |
1282 | 162M | ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32); |
1283 | 162M | ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits); |
1284 | 162M | if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { |
1285 | | /* Always read extra bits, this keeps the logic simple, |
1286 | | * avoids branches, and avoids accidentally reading 0 bits. |
1287 | | */ |
1288 | 0 | U32 const extraBits = LONG_OFFSETS_MAX_EXTRA_BITS_32; |
1289 | 0 | offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); |
1290 | 0 | BIT_reloadDStream(&seqState->DStream); |
1291 | 0 | offset += BIT_readBitsFast(&seqState->DStream, extraBits); |
1292 | 162M | } else { |
1293 | 162M | offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ |
1294 | 162M | if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); |
1295 | 162M | } |
1296 | 162M | seqState->prevOffset[2] = seqState->prevOffset[1]; |
1297 | 162M | seqState->prevOffset[1] = seqState->prevOffset[0]; |
1298 | 162M | seqState->prevOffset[0] = offset; |
1299 | 162M | } else { |
1300 | 99.5M | U32 const ll0 = (llDInfo->baseValue == 0); |
1301 | 99.5M | if (LIKELY((ofBits == 0))) { |
1302 | 91.8M | offset = seqState->prevOffset[ll0]; |
1303 | 91.8M | seqState->prevOffset[1] = seqState->prevOffset[!ll0]; |
1304 | 91.8M | seqState->prevOffset[0] = offset; |
1305 | 91.8M | } else { |
1306 | 7.67M | offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); |
1307 | 7.67M | { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; |
1308 | 7.67M | temp -= !temp; /* 0 is not valid: input corrupted => force offset to -1 => corruption detected at execSequence */ |
1309 | 7.67M | if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; |
1310 | 7.67M | seqState->prevOffset[1] = seqState->prevOffset[0]; |
1311 | 7.67M | seqState->prevOffset[0] = offset = temp; |
1312 | 7.67M | } } } |
1313 | 261M | seq.offset = offset; |
1314 | 261M | } |
1315 | | |
1316 | 261M | if (mlBits > 0) |
1317 | 17.8M | seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); |
1318 | | |
1319 | 261M | if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) |
1320 | 0 | BIT_reloadDStream(&seqState->DStream); |
1321 | 261M | if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) |
1322 | 144k | BIT_reloadDStream(&seqState->DStream); |
1323 | | /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ |
1324 | 261M | ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); |
1325 | | |
1326 | 261M | if (llBits > 0) |
1327 | 16.1M | seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); |
1328 | | |
1329 | 261M | if (MEM_32bits()) |
1330 | 0 | BIT_reloadDStream(&seqState->DStream); |
1331 | | |
1332 | 261M | DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", |
1333 | 261M | (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); |
1334 | | |
1335 | 261M | if (!isLastSeq) { |
1336 | | /* don't update FSE state for last Sequence */ |
1337 | 257M | ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */ |
1338 | 257M | ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */ |
1339 | 257M | if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ |
1340 | 257M | ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */ |
1341 | 257M | BIT_reloadDStream(&seqState->DStream); |
1342 | 257M | } |
1343 | 261M | } |
1344 | | |
1345 | 0 | return seq; |
1346 | 261M | } |
1347 | | |
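The repeat-offset branch above implements the standard zstd repcode rule. A minimal standalone sketch of that rule, folding the ll0 adjustment and the zero-offset corruption guard into a single index (resolveRepOffset and repIdx are illustrative names, not zstd API):

    /* rep[0..2] hold the three most recent offsets; repIdx is 0..3 after the
     * ll0 adjustment. repIdx 3 means rep[0]-1; repIdx 0 leaves the history untouched. */
    static size_t resolveRepOffset(size_t rep[3], unsigned repIdx)
    {
        size_t offset;
        if (repIdx == 0) return rep[0];      /* most recent offset, order unchanged */
        offset = (repIdx == 3) ? rep[0] - 1 : rep[repIdx];
        offset -= (offset == 0);             /* 0 is invalid: force to (size_t)-1, caught later as corruption */
        if (repIdx != 1) rep[2] = rep[1];    /* rotate the history */
        rep[1] = rep[0];
        rep[0] = offset;
        return offset;
    }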
1348 | | #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) |
1349 | | #if DEBUGLEVEL >= 1 |
1350 | | static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd) |
1351 | 238M | { |
1352 | 238M | size_t const windowSize = dctx->fParams.windowSize; |
1353 | | /* No dictionary used. */ |
1354 | 238M | if (dctx->dictContentEndForFuzzing == NULL) return 0; |
1355 | | /* Dictionary is our prefix. */ |
1356 | 98.6M | if (prefixStart == dctx->dictContentBeginForFuzzing) return 1; |
1357 | | /* Dictionary is not our ext-dict. */ |
1358 | 98.6M | if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0; |
1359 | | /* Dictionary is not within our window size. */ |
1360 | 98.6M | if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0; |
1361 | | /* Dictionary is active. */ |
1362 | 28.7M | return 1; |
1363 | 98.6M | } |
1364 | | #endif |
1365 | | |
1366 | | static void ZSTD_assertValidSequence( |
1367 | | ZSTD_DCtx const* dctx, |
1368 | | BYTE const* op, BYTE const* oend, |
1369 | | seq_t const seq, |
1370 | | BYTE const* prefixStart, BYTE const* virtualStart) |
1371 | 241M | { |
1372 | 241M | #if DEBUGLEVEL >= 1 |
1373 | 241M | if (dctx->isFrameDecompression) { |
1374 | 238M | size_t const windowSize = dctx->fParams.windowSize; |
1375 | 238M | size_t const sequenceSize = seq.litLength + seq.matchLength; |
1376 | 238M | BYTE const* const oLitEnd = op + seq.litLength; |
1377 | 238M | DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u", |
1378 | 238M | (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); |
1379 | 238M | assert(op <= oend); |
1380 | 238M | assert((size_t)(oend - op) >= sequenceSize); |
1381 | 238M | assert(sequenceSize <= ZSTD_blockSizeMax(dctx)); |
1382 | 238M | if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) { |
1383 | 28.7M | size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing); |
1384 | | /* Offset must be within the dictionary. */ |
1385 | 28.7M | assert(seq.offset <= (size_t)(oLitEnd - virtualStart)); |
1386 | 28.7M | assert(seq.offset <= windowSize + dictSize); |
1387 | 209M | } else { |
1388 | | /* Offset must be within our window. */ |
1389 | 209M | assert(seq.offset <= windowSize); |
1390 | 209M | } |
1391 | 238M | } |
1392 | | #else |
1393 | | (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart; |
1394 | | #endif |
1395 | 241M | } |
1396 | | #endif |
1397 | | |
1398 | | #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG |
1399 | | |
1400 | | |
1401 | | FORCE_INLINE_TEMPLATE size_t |
1402 | | DONT_VECTORIZE |
1403 | | ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, |
1404 | | void* dst, size_t maxDstSize, |
1405 | | const void* seqStart, size_t seqSize, int nbSeq, |
1406 | | const ZSTD_longOffset_e isLongOffset) |
1407 | 25.1k | { |
1408 | 25.1k | const BYTE* ip = (const BYTE*)seqStart; |
1409 | 25.1k | const BYTE* const iend = ip + seqSize; |
1410 | 25.1k | BYTE* const ostart = (BYTE*)dst; |
1411 | 25.1k | BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, maxDstSize); |
1412 | 25.1k | BYTE* op = ostart; |
1413 | 25.1k | const BYTE* litPtr = dctx->litPtr; |
1414 | 25.1k | const BYTE* litBufferEnd = dctx->litBufferEnd; |
1415 | 25.1k | const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); |
1416 | 25.1k | const BYTE* const vBase = (const BYTE*) (dctx->virtualStart); |
1417 | 25.1k | const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); |
1418 | 25.1k | DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer (%i seqs)", nbSeq); |
1419 | | |
1420 | | /* Literals are split between internal buffer & output buffer */ |
1421 | 25.1k | if (nbSeq) { |
1422 | 24.9k | seqState_t seqState; |
1423 | 24.9k | dctx->fseEntropy = 1; |
1424 | 99.9k | { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } |
1425 | 24.9k | RETURN_ERROR_IF( |
1426 | 24.9k | ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), |
1427 | 24.9k | corruption_detected, ""); |
1428 | 24.8k | ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); |
1429 | 24.8k | ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); |
1430 | 24.8k | ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); |
1431 | 24.8k | assert(dst != NULL); |
1432 | | |
1433 | 24.8k | ZSTD_STATIC_ASSERT( |
1434 | 24.8k | BIT_DStream_unfinished < BIT_DStream_completed && |
1435 | 24.8k | BIT_DStream_endOfBuffer < BIT_DStream_completed && |
1436 | 24.8k | BIT_DStream_completed < BIT_DStream_overflow); |
1437 | | |
1438 | | /* decompress without overrunning litPtr begins */ |
1439 | 24.8k | { seq_t sequence = {0,0,0}; /* some static analyzer believe that @sequence is not initialized (it necessarily is, since for(;;) loop as at least one iteration) */ |
1440 | | /* Align the decompression loop to 32 + 16 bytes. |
1441 | | * |
1442 | | * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression |
1443 | | * speed swings based on the alignment of the decompression loop. This |
1444 | | * performance swing is caused by parts of the decompression loop falling |
1445 | | * out of the DSB. The entire decompression loop should fit in the DSB, |
1446 | | * when it can't we get much worse performance. You can measure if you've |
1447 | | * hit the good case or the bad case with this perf command for some |
1448 | | * compressed file test.zst: |
1449 | | * |
1450 | | * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \ |
1451 | | * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst |
1452 | | * |
1453 | | * If you see most cycles served out of the MITE you've hit the bad case. |
1454 | | * If you see most cycles served out of the DSB you've hit the good case. |
1455 | | * If it is pretty even then you may be in an okay case. |
1456 | | * |
1457 | | * This issue has been reproduced on the following CPUs: |
1458 | | * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9 |
1459 | | * Use Instruments->Counters to get DSB/MITE cycles. |
1460 | | * I never got performance swings, but I was able to |
1461 | | * go from the good case of mostly DSB to half of the |
1462 | | * cycles served from MITE. |
1463 | | * - Coffeelake: Intel i9-9900k |
1464 | | * - Coffeelake: Intel i7-9700k |
1465 | | * |
1466 | | * I haven't been able to reproduce the instability or DSB misses on any |
1467 | | * of the following CPUs:
1468 | | * - Haswell |
1469 | | * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
1470 | | * - Skylake |
1471 | | * |
1472 | | * Alignment is done for each of the three major decompression loops: |
1473 | | * - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer |
1474 | | * - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer |
1475 | | * - ZSTD_decompressSequences_body |
1476 | | * Alignment choices are made to minimize large swings on bad cases and influence on performance |
1477 | | * from changes external to this code, rather than to overoptimize on the current commit. |
1478 | | * |
1479 | | * If you are seeing performance instability, this script can help test.
1480 | | * It tests on 4 commits in zstd where I saw performance change. |
1481 | | * |
1482 | | * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 |
1483 | | */ |
1484 | 24.8k | #if defined(__GNUC__) && defined(__x86_64__) |
1485 | 24.8k | __asm__(".p2align 6"); |
1486 | | # if __GNUC__ >= 7 |
1487 | | /* good for gcc-7, gcc-9, and gcc-11 */ |
1488 | | __asm__("nop"); |
1489 | | __asm__(".p2align 5"); |
1490 | | __asm__("nop"); |
1491 | | __asm__(".p2align 4"); |
1492 | | # if __GNUC__ == 8 || __GNUC__ == 10 |
1493 | | /* good for gcc-8 and gcc-10 */ |
1494 | | __asm__("nop"); |
1495 | | __asm__(".p2align 3"); |
1496 | | # endif |
1497 | | # endif |
1498 | 24.8k | #endif |
1499 | | |
1500 | | /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */ |
1501 | 4.72M | for ( ; nbSeq; nbSeq--) { |
1502 | 4.71M | sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); |
1503 | 4.71M | if (litPtr + sequence.litLength > dctx->litBufferEnd) break; |
1504 | 4.70M | { size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); |
1505 | | #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) |
1506 | | assert(!ZSTD_isError(oneSeqSize)); |
1507 | | ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); |
1508 | | #endif |
1509 | 4.70M | if (UNLIKELY(ZSTD_isError(oneSeqSize))) |
1510 | 1.34k | return oneSeqSize; |
1511 | 4.69M | DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); |
1512 | 4.69M | op += oneSeqSize; |
1513 | 4.69M | } } |
1514 | 23.5k | DEBUGLOG(6, "reached: (litPtr + sequence.litLength > dctx->litBufferEnd)"); |
1515 | | |
1516 | | /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */ |
1517 | 23.5k | if (nbSeq > 0) { |
1518 | 14.9k | const size_t leftoverLit = dctx->litBufferEnd - litPtr; |
1519 | 14.9k | DEBUGLOG(6, "There are %i sequences left, and %zu/%zu literals left in buffer", nbSeq, leftoverLit, sequence.litLength); |
1520 | 14.9k | if (leftoverLit) { |
1521 | 6.50k | RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); |
1522 | 6.41k | ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); |
1523 | 6.41k | sequence.litLength -= leftoverLit; |
1524 | 6.41k | op += leftoverLit; |
1525 | 6.41k | } |
1526 | 14.8k | litPtr = dctx->litExtraBuffer; |
1527 | 14.8k | litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; |
1528 | 14.8k | dctx->litBufferLocation = ZSTD_not_in_dst; |
1529 | 14.8k | { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); |
1530 | | #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) |
1531 | | assert(!ZSTD_isError(oneSeqSize)); |
1532 | | ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); |
1533 | | #endif |
1534 | 14.8k | if (UNLIKELY(ZSTD_isError(oneSeqSize))) |
1535 | 388 | return oneSeqSize; |
1536 | 14.4k | DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); |
1537 | 14.4k | op += oneSeqSize; |
1538 | 14.4k | } |
1539 | 0 | nbSeq--; |
1540 | 14.4k | } |
1541 | 23.5k | } |
1542 | | |
1543 | 23.0k | if (nbSeq > 0) { |
1544 | | /* there are remaining literals to read from the extra buffer */
1545 | | |
1546 | 13.9k | #if defined(__GNUC__) && defined(__x86_64__) |
1547 | 13.9k | __asm__(".p2align 6"); |
1548 | | /* worse for gcc-7; better for gcc-8, gcc-9, gcc-10, and clang */
1549 | 13.9k | # if __GNUC__ != 7 |
1550 | | /* worse for gcc-7 better for gcc-8, gcc-9, and gcc-10 and clang */ |
1551 | 13.9k | __asm__(".p2align 4"); |
1552 | 13.9k | __asm__("nop"); |
1553 | 13.9k | __asm__(".p2align 3"); |
1554 | | # elif __GNUC__ >= 11 |
1555 | | __asm__(".p2align 3"); |
1556 | | # else |
1557 | | __asm__(".p2align 5"); |
1558 | | __asm__("nop"); |
1559 | | __asm__(".p2align 3"); |
1560 | | # endif |
1561 | 13.9k | #endif |
1562 | | |
1563 | 4.86M | for ( ; nbSeq ; nbSeq--) { |
1564 | 4.85M | seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); |
1565 | 4.85M | size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); |
1566 | | #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) |
1567 | | assert(!ZSTD_isError(oneSeqSize)); |
1568 | | ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); |
1569 | | #endif |
1570 | 4.85M | if (UNLIKELY(ZSTD_isError(oneSeqSize))) |
1571 | 2.09k | return oneSeqSize; |
1572 | 4.84M | DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); |
1573 | 4.84M | op += oneSeqSize; |
1574 | 4.84M | } |
1575 | 13.9k | } |
1576 | | |
1577 | | /* check if reached exact end */ |
1578 | 20.9k | DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq); |
1579 | 20.9k | RETURN_ERROR_IF(nbSeq, corruption_detected, ""); |
1580 | 20.9k | DEBUGLOG(5, "bitStream : start=%p, ptr=%p, bitsConsumed=%u", seqState.DStream.start, seqState.DStream.ptr, seqState.DStream.bitsConsumed); |
1581 | 20.9k | RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, ""); |
1582 | | /* save reps for next block */ |
1583 | 80.3k | { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } |
1584 | 20.0k | } |
1585 | | |
1586 | | /* last literal segment */ |
1587 | 20.3k | if (dctx->litBufferLocation == ZSTD_split) { |
1588 | | /* split hasn't been reached yet: first flush the literals still in dst, then copy litExtraBuffer */
1589 | 8.57k | size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); |
1590 | 8.57k | DEBUGLOG(6, "copy last literals from segment : %u", (U32)lastLLSize); |
1591 | 8.57k | RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); |
1592 | 8.51k | if (op != NULL) { |
1593 | 8.51k | ZSTD_memmove(op, litPtr, lastLLSize); |
1594 | 8.51k | op += lastLLSize; |
1595 | 8.51k | } |
1596 | 8.51k | litPtr = dctx->litExtraBuffer; |
1597 | 8.51k | litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; |
1598 | 8.51k | dctx->litBufferLocation = ZSTD_not_in_dst; |
1599 | 8.51k | } |
1600 | | /* copy last literals from internal buffer */ |
1601 | 20.2k | { size_t const lastLLSize = (size_t)(litBufferEnd - litPtr); |
1602 | 20.2k | DEBUGLOG(6, "copy last literals from internal buffer : %u", (U32)lastLLSize); |
1603 | 20.2k | RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); |
1604 | 19.5k | if (op != NULL) { |
1605 | 19.5k | ZSTD_memcpy(op, litPtr, lastLLSize); |
1606 | 19.5k | op += lastLLSize; |
1607 | 19.5k | } } |
1608 | | |
1609 | 19.5k | DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart)); |
1610 | 19.5k | return (size_t)(op - ostart); |
1611 | 20.2k | } |
1612 | | |
1613 | | FORCE_INLINE_TEMPLATE size_t |
1614 | | DONT_VECTORIZE |
1615 | | ZSTD_decompressSequences_body(ZSTD_DCtx* dctx, |
1616 | | void* dst, size_t maxDstSize, |
1617 | | const void* seqStart, size_t seqSize, int nbSeq, |
1618 | | const ZSTD_longOffset_e isLongOffset) |
1619 | 83.8k | { |
1620 | 83.8k | const BYTE* ip = (const BYTE*)seqStart; |
1621 | 83.8k | const BYTE* const iend = ip + seqSize; |
1622 | 83.8k | BYTE* const ostart = (BYTE*)dst; |
1623 | 83.8k | BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ZSTD_maybeNullPtrAdd(ostart, maxDstSize) : dctx->litBuffer; |
1624 | 83.8k | BYTE* op = ostart; |
1625 | 83.8k | const BYTE* litPtr = dctx->litPtr; |
1626 | 83.8k | const BYTE* const litEnd = litPtr + dctx->litSize; |
1627 | 83.8k | const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart); |
1628 | 83.8k | const BYTE* const vBase = (const BYTE*)(dctx->virtualStart); |
1629 | 83.8k | const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd); |
1630 | 83.8k | DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq); |
1631 | | |
1632 | | /* Regen sequences */ |
1633 | 83.8k | if (nbSeq) { |
1634 | 31.9k | seqState_t seqState; |
1635 | 31.9k | dctx->fseEntropy = 1; |
1636 | 127k | { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } |
1637 | 31.9k | RETURN_ERROR_IF( |
1638 | 31.9k | ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)), |
1639 | 31.9k | corruption_detected, ""); |
1640 | 31.5k | ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); |
1641 | 31.5k | ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); |
1642 | 31.5k | ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); |
1643 | 31.5k | assert(dst != NULL); |
1644 | | |
1645 | 31.5k | #if defined(__GNUC__) && defined(__x86_64__) |
1646 | 31.5k | __asm__(".p2align 6"); |
1647 | 31.5k | __asm__("nop"); |
1648 | | # if __GNUC__ >= 7 |
1649 | | __asm__(".p2align 5"); |
1650 | | __asm__("nop"); |
1651 | | __asm__(".p2align 3"); |
1652 | | # else |
1653 | 31.5k | __asm__(".p2align 4"); |
1654 | 31.5k | __asm__("nop"); |
1655 | 31.5k | __asm__(".p2align 3"); |
1656 | 31.5k | # endif |
1657 | 31.5k | #endif |
1658 | | |
1659 | 8.61M | for ( ; nbSeq ; nbSeq--) { |
1660 | 8.59M | seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, nbSeq==1); |
1661 | 8.59M | size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd); |
1662 | | #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) |
1663 | | assert(!ZSTD_isError(oneSeqSize)); |
1664 | | ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); |
1665 | | #endif |
1666 | 8.59M | if (UNLIKELY(ZSTD_isError(oneSeqSize))) |
1667 | 5.88k | return oneSeqSize; |
1668 | 8.58M | DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); |
1669 | 8.58M | op += oneSeqSize; |
1670 | 8.58M | } |
1671 | | |
1672 | | /* check if reached exact end */ |
1673 | 25.6k | assert(nbSeq == 0); |
1674 | 25.6k | RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, ""); |
1675 | | /* save reps for next block */ |
1676 | 101k | { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } |
1677 | 25.3k | } |
1678 | | |
1679 | | /* last literal segment */ |
1680 | 77.1k | { size_t const lastLLSize = (size_t)(litEnd - litPtr); |
1681 | 77.1k | DEBUGLOG(6, "copy last literals : %u", (U32)lastLLSize); |
1682 | 77.1k | RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); |
1683 | 75.7k | if (op != NULL) { |
1684 | 63.8k | ZSTD_memcpy(op, litPtr, lastLLSize); |
1685 | 63.8k | op += lastLLSize; |
1686 | 63.8k | } } |
1687 | | |
1688 | 75.7k | DEBUGLOG(6, "decoded block of size %u bytes", (U32)(op - ostart)); |
1689 | 75.7k | return (size_t)(op - ostart); |
1690 | 77.1k | } |
1691 | | |
1692 | | static size_t |
1693 | | ZSTD_decompressSequences_default(ZSTD_DCtx* dctx, |
1694 | | void* dst, size_t maxDstSize, |
1695 | | const void* seqStart, size_t seqSize, int nbSeq, |
1696 | | const ZSTD_longOffset_e isLongOffset) |
1697 | 0 | { |
1698 | 0 | return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1699 | 0 | } |
1700 | | |
1701 | | static size_t |
1702 | | ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx, |
1703 | | void* dst, size_t maxDstSize, |
1704 | | const void* seqStart, size_t seqSize, int nbSeq, |
1705 | | const ZSTD_longOffset_e isLongOffset) |
1706 | 0 | { |
1707 | 0 | return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1708 | 0 | } |
1709 | | #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ |
1710 | | |
1711 | | #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT |
1712 | | |
1713 | | FORCE_INLINE_TEMPLATE |
1714 | | |
1715 | | size_t ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence, |
1716 | | const BYTE* const prefixStart, const BYTE* const dictEnd) |
1717 | 15.8M | { |
1718 | 15.8M | prefetchPos += sequence.litLength; |
1719 | 15.8M | { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart; |
1720 | | /* note : this operation can overflow when seq.offset is far too large, which can only happen with corrupted input.
1721 | | * No consequence though : the memory address is only used for prefetching, not for dereferencing */
1722 | 15.8M | const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, prefetchPos), sequence.offset); |
1723 | 15.8M | PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */ |
1724 | 15.8M | } |
1725 | 15.8M | return prefetchPos + sequence.matchLength; |
1726 | 15.8M | } |
1727 | | |
1728 | | /* This decoding function employs prefetching |
1729 | | * to reduce the latency impact of cache misses.
1730 | | * It's generally employed when a block contains a significant portion of long-distance matches
1731 | | * or when coupled with a "cold" dictionary */ |
1732 | | FORCE_INLINE_TEMPLATE size_t |
1733 | | ZSTD_decompressSequencesLong_body( |
1734 | | ZSTD_DCtx* dctx, |
1735 | | void* dst, size_t maxDstSize, |
1736 | | const void* seqStart, size_t seqSize, int nbSeq, |
1737 | | const ZSTD_longOffset_e isLongOffset) |
1738 | 137k | { |
1739 | 137k | const BYTE* ip = (const BYTE*)seqStart; |
1740 | 137k | const BYTE* const iend = ip + seqSize; |
1741 | 137k | BYTE* const ostart = (BYTE*)dst; |
1742 | 137k | BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, maxDstSize); |
1743 | 137k | BYTE* op = ostart; |
1744 | 137k | const BYTE* litPtr = dctx->litPtr; |
1745 | 137k | const BYTE* litBufferEnd = dctx->litBufferEnd; |
1746 | 137k | const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); |
1747 | 137k | const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart); |
1748 | 137k | const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); |
1749 | | |
1750 | | /* Regen sequences */ |
1751 | 137k | if (nbSeq) { |
1752 | 76.5M | #define STORED_SEQS 8 |
1753 | 46.5M | #define STORED_SEQS_MASK (STORED_SEQS-1) |
1754 | 30.0M | #define ADVANCED_SEQS STORED_SEQS |
1755 | 135k | seq_t sequences[STORED_SEQS]; |
1756 | 135k | int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); |
1757 | 135k | seqState_t seqState; |
1758 | 135k | int seqNb; |
1759 | 135k | size_t prefetchPos = (size_t)(op-prefixStart); /* track position relative to prefixStart */ |
1760 | | |
1761 | 135k | dctx->fseEntropy = 1; |
1762 | 540k | { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } |
1763 | 135k | assert(dst != NULL); |
1764 | 135k | assert(iend >= ip); |
1765 | 135k | RETURN_ERROR_IF( |
1766 | 135k | ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), |
1767 | 135k | corruption_detected, ""); |
1768 | 135k | ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); |
1769 | 135k | ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); |
1770 | 135k | ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); |
1771 | | |
1772 | | /* prepare in advance */ |
1773 | 1.02M | for (seqNb=0; seqNb<seqAdvance; seqNb++) { |
1774 | 893k | seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1); |
1775 | 893k | prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); |
1776 | 893k | sequences[seqNb] = sequence; |
1777 | 893k | } |
1778 | | |
1779 | | /* decompress without stomping litBuffer */ |
1780 | 15.0M | for (; seqNb < nbSeq; seqNb++) { |
1781 | 14.9M | seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset, seqNb == nbSeq-1); |
1782 | | |
1783 | 14.9M | if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) { |
1784 | | /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ |
1785 | 9.35k | const size_t leftoverLit = dctx->litBufferEnd - litPtr; |
1786 | 9.35k | if (leftoverLit) |
1787 | 9.28k | { |
1788 | 9.28k | RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); |
1789 | 9.25k | ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); |
1790 | 9.25k | sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit; |
1791 | 9.25k | op += leftoverLit; |
1792 | 9.25k | } |
1793 | 9.32k | litPtr = dctx->litExtraBuffer; |
1794 | 9.32k | litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; |
1795 | 9.32k | dctx->litBufferLocation = ZSTD_not_in_dst; |
1796 | 9.32k | { size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); |
1797 | | #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) |
1798 | | assert(!ZSTD_isError(oneSeqSize)); |
1799 | 841 | ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); |
1800 | | #endif |
1801 | 9.32k | if (ZSTD_isError(oneSeqSize)) return oneSeqSize; |
1802 | | |
1803 | 9.24k | prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); |
1804 | 9.24k | sequences[seqNb & STORED_SEQS_MASK] = sequence; |
1805 | 9.24k | op += oneSeqSize; |
1806 | 9.24k | } } |
1807 | 14.9M | else |
1808 | 14.9M | { |
1809 | | /* lit buffer is either wholly contained in the first or second split, or not split at all */
1810 | 14.9M | size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ? |
1811 | 1.05M | ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : |
1812 | 14.9M | ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); |
1813 | | #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) |
1814 | | assert(!ZSTD_isError(oneSeqSize)); |
1815 | 12.9M | ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); |
1816 | | #endif |
1817 | 14.9M | if (ZSTD_isError(oneSeqSize)) return oneSeqSize; |
1818 | | |
1819 | 14.9M | prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); |
1820 | 14.9M | sequences[seqNb & STORED_SEQS_MASK] = sequence; |
1821 | 14.9M | op += oneSeqSize; |
1822 | 14.9M | } |
1823 | 14.9M | } |
1824 | 133k | RETURN_ERROR_IF(!BIT_endOfDStream(&seqState.DStream), corruption_detected, ""); |
1825 | | |
1826 | | /* finish queue */ |
1827 | 133k | seqNb -= seqAdvance; |
1828 | 1.01M | for ( ; seqNb<nbSeq ; seqNb++) { |
1829 | 879k | seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]); |
1830 | 879k | if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) { |
1831 | 21.5k | const size_t leftoverLit = dctx->litBufferEnd - litPtr; |
1832 | 21.5k | if (leftoverLit) { |
1833 | 21.2k | RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); |
1834 | 21.2k | ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); |
1835 | 21.2k | sequence->litLength -= leftoverLit; |
1836 | 21.2k | op += leftoverLit; |
1837 | 21.2k | } |
1838 | 21.5k | litPtr = dctx->litExtraBuffer; |
1839 | 21.5k | litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; |
1840 | 21.5k | dctx->litBufferLocation = ZSTD_not_in_dst; |
1841 | 21.5k | { size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); |
1842 | | #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) |
1843 | | assert(!ZSTD_isError(oneSeqSize)); |
1844 | 741 | ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); |
1845 | | #endif |
1846 | 21.5k | if (ZSTD_isError(oneSeqSize)) return oneSeqSize; |
1847 | 21.4k | op += oneSeqSize; |
1848 | 21.4k | } |
1849 | 21.4k | } |
1850 | 857k | else |
1851 | 857k | { |
1852 | 857k | size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ? |
1853 | 50.8k | ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : |
1854 | 857k | ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); |
1855 | | #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) |
1856 | | assert(!ZSTD_isError(oneSeqSize)); |
1857 | 632k | ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); |
1858 | | #endif |
1859 | 857k | if (ZSTD_isError(oneSeqSize)) return oneSeqSize; |
1860 | 857k | op += oneSeqSize; |
1861 | 857k | } |
1862 | 879k | } |
1863 | | |
1864 | | /* save reps for next block */ |
1865 | 532k | { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } |
1866 | 133k | } |
1867 | | |
1868 | | /* last literal segment */ |
1869 | 135k | if (dctx->litBufferLocation == ZSTD_split) { /* first deplete literal buffer in dst, then copy litExtraBuffer */ |
1870 | 1.98k | size_t const lastLLSize = litBufferEnd - litPtr; |
1871 | 1.98k | RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); |
1872 | 1.97k | if (op != NULL) { |
1873 | 1.97k | ZSTD_memmove(op, litPtr, lastLLSize); |
1874 | 1.97k | op += lastLLSize; |
1875 | 1.97k | } |
1876 | 1.97k | litPtr = dctx->litExtraBuffer; |
1877 | 1.97k | litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; |
1878 | 1.97k | } |
1879 | 135k | { size_t const lastLLSize = litBufferEnd - litPtr; |
1880 | 135k | RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); |
1881 | 135k | if (op != NULL) { |
1882 | 135k | ZSTD_memmove(op, litPtr, lastLLSize); |
1883 | 135k | op += lastLLSize; |
1884 | 135k | } |
1885 | 135k | } |
1886 | | |
1887 | 0 | return (size_t)(op - ostart); |
1888 | 135k | }
1889 | | |
1890 | | static size_t |
1891 | | ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx, |
1892 | | void* dst, size_t maxDstSize, |
1893 | | const void* seqStart, size_t seqSize, int nbSeq, |
1894 | | const ZSTD_longOffset_e isLongOffset) |
1895 | 0 | { |
1896 | 0 | return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1897 | 0 | } |
1898 | | #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ |
1899 | | |
1900 | | |
1901 | | |
1902 | | #if DYNAMIC_BMI2 |
1903 | | |
1904 | | #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG |
1905 | | static BMI2_TARGET_ATTRIBUTE size_t |
1906 | | DONT_VECTORIZE |
1907 | | ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, |
1908 | | void* dst, size_t maxDstSize, |
1909 | | const void* seqStart, size_t seqSize, int nbSeq, |
1910 | | const ZSTD_longOffset_e isLongOffset) |
1911 | 4.48M | { |
1912 | 4.48M | return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1913 | 4.48M | } |
1914 | | static BMI2_TARGET_ATTRIBUTE size_t |
1915 | | DONT_VECTORIZE |
1916 | | ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx, |
1917 | | void* dst, size_t maxDstSize, |
1918 | | const void* seqStart, size_t seqSize, int nbSeq, |
1919 | | const ZSTD_longOffset_e isLongOffset) |
1920 | 29.1k | { |
1921 | 29.1k | return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1922 | 29.1k | } |
1923 | | #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ |
1924 | | |
1925 | | #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT |
1926 | | static BMI2_TARGET_ATTRIBUTE size_t |
1927 | | ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, |
1928 | | void* dst, size_t maxDstSize, |
1929 | | const void* seqStart, size_t seqSize, int nbSeq, |
1930 | | const ZSTD_longOffset_e isLongOffset) |
1931 | 137k | { |
1932 | 137k | return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1933 | 137k | } |
1934 | | #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ |
1935 | | |
1936 | | #endif /* DYNAMIC_BMI2 */ |
1937 | | |
1938 | | typedef size_t (*ZSTD_decompressSequences_t)( |
1939 | | ZSTD_DCtx* dctx, |
1940 | | void* dst, size_t maxDstSize, |
1941 | | const void* seqStart, size_t seqSize, int nbSeq, |
1942 | | const ZSTD_longOffset_e isLongOffset); |
1943 | | |
1944 | | #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG |
1945 | | static size_t |
1946 | | ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, |
1947 | | const void* seqStart, size_t seqSize, int nbSeq, |
1948 | | const ZSTD_longOffset_e isLongOffset) |
1949 | 4.48M | { |
1950 | 4.48M | DEBUGLOG(5, "ZSTD_decompressSequences"); |
1951 | 4.48M | #if DYNAMIC_BMI2 |
1952 | 4.48M | if (ZSTD_DCtx_get_bmi2(dctx)) { |
1953 | 4.48M | return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1954 | 4.48M | } |
1955 | 0 | #endif |
1956 | 0 | return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1957 | 4.48M | } |
1958 | | static size_t |
1959 | | ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, |
1960 | | const void* seqStart, size_t seqSize, int nbSeq, |
1961 | | const ZSTD_longOffset_e isLongOffset) |
1962 | 29.1k | { |
1963 | 29.1k | DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer"); |
1964 | 29.1k | #if DYNAMIC_BMI2 |
1965 | 29.1k | if (ZSTD_DCtx_get_bmi2(dctx)) { |
1966 | 29.1k | return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1967 | 29.1k | } |
1968 | 0 | #endif |
1969 | 0 | return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1970 | 29.1k | } |
1971 | | #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ |
1972 | | |
1973 | | |
1974 | | #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT |
1975 | | /* ZSTD_decompressSequencesLong() : |
1976 | | * decompression function triggered when a minimum share of offsets is considered "long", |
1977 | | * aka out of cache. |
1978 | | * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance". |
1979 | | * This function will try to mitigate main memory latency through the use of prefetching */ |
1980 | | static size_t |
1981 | | ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, |
1982 | | void* dst, size_t maxDstSize, |
1983 | | const void* seqStart, size_t seqSize, int nbSeq, |
1984 | | const ZSTD_longOffset_e isLongOffset) |
1985 | 137k | { |
1986 | 137k | DEBUGLOG(5, "ZSTD_decompressSequencesLong"); |
1987 | 137k | #if DYNAMIC_BMI2 |
1988 | 137k | if (ZSTD_DCtx_get_bmi2(dctx)) { |
1989 | 137k | return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1990 | 137k | } |
1991 | 0 | #endif |
1992 | 0 | return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); |
1993 | 137k | } |
1994 | | #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ |
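
[editor's note] The wrappers above follow zstd's DYNAMIC_BMI2 dispatch pattern: the same body is instantiated twice, once under a BMI2-capable target attribute (BMI2_TARGET_ATTRIBUTE), and the entry point selects a variant at runtime. Below is a minimal sketch of that pattern, assuming a GCC/Clang x86-64 toolchain; the names byteSum* and the cpuHasBmi2 flag are illustrative, not from libzstd:

#include <stddef.h>

/* shared body; libzstd marks such bodies FORCE_INLINE so each caller below
 * receives its own copy, compiled with that caller's target attributes */
static inline size_t byteSum_body(const unsigned char* p, size_t n)
{
    size_t total = 0, i;
    for (i = 0; i < n; i++) total += p[i];
    return total;
}

__attribute__((target("bmi2")))   /* BMI2-enabled instantiation */
static size_t byteSum_bmi2(const unsigned char* p, size_t n)
{
    return byteSum_body(p, n);
}

static size_t byteSum_default(const unsigned char* p, size_t n)
{
    return byteSum_body(p, n);
}

/* entry point: one runtime branch, taken once per call */
static size_t byteSum(const unsigned char* p, size_t n, int cpuHasBmi2)
{
    return cpuHasBmi2 ? byteSum_bmi2(p, n) : byteSum_default(p, n);
}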
1995 | | |
1996 | | |
1997 | | /** |
1998 | | * @returns The total size of the history referenceable by zstd, including |
1999 | | * both the prefix and the extDict. At @p op any offset larger than this |
2000 | | * is invalid. |
2001 | | */ |
2002 | | static size_t ZSTD_totalHistorySize(BYTE* op, BYTE const* virtualStart) |
2003 | 4.65M | { |
2004 | 4.65M | return (size_t)(op - virtualStart); |
2005 | 4.65M | } |
2006 | | |
2007 | | typedef struct { |
2008 | | unsigned longOffsetShare; |
2009 | | unsigned maxNbAdditionalBits; |
2010 | | } ZSTD_OffsetInfo; |
2011 | | |
2012 | | /* ZSTD_getOffsetInfo() : |
2013 | | * condition : offTable must be valid |
2014 | | * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) |
2015 | | * compared to maximum possible of (1<<OffFSELog), |
2016 | | * as well as the maximum number of additional bits required. |
2017 | | */ |
2018 | | static ZSTD_OffsetInfo |
2019 | | ZSTD_getOffsetInfo(const ZSTD_seqSymbol* offTable, int nbSeq) |
2020 | 35.0k | { |
2021 | 35.0k | ZSTD_OffsetInfo info = {0, 0}; |
2022 | | /* If nbSeq == 0, then the offTable is uninitialized, but we have |
2023 | | * no sequences, so both values should be 0. |
2024 | | */ |
2025 | 35.0k | if (nbSeq != 0) { |
2026 | 35.0k | const void* ptr = offTable; |
2027 | 35.0k | U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog; |
2028 | 35.0k | const ZSTD_seqSymbol* table = offTable + 1; |
2029 | 35.0k | U32 const max = 1 << tableLog; |
2030 | 35.0k | U32 u; |
2031 | 35.0k | DEBUGLOG(5, "ZSTD_getOffsetInfo: (tableLog=%u)", tableLog); |
2032 | | |
2033 | 35.0k | assert(max <= (1 << OffFSELog)); /* max not too large */ |
2034 | 1.15M | for (u=0; u<max; u++) { |
2035 | 1.11M | info.maxNbAdditionalBits = MAX(info.maxNbAdditionalBits, table[u].nbAdditionalBits); |
2036 | 1.11M | if (table[u].nbAdditionalBits > 22) info.longOffsetShare += 1; |
2037 | 1.11M | } |
2038 | | |
2039 | 35.0k | assert(tableLog <= OffFSELog); |
2040 | 35.0k | info.longOffsetShare <<= (OffFSELog - tableLog); /* scale to OffFSELog */ |
2041 | 35.0k | } |
2042 | | |
2043 | 35.0k | return info; |
2044 | 35.0k | } |
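
[editor's note] A worked example of the scaling step above, with illustrative numbers: for a table with tableLog = 5, the loop scans max = 32 slots. If 4 of them carry nbAdditionalBits > 22, then longOffsetShare = 4 before scaling, and 4 << (OffFSELog - tableLog) = 4 << 3 = 32 after, i.e. 32/256 = 12.5% of the OffFSELog-normalized slot space (OffFSELog = 8 in zstd).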
2045 | | |
2046 | | /** |
2047 | | * @returns The maximum offset we can decode in one read of our bitstream, without |
2048 | | * reloading more bits in the middle of the offset bits read. Any offsets larger |
2049 | | * than this must use the long offset decoder. |
2050 | | */ |
2051 | | static size_t ZSTD_maxShortOffset(void) |
2052 | 0 | { |
2053 | 0 | if (MEM_64bits()) { |
2054 | | /* We can decode any offset without reloading bits. |
2055 | | * This might change if the max window size grows. |
2056 | | */ |
2057 | 0 | ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); |
2058 | 0 | return (size_t)-1; |
2059 | 0 | } else { |
2060 | | /* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1. |
2061 | | * This offBase would require STREAM_ACCUMULATOR_MIN extra bits. |
2062 | | * Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset. |
2063 | | */ |
2064 | 0 | size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1; |
2065 | 0 | size_t const maxOffset = maxOffbase - ZSTD_REP_NUM; |
2066 | 0 | assert(ZSTD_highbit32((U32)maxOffbase) == STREAM_ACCUMULATOR_MIN); |
2067 | 0 | return maxOffset; |
2068 | 0 | } |
2069 | 0 | } |
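
[editor's note] Plugging in the 32-bit constants (a worked check, assuming STREAM_ACCUMULATOR_MIN = 25 on 32-bit targets and ZSTD_REP_NUM = 3, as defined elsewhere in the library):
    maxOffbase = (1 << 26) - 1 = 67,108,863
    maxOffset  = 67,108,863 - 3 = 67,108,860
so on 32-bit builds any offset above roughly 64 MiB must go through the long-offset decoder.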
2070 | | |
2071 | | size_t |
2072 | | ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, |
2073 | | void* dst, size_t dstCapacity, |
2074 | | const void* src, size_t srcSize, const streaming_operation streaming) |
2075 | 4.67M | { /* blockType == blockCompressed */ |
2076 | 4.67M | const BYTE* ip = (const BYTE*)src; |
2077 | 4.67M | DEBUGLOG(5, "ZSTD_decompressBlock_internal (cSize : %u)", (unsigned)srcSize); |
2078 | | |
2079 | | /* Note : the wording of the specification |
2080 | | * allows a compressed block to be sized exactly ZSTD_blockSizeMax(dctx). |
2081 | | * This generally does not happen, as it makes little sense: |
2082 | | * an uncompressed block would feature the same size and cost nothing to decompress. |
2083 | | * Also note that decoders from reference libzstd versions before v1.5.4 |
2084 | | * would consider this edge case an error. |
2085 | | * As a consequence, avoid generating compressed blocks of size ZSTD_blockSizeMax(dctx) |
2086 | | * for broader compatibility with the deployed ecosystem of zstd decoders */ |
2087 | 4.67M | RETURN_ERROR_IF(srcSize > ZSTD_blockSizeMax(dctx), srcSize_wrong, ""); |
2088 | | |
2089 | | /* Decode literals section */ |
2090 | 4.67M | { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); |
2091 | 4.67M | DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : cSize=%u, nbLiterals=%zu", (U32)litCSize, dctx->litSize); |
2092 | 4.67M | if (ZSTD_isError(litCSize)) return litCSize; |
2093 | 4.65M | ip += litCSize; |
2094 | 4.65M | srcSize -= litCSize; |
2095 | 4.65M | } |
2096 | | |
2097 | | /* Build Decoding Tables */ |
2098 | 0 | { |
2099 | | /* Compute the maximum block size, which must also work when !frame and fParams are unset. |
2100 | | * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. |
2101 | | */ |
2102 | 4.65M | size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx)); |
2103 | 4.65M | size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((BYTE*)dst, blockSizeMax), (BYTE const*)dctx->virtualStart); |
2104 | | /* isLongOffset must be true if there are long offsets. |
2105 | | * Offsets are long if they are larger than ZSTD_maxShortOffset(). |
2106 | | * We don't expect that to be the case in 64-bit mode. |
2107 | | * |
2108 | | * We check here to see if our history is large enough to allow long offsets. |
2109 | | * If it isn't, then we can't possibly have (valid) long offsets. If the offset |
2110 | | * is invalid, then it is okay to read it incorrectly. |
2111 | | * |
2112 | | * If isLongOffset is true, then we will later check our decoding table to see |
2113 | | * if it is even possible to generate long offsets. |
2114 | | */ |
2115 | 4.65M | ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset())); |
2116 | | /* These macros control at build-time which decompressor implementation |
2117 | | * we use. If neither is defined, we do some inspection and dispatch at |
2118 | | * runtime. |
2119 | | */ |
2120 | 4.65M | #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ |
2121 | 4.65M | !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) |
2122 | 4.65M | int usePrefetchDecoder = dctx->ddictIsCold; |
2123 | | #else |
2124 | | /* Set to 1 to avoid computing offset info if we don't need to. |
2125 | | * Otherwise this value is ignored. |
2126 | | */ |
2127 | | int usePrefetchDecoder = 1; |
2128 | | #endif |
2129 | 4.65M | int nbSeq; |
2130 | 4.65M | size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); |
2131 | 4.65M | if (ZSTD_isError(seqHSize)) return seqHSize; |
2132 | 4.65M | ip += seqHSize; |
2133 | 4.65M | srcSize -= seqHSize; |
2134 | | |
2135 | 4.65M | RETURN_ERROR_IF((dst == NULL || dstCapacity == 0) && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); |
2136 | 4.64M | RETURN_ERROR_IF(MEM_64bits() && sizeof(size_t) == sizeof(void*) && (size_t)(-1) - (size_t)dst < (size_t)(1 << 20), dstSize_tooSmall, |
2137 | 4.64M | "invalid dst"); |
2138 | | |
2139 | | /* If we could potentially have long offsets, or we might want to use the prefetch decoder, |
2140 | | * compute information about the share of long offsets, and the maximum nbAdditionalBits. |
2141 | | * NOTE: could probably use a larger nbSeq limit |
2142 | | */ |
2143 | 4.64M | if (isLongOffset || (!usePrefetchDecoder && (totalHistorySize > (1u << 24)) && (nbSeq > 8))) { |
2144 | 35.0k | ZSTD_OffsetInfo const info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq); |
2145 | 35.0k | if (isLongOffset && info.maxNbAdditionalBits <= STREAM_ACCUMULATOR_MIN) { |
2146 | | /* If isLongOffset, but the maximum number of additional bits that we see in our table is small |
2147 | | * enough, then we know it is impossible to have too long an offset in this block, so we can |
2148 | | * use the regular offset decoder. |
2149 | | */ |
2150 | 0 | isLongOffset = ZSTD_lo_isRegularOffset; |
2151 | 0 | } |
2152 | 35.0k | if (!usePrefetchDecoder) { |
2153 | 35.0k | U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ |
2154 | 35.0k | usePrefetchDecoder = (info.longOffsetShare >= minShare); |
2155 | 35.0k | } |
2156 | 35.0k | } |
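
[editor's note] The percentages in the minShare comment above follow from the OffFSELog-normalized share: minShare is compared against a slot space of (1 << OffFSELog) = 256, so 7/256 ≈ 2.73% (64-bit) and 20/256 ≈ 7.81% (32-bit).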
2157 | | |
2158 | 4.64M | dctx->ddictIsCold = 0; |
2159 | | |
2160 | 4.64M | #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ |
2161 | 4.64M | !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) |
2162 | 4.64M | if (usePrefetchDecoder) { |
2163 | | #else |
2164 | | (void)usePrefetchDecoder; |
2165 | | { |
2166 | | #endif |
2167 | 137k | #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT |
2168 | 137k | return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); |
2169 | 137k | #endif |
2170 | 137k | } |
2171 | | |
2172 | 4.51M | #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG |
2173 | | /* else */ |
2174 | 4.51M | if (dctx->litBufferLocation == ZSTD_split) |
2175 | 29.1k | return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); |
2176 | 4.48M | else |
2177 | 4.48M | return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); |
2178 | 4.51M | #endif |
2179 | 4.51M | } |
2180 | 4.51M | } |
2181 | | |
2182 | | |
2183 | | ZSTD_ALLOW_POINTER_OVERFLOW_ATTR |
2184 | | void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) |
2185 | 32.5M | { |
2186 | 32.5M | if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */ |
2187 | 1.08M | dctx->dictEnd = dctx->previousDstEnd; |
2188 | 1.08M | dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); |
2189 | 1.08M | dctx->prefixStart = dst; |
2190 | 1.08M | dctx->previousDstEnd = dst; |
2191 | 1.08M | } |
2192 | 32.5M | } |
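
[editor's note] A worked example of the pointer bookkeeping above, with illustrative names: suppose a previous segment of n bytes was decoded at [P, P+n), so prefixStart = P and previousDstEnd = P + n. If the next block lands at a different address D, the function sets dictEnd = P + n, virtualStart = D - n, and prefixStart = D. For any later output position op in the new segment, ZSTD_totalHistorySize(op, virtualStart) = (op - D) + n: the bytes already written into the new prefix plus the n old bytes now reachable as extDict.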
2193 | | |
2194 | | |
2195 | | size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx, |
2196 | | void* dst, size_t dstCapacity, |
2197 | | const void* src, size_t srcSize) |
2198 | 15.1k | { |
2199 | 15.1k | size_t dSize; |
2200 | 15.1k | dctx->isFrameDecompression = 0; |
2201 | 15.1k | ZSTD_checkContinuity(dctx, dst, dstCapacity); |
2202 | 15.1k | dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, not_streaming); |
2203 | 15.1k | FORWARD_IF_ERROR(dSize, ""); |
2204 | 11.4k | dctx->previousDstEnd = (char*)dst + dSize; |
2205 | 11.4k | return dSize; |
2206 | 15.1k | } |
2207 | | |
2208 | | |
2209 | | /* NOTE: Must just wrap ZSTD_decompressBlock_deprecated() */ |
2210 | | size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, |
2211 | | void* dst, size_t dstCapacity, |
2212 | | const void* src, size_t srcSize) |
2213 | 15.1k | { |
2214 | 15.1k | return ZSTD_decompressBlock_deprecated(dctx, dst, dstCapacity, src, srcSize); |
2215 | 15.1k | } |
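
[editor's note] A hypothetical usage sketch of the block-level entry point above (not part of this file). It relies on the experimental buffer-less API declared in zstd.h (ZSTD_createDCtx, ZSTD_decompressBegin, ZSTD_decompressBlock, ZSTD_freeDCtx); note that ZSTD_decompressBlock is deprecated, as the wrapper's name suggests:

#define ZSTD_STATIC_LINKING_ONLY   /* block-level API lives in the experimental section */
#include <zstd.h>

/* Decode a single raw compressed block into dst.
 * Returns the decompressed size, or a zstd error code (test with ZSTD_isError). */
static size_t decodeOneBlock(void* dst, size_t dstCapacity,
                             const void* cBlock, size_t cBlockSize)
{
    size_t dSize;
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    if (dctx == NULL) return (size_t)-1;          /* allocation failure */
    dSize = ZSTD_decompressBegin(dctx);           /* reset entropy tables and history */
    if (!ZSTD_isError(dSize))
        dSize = ZSTD_decompressBlock(dctx, dst, dstCapacity, cBlock, cBlockSize);
    ZSTD_freeDCtx(dctx);
    return dSize;
}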