/src/zstd/lib/common/zstd_internal.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
3 | | * All rights reserved. |
4 | | * |
5 | | * This source code is licensed under both the BSD-style license (found in the |
6 | | * LICENSE file in the root directory of this source tree) and the GPLv2 (found |
7 | | * in the COPYING file in the root directory of this source tree). |
8 | | * You may select, at your option, one of the above-listed licenses. |
9 | | */ |
10 | | |
11 | | #ifndef ZSTD_CCOMMON_H_MODULE |
12 | | #define ZSTD_CCOMMON_H_MODULE |
13 | | |
14 | | /* this module contains definitions which must be identical |
15 | | * across compression, decompression and dictBuilder. |
16 | | * It also contains a few functions useful to at least 2 of them |
17 | | * and which benefit from being inlined */ |
18 | | |
19 | | /*-************************************* |
20 | | * Dependencies |
21 | | ***************************************/ |
22 | | #include "compiler.h" |
23 | | #include "cpu.h" |
24 | | #include "mem.h" |
25 | | #include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */ |
26 | | #include "error_private.h" |
27 | | #define ZSTD_STATIC_LINKING_ONLY |
28 | | #include "../zstd.h" |
29 | | #define FSE_STATIC_LINKING_ONLY |
30 | | #include "fse.h" |
31 | | #include "huf.h" |
32 | | #ifndef XXH_STATIC_LINKING_ONLY |
33 | | # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ |
34 | | #endif |
35 | | #include "xxhash.h" /* XXH_reset, update, digest */ |
36 | | #ifndef ZSTD_NO_TRACE |
37 | | # include "zstd_trace.h" |
38 | | #else |
39 | | # define ZSTD_TRACE 0 |
40 | | #endif |
41 | | |
42 | | #if defined (__cplusplus) |
43 | | extern "C" { |
44 | | #endif |
45 | | |
46 | | /* ---- static assert (debug) --- */ |
47 | 0 | #define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) |
48 | 0 | #define ZSTD_isError ERR_isError /* for inlining */ |
49 | 0 | #define FSE_isError ERR_isError |
50 | 0 | #define HUF_isError ERR_isError |
51 | | |
52 | | |
53 | | /*-************************************* |
54 | | * shared macros |
55 | | ***************************************/ |
56 | | #undef MIN |
57 | | #undef MAX |
58 | 8.14k | #define MIN(a,b) ((a)<(b) ? (a) : (b)) |
59 | 0 | #define MAX(a,b) ((a)>(b) ? (a) : (b)) |
60 | 0 | #define BOUNDED(min,val,max) (MAX(min,MIN(val,max))) |
61 | | |
62 | | |
63 | | /*-************************************* |
64 | | * Common constants |
65 | | ***************************************/ |
66 | 0 | #define ZSTD_OPT_NUM (1<<12) |
67 | | |
68 | 0 | #define ZSTD_REP_NUM 3 /* number of repcodes */ |
69 | | static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; |
70 | | |
71 | 0 | #define KB *(1 <<10) |
72 | 0 | #define MB *(1 <<20) |
73 | 0 | #define GB *(1U<<30) |
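
These size macros are written postfix-style: the numeric literal comes first and the macro supplies the multiplier, so `8 KB` expands to `8 *(1 <<10)`. A minimal illustration (variable names are hypothetical, not part of the header):

/* Postfix usage of the KB/MB size macros above (illustrative values). */
size_t const smallBufferSize = 128 KB;   /* 128 * (1 << 10) = 131072  */
size_t const windowSizeGuess = 8 MB;     /*   8 * (1 << 20) = 8388608 */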
74 | | |
75 | | #define BIT7 128 |
76 | | #define BIT6 64 |
77 | | #define BIT5 32 |
78 | | #define BIT4 16 |
79 | | #define BIT1 2 |
80 | | #define BIT0 1 |
81 | | |
82 | 0 | #define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 |
83 | | static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; |
84 | | static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; |
85 | | |
86 | 0 | #define ZSTD_FRAMEIDSIZE 4 /* magic number size */ |
87 | | |
88 | 0 | #define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ |
89 | | static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; |
90 | | typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; |
91 | | |
92 | | #define ZSTD_FRAMECHECKSUMSIZE 4 |
93 | | |
94 | | #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ |
95 | 0 | #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */ |
96 | | #define MIN_LITERALS_FOR_4_STREAMS 6 |
97 | | |
98 | | typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; |
99 | | |
100 | 0 | #define LONGNBSEQ 0x7F00 |
101 | | |
102 | 0 | #define MINMATCH 3 |
103 | | |
104 | 0 | #define Litbits 8 |
105 | 0 | #define LitHufLog 11 |
106 | 0 | #define MaxLit ((1<<Litbits) - 1) |
107 | 0 | #define MaxML 52 |
108 | 0 | #define MaxLL 35 |
109 | 0 | #define DefaultMaxOff 28 |
110 | 0 | #define MaxOff 31 |
111 | 0 | #define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */ |
112 | 0 | #define MLFSELog 9 |
113 | 0 | #define LLFSELog 9 |
114 | 0 | #define OffFSELog 8 |
115 | | #define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog) |
116 | | #define MaxMLBits 16 |
117 | | #define MaxLLBits 16 |
118 | | |
119 | | #define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */ |
120 | | /* Each table cannot take more than #symbols * FSELog bits */ |
121 | | #define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8) |
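
With the constants defined above, this bound works out to 133 bytes; the arithmetic, shown as an illustrative comment only (not part of the header):

/* Illustrative arithmetic for ZSTD_MAX_FSE_HEADERS_SIZE:
 *   MaxML+1  = 53 symbols * MLFSELog(9)  = 477 bits
 *   MaxLL+1  = 36 symbols * LLFSELog(9)  = 324 bits
 *   MaxOff+1 = 32 symbols * OffFSELog(8) = 256 bits
 *   (477 + 324 + 256 + 7) / 8 = 133 bytes
 */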
122 | | |
123 | | static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = { |
124 | | 0, 0, 0, 0, 0, 0, 0, 0, |
125 | | 0, 0, 0, 0, 0, 0, 0, 0, |
126 | | 1, 1, 1, 1, 2, 2, 3, 3, |
127 | | 4, 6, 7, 8, 9,10,11,12, |
128 | | 13,14,15,16 |
129 | | }; |
130 | | static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = { |
131 | | 4, 3, 2, 2, 2, 2, 2, 2, |
132 | | 2, 2, 2, 2, 2, 1, 1, 1, |
133 | | 2, 2, 2, 2, 2, 2, 2, 2, |
134 | | 2, 3, 2, 1, 1, 1, 1, 1, |
135 | | -1,-1,-1,-1 |
136 | | }; |
137 | | #define LL_DEFAULTNORMLOG 6 /* for static allocation */ |
138 | | static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG; |
139 | | |
140 | | static UNUSED_ATTR const U8 ML_bits[MaxML+1] = { |
141 | | 0, 0, 0, 0, 0, 0, 0, 0, |
142 | | 0, 0, 0, 0, 0, 0, 0, 0, |
143 | | 0, 0, 0, 0, 0, 0, 0, 0, |
144 | | 0, 0, 0, 0, 0, 0, 0, 0, |
145 | | 1, 1, 1, 1, 2, 2, 3, 3, |
146 | | 4, 4, 5, 7, 8, 9,10,11, |
147 | | 12,13,14,15,16 |
148 | | }; |
149 | | static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = { |
150 | | 1, 4, 3, 2, 2, 2, 2, 2, |
151 | | 2, 1, 1, 1, 1, 1, 1, 1, |
152 | | 1, 1, 1, 1, 1, 1, 1, 1, |
153 | | 1, 1, 1, 1, 1, 1, 1, 1, |
154 | | 1, 1, 1, 1, 1, 1, 1, 1, |
155 | | 1, 1, 1, 1, 1, 1,-1,-1, |
156 | | -1,-1,-1,-1,-1 |
157 | | }; |
158 | | #define ML_DEFAULTNORMLOG 6 /* for static allocation */ |
159 | | static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG; |
160 | | |
161 | | static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = { |
162 | | 1, 1, 1, 1, 1, 1, 2, 2, |
163 | | 2, 1, 1, 1, 1, 1, 1, 1, |
164 | | 1, 1, 1, 1, 1, 1, 1, 1, |
165 | | -1,-1,-1,-1,-1 |
166 | | }; |
167 | | #define OF_DEFAULTNORMLOG 5 /* for static allocation */ |
168 | | static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; |
169 | | |
170 | | |
171 | | /*-******************************************* |
172 | | * Shared functions to include for inlining |
173 | | *********************************************/ |
174 | 0 | static void ZSTD_copy8(void* dst, const void* src) { |
175 | | #if defined(ZSTD_ARCH_ARM_NEON) |
176 | | vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src)); |
177 | | #else |
178 | 0 | ZSTD_memcpy(dst, src, 8); |
179 | 0 | #endif |
180 | 0 | }
181 | 0 | #define COPY8(d,s) do { ZSTD_copy8(d,s); d+=8; s+=8; } while (0) |
182 | | |
183 | | /* Need to use memmove here since the literal buffer can now be located within |
184 | | the dst buffer. In circumstances where the op "catches up" to where the |
185 | | literal buffer is, there can be partial overlaps in this call on the final |
186 | | copy if the literal is being shifted by less than 16 bytes. */ |
187 | 0 | static void ZSTD_copy16(void* dst, const void* src) { |
188 | | #if defined(ZSTD_ARCH_ARM_NEON) |
189 | | vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src)); |
190 | | #elif defined(ZSTD_ARCH_X86_SSE2) |
191 | | _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src)); |
192 | | #elif defined(__clang__) |
193 | | ZSTD_memmove(dst, src, 16); |
194 | | #else |
195 | | /* ZSTD_memmove is not inlined properly by gcc */ |
196 | | BYTE copy16_buf[16]; |
197 | | ZSTD_memcpy(copy16_buf, src, 16); |
198 | | ZSTD_memcpy(dst, copy16_buf, 16); |
199 | | #endif |
200 | 0 | }
201 | 0 | #define COPY16(d,s) do { ZSTD_copy16(d,s); d+=16; s+=16; } while (0) |
202 | | |
203 | 0 | #define WILDCOPY_OVERLENGTH 32 |
204 | 0 | #define WILDCOPY_VECLEN 16 |
205 | | |
206 | | typedef enum { |
207 | | ZSTD_no_overlap, |
208 | | ZSTD_overlap_src_before_dst |
209 | | /* ZSTD_overlap_dst_before_src, */ |
210 | | } ZSTD_overlap_e; |
211 | | |
212 | | /*! ZSTD_wildcopy() : |
213 | |  *  Custom version of ZSTD_memcpy(); it may over-read/write up to WILDCOPY_OVERLENGTH bytes past dst+length (even when length==0).
214 | | * @param ovtype controls the overlap detection |
215 | | * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. |
216 | | * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart. |
217 | | * The src buffer must be before the dst buffer. |
218 | | */ |
219 | | MEM_STATIC FORCE_INLINE_ATTR |
220 | | void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype) |
221 | 0 | { |
222 | 0 | ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src; |
223 | 0 | const BYTE* ip = (const BYTE*)src; |
224 | 0 | BYTE* op = (BYTE*)dst; |
225 | 0 | BYTE* const oend = op + length; |
226 | |
227 | 0 | if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) { |
228 | | /* Handle short offset copies. */ |
229 | 0 | do { |
230 | 0 | COPY8(op, ip); |
231 | 0 | } while (op < oend); |
232 | 0 | } else { |
233 | 0 | assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN); |
234 | | /* Separate out the first COPY16() call because the copy length is |
235 | | * almost certain to be short, so the branches have different |
236 | | * probabilities. Since it is almost certain to be short, only do |
237 | | * one COPY16() in the first call. Then, do two calls per loop since |
238 | | * at that point it is more likely to have a high trip count. |
239 | | */ |
240 | 0 | ZSTD_copy16(op, ip); |
241 | 0 | if (16 >= length) return; |
242 | 0 | op += 16; |
243 | 0 | ip += 16; |
244 | 0 | do { |
245 | 0 | COPY16(op, ip); |
246 | 0 | COPY16(op, ip); |
247 | 0 | } |
248 | 0 | while (op < oend); |
249 | 0 | } |
250 | 0 | }
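
A minimal usage sketch of ZSTD_wildcopy(), assuming the caller has reserved WILDCOPY_OVERLENGTH bytes of slack past the end of the destination; the wrapper function and its parameter names are hypothetical:

/* Hypothetical sketch: copy `litLen` literal bytes with wildcopy.
 * The destination must tolerate writes up to WILDCOPY_OVERLENGTH bytes
 * past dst+litLen, which is why callers over-allocate the output. */
static void copyLiterals_sketch(BYTE* dst, const BYTE* lit, size_t litLen)
{
    /* caller guarantees: dst has litLen + WILDCOPY_OVERLENGTH capacity,
     * and lit does not overlap [dst, dst + litLen + WILDCOPY_OVERLENGTH) */
    ZSTD_wildcopy(dst, lit, (ptrdiff_t)litLen, ZSTD_no_overlap);
}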
251 | | |
252 | | MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) |
253 | 0 | { |
254 | 0 | size_t const length = MIN(dstCapacity, srcSize); |
255 | 0 | if (length > 0) { |
256 | 0 | ZSTD_memcpy(dst, src, length); |
257 | 0 | } |
258 | 0 | return length; |
259 | 0 | }
260 | | |
261 | | /* define "workspace is too large" as this number of times larger than needed */ |
262 | 0 | #define ZSTD_WORKSPACETOOLARGE_FACTOR 3 |
263 | | |
264 | | /* when the workspace stays too large
265 | |  * for at least this number of consecutive invocations,
266 | |  * the context's memory usage is considered wasteful,
267 | |  * because it is sized for a worst-case scenario that rarely happens.
268 | |  * In that case, resize it down to free some memory */
269 | 0 | #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 |
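
A sketch of the kind of check these two constants drive; this is only an illustration of the heuristic described above, not the actual workspace-management code, and the struct and function names are hypothetical:

/* Illustrative heuristic only: count how long the workspace has stayed more
 * than ZSTD_WORKSPACETOOLARGE_FACTOR times larger than needed, and report
 * that it should shrink once the streak exceeds the max duration. */
typedef struct { size_t workspaceSize; int oversizedDuration; } wkspTracker_sketch_t;

static int wkspTracker_shouldShrink_sketch(wkspTracker_sketch_t* t, size_t neededSize)
{
    if (t->workspaceSize > neededSize * ZSTD_WORKSPACETOOLARGE_FACTOR) {
        t->oversizedDuration++;      /* still oversized: extend the streak */
    } else {
        t->oversizedDuration = 0;    /* sized reasonably: reset the streak */
    }
    return t->oversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}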
270 | | |
271 | | /* Controls whether the input/output buffer is buffered or stable. */ |
272 | | typedef enum { |
273 | | ZSTD_bm_buffered = 0, /* Buffer the input/output */ |
274 | | ZSTD_bm_stable = 1 /* ZSTD_inBuffer/ZSTD_outBuffer is stable */ |
275 | | } ZSTD_bufferMode_e; |
276 | | |
277 | | |
278 | | /*-******************************************* |
279 | | * Private declarations |
280 | | *********************************************/ |
281 | | typedef struct seqDef_s { |
282 | | U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ |
283 | | U16 litLength; |
284 | | U16 mlBase; /* mlBase == matchLength - MINMATCH */ |
285 | | } seqDef; |
286 | | |
287 | | /* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */ |
288 | | typedef enum { |
289 | | ZSTD_llt_none = 0, /* no longLengthType */ |
290 | | ZSTD_llt_literalLength = 1, /* represents a long literal */ |
291 | | ZSTD_llt_matchLength = 2 /* represents a long match */ |
292 | | } ZSTD_longLengthType_e; |
293 | | |
294 | | typedef struct { |
295 | | seqDef* sequencesStart; |
296 | | seqDef* sequences; /* ptr to end of sequences */ |
297 | | BYTE* litStart; |
298 | | BYTE* lit; /* ptr to end of literals */ |
299 | | BYTE* llCode; |
300 | | BYTE* mlCode; |
301 | | BYTE* ofCode; |
302 | | size_t maxNbSeq; |
303 | | size_t maxNbLit; |
304 | | |
305 | | /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength |
306 | | * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment |
307 | | * the existing value of the litLength or matchLength by 0x10000. |
308 | | */ |
309 | | ZSTD_longLengthType_e longLengthType; |
310 | | U32 longLengthPos; /* Index of the sequence to apply long length modification to */ |
311 | | } seqStore_t; |
312 | | |
313 | | typedef struct { |
314 | | U32 litLength; |
315 | | U32 matchLength; |
316 | | } ZSTD_sequenceLength; |
317 | | |
318 | | /** |
319 | | * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences |
320 | | * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. |
321 | | */ |
322 | | MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq) |
323 | 0 | { |
324 | 0 | ZSTD_sequenceLength seqLen; |
325 | 0 | seqLen.litLength = seq->litLength; |
326 | 0 | seqLen.matchLength = seq->mlBase + MINMATCH; |
327 | 0 | if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { |
328 | 0 | if (seqStore->longLengthType == ZSTD_llt_literalLength) { |
329 | 0 | seqLen.litLength += 0x10000; |
330 | 0 | } |
331 | 0 | if (seqStore->longLengthType == ZSTD_llt_matchLength) { |
332 | 0 | seqLen.matchLength += 0x10000; |
333 | 0 | } |
334 | 0 | } |
335 | 0 | return seqLen; |
336 | 0 | }
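
A hypothetical sketch of iterating a seqStore_t with this helper, which is how the long-length correction described above is normally consumed; the function name is illustrative:

/* Hypothetical illustration: walk a seqStore_t and decode each sequence's
 * true lengths via ZSTD_getSequenceLength(). */
static size_t sumMatchLengths_sketch(const seqStore_t* seqStore)
{
    size_t total = 0;
    const seqDef* seq = seqStore->sequencesStart;
    for (; seq < seqStore->sequences; seq++) {
        ZSTD_sequenceLength const sl = ZSTD_getSequenceLength(seqStore, seq);
        total += sl.matchLength;   /* already includes the MINMATCH add-back */
    }
    return total;
}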
337 | | |
338 | | /** |
339 | | * Contains the compressed frame size and an upper-bound for the decompressed frame size. |
340 | | * Note: before using `compressedSize`, check for errors using ZSTD_isError(). |
341 | |  * Similarly, before using `decompressedBound`, check for errors using:
342 | | * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` |
343 | | */ |
344 | | typedef struct { |
345 | | size_t nbBlocks; |
346 | | size_t compressedSize; |
347 | | unsigned long long decompressedBound; |
348 | | } ZSTD_frameSizeInfo; /* decompress & legacy */ |
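
A sketch of the two checks recommended in the comment above; the helper name is hypothetical:

/* Hypothetical helper: validate a ZSTD_frameSizeInfo before using it. */
static int frameSizeInfo_isValid_sketch(ZSTD_frameSizeInfo const info)
{
    if (ZSTD_isError(info.compressedSize)) return 0;                 /* compressedSize carries an error code */
    if (info.decompressedBound == ZSTD_CONTENTSIZE_ERROR) return 0;  /* bound could not be determined */
    return 1;
}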
349 | | |
350 | | const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ |
351 | | int ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ |
352 | | |
353 | | |
354 | | /* ZSTD_invalidateRepCodes() : |
355 | | * ensures next compression will not use repcodes from previous block. |
356 | | * Note : only works with regular variant; |
357 | |  *        do not use with extDict variant! */
358 | | void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */ |
359 | | |
360 | | |
361 | | typedef struct { |
362 | | blockType_e blockType; |
363 | | U32 lastBlock; |
364 | | U32 origSize; |
365 | | } blockProperties_t; /* declared here for decompress and fullbench */ |
366 | | |
367 | | /*! ZSTD_getcBlockSize() : |
368 | | * Provides the size of compressed block from block header `src` */ |
369 | | /* Used by: decompress, fullbench */ |
370 | | size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, |
371 | | blockProperties_t* bpPtr); |
372 | | |
373 | | /*! ZSTD_decodeSeqHeaders() : |
374 | | * decode sequence header from src */ |
375 | | /* Used by: zstd_decompress_block, fullbench */ |
376 | | size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, |
377 | | const void* src, size_t srcSize); |
378 | | |
379 | | /** |
380 | | * @returns true iff the CPU supports dynamic BMI2 dispatch. |
381 | | */ |
382 | | MEM_STATIC int ZSTD_cpuSupportsBmi2(void) |
383 | 0 | { |
384 | 0 | ZSTD_cpuid_t cpuid = ZSTD_cpuid(); |
385 | 0 | return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid); |
386 | 0 | }
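
A sketch of the dynamic-dispatch pattern this check enables; the two code paths are hypothetical placeholders, not zstd functions:

/* Hypothetical illustration of dispatching on the BMI2 check above. */
static void decode_dispatch_sketch(void)
{
    int const bmi2 = ZSTD_cpuSupportsBmi2();
    if (bmi2) {
        /* would call a BMI2-tuned variant here */
    } else {
        /* would call the portable variant here */
    }
}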
387 | | |
388 | | #if defined (__cplusplus) |
389 | | } |
390 | | #endif |
391 | | |
392 | | #endif /* ZSTD_CCOMMON_H_MODULE */ |