/src/zstd/lib/common/zstd_internal.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
3 | | * All rights reserved. |
4 | | * |
5 | | * This source code is licensed under both the BSD-style license (found in the |
6 | | * LICENSE file in the root directory of this source tree) and the GPLv2 (found |
7 | | * in the COPYING file in the root directory of this source tree). |
8 | | * You may select, at your option, one of the above-listed licenses. |
9 | | */ |
10 | | |
11 | | #ifndef ZSTD_CCOMMON_H_MODULE |
12 | | #define ZSTD_CCOMMON_H_MODULE |
13 | | |
14 | | /* this module contains definitions which must be identical |
15 | | * across compression, decompression and dictBuilder. |
16 | | * It also contains a few functions useful to at least 2 of them |
17 | | * and which benefit from being inlined */ |
18 | | |
19 | | /*-************************************* |
20 | | * Dependencies |
21 | | ***************************************/ |
22 | | #include "compiler.h" |
23 | | #include "cpu.h" |
24 | | #include "mem.h" |
25 | | #include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */ |
26 | | #include "error_private.h" |
27 | | #define ZSTD_STATIC_LINKING_ONLY |
28 | | #include "../zstd.h" |
29 | | #define FSE_STATIC_LINKING_ONLY |
30 | | #include "fse.h" |
31 | | #include "huf.h" |
32 | | #ifndef XXH_STATIC_LINKING_ONLY |
33 | | # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ |
34 | | #endif |
35 | | #include "xxhash.h" /* XXH_reset, update, digest */ |
36 | | #ifndef ZSTD_NO_TRACE |
37 | | # include "zstd_trace.h" |
38 | | #else |
39 | | # define ZSTD_TRACE 0 |
40 | | #endif |
41 | | |
/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)

/* Error-checking aliases : FSE/HUF/ZSTD error predicates all map onto the
 * shared ERR_isError() helper, so the compiler can inline a single check. */
#define ZSTD_isError ERR_isError /* for inlining */
#define FSE_isError ERR_isError
#define HUF_isError ERR_isError


/*-*************************************
*  shared macros
***************************************/
/* NOTE: these macros evaluate their arguments more than once;
 * do not pass expressions with side effects (e.g. `i++`). */
#undef MIN
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
/* Clamp `val` into [min, max]. */
#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))
57 | | |
58 | | |
/*-*************************************
*  Common constants
***************************************/
#define ZSTD_OPT_NUM    (1<<12)

#define ZSTD_REP_NUM      3                 /* number of repcodes */
/* Initial repcode values at the start of a frame. */
static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };

/* Unit macros, used postfix : `32 KB` expands to `32 *(1<<10)`. */
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

/* Single-bit masks for byte flag testing. */
#define BIT7 128
#define BIT6  64
#define BIT5  32
#define BIT4  16
#define BIT1   2
#define BIT0   1

#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
/* Frame-header field sizes, indexed by the 2-bit descriptor values. */
static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };

#define ZSTD_FRAMEIDSIZE 4   /* magic number size */

#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;

#define ZSTD_FRAMECHECKSUMSIZE 4

#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */)   /* for a non-null block */
#define MIN_LITERALS_FOR_4_STREAMS 6

/* How a symbol table is transmitted in a compressed block. */
typedef enum { set_basic, set_rle, set_compressed, set_repeat } SymbolEncodingType_e;

/* Sequence-count threshold above which the long (3-byte) nbSeq format is used. */
#define LONGNBSEQ 0x7F00

#define MINMATCH 3

#define Litbits  8
#define LitHufLog 11
#define MaxLit ((1<<Litbits) - 1)
/* Maximum symbol values for the match-length / literal-length / offset codes. */
#define MaxML   52
#define MaxLL   35
#define DefaultMaxOff 28
#define MaxOff 31
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
/* FSE table-log upper bounds for each sequence code. */
#define MLFSELog    9
#define LLFSELog    9
#define OffFSELog   8
#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
#define MaxMLBits 16
#define MaxLLBits 16

#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
118 | | |
/* Number of extra bits carried by each literal-length code (symbol -> nb bits). */
static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 6, 7, 8, 9,10,11,12,
    13,14,15,16
};
/* Default normalized frequency table for literal-length codes.
 * Negative entries follow FSE's "low probability" convention (see fse.h). */
static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
     4, 3, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 1, 1, 1,
     2, 2, 2, 2, 2, 2, 2, 2,
     2, 3, 2, 1, 1, 1, 1, 1,
    -1,-1,-1,-1
};
#define LL_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

/* Number of extra bits carried by each match-length code. */
static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 4, 5, 7, 8, 9,10,11,
    12,13,14,15,16
};
/* Default normalized frequency table for match-length codes. */
static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
     1, 4, 3, 2, 2, 2, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1,-1,-1,
    -1,-1,-1,-1,-1
};
#define ML_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;

/* Default normalized frequency table for offset codes
 * (only DefaultMaxOff+1 symbols are covered by the default distribution). */
static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
     1, 1, 1, 1, 1, 1, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
    -1,-1,-1,-1,-1
};
#define OF_DEFAULTNORMLOG 5  /* for static allocation */
static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
165 | | |
166 | | |
167 | | /*-******************************************* |
168 | | * Shared functions to include for inlining |
169 | | *********************************************/ |
/* Copies exactly 8 bytes from src to dst.
 * May be used on overlapping regions provided they are at least 8 bytes
 * apart (see the COPY8 loop in ZSTD_wildcopy). */
static void ZSTD_copy8(void* dst, const void* src) {
#if defined(ZSTD_ARCH_ARM_NEON) && !defined(__aarch64__)
    /* 32-bit ARM with NEON : explicit 64-bit vector load/store. */
    vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
#else
    /* Fixed-size memcpy; compilers turn this into a single 8-byte move. */
    ZSTD_memcpy(dst, src, 8);
#endif
}

/* Copy 8 bytes and advance both cursors; do/while(0) keeps it statement-safe. */
#define COPY8(d,s) do { ZSTD_copy8(d,s); d+=8; s+=8; } while (0)
178 | | |
/* Need to use memmove here since the literal buffer can now be located within
   the dst buffer. In circumstances where the op "catches up" to where the
   literal buffer is, there can be partial overlaps in this call on the final
   copy if the literal is being shifted by less than 16 bytes. */
static void ZSTD_copy16(void* dst, const void* src) {
#if defined(ZSTD_ARCH_ARM_NEON)
    /* Explicit 128-bit NEON load/store. */
    vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
#elif defined(ZSTD_ARCH_X86_SSE2)
    /* Explicit unaligned SSE2 load/store. */
    _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
#elif defined(ZSTD_ARCH_RISCV_RVV)
    /* RISC-V Vector extension : 16-byte load/store. */
    __riscv_vse8_v_u8m1((uint8_t*)dst, __riscv_vle8_v_u8m1((const uint8_t*)src, 16), 16);
#elif defined(__clang__)
    ZSTD_memmove(dst, src, 16);
#else
    /* ZSTD_memmove is not inlined properly by gcc */
    /* Bounce through a local buffer: the two memcpy calls together behave
     * like a memmove for partially overlapping regions. */
    BYTE copy16_buf[16];
    ZSTD_memcpy(copy16_buf, src, 16);
    ZSTD_memcpy(dst, copy16_buf, 16);
#endif
}

/* Copy 16 bytes and advance both cursors; do/while(0) keeps it statement-safe. */
#define COPY16(d,s) do { ZSTD_copy16(d,s); d+=16; s+=16; } while (0)
200 | | |
/* ZSTD_wildcopy() may over-read/write up to this many bytes past `length`. */
#define WILDCOPY_OVERLENGTH 32
/* Minimum src/dst distance assumed by the vectorized (16-byte) copy path. */
#define WILDCOPY_VECLEN 16

/* Overlap contract passed to ZSTD_wildcopy() -- see its documentation. */
typedef enum {
    ZSTD_no_overlap,
    ZSTD_overlap_src_before_dst
    /*  ZSTD_overlap_dst_before_src, */
} ZSTD_overlap_e;
209 | | |
210 | | /*! ZSTD_wildcopy() : |
211 | | * Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0) |
212 | | * @param ovtype controls the overlap detection |
213 | | * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. |
214 | | * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart. |
215 | | * The src buffer must be before the dst buffer. |
216 | | */ |
217 | | MEM_STATIC FORCE_INLINE_ATTR |
218 | | void ZSTD_wildcopy(void* dst, const void* src, size_t length, ZSTD_overlap_e const ovtype) |
219 | 1.32M | { |
220 | 1.32M | ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src; |
221 | 1.32M | const BYTE* ip = (const BYTE*)src; |
222 | 1.32M | BYTE* op = (BYTE*)dst; |
223 | 1.32M | BYTE* const oend = op + length; |
224 | | |
225 | 1.32M | if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) { |
226 | | /* Handle short offset copies. */ |
227 | 6.24M | do { |
228 | 6.24M | COPY8(op, ip); |
229 | 6.24M | } while (op < oend); |
230 | 935k | } else { |
231 | 935k | assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN); |
232 | | /* Separate out the first COPY16() call because the copy length is |
233 | | * almost certain to be short, so the branches have different |
234 | | * probabilities. Since it is almost certain to be short, only do |
235 | | * one COPY16() in the first call. Then, do two calls per loop since |
236 | | * at that point it is more likely to have a high trip count. |
237 | | */ |
238 | 935k | ZSTD_copy16(op, ip); |
239 | 935k | if (16 >= length) return; |
240 | 346k | op += 16; |
241 | 346k | ip += 16; |
242 | 3.54M | do { |
243 | 3.54M | COPY16(op, ip); |
244 | 3.54M | COPY16(op, ip); |
245 | 3.54M | } |
246 | 3.54M | while (op < oend); |
247 | 346k | } |
248 | 1.32M | } Unexecuted instantiation: zstd_common.c:ZSTD_wildcopy Unexecuted instantiation: zstd_compress.c:ZSTD_wildcopy Unexecuted instantiation: zstd_compress_literals.c:ZSTD_wildcopy Unexecuted instantiation: zstd_compress_sequences.c:ZSTD_wildcopy Unexecuted instantiation: zstd_compress_superblock.c:ZSTD_wildcopy Unexecuted instantiation: zstd_double_fast.c:ZSTD_wildcopy Unexecuted instantiation: zstd_fast.c:ZSTD_wildcopy zstd_lazy.c:ZSTD_wildcopy Line | Count | Source | 219 | 357k | { | 220 | 357k | ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src; | 221 | 357k | const BYTE* ip = (const BYTE*)src; | 222 | 357k | BYTE* op = (BYTE*)dst; | 223 | 357k | BYTE* const oend = op + length; | 224 | | | 225 | 357k | if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) { | 226 | | /* Handle short offset copies. */ | 227 | 0 | do { | 228 | 0 | COPY8(op, ip); | 229 | 0 | } while (op < oend); | 230 | 357k | } else { | 231 | 357k | assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN); | 232 | | /* Separate out the first COPY16() call because the copy length is | 233 | | * almost certain to be short, so the branches have different | 234 | | * probabilities. Since it is almost certain to be short, only do | 235 | | * one COPY16() in the first call. Then, do two calls per loop since | 236 | | * at that point it is more likely to have a high trip count. | 237 | | */ | 238 | 357k | ZSTD_copy16(op, ip); | 239 | 357k | if (16 >= length) return; | 240 | 160k | op += 16; | 241 | 160k | ip += 16; | 242 | 1.11M | do { | 243 | 1.11M | COPY16(op, ip); | 244 | 1.11M | COPY16(op, ip); | 245 | 1.11M | } | 246 | 1.11M | while (op < oend); | 247 | 160k | } | 248 | 357k | } |
Unexecuted instantiation: zstd_ldm.c:ZSTD_wildcopy Unexecuted instantiation: zstd_opt.c:ZSTD_wildcopy Unexecuted instantiation: zstd_preSplit.c:ZSTD_wildcopy Unexecuted instantiation: zstdmt_compress.c:ZSTD_wildcopy Unexecuted instantiation: zstd_decompress.c:ZSTD_wildcopy zstd_decompress_block.c:ZSTD_wildcopy Line | Count | Source | 219 | 968k | { | 220 | 968k | ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src; | 221 | 968k | const BYTE* ip = (const BYTE*)src; | 222 | 968k | BYTE* op = (BYTE*)dst; | 223 | 968k | BYTE* const oend = op + length; | 224 | | | 225 | 968k | if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) { | 226 | | /* Handle short offset copies. */ | 227 | 6.24M | do { | 228 | 6.24M | COPY8(op, ip); | 229 | 6.24M | } while (op < oend); | 230 | 577k | } else { | 231 | 577k | assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN); | 232 | | /* Separate out the first COPY16() call because the copy length is | 233 | | * almost certain to be short, so the branches have different | 234 | | * probabilities. Since it is almost certain to be short, only do | 235 | | * one COPY16() in the first call. Then, do two calls per loop since | 236 | | * at that point it is more likely to have a high trip count. | 237 | | */ | 238 | 577k | ZSTD_copy16(op, ip); | 239 | 577k | if (16 >= length) return; | 240 | 185k | op += 16; | 241 | 185k | ip += 16; | 242 | 2.42M | do { | 243 | 2.42M | COPY16(op, ip); | 244 | 2.42M | COPY16(op, ip); | 245 | 2.42M | } | 246 | 2.42M | while (op < oend); | 247 | 185k | } | 248 | 968k | } |
Unexecuted instantiation: huf_decompress.c:ZSTD_wildcopy Unexecuted instantiation: zstd_ddict.c:ZSTD_wildcopy |
249 | | |
250 | | MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) |
251 | 10.2M | { |
252 | 10.2M | size_t const length = MIN(dstCapacity, srcSize); |
253 | 10.2M | if (length > 0) { |
254 | 10.2M | ZSTD_memcpy(dst, src, length); |
255 | 10.2M | } |
256 | 10.2M | return length; |
257 | 10.2M | } Unexecuted instantiation: zstd_common.c:ZSTD_limitCopy zstd_compress.c:ZSTD_limitCopy Line | Count | Source | 251 | 9.42M | { | 252 | 9.42M | size_t const length = MIN(dstCapacity, srcSize); | 253 | 9.42M | if (length > 0) { | 254 | 9.40M | ZSTD_memcpy(dst, src, length); | 255 | 9.40M | } | 256 | 9.42M | return length; | 257 | 9.42M | } |
Unexecuted instantiation: zstd_compress_literals.c:ZSTD_limitCopy Unexecuted instantiation: zstd_compress_sequences.c:ZSTD_limitCopy Unexecuted instantiation: zstd_compress_superblock.c:ZSTD_limitCopy Unexecuted instantiation: zstd_double_fast.c:ZSTD_limitCopy Unexecuted instantiation: zstd_fast.c:ZSTD_limitCopy Unexecuted instantiation: zstd_lazy.c:ZSTD_limitCopy Unexecuted instantiation: zstd_ldm.c:ZSTD_limitCopy Unexecuted instantiation: zstd_opt.c:ZSTD_limitCopy Unexecuted instantiation: zstd_preSplit.c:ZSTD_limitCopy Unexecuted instantiation: zstdmt_compress.c:ZSTD_limitCopy zstd_decompress.c:ZSTD_limitCopy Line | Count | Source | 251 | 816k | { | 252 | 816k | size_t const length = MIN(dstCapacity, srcSize); | 253 | 816k | if (length > 0) { | 254 | 816k | ZSTD_memcpy(dst, src, length); | 255 | 816k | } | 256 | 816k | return length; | 257 | 816k | } |
Unexecuted instantiation: zstd_decompress_block.c:ZSTD_limitCopy Unexecuted instantiation: huf_decompress.c:ZSTD_limitCopy Unexecuted instantiation: zstd_ddict.c:ZSTD_limitCopy |
258 | | |
/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3

/* when workspace is continuously too large
 * during at least this number of times,
 * context's memory usage is considered wasteful,
 * because it's sized to handle a worst case scenario which rarely happens.
 * In which case, resize it down to free some memory */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128

/* Controls whether the input/output buffer is buffered or stable. */
typedef enum {
    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
} ZSTD_bufferMode_e;
274 | | |
275 | | |
276 | | /*-******************************************* |
277 | | * Private declarations |
278 | | *********************************************/ |
279 | | |
/**
 * Contains the compressed frame size and an upper-bound for the decompressed frame size.
 * Note: before using `compressedSize`, check for errors using ZSTD_isError().
 *       similarly, before using `decompressedBound`, check for errors using:
 *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
 */
typedef struct {
    size_t nbBlocks;                       /* number of blocks in the frame */
    size_t compressedSize;                 /* may carry an error code : check with ZSTD_isError() */
    unsigned long long decompressedBound;  /* may be ZSTD_CONTENTSIZE_ERROR */
} ZSTD_frameSizeInfo;   /* decompress & legacy */

/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */


/* Decoded block-header fields, filled by ZSTD_getcBlockSize(). */
typedef struct {
    blockType_e blockType;
    U32 lastBlock;   /* presumably non-zero for the frame's final block -- confirm in ZSTD_getcBlockSize() */
    U32 origSize;
} blockProperties_t;   /* declared here for decompress and fullbench */

/*! ZSTD_getcBlockSize() :
 *  Provides the size of compressed block from block header `src` */
/*  Used by: decompress, fullbench */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr);

/*! ZSTD_decodeSeqHeaders() :
 *  decode sequence header from src */
/*  Used by: zstd_decompress_block, fullbench */
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                             const void* src, size_t srcSize);
316 | | |
317 | | /** |
318 | | * @returns true iff the CPU supports dynamic BMI2 dispatch. |
319 | | */ |
320 | | MEM_STATIC int ZSTD_cpuSupportsBmi2(void) |
321 | 15.7k | { |
322 | 15.7k | ZSTD_cpuid_t cpuid = ZSTD_cpuid(); |
323 | 15.7k | return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid); |
324 | 15.7k | } Unexecuted instantiation: zstd_common.c:ZSTD_cpuSupportsBmi2 zstd_compress.c:ZSTD_cpuSupportsBmi2 Line | Count | Source | 321 | 5.54k | { | 322 | 5.54k | ZSTD_cpuid_t cpuid = ZSTD_cpuid(); | 323 | 5.54k | return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid); | 324 | 5.54k | } |
Unexecuted instantiation: zstd_compress_literals.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstd_compress_sequences.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstd_compress_superblock.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstd_double_fast.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstd_fast.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstd_lazy.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstd_ldm.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstd_opt.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstd_preSplit.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstdmt_compress.c:ZSTD_cpuSupportsBmi2 zstd_decompress.c:ZSTD_cpuSupportsBmi2 Line | Count | Source | 321 | 10.1k | { | 322 | 10.1k | ZSTD_cpuid_t cpuid = ZSTD_cpuid(); | 323 | 10.1k | return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid); | 324 | 10.1k | } |
Unexecuted instantiation: zstd_decompress_block.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: huf_decompress.c:ZSTD_cpuSupportsBmi2 Unexecuted instantiation: zstd_ddict.c:ZSTD_cpuSupportsBmi2 |
325 | | |
326 | | #endif /* ZSTD_CCOMMON_H_MODULE */ |