/src/zstd/lib/compress/zstd_cwksp.h
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
#include "../common/portability_macros.h"
#include "../common/compiler.h" /* ZSTD_isPower2 */

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif

/* Tables and aligned buffers are aligned to 64 bytes */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_aligned_init_once,
    ZSTD_cwksp_alloc_aligned,
    ZSTD_cwksp_alloc_buffers
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all; they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                          ]
 * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Init once: these buffers need to be initialized at least once before
 *   use. They should be used when we want to skip memory initialization
 *   while not triggering memory checkers (like Valgrind) when reading from
 *   this memory without writing to it first.
 *   These buffers should be used carefully as they might contain data
 *   from previous compressions.
 *   Buffers are aligned to 64 bytes.
 *
 * - Aligned: these buffers don't require any initialization before they're
 *   used. The user of the buffer should make sure they write into a buffer
 *   location before reading from it.
 *   Buffers are aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Init once / Tables
 * 3. Aligned / Tables
 * 4. Buffers / Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 * (A usage sketch follows the struct definition below.)
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;
    void* initOnceStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
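
/*
 * Illustrative usage sketch (not part of the original header), showing the
 * lifecycle and the required reservation order on a caller-provided buffer.
 * The sizes are arbitrary placeholders; error handling is reduced to a single
 * check of the sticky allocFailed flag.
 *
 *     ZSTD_cwksp ws;
 *     void* const mem = malloc((size_t)1 << 20);
 *     ZSTD_cwksp_init(&ws, mem, (size_t)1 << 20, ZSTD_cwksp_static_alloc);
 *     {   void* const obj   = ZSTD_cwksp_reserve_object(&ws, 64);         // phase 1: objects
 *         void* const table = ZSTD_cwksp_reserve_table(&ws, 64 * 1024);   // phase 2: tables
 *         void* const algn  = ZSTD_cwksp_reserve_aligned64(&ws, 1024);    // phase 3: aligned
 *         BYTE* const buf   = ZSTD_cwksp_reserve_buffer(&ws, 256);        // phase 4: buffers
 *         assert(!ZSTD_cwksp_reserve_failed(&ws));
 *         (void)obj; (void)table; (void)algn; (void)buf;
 *     }
 *     ZSTD_cwksp_clear(&ws);   // tables/aligned/buffers invalidated; objects survive
 *     free(mem);
 */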

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
    assert(ws->workspace <= ws->initOnceStart);
#if ZSTD_MEMORY_SANITIZER
    {
        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
        (void)offset;
#if defined(ZSTD_MSAN_PRINT)
        if (offset != -1) {
            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
        }
#endif
        assert(offset == -1);
    }
#endif
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) {
    size_t const mask = align - 1;
    assert(ZSTD_isPower2(align));
    return (size + mask) & ~mask;
}
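
/* Worked example (illustrative, not from the original header):
 * ZSTD_cwksp_align(10, 8) == 16, while ZSTD_cwksp_align(16, 8) == 16 :
 * sizes already at a multiple of `align` are unchanged, everything else is
 * rounded up to the next multiple. */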

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}

MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment));
}

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) {
    return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES);
}

/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
     * bytes to align the beginning of the tables section and the end of the buffers.
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
    return slackSpace;
}


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert(ZSTD_isPower2(alignBytes));
    assert(bytes < alignBytes);
    return bytes;
}
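
/* Worked example (illustrative, not from the original header): with
 * alignBytes == 64 and a pointer whose low six bits are 40, the result is
 * (64 - 40) & 63 == 24 additional bytes; an already-aligned pointer gives
 * (64 - 0) & 63 == 0, which is why the final mask is needed. */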

/**
 * Returns the initial value for allocStart which is used to determine the position from
 * which we can allocate from the end of the workspace.
 */
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws)
{
    char* endPtr = (char*)ws->workspaceEnd;
    assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES));
    endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES);
    return (void*)endPtr;
}

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating initOnce / tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
            ws->tableValidEnd = ws->objectEnd;
            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);

            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void* const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void* const objectEnd = (BYTE*)alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment additional space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;  /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
                }
            }
        }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}
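
/* The phase enum behaves as a one-way state machine: each reserve helper
 * passes its own phase into this function, so reserving in the documented
 * order always succeeds, while going backwards is a caller bug. Illustrative
 * consequence (not from the original header): once ZSTD_cwksp_reserve_buffer()
 * has moved the wksp to ZSTD_cwksp_alloc_buffers, a later
 * ZSTD_cwksp_reserve_aligned64() call trips assert(phase >= ws->phase) in
 * debug builds. */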

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            /* We need to keep the redzone poisoned while unpoisoning the bytes that
             * are actually allocated. */
            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 * This memory has been initialized at least once in the past.
 * This doesn't mean it has been initialized this time, and it might contain data from previous
 * operations.
 * The main usage is for algorithms that might need read access into uninitialized memory.
 * The algorithm must maintain safety under these conditions and must make sure it doesn't
 * leak any of the past data (directly or in side channels).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    if (ptr && ptr < ws->initOnceStart) {
        /* We assume the memory following the current allocation is either:
         * 1. Not usable as initOnce memory (end of workspace)
         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
         * 3. An ASAN redzone, in which case we don't want to write on it
         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
         * Note that we assume here that MSAN and ASAN cannot run at the same time. */
        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
        ws->initOnceStart = ptr;
    }
#if ZSTD_MEMORY_SANITIZER
    assert(__msan_test_shadow(ptr, bytes) == -1);
#endif
    return ptr;
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes)
{
    void* const ptr = ZSTD_cwksp_reserve_internal(ws,
        ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
        ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to reuse them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
    void* alloc;
    void* end;
    void* top;

    /* We can only start allocating tables after we are done reserving space for objects at the
     * start of the workspace */
    if (ws->phase < phase) {
        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
            return NULL;
        }
    }
    alloc = ws->tableEnd;
    end = (BYTE*)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return alloc;
}
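
/* Illustrative sizing (the hashLog value is hypothetical): a hash table for
 * hashLog == 17 would be reserved as
 *     U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, ((size_t)1 << 17) * sizeof(U32));
 * which satisfies both size asserts above: 512 KiB is a multiple of
 * sizeof(U32) and of ZSTD_CWKSP_ALIGNMENT_BYTES. */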

/**
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE*)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

/**
 * with alignment control
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment)
{
    size_t const mask = alignment - 1;
    size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
    void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus);
    if (start == NULL) return NULL;
    if (surplus == 0) return start;
    assert(ZSTD_isPower2(alignment));
    return (void*)(((size_t)start + surplus) & ~mask);
}
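
/* Worked example (illustrative, not from the original header): on a 64-bit
 * target, requesting alignment == 64 over-reserves surplus == 64 - 8 == 56
 * bytes. start is at least 8-byte aligned, so start + 56 lands on or past the
 * next 64-byte boundary inside the reservation, and masking with ~mask snaps
 * it back onto that boundary without escaping the reserved range. */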

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table reuse logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty.
     * Since tableValidEnd space and initOnce space may overlap, we don't poison
     * the initOnce portion, as that would break its promise. This means that this
     * poisoning check isn't always applied fully. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        if ((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            __msan_poison(ws->objectEnd, size);
        } else {
            assert(ws->initOnceStart >= ws->objectEnd);
            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
        }
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context reuse logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace except for the areas in which we expect memory reuse
     * without initialization (objects, valid tables area and init once
     * memory). */
    {
        if ((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
            __msan_poison(ws->tableValidEnd, size);
        }
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0);  /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void* ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE)
    if (ptr != NULL && customMem.customFree != NULL) {
        __msan_unpoison(ptr, ZSTD_cwksp_sizeof(ws));
    }
#endif
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}
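
/* Illustrative dynamic lifecycle (not from the original header): pairing
 * ZSTD_cwksp_create() with ZSTD_cwksp_free(), using the default allocator.
 *
 *     ZSTD_cwksp ws;
 *     if (!ZSTD_isError(ZSTD_cwksp_create(&ws, (size_t)1 << 20, ZSTD_defaultCMem))) {
 *         ... reserve objects / tables / buffers ...
 *         ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);
 *     }
 */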

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable limit of the
 * actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws, size_t const estimatedSpace) {
    /* We have an alignment space between objects and tables and between tables and buffers, so we can have up to twice
     * the alignment bytes difference between estimation and actual usage */
    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
           ZSTD_cwksp_used(ws) <= estimatedSpace;
}


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}
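
/* Illustrative caller pattern (not from the original header): a context that
 * reuses its workspace can track oversize across successive compressions and
 * shrink only once the workspace has stayed wasteful for a while:
 *
 *     ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);
 *     if (ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
 *         ... free and re-create the workspace sized to neededSpace ...
 *     }
 */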

#endif /* ZSTD_CWKSP_H */