/src/zstd/lib/compress/zstd_cwksp.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
3 | | * All rights reserved. |
4 | | * |
5 | | * This source code is licensed under both the BSD-style license (found in the |
6 | | * LICENSE file in the root directory of this source tree) and the GPLv2 (found |
7 | | * in the COPYING file in the root directory of this source tree). |
8 | | * You may select, at your option, one of the above-listed licenses. |
9 | | */ |
10 | | |
11 | | #ifndef ZSTD_CWKSP_H |
12 | | #define ZSTD_CWKSP_H |
13 | | |
14 | | /*-************************************* |
15 | | * Dependencies |
16 | | ***************************************/ |
17 | | #include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */ |
18 | | #include "../common/zstd_internal.h" |
19 | | #include "../common/portability_macros.h" |
20 | | #include "../common/compiler.h" /* ZSTD_isPower2 */ |
21 | | |
22 | | /*-************************************* |
23 | | * Constants |
24 | | ***************************************/ |
25 | | |
26 | | /* Since the workspace is effectively its own little malloc implementation / |
27 | | * arena, when we run under ASAN, we should similarly insert redzones between |
28 | | * each internal element of the workspace, so ASAN will catch overruns that |
29 | | * reach outside an object but that stay inside the workspace. |
30 | | * |
31 | | * This defines the size of that redzone. |
32 | | */ |
33 | | #ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE |
34 | | #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128 |
35 | | #endif |
36 | | |
37 | | |
38 | | /* Align tables and "aligned" buffers on 64-byte boundaries */ |
39 | 210k | #define ZSTD_CWKSP_ALIGNMENT_BYTES 64 |
40 | | |
41 | | /*-************************************* |
42 | | * Structures |
43 | | ***************************************/ |
44 | | typedef enum { |
45 | | ZSTD_cwksp_alloc_objects, |
46 | | ZSTD_cwksp_alloc_aligned_init_once, |
47 | | ZSTD_cwksp_alloc_aligned, |
48 | | ZSTD_cwksp_alloc_buffers |
49 | | } ZSTD_cwksp_alloc_phase_e; |
50 | | |
51 | | /** |
52 | | * Used to describe whether the workspace is statically allocated (and will not |
53 | | * necessarily ever be freed), or dynamically allocated, in which case we can |
54 | | * expect a well-formed caller to free it. |
55 | | */ |
56 | | typedef enum { |
57 | | ZSTD_cwksp_dynamic_alloc, |
58 | | ZSTD_cwksp_static_alloc |
59 | | } ZSTD_cwksp_static_alloc_e; |
60 | | |
61 | | /** |
62 | | * Zstd fits all its internal data structures into a single contiguous buffer, |
63 | | * so that it only needs to perform a single OS allocation (or so that a buffer |
64 | | * can be provided to it and it can perform no allocations at all). This buffer |
65 | | * is called the workspace. |
66 | | * |
67 | | * Several optimizations complicate that process of allocating memory ranges |
68 | | * from this workspace for each internal data structure: |
69 | | * |
70 | | * - These different internal data structures have different setup requirements: |
71 | | * |
72 | | * - The static objects need to be cleared once and can then be trivially |
73 | | * reused for each compression. |
74 | | * |
75 | | * - Various buffers don't need to be initialized at all--they are always |
76 | | * written into before they're read. |
77 | | * |
78 | | * - The matchstate tables have a unique requirement: they don't need |
79 | | * their memory to be totally cleared, but they do need the memory to be |
80 | | * bounded, i.e., a guarantee that all values in the memory they've been |
81 | | * allocated are less than some maximum value (which is the starting value |
82 | | * for the indices that they will then use for compression). When this |
83 | | * guarantee is provided to them, they can use the memory without any setup |
84 | | * work. When it isn't, they have to clear the area. |
85 | | * |
86 | | * - These buffers also have different alignment requirements. |
87 | | * |
88 | | * - We would like to reuse the objects in the workspace for multiple |
89 | | * compressions without having to perform any expensive reallocation or |
90 | | * reinitialization work. |
91 | | * |
92 | | * - We would like to be able to efficiently reuse the workspace across |
93 | | * multiple compressions **even when the compression parameters change** and |
94 | | * we need to resize some of the objects (where possible). |
95 | | * |
96 | | * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp |
97 | | * abstraction was created. It works as follows: |
98 | | * |
99 | | * Workspace Layout: |
100 | | * |
101 | | * [ ... workspace ... ] |
102 | | * [objects][tables ->] free space [<- buffers][<- aligned][<- init once] |
103 | | * |
104 | | * The various objects that live in the workspace are divided into the |
105 | | * following categories, and are allocated separately: |
106 | | * |
107 | | * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict, |
108 | | * so that literally everything fits in a single buffer. Note: if present, |
109 | | * this must be the first object in the workspace, since ZSTD_customFree{CCtx, |
110 | | * CDict}() rely on a pointer comparison to see whether one or two frees are |
111 | | * required. |
112 | | * |
113 | | * - Fixed size objects: these are fixed-size, fixed-count objects that are |
114 | | * nonetheless "dynamically" allocated in the workspace so that we can |
115 | | * control how they're initialized separately from the broader ZSTD_CCtx. |
116 | | * Examples: |
117 | | * - Entropy Workspace |
118 | | * - 2 x ZSTD_compressedBlockState_t |
119 | | * - CDict dictionary contents |
120 | | * |
121 | | * - Tables: these are any of several different data structures (hash tables, |
122 | | * chain tables, binary trees) that all respect a common format: they are |
123 | | * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). |
124 | | * Their sizes depend on the cparams. These tables are 64-byte aligned. |
125 | | * |
126 | | * - Init once: these buffers must be initialized at least once before |
127 | | * use. They should be used when we want to skip memory initialization |
128 | | * while not triggering memory checkers (like Valgrind) when reading |
129 | | * from this memory without writing to it first. |
130 | | * These buffers should be used carefully as they might contain data |
131 | | * from previous compressions. |
132 | | * Buffers are aligned to 64 bytes. |
133 | | * |
134 | | * - Aligned: these buffers don't require any initialization before they're |
135 | | * used. The user of the buffer should make sure they write into a buffer |
136 | | * location before reading from it. |
137 | | * Buffers are aligned to 64 bytes. |
138 | | * |
139 | | * - Buffers: these buffers are used for various purposes that don't require |
140 | | * any alignment or initialization before they're used. This means they can |
141 | | * be moved around at no cost for a new compression. |
142 | | * |
143 | | * Allocating Memory: |
144 | | * |
145 | | * The various types of objects must be allocated in order, so they can be |
146 | | * correctly packed into the workspace buffer. That order is: |
147 | | * |
148 | | * 1. Objects |
149 | | * 2. Init once / Tables |
150 | | * 3. Aligned / Tables |
151 | | * 4. Buffers / Tables |
152 | | * |
153 | | * Attempts to reserve objects of different types out of order will fail. |
154 | | */ |
155 | | typedef struct { |
156 | | void* workspace; |
157 | | void* workspaceEnd; |
158 | | |
159 | | void* objectEnd; |
160 | | void* tableEnd; |
161 | | void* tableValidEnd; |
162 | | void* allocStart; |
163 | | void* initOnceStart; |
164 | | |
165 | | BYTE allocFailed; |
166 | | int workspaceOversizedDuration; |
167 | | ZSTD_cwksp_alloc_phase_e phase; |
168 | | ZSTD_cwksp_static_alloc_e isStatic; |
169 | | } ZSTD_cwksp; |
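To make the phase ordering above concrete, here is a minimal sketch of a hypothetical driver (not part of zstd) that fills a workspace in the required order, using the reserve functions declared later in this header; the sizes are illustrative assumptions only.

static int example_fill_in_phase_order(ZSTD_cwksp* ws)
{
    /* Phase 1: objects, packed at the front of the workspace */
    void* const obj = ZSTD_cwksp_reserve_object(ws, 8 * sizeof(void*));
    /* Phase 2: init-once / tables (table sizes must be multiples of 64 bytes) */
    U32* const table = (U32*)ZSTD_cwksp_reserve_table(ws, 1024 * sizeof(U32));
    /* Phase 3: aligned buffers, carved from the end of the workspace */
    void* const aligned = ZSTD_cwksp_reserve_aligned64(ws, 256);
    /* Phase 4: plain buffers, last; reserving an object at this point would fail */
    BYTE* const buf = ZSTD_cwksp_reserve_buffer(ws, 128);
    return (obj && table && aligned && buf) ? 0 : 1;
}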
170 | | |
171 | | /*-************************************* |
172 | | * Functions |
173 | | ***************************************/ |
174 | | |
175 | | MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws); |
176 | | MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws); |
177 | | |
178 | 290k | MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { |
179 | 290k | (void)ws; |
180 | 290k | assert(ws->workspace <= ws->objectEnd); |
181 | 290k | assert(ws->objectEnd <= ws->tableEnd); |
182 | 290k | assert(ws->objectEnd <= ws->tableValidEnd); |
183 | 290k | assert(ws->tableEnd <= ws->allocStart); |
184 | 290k | assert(ws->tableValidEnd <= ws->allocStart); |
185 | 290k | assert(ws->allocStart <= ws->workspaceEnd); |
186 | 290k | assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws)); |
187 | 290k | assert(ws->workspace <= ws->initOnceStart); |
188 | | #if ZSTD_MEMORY_SANITIZER |
189 | | { |
190 | | intptr_t const offset = __msan_test_shadow(ws->initOnceStart, |
191 | | (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart); |
192 | | (void)offset; |
193 | | #if defined(ZSTD_MSAN_PRINT) |
194 | | if(offset!=-1) { |
195 | | __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32); |
196 | | } |
197 | | #endif |
198 | | assert(offset==-1); |
199 | | }; |
200 | | #endif |
201 | 290k | }
202 | | |
203 | | /** |
204 | | * Align must be a power of 2. |
205 | | */ |
206 | 173k | MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) { |
207 | 173k | size_t const mask = align - 1; |
208 | 173k | assert(ZSTD_isPower2(align)); |
209 | 173k | return (size + mask) & ~mask; |
210 | 173k | }
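Two worked examples of this rounding (illustrative values):

assert(ZSTD_cwksp_align(100, 64) == 128);  /* (100 + 63) & ~63 == 128 */
assert(ZSTD_cwksp_align(128, 64) == 128);  /* already a multiple of 64: unchanged */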
211 | | |
212 | | /** |
213 | | * Use this to determine how much space in the workspace we will consume to |
214 | | * allocate this object. (Normally it should be exactly the size of the object, |
215 | | * but under special conditions, like ASAN, where we pad each object, it might |
216 | | * be larger.) |
217 | | * |
218 | | * Since tables aren't currently redzoned, you don't need to call through this |
219 | | * to figure out how much space you need for the matchState tables. |
220 | | * Everything else should go through this function, though. |
221 | | * |
222 | | * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size(). |
223 | | */ |
224 | 247k | MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { |
225 | 247k | if (size == 0) |
226 | 0 | return 0; |
227 | | #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) |
228 | | return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; |
229 | | #else |
230 | 247k | return size; |
231 | 247k | #endif |
232 | 247k | }
233 | | |
234 | 123k | MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) { |
235 | 123k | return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment)); |
236 | 123k | }
237 | | |
238 | | /** |
239 | | * Returns an adjusted alloc size, rounded up to a multiple of 64 bytes. |
240 | | * Used to determine the number of bytes required for a given "aligned" allocation. |
241 | | */ |
242 | 123k | MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) { |
243 | 123k | return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES); |
244 | 123k | }
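As a hedged sizing sketch (illustrative value): for a 4 KB aligned buffer,

size_t const cost = ZSTD_cwksp_aligned64_alloc_size(4096);
/* without ASAN: 4096 (already a multiple of 64, so no rounding occurs)
 * with ASAN redzones: 4096 + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE == 4352 */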
245 | | |
246 | | /** |
247 | | * Returns the amount of additional space the cwksp must allocate |
248 | | * for internal purposes (currently only alignment). |
249 | | */ |
250 | 15.4k | MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { |
251 | | /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES |
252 | | * bytes to align the beginning of the tables section and the end of the buffers. |
253 | | */ |
254 | 15.4k | size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2; |
255 | 15.4k | return slackSpace; |
256 | 15.4k | }
257 | | |
258 | | |
259 | | /** |
260 | | * Return the number of additional bytes required to align a pointer to the given number of bytes. |
261 | | * alignBytes must be a power of two. |
262 | | */ |
263 | 6.05k | MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) { |
264 | 6.05k | size_t const alignBytesMask = alignBytes - 1; |
265 | 6.05k | size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask; |
266 | 6.05k | assert(ZSTD_isPower2(alignBytes)); |
267 | 6.05k | assert(bytes < alignBytes); |
268 | 6.05k | return bytes; |
269 | 6.05k | }
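Worked example (hypothetical address): with ptr == 0x1010 and alignBytes == 64, the result is (64 - (0x1010 & 63)) & 63 == (64 - 16) & 63 == 48, so adding 48 bytes lands on 0x1040, the next 64-byte boundary; an already-aligned pointer yields 0.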
270 | | |
271 | | /** |
272 | | * Returns the initial value for allocStart, which is used to determine the position from |
273 | | * which we can allocate from the end of the workspace. |
274 | | */ |
275 | | MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) |
276 | 33.6k | { |
277 | 33.6k | char* endPtr = (char*)ws->workspaceEnd; |
278 | 33.6k | assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES)); |
279 | 33.6k | endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES); |
280 | 33.6k | return (void*)endPtr; |
281 | 33.6k | }
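Worked example (hypothetical address): if workspaceEnd == 0x10F0, then 0x10F0 % 64 == 48, and the initial allocStart is 0x10F0 - 48 == 0x10C0, so allocations taken from the end of the workspace begin 64-byte aligned.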
282 | | |
283 | | /** |
284 | | * Internal function. Do not use directly. |
285 | | * Reserves the given number of bytes within the aligned/buffer segment of the wksp, |
286 | | * which counts from the end of the wksp (as opposed to the object/table segment). |
287 | | * |
288 | | * Returns a pointer to the beginning of that space. |
289 | | */ |
290 | | MEM_STATIC void* |
291 | | ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) |
292 | 123k | { |
293 | 123k | void* const alloc = (BYTE*)ws->allocStart - bytes; |
294 | 123k | void* const bottom = ws->tableEnd; |
295 | 123k | DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining", |
296 | 123k | alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); |
297 | 123k | ZSTD_cwksp_assert_internal_consistency(ws); |
298 | 123k | assert(alloc >= bottom); |
299 | 123k | if (alloc < bottom) { |
300 | 0 | DEBUGLOG(4, "cwksp: alloc failed!"); |
301 | 0 | ws->allocFailed = 1; |
302 | 0 | return NULL; |
303 | 0 | } |
304 | | /* the area is reserved from the end of wksp. |
305 | | * If it overlaps with tableValidEnd, it voids guarantees on values' range */ |
306 | 123k | if (alloc < ws->tableValidEnd) { |
307 | 0 | ws->tableValidEnd = alloc; |
308 | 0 | } |
309 | 123k | ws->allocStart = alloc; |
310 | 123k | return alloc; |
311 | 123k | }
312 | | |
313 | | /** |
314 | | * Moves the cwksp to the next phase, and does any necessary allocations. |
315 | | * cwksp initialization must necessarily go through each phase in order. |
316 | | * Returns 0 on success, or a zstd error code. |
317 | | */ |
318 | | MEM_STATIC size_t |
319 | | ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) |
320 | 130k | { |
321 | 130k | assert(phase >= ws->phase); |
322 | 130k | if (phase > ws->phase) { |
323 | | /* Going from allocating objects to allocating initOnce / tables */ |
324 | 37.0k | if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once && |
325 | 37.0k | phase >= ZSTD_cwksp_alloc_aligned_init_once) { |
326 | 6.05k | ws->tableValidEnd = ws->objectEnd; |
327 | 6.05k | ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); |
328 | | |
329 | 6.05k | { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */ |
330 | 6.05k | void *const alloc = ws->objectEnd; |
331 | 6.05k | size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES); |
332 | 6.05k | void *const objectEnd = (BYTE *) alloc + bytesToAlign; |
333 | 6.05k | DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign); |
334 | 6.05k | RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation, |
335 | 6.05k | "table phase - alignment initial allocation failed!"); |
336 | 6.05k | ws->objectEnd = objectEnd; |
337 | 6.05k | ws->tableEnd = objectEnd; /* table area starts being empty */ |
338 | 6.05k | if (ws->tableValidEnd < ws->tableEnd) { |
339 | 6.05k | ws->tableValidEnd = ws->tableEnd; |
340 | 6.05k | } |
341 | 6.05k | } |
342 | 6.05k | } |
343 | 37.0k | ws->phase = phase; |
344 | 37.0k | ZSTD_cwksp_assert_internal_consistency(ws); |
345 | 37.0k | } |
346 | 130k | return 0; |
347 | 130k | }
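A minimal sketch (hypothetical usage, not part of zstd) of the one-way nature of phases: once a later phase has begun, earlier-phase reservations fail rather than rewinding.

static void example_phase_is_one_way(ZSTD_cwksp* ws)
{
    (void)ZSTD_cwksp_reserve_buffer(ws, 32);           /* advances the wksp to the buffers phase */
    assert(ZSTD_cwksp_reserve_object(ws, 64) == NULL); /* objects phase is over: alloc fails */
}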
348 | | |
349 | | /** |
350 | | * Returns whether this object/buffer/etc was allocated in this workspace. |
351 | | */ |
352 | | MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) |
353 | 6.05k | { |
354 | 6.05k | return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd); |
355 | 6.05k | }
356 | | |
357 | | /** |
358 | | * Internal function. Do not use directly. |
359 | | */ |
360 | | MEM_STATIC void* |
361 | | ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) |
362 | 123k | { |
363 | 123k | void* alloc; |
364 | 123k | if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) { |
365 | 0 | return NULL; |
366 | 0 | } |
367 | | |
368 | | #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) |
369 | | /* over-reserve space */ |
370 | | bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; |
371 | | #endif |
372 | | |
373 | 123k | alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes); |
374 | | |
375 | | #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) |
376 | | /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on |
377 | | * either side. */ |
378 | | if (alloc) { |
379 | | alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; |
380 | | if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { |
381 | | /* We need to keep the redzone poisoned while unpoisoning the bytes that |
382 | | * are actually allocated. */ |
383 | | __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE); |
384 | | } |
385 | | } |
386 | | #endif |
387 | | |
388 | 123k | return alloc; |
389 | 123k | }
390 | | |
391 | | /** |
392 | | * Reserves and returns unaligned memory. |
393 | | */ |
394 | | MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) |
395 | 92.9k | { |
396 | 92.9k | return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers); |
397 | 92.9k | }
398 | | |
399 | | /** |
400 | | * Reserves and returns memory sized and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). |
401 | | * This memory has been initialized at least once in the past. |
402 | | * This doesn't mean it has been initialized this time, and it might contain data from previous |
403 | | * operations. |
404 | | * The main usage is for algorithms that might need read access into uninitialized memory. |
405 | | * The algorithm must maintain safety under these conditions and must make sure it doesn't |
406 | | * leak any of the past data (directly or in side channels). |
407 | | */ |
408 | | MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes) |
409 | 15.4k | { |
410 | 15.4k | size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES); |
411 | 15.4k | void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once); |
412 | 15.4k | assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); |
413 | 15.4k | if(ptr && ptr < ws->initOnceStart) { |
414 | | /* We assume the memory following the current allocation is either: |
415 | | * 1. Not usable as initOnce memory (end of workspace) |
416 | | * 2. Another initOnce buffer that has been allocated before (and so was previously memset) |
417 | | * 3. An ASAN redzone, in which case we don't want to write on it |
418 | | * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart. |
419 | | * Note that we assume here that MSAN and ASAN cannot run at the same time. */ |
420 | 6.05k | ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes)); |
421 | 6.05k | ws->initOnceStart = ptr; |
422 | 6.05k | } |
423 | | #if ZSTD_MEMORY_SANITIZER |
424 | | assert(__msan_test_shadow(ptr, bytes) == -1); |
425 | | #endif |
426 | 15.4k | return ptr; |
427 | 15.4k | }
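A hedged usage sketch (hypothetical helper and size): an algorithm that tolerates stale-but-initialized contents, such as a hash table whose entries are validated before use, can take its storage here and skip a per-compression memset.

static U32* example_reserve_hash(ZSTD_cwksp* ws, size_t hashLog)
{
    /* zeroed the first time this region is handed out; on reuse it may hold
     * values from a previous compression, which the caller must tolerate */
    return (U32*)ZSTD_cwksp_reserve_aligned_init_once(ws, ((size_t)1 << hashLog) * sizeof(U32));
}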
428 | | |
429 | | /** |
430 | | * Reserves and returns memory sized and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). |
431 | | */ |
432 | | MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes) |
433 | 15.4k | { |
434 | 15.4k | void* const ptr = ZSTD_cwksp_reserve_internal(ws, |
435 | 15.4k | ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES), |
436 | 15.4k | ZSTD_cwksp_alloc_aligned); |
437 | 15.4k | assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); |
438 | 15.4k | return ptr; |
439 | 15.4k | }
440 | | |
441 | | /** |
442 | | * Aligned on 64 bytes. These buffers have the special property that |
443 | | * their values remain constrained, allowing us to reuse them without |
444 | | * memset()-ing them. |
445 | | */ |
446 | | MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) |
447 | 46.4k | { |
448 | 46.4k | const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once; |
449 | 46.4k | void* alloc; |
450 | 46.4k | void* end; |
451 | 46.4k | void* top; |
452 | | |
453 | | /* We can only start allocating tables after we are done reserving space for objects at the |
454 | | * start of the workspace */ |
455 | 46.4k | if(ws->phase < phase) { |
456 | 6.05k | if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) { |
457 | 0 | return NULL; |
458 | 0 | } |
459 | 6.05k | } |
460 | 46.4k | alloc = ws->tableEnd; |
461 | 46.4k | end = (BYTE *)alloc + bytes; |
462 | 46.4k | top = ws->allocStart; |
463 | | |
464 | 46.4k | DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining", |
465 | 46.4k | alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); |
466 | 46.4k | assert((bytes & (sizeof(U32)-1)) == 0); |
467 | 46.4k | ZSTD_cwksp_assert_internal_consistency(ws); |
468 | 46.4k | assert(end <= top); |
469 | 46.4k | if (end > top) { |
470 | 0 | DEBUGLOG(4, "cwksp: table alloc failed!"); |
471 | 0 | ws->allocFailed = 1; |
472 | 0 | return NULL; |
473 | 0 | } |
474 | 46.4k | ws->tableEnd = end; |
475 | | |
476 | | #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) |
477 | | if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { |
478 | | __asan_unpoison_memory_region(alloc, bytes); |
479 | | } |
480 | | #endif |
481 | | |
482 | 46.4k | assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); |
483 | 46.4k | assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); |
484 | 46.4k | return alloc; |
485 | 46.4k | }
486 | | |
487 | | /** |
488 | | * Aligned on sizeof(void*). |
489 | | * Note : should happen only once, at workspace first initialization |
490 | | */ |
491 | | MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) |
492 | 18.1k | { |
493 | 18.1k | size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*)); |
494 | 18.1k | void* alloc = ws->objectEnd; |
495 | 18.1k | void* end = (BYTE*)alloc + roundedBytes; |
496 | | |
497 | | #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) |
498 | | /* over-reserve space */ |
499 | | end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; |
500 | | #endif |
501 | | |
502 | 18.1k | DEBUGLOG(4, |
503 | 18.1k | "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining", |
504 | 18.1k | alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes); |
505 | 18.1k | assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0); |
506 | 18.1k | assert(bytes % ZSTD_ALIGNOF(void*) == 0); |
507 | 18.1k | ZSTD_cwksp_assert_internal_consistency(ws); |
508 | | /* we must be in the first phase, no advance is possible */ |
509 | 18.1k | if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) { |
510 | 0 | DEBUGLOG(3, "cwksp: object alloc failed!"); |
511 | 0 | ws->allocFailed = 1; |
512 | 0 | return NULL; |
513 | 0 | } |
514 | 18.1k | ws->objectEnd = end; |
515 | 18.1k | ws->tableEnd = end; |
516 | 18.1k | ws->tableValidEnd = end; |
517 | | |
518 | | #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) |
519 | | /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on |
520 | | * either side. */ |
521 | | alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; |
522 | | if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { |
523 | | __asan_unpoison_memory_region(alloc, bytes); |
524 | | } |
525 | | #endif |
526 | | |
527 | 18.1k | return alloc; |
528 | 18.1k | }
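A short sketch (hypothetical sizes, not zstd's actual layout): fixed objects are reserved exactly once, immediately after workspace initialization and before any table or buffer.

static int example_reserve_fixed_objects(ZSTD_cwksp* ws)
{
    void* const entropyWksp = ZSTD_cwksp_reserve_object(ws, 512); /* multiple of sizeof(void*) */
    void* const blockState = ZSTD_cwksp_reserve_object(ws, 128);
    return (entropyWksp != NULL) && (blockState != NULL);
}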
529 | | /** |
530 | | * with alignment control |
531 | | * Note : should happen only once, at workspace first initialization |
532 | | */ |
533 | | MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment) |
534 | 0 | { |
535 | 0 | size_t const mask = alignment - 1; |
536 | 0 | size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0; |
537 | 0 | void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus); |
538 | 0 | if (start == NULL) return NULL; |
539 | 0 | if (surplus == 0) return start; |
540 | 0 | assert(ZSTD_isPower2(alignment)); |
541 | 0 | return (void*)(((size_t)start + surplus) & ~mask); |
542 | 0 | }
543 | | |
544 | | MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) |
545 | 6.36k | { |
546 | 6.36k | DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty"); |
547 | | |
548 | | #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) |
549 | | /* To validate that the table reuse logic is sound, and that we don't |
550 | | * access table space that we haven't cleaned, we re-"poison" the table |
551 | | * space every time we mark it dirty. |
552 | | * Since tableValidEnd space and initOnce space may overlap we don't poison |
553 | | * the initOnce portion as it break its promise. This means that this poisoning |
554 | | * check isn't always applied fully. */ |
555 | | { |
556 | | size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; |
557 | | assert(__msan_test_shadow(ws->objectEnd, size) == -1); |
558 | | if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { |
559 | | __msan_poison(ws->objectEnd, size); |
560 | | } else { |
561 | | assert(ws->initOnceStart >= ws->objectEnd); |
562 | | __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd); |
563 | | } |
564 | | } |
565 | | #endif |
566 | | |
567 | 6.36k | assert(ws->tableValidEnd >= ws->objectEnd); |
568 | 6.36k | assert(ws->tableValidEnd <= ws->allocStart); |
569 | 6.36k | ws->tableValidEnd = ws->objectEnd; |
570 | 6.36k | ZSTD_cwksp_assert_internal_consistency(ws); |
571 | 6.36k | }
572 | | |
573 | 15.8k | MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) { |
574 | 15.8k | DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean"); |
575 | 15.8k | assert(ws->tableValidEnd >= ws->objectEnd); |
576 | 15.8k | assert(ws->tableValidEnd <= ws->allocStart); |
577 | 15.8k | if (ws->tableValidEnd < ws->tableEnd) { |
578 | 6.36k | ws->tableValidEnd = ws->tableEnd; |
579 | 6.36k | } |
580 | 15.8k | ZSTD_cwksp_assert_internal_consistency(ws); |
581 | 15.8k | }
582 | | |
583 | | /** |
584 | | * Zero the part of the allocated tables not already marked clean. |
585 | | */ |
586 | 15.4k | MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { |
587 | 15.4k | DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables"); |
588 | 15.4k | assert(ws->tableValidEnd >= ws->objectEnd); |
589 | 15.4k | assert(ws->tableValidEnd <= ws->allocStart); |
590 | 15.4k | if (ws->tableValidEnd < ws->tableEnd) { |
591 | 6.05k | ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd)); |
592 | 6.05k | } |
593 | 15.4k | ZSTD_cwksp_mark_tables_clean(ws); |
594 | 15.4k | }
595 | | |
596 | | /** |
597 | | * Invalidates table allocations. |
598 | | * All other allocations remain valid. |
599 | | */ |
600 | | MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) |
601 | 15.4k | { |
602 | 15.4k | DEBUGLOG(4, "cwksp: clearing tables!"); |
603 | | |
604 | | #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) |
605 | | /* We don't do this when the workspace is statically allocated, because |
606 | | * when that is the case, we have no capability to hook into the end of the |
607 | | * workspace's lifecycle to unpoison the memory. |
608 | | */ |
609 | | if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { |
610 | | size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; |
611 | | __asan_poison_memory_region(ws->objectEnd, size); |
612 | | } |
613 | | #endif |
614 | | |
615 | 15.4k | ws->tableEnd = ws->objectEnd; |
616 | 15.4k | ZSTD_cwksp_assert_internal_consistency(ws); |
617 | 15.4k | }
618 | | |
619 | | /** |
620 | | * Invalidates all buffer, aligned, and table allocations. |
621 | | * Object allocations remain valid. |
622 | | */ |
623 | 21.5k | MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { |
624 | 21.5k | DEBUGLOG(4, "cwksp: clearing!"); |
625 | | |
626 | | #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) |
627 | | /* To validate that the context reuse logic is sound, and that we don't |
628 | | * access stuff that this compression hasn't initialized, we re-"poison" |
629 | | * the workspace except for the areas in which we expect memory reuse |
630 | | * without initialization (objects, valid tables area and init once |
631 | | * memory). */ |
632 | | { |
633 | | if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) { |
634 | | size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd; |
635 | | __msan_poison(ws->tableValidEnd, size); |
636 | | } |
637 | | } |
638 | | #endif |
639 | | |
640 | | #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) |
641 | | /* We don't do this when the workspace is statically allocated, because |
642 | | * when that is the case, we have no capability to hook into the end of the |
643 | | * workspace's lifecycle to unpoison the memory. |
644 | | */ |
645 | | if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { |
646 | | size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd; |
647 | | __asan_poison_memory_region(ws->objectEnd, size); |
648 | | } |
649 | | #endif |
650 | | |
651 | 21.5k | ws->tableEnd = ws->objectEnd; |
652 | 21.5k | ws->allocStart = ZSTD_cwksp_initialAllocStart(ws); |
653 | 21.5k | ws->allocFailed = 0; |
654 | 21.5k | if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) { |
655 | 9.44k | ws->phase = ZSTD_cwksp_alloc_aligned_init_once; |
656 | 9.44k | } |
657 | 21.5k | ZSTD_cwksp_assert_internal_consistency(ws); |
658 | 21.5k | }
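
ZSTD_cwksp_clear() is what lets a single allocation serve many compressions: everything above the objects region is handed back and re-carved on the next use. A sketch of that reuse cycle under the same in-tree assumptions (the reserve call is this header's own ZSTD_cwksp_reserve_buffer; the sizes are illustrative):

    #include "zstd_cwksp.h"

    /* Sketch: two back-to-back "compressions" sharing one workspace buffer. */
    static void example_reuse_cycle(ZSTD_cwksp* ws)
    {
        BYTE* const buf1 = ZSTD_cwksp_reserve_buffer(ws, 1024);
        ZSTD_cwksp_clear(ws);  /* invalidates buf1; object allocations survive */
        BYTE* const buf2 = ZSTD_cwksp_reserve_buffer(ws, 1024);
        /* buf2 typically aliases buf1's old memory: no new OS allocation. */
        (void)buf1; (void)buf2;
    }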
659 | | |
660 | 15.4k | MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) { |
661 | 15.4k | return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace); |
662 | 15.4k | }
663 | | |
664 | 0 | MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) { |
665 | 0 | return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace) |
666 | 0 | + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart); |
667 | 0 | }
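
These two accessors reflect the two-ended arena layout: tables grow up from the bottom of the workspace while buffers and aligneds grow down from the top, so occupancy is the sum of both ends and the free space is the gap in the middle. A sketch of that identity (hypothetical helper name):

    #include <assert.h>
    #include "zstd_cwksp.h"

    /* Free space is whatever the two ends have not yet claimed. */
    static size_t example_free_space(ZSTD_cwksp* ws)
    {
        size_t const gap = ZSTD_cwksp_sizeof(ws) - ZSTD_cwksp_used(ws);
        assert(gap == ZSTD_cwksp_available_space(ws));
        return gap;
    }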
668 | | |
669 | | /** |
670 | | * The provided workspace takes ownership of the buffer [start, start+size). |
671 | | * Any existing values in the workspace are ignored (the previously managed |
672 | | * buffer, if present, must be separately freed). |
673 | | */ |
674 | 6.05k | MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) { |
675 | 6.05k | DEBUGLOG(4, "cwksp: init'ing workspace with %zu bytes", size);
676 | 6.05k | assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ |
677 | 6.05k | ws->workspace = start; |
678 | 6.05k | ws->workspaceEnd = (BYTE*)start + size; |
679 | 6.05k | ws->objectEnd = ws->workspace; |
680 | 6.05k | ws->tableValidEnd = ws->objectEnd; |
681 | 6.05k | ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws); |
682 | 6.05k | ws->phase = ZSTD_cwksp_alloc_objects; |
683 | 6.05k | ws->isStatic = isStatic; |
684 | 6.05k | ZSTD_cwksp_clear(ws); |
685 | 6.05k | ws->workspaceOversizedDuration = 0; |
686 | 6.05k | ZSTD_cwksp_assert_internal_consistency(ws); |
687 | 6.05k | }
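
ZSTD_cwksp_init() is also the entry point for caller-owned (static) workspaces, as used by ZSTD_initStaticCCtx(). A minimal sketch with a pointer-aligned static buffer, satisfying the alignment assert above; the 64 KiB size is illustrative only, not a valid bound for a real compression workspace:

    #include "zstd_cwksp.h"

    /* Sketch: wrap a caller-owned buffer; no allocation happens, and the caller
     * (not ZSTD_cwksp_free) remains responsible for the buffer's lifetime. */
    static void example_static_init(void)
    {
        static union { void* align; BYTE bytes[64 * 1024]; } mem;  /* pointer-aligned */
        ZSTD_cwksp ws;
        ZSTD_cwksp_init(&ws, mem.bytes, sizeof(mem.bytes), ZSTD_cwksp_static_alloc);
        /* ws now carves all its internal allocations out of mem.bytes. */
    }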
688 | | |
689 | 6.05k | MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) { |
690 | 6.05k | void* workspace = ZSTD_customMalloc(size, customMem); |
691 | 6.05k | DEBUGLOG(4, "cwksp: creating new workspace with %zu bytes", size);
692 | 6.05k | RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!"); |
693 | 6.05k | ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc); |
694 | 6.05k | return 0; |
695 | 6.05k | }
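
The dynamic path pairs ZSTD_cwksp_create() with ZSTD_cwksp_free() below. A sketch of the full lifecycle using ZSTD_defaultCMem (the stdlib-backed allocator from zstd_internal.h); the 1 MiB size is illustrative:

    #include "zstd_cwksp.h"

    /* Sketch: one malloc for the whole workspace, one free at the end. */
    static size_t example_dynamic_lifecycle(void)
    {
        ZSTD_cwksp ws;
        size_t const ret = ZSTD_cwksp_create(&ws, 1 << 20, ZSTD_defaultCMem);
        if (ZSTD_isError(ret)) return ret;      /* memory_allocation error code */
        /* ... reserve objects, tables, and buffers from ws ... */
        ZSTD_cwksp_free(&ws, ZSTD_defaultCMem); /* frees the buffer, zeroes ws */
        return 0;
    }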
696 | | |
697 | 12.1k | MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) { |
698 | 12.1k | void *ptr = ws->workspace; |
699 | 12.1k | DEBUGLOG(4, "cwksp: freeing workspace"); |
700 | | #if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE) |
701 | | if (ptr != NULL && customMem.customFree != NULL) { |
702 | | __msan_unpoison(ptr, ZSTD_cwksp_sizeof(ws)); |
703 | | } |
704 | | #endif |
705 | 12.1k | ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp)); |
706 | 12.1k | ZSTD_customFree(ptr, customMem); |
707 | 12.1k | }
708 | | |
709 | | /** |
710 | | * Moves the management of a workspace from one cwksp to another. The src cwksp |
711 | | * is left in an invalid state (src must be re-init()'ed before it's used again). |
712 | | */ |
713 | 0 | MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) { |
714 | 0 | *dst = *src; |
715 | 0 | ZSTD_memset(src, 0, sizeof(ZSTD_cwksp)); |
716 | 0 | }
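
Because ZSTD_cwksp_move() transfers ownership by struct copy and zeroes the source, exactly one of the two cwksps ends up responsible for the underlying buffer. A short sketch (hypothetical helper; cMem must match the allocator used at creation):

    #include "zstd_cwksp.h"

    static void example_handoff(ZSTD_cwksp* dst, ZSTD_cwksp* src, ZSTD_customMem cMem)
    {
        ZSTD_cwksp_move(dst, src);   /* dst owns the buffer; src is zeroed */
        ZSTD_cwksp_free(src, cMem);  /* safe no-op: src->workspace is now NULL */
        /* Only dst should be freed (once) from here on. */
    }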
717 | | |
718 | 30.9k | MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) { |
719 | 30.9k | return ws->allocFailed; |
720 | 30.9k | }
721 | | |
722 | | /*-************************************* |
723 | | * Functions Checking Free Space |
724 | | ***************************************/ |
725 | | |
726 | | /* ZSTD_cwksp_estimated_space_within_bounds() :
727 | | * Returns whether the estimated space needed for a wksp is within an acceptable limit of the
728 | | * actual amount of space used. |
729 | | */ |
730 | 0 | MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) { |
731 | 0 | /* We have alignment space between objects and tables, and between tables and buffers, so the
732 | 0 | * estimate can exceed actual usage by up to twice the alignment bytes */
733 | 0 | return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) && |
734 | 0 | ZSTD_cwksp_used(ws) <= estimatedSpace; |
735 | 0 | }
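
This check is used by debug validation to ensure size estimates over-approximate real usage by no more than the alignment slack. An equivalent formulation as a sketch (ignoring unsigned wraparound when the estimate is smaller than the slack; helper name hypothetical):

    #include <assert.h>
    #include "zstd_cwksp.h"

    /* Sketch: used <= estimate <= used + slack. */
    static void example_validate_estimate(const ZSTD_cwksp* ws, size_t estimatedSpace)
    {
        assert(ZSTD_cwksp_used(ws) <= estimatedSpace);
        assert(estimatedSpace <= ZSTD_cwksp_used(ws) + ZSTD_cwksp_slack_space_required());
        assert(ZSTD_cwksp_estimated_space_within_bounds(ws, estimatedSpace));
    }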
736 | | |
737 | | |
738 | 30.9k | MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) { |
739 | 30.9k | return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd); |
740 | 30.9k | }
741 | | |
742 | 30.9k | MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) { |
743 | 30.9k | return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace; |
744 | 30.9k | }
745 | | |
746 | 30.9k | MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) { |
747 | 30.9k | return ZSTD_cwksp_check_available( |
748 | 30.9k | ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR); |
749 | 30.9k | }
750 | | |
751 | 15.4k | MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) { |
752 | 15.4k | return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace) |
753 | 15.4k | && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION; |
754 | 15.4k | }
755 | | |
756 | | MEM_STATIC void ZSTD_cwksp_bump_oversized_duration( |
757 | 15.4k | ZSTD_cwksp* ws, size_t additionalNeededSpace) { |
758 | 15.4k | if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) { |
759 | 15.4k | ws->workspaceOversizedDuration++; |
760 | 15.4k | } else { |
761 | 0 | ws->workspaceOversizedDuration = 0; |
762 | 0 | } |
763 | 15.4k | }
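
Together, these predicates implement the workspace shrink heuristic: a reused context only gives memory back once it has stayed over-sized (by more than ZSTD_WORKSPACETOOLARGE_FACTOR) for more than ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive uses, which avoids reallocation thrash on alternating job sizes. A sketch of how a caller such as ZSTD_resetCCtx_internal might drive it (loosely modeled on zstd_compress.c; helper name hypothetical):

    #include "zstd_cwksp.h"

    /* Sketch: decide whether ws must be re-allocated before the next job. */
    static int example_needs_new_workspace(ZSTD_cwksp* ws, size_t neededSpace)
    {
        ZSTD_cwksp_bump_oversized_duration(ws, neededSpace); /* track the streak */
        if (!ZSTD_cwksp_check_available(ws, neededSpace))
            return 1;  /* too small: must grow */
        if (ZSTD_cwksp_check_wasteful(ws, neededSpace))
            return 1;  /* persistently too large: shrink to reclaim memory */
        return 0;      /* current workspace is fine */
    }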
764 | | |
765 | | #endif /* ZSTD_CWKSP_H */ |