/src/zstd/lib/compress/zstdmt_compress.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
3 | | * All rights reserved. |
4 | | * |
5 | | * This source code is licensed under both the BSD-style license (found in the |
6 | | * LICENSE file in the root directory of this source tree) and the GPLv2 (found |
7 | | * in the COPYING file in the root directory of this source tree). |
8 | | * You may select, at your option, one of the above-listed licenses. |
9 | | */ |
10 | | |
11 | | |
12 | | /* ====== Compiler specifics ====== */ |
13 | | #if defined(_MSC_VER) |
14 | | # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ |
15 | | #endif |
16 | | |
17 | | |
18 | | /* ====== Dependencies ====== */ |
19 | | #include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ |
20 | | #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */ |
21 | | #include "../common/mem.h" /* MEM_STATIC */ |
22 | | #include "../common/pool.h" /* threadpool */ |
23 | | #include "../common/threading.h" /* mutex */ |
24 | | #include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ |
25 | | #include "zstd_ldm.h" |
26 | | #include "zstdmt_compress.h" |
27 | | |
28 | | /* Guards code to support resizing the SeqPool. |
29 | | * We will want to resize the SeqPool to save memory in the future. |
30 | | * Until then, comment the code out since it is unused. |
31 | | */ |
32 | | #define ZSTD_RESIZE_SEQPOOL 0 |
33 | | |
34 | | /* ====== Debug ====== */ |
35 | | #if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \ |
36 | | && !defined(_MSC_VER) \ |
37 | | && !defined(__MINGW32__) |
38 | | |
39 | | # include <stdio.h> |
40 | | # include <unistd.h> |
41 | | # include <sys/times.h> |
42 | | |
43 | | # define DEBUG_PRINTHEX(l,p,n) \ |
44 | | do { \ |
45 | | unsigned debug_u; \ |
46 | | for (debug_u=0; debug_u<(n); debug_u++) \ |
47 | | RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ |
48 | | RAWLOG(l, " \n"); \ |
49 | | } while (0) |
50 | | |
51 | | static unsigned long long GetCurrentClockTimeMicroseconds(void) |
52 | | { |
53 | | static clock_t _ticksPerSecond = 0; |
54 | | if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK); |
55 | | |
56 | | { struct tms junk; clock_t newTicks = (clock_t) times(&junk); |
57 | | return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); |
58 | | } } |
59 | | |
60 | | #define MUTEX_WAIT_TIME_DLEVEL 6 |
61 | | #define ZSTD_PTHREAD_MUTEX_LOCK(mutex) \ |
62 | | do { \ |
63 | | if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ |
64 | | unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ |
65 | | ZSTD_pthread_mutex_lock(mutex); \ |
66 | | { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ |
67 | | unsigned long long const elapsedTime = (afterTime-beforeTime); \ |
68 | | if (elapsedTime > 1000) { \ |
69 | | /* or whatever threshold you like; I'm using 1 millisecond here */ \ |
70 | | DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, \ |
71 | | "Thread took %llu microseconds to acquire mutex %s \n", \ |
72 | | elapsedTime, #mutex); \ |
73 | | } } \ |
74 | | } else { \ |
75 | | ZSTD_pthread_mutex_lock(mutex); \ |
76 | | } \ |
77 | | } while (0) |
78 | | |
79 | | #else |
80 | | |
81 | 0 | # define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) |
82 | | # define DEBUG_PRINTHEX(l,p,n) do { } while (0) |
83 | | |
84 | | #endif |
85 | | |
86 | | |
87 | | /* ===== Buffer Pool ===== */ |
88 | | /* a single Buffer Pool can be invoked from multiple threads in parallel */ |
89 | | |
90 | | typedef struct buffer_s { |
91 | | void* start; |
92 | | size_t capacity; |
93 | | } Buffer; |
94 | | |
95 | | static const Buffer g_nullBuffer = { NULL, 0 }; |
96 | | |
97 | | typedef struct ZSTDMT_bufferPool_s { |
98 | | ZSTD_pthread_mutex_t poolMutex; |
99 | | size_t bufferSize; |
100 | | unsigned totalBuffers; |
101 | | unsigned nbBuffers; |
102 | | ZSTD_customMem cMem; |
103 | | Buffer* buffers; |
104 | | } ZSTDMT_bufferPool; |
105 | | |
106 | | static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) |
107 | 0 | { |
108 | 0 | DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); |
109 | 0 | if (!bufPool) return; /* compatibility with free on NULL */ |
110 | 0 | if (bufPool->buffers) { |
111 | 0 | unsigned u; |
112 | 0 | for (u=0; u<bufPool->totalBuffers; u++) { |
113 | 0 | DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->buffers[u].start); |
114 | 0 | ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem); |
115 | 0 | } |
116 | 0 | ZSTD_customFree(bufPool->buffers, bufPool->cMem); |
117 | 0 | } |
118 | 0 | ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); |
119 | 0 | ZSTD_customFree(bufPool, bufPool->cMem); |
120 | 0 | } |
121 | | |
122 | | static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem) |
123 | 0 | { |
124 | 0 | ZSTDMT_bufferPool* const bufPool = |
125 | 0 | (ZSTDMT_bufferPool*)ZSTD_customCalloc(sizeof(ZSTDMT_bufferPool), cMem); |
126 | 0 | if (bufPool==NULL) return NULL; |
127 | 0 | if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) { |
128 | 0 | ZSTD_customFree(bufPool, cMem); |
129 | 0 | return NULL; |
130 | 0 | } |
131 | 0 | bufPool->buffers = (Buffer*)ZSTD_customCalloc(maxNbBuffers * sizeof(Buffer), cMem); |
132 | 0 | if (bufPool->buffers==NULL) { |
133 | 0 | ZSTDMT_freeBufferPool(bufPool); |
134 | 0 | return NULL; |
135 | 0 | } |
136 | 0 | bufPool->bufferSize = 64 KB; |
137 | 0 | bufPool->totalBuffers = maxNbBuffers; |
138 | 0 | bufPool->nbBuffers = 0; |
139 | 0 | bufPool->cMem = cMem; |
140 | 0 | return bufPool; |
141 | 0 | } |
142 | | |
143 | | /* only works at initialization, not during compression */ |
144 | | static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool) |
145 | 0 | { |
146 | 0 | size_t const poolSize = sizeof(*bufPool); |
147 | 0 | size_t const arraySize = bufPool->totalBuffers * sizeof(Buffer); |
148 | 0 | unsigned u; |
149 | 0 | size_t totalBufferSize = 0; |
150 | 0 | ZSTD_pthread_mutex_lock(&bufPool->poolMutex); |
151 | 0 | for (u=0; u<bufPool->totalBuffers; u++) |
152 | 0 | totalBufferSize += bufPool->buffers[u].capacity; |
153 | 0 | ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); |
154 | |
155 | 0 | return poolSize + arraySize + totalBufferSize; |
156 | 0 | } |
157 | | |
158 | | /* ZSTDMT_setBufferSize() : |
159 | | * all future buffers provided by this buffer pool will have _at least_ this size |
160 | | * note : it's better for all buffers to have the same size, |
161 | | * as they become freely interchangeable, reducing malloc/free usage and memory fragmentation */ |
162 | | static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize) |
163 | 0 | { |
164 | 0 | ZSTD_pthread_mutex_lock(&bufPool->poolMutex); |
165 | 0 | DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize); |
166 | 0 | bufPool->bufferSize = bSize; |
167 | 0 | ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); |
168 | 0 | } |
169 | | |
170 | | |
171 | | static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers) |
172 | 0 | { |
173 | 0 | if (srcBufPool==NULL) return NULL; |
174 | 0 | if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */ |
175 | 0 | return srcBufPool; |
176 | | /* need a larger buffer pool */ |
177 | 0 | { ZSTD_customMem const cMem = srcBufPool->cMem; |
178 | 0 | size_t const bSize = srcBufPool->bufferSize; /* forward parameters */ |
179 | 0 | ZSTDMT_bufferPool* newBufPool; |
180 | 0 | ZSTDMT_freeBufferPool(srcBufPool); |
181 | 0 | newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem); |
182 | 0 | if (newBufPool==NULL) return newBufPool; |
183 | 0 | ZSTDMT_setBufferSize(newBufPool, bSize); |
184 | 0 | return newBufPool; |
185 | 0 | } |
186 | 0 | } |
187 | | |
188 | | /** ZSTDMT_getBuffer() : |
189 | | * assumption : bufPool must be valid |
190 | | * @return : a buffer, with start pointer and size |
191 | | * note: allocation may fail, in this case, start==NULL and size==0 */ |
192 | | static Buffer ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) |
193 | 0 | { |
194 | 0 | size_t const bSize = bufPool->bufferSize; |
195 | 0 | DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize); |
196 | 0 | ZSTD_pthread_mutex_lock(&bufPool->poolMutex); |
197 | 0 | if (bufPool->nbBuffers) { /* try to use an existing buffer */ |
198 | 0 | Buffer const buf = bufPool->buffers[--(bufPool->nbBuffers)]; |
199 | 0 | size_t const availBufferSize = buf.capacity; |
200 | 0 | bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer; |
201 | 0 | if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) { |
202 | | /* large enough, but not too much */ |
203 | 0 | DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u", |
204 | 0 | bufPool->nbBuffers, (U32)buf.capacity); |
205 | 0 | ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); |
206 | 0 | return buf; |
207 | 0 | } |
208 | | /* size conditions not respected : scratch this buffer, create new one */ |
209 | 0 | DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing"); |
210 | 0 | ZSTD_customFree(buf.start, bufPool->cMem); |
211 | 0 | } |
212 | 0 | ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); |
213 | | /* create new buffer */ |
214 | 0 | DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer"); |
215 | 0 | { Buffer buffer; |
216 | 0 | void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); |
217 | 0 | buffer.start = start; /* note : start can be NULL if malloc fails ! */ |
218 | 0 | buffer.capacity = (start==NULL) ? 0 : bSize; |
219 | 0 | if (start==NULL) { |
220 | 0 | DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!"); |
221 | 0 | } else { |
222 | 0 | DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize); |
223 | 0 | } |
224 | 0 | return buffer; |
225 | 0 | } |
226 | 0 | } |
227 | | |
228 | | #if ZSTD_RESIZE_SEQPOOL |
229 | | /** ZSTDMT_resizeBuffer() : |
230 | | * assumption : bufPool must be valid |
231 | | * @return : a buffer that is at least the buffer pool buffer size. |
232 | | * If a reallocation happens, the data in the input buffer is copied. |
233 | | */ |
234 | | static Buffer ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, Buffer buffer) |
235 | | { |
236 | | size_t const bSize = bufPool->bufferSize; |
237 | | if (buffer.capacity < bSize) { |
238 | | void* const start = ZSTD_customMalloc(bSize, bufPool->cMem); |
239 | | Buffer newBuffer; |
240 | | newBuffer.start = start; |
241 | | newBuffer.capacity = start == NULL ? 0 : bSize; |
242 | | if (start != NULL) { |
243 | | assert(newBuffer.capacity >= buffer.capacity); |
244 | | ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity); |
245 | | DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize); |
246 | | return newBuffer; |
247 | | } |
248 | | DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!"); |
249 | | } |
250 | | return buffer; |
251 | | } |
252 | | #endif |
253 | | |
254 | | /* store buffer for later re-use, up to pool capacity */ |
255 | | static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, Buffer buf) |
256 | 0 | { |
257 | 0 | DEBUGLOG(5, "ZSTDMT_releaseBuffer"); |
258 | 0 | if (buf.start == NULL) return; /* compatible with release on NULL */ |
259 | 0 | ZSTD_pthread_mutex_lock(&bufPool->poolMutex); |
260 | 0 | if (bufPool->nbBuffers < bufPool->totalBuffers) { |
261 | 0 | bufPool->buffers[bufPool->nbBuffers++] = buf; /* stored for later use */ |
262 | 0 | DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u", |
263 | 0 | (U32)buf.capacity, (U32)(bufPool->nbBuffers-1)); |
264 | 0 | ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); |
265 | 0 | return; |
266 | 0 | } |
267 | 0 | ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); |
268 | | /* Reached bufferPool capacity (note: should not happen) */ |
269 | 0 | DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing "); |
270 | 0 | ZSTD_customFree(buf.start, bufPool->cMem); |
271 | 0 | } |
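Editorial note: the sketch below is not part of the original file. It illustrates the borrow/return discipline of the buffer pool above, using only functions defined earlier in this file; buffers of identical size stay freely reusable. It assumes ZSTD_defaultCMem (from zstd_internal.h) is visible in this translation unit, and exampleBufferPoolUsage is a hypothetical name.

static size_t exampleBufferPoolUsage(void)
{
    /* hypothetical sketch : create a small pool, size it, borrow and return one buffer */
    ZSTDMT_bufferPool* const pool = ZSTDMT_createBufferPool(4 /* maxNbBuffers */, ZSTD_defaultCMem);
    size_t written = 0;
    if (pool == NULL) return 0;
    ZSTDMT_setBufferSize(pool, 128 KB);        /* all future buffers will be at least this large */
    {   Buffer buf = ZSTDMT_getBuffer(pool);   /* may return {NULL, 0} on allocation failure */
        if (buf.start != NULL) {
            ZSTD_memset(buf.start, 0, buf.capacity);
            written = buf.capacity;
        }
        ZSTDMT_releaseBuffer(pool, buf);       /* stored for later reuse, up to pool capacity */
    }
    ZSTDMT_freeBufferPool(pool);
    return written;
}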
272 | | |
273 | | /* We need 2 output buffers per worker since each dstBuff must be flushed after it is released. |
274 | | * The 3 additional buffers are as follows: |
275 | | * 1 buffer for input loading |
276 | | * 1 buffer for "next input" when submitting current one |
277 | | * 1 buffer stuck in queue */ |
278 | 0 | #define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3) |
279 | | |
280 | | /* After a worker releases its rawSeqStore, it is immediately ready for reuse. |
281 | | * So we only need one seq buffer per worker. */ |
282 | 0 | #define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers) |
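Editorial note: a small worked example (not part of the original file) of the buffer accounting defined by the two macros above; exampleBufferBudget is a hypothetical name.

static void exampleBufferBudget(void)
{
    /* with 4 workers : 2 dst buffers per worker + 3 shared input-side buffers = 11 */
    assert(BUF_POOL_MAX_NB_BUFFERS(4) == 11);
    /* one rawSeqStore buffer per worker suffices, since it is reusable as soon as it is released */
    assert(SEQ_POOL_MAX_NB_BUFFERS(4) == 4);
}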
283 | | |
284 | | /* ===== Seq Pool Wrapper ====== */ |
285 | | |
286 | | typedef ZSTDMT_bufferPool ZSTDMT_seqPool; |
287 | | |
288 | | static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool) |
289 | 0 | { |
290 | 0 | return ZSTDMT_sizeof_bufferPool(seqPool); |
291 | 0 | } |
292 | | |
293 | | static RawSeqStore_t bufferToSeq(Buffer buffer) |
294 | 0 | { |
295 | 0 | RawSeqStore_t seq = kNullRawSeqStore; |
296 | 0 | seq.seq = (rawSeq*)buffer.start; |
297 | 0 | seq.capacity = buffer.capacity / sizeof(rawSeq); |
298 | 0 | return seq; |
299 | 0 | } |
300 | | |
301 | | static Buffer seqToBuffer(RawSeqStore_t seq) |
302 | 0 | { |
303 | 0 | Buffer buffer; |
304 | 0 | buffer.start = seq.seq; |
305 | 0 | buffer.capacity = seq.capacity * sizeof(rawSeq); |
306 | 0 | return buffer; |
307 | 0 | } |
308 | | |
309 | | static RawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) |
310 | 0 | { |
311 | 0 | if (seqPool->bufferSize == 0) { |
312 | 0 | return kNullRawSeqStore; |
313 | 0 | } |
314 | 0 | return bufferToSeq(ZSTDMT_getBuffer(seqPool)); |
315 | 0 | } |
316 | | |
317 | | #if ZSTD_RESIZE_SEQPOOL |
318 | | static RawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq) |
319 | | { |
320 | | return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq))); |
321 | | } |
322 | | #endif |
323 | | |
324 | | static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq) |
325 | 0 | { |
326 | 0 | ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); |
327 | 0 | } |
328 | | |
329 | | static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq) |
330 | 0 | { |
331 | 0 | ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq)); |
332 | 0 | } |
333 | | |
334 | | static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem) |
335 | 0 | { |
336 | 0 | ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem); |
337 | 0 | if (seqPool == NULL) return NULL; |
338 | 0 | ZSTDMT_setNbSeq(seqPool, 0); |
339 | 0 | return seqPool; |
340 | 0 | } |
341 | | |
342 | | static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool) |
343 | 0 | { |
344 | 0 | ZSTDMT_freeBufferPool(seqPool); |
345 | 0 | } |
346 | | |
347 | | static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers) |
348 | 0 | { |
349 | 0 | return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers)); |
350 | 0 | } |
351 | | |
352 | | |
353 | | /* ===== CCtx Pool ===== */ |
354 | | /* a single CCtx Pool can be invoked from multiple threads in parallel */ |
355 | | |
356 | | typedef struct { |
357 | | ZSTD_pthread_mutex_t poolMutex; |
358 | | int totalCCtx; |
359 | | int availCCtx; |
360 | | ZSTD_customMem cMem; |
361 | | ZSTD_CCtx** cctxs; |
362 | | } ZSTDMT_CCtxPool; |
363 | | |
364 | | /* note : all CCtxs borrowed from the pool must be returned to the pool _before_ freeing the pool */ |
365 | | static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) |
366 | 0 | { |
367 | 0 | if (!pool) return; |
368 | 0 | ZSTD_pthread_mutex_destroy(&pool->poolMutex); |
369 | 0 | if (pool->cctxs) { |
370 | 0 | int cid; |
371 | 0 | for (cid=0; cid<pool->totalCCtx; cid++) |
372 | 0 | ZSTD_freeCCtx(pool->cctxs[cid]); /* free compatible with NULL */ |
373 | 0 | ZSTD_customFree(pool->cctxs, pool->cMem); |
374 | 0 | } |
375 | 0 | ZSTD_customFree(pool, pool->cMem); |
376 | 0 | } |
377 | | |
378 | | /* ZSTDMT_createCCtxPool() : |
379 | | * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ |
380 | | static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, |
381 | | ZSTD_customMem cMem) |
382 | 0 | { |
383 | 0 | ZSTDMT_CCtxPool* const cctxPool = |
384 | 0 | (ZSTDMT_CCtxPool*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtxPool), cMem); |
385 | 0 | assert(nbWorkers > 0); |
386 | 0 | if (!cctxPool) return NULL; |
387 | 0 | if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) { |
388 | 0 | ZSTD_customFree(cctxPool, cMem); |
389 | 0 | return NULL; |
390 | 0 | } |
391 | 0 | cctxPool->totalCCtx = nbWorkers; |
392 | 0 | cctxPool->cctxs = (ZSTD_CCtx**)ZSTD_customCalloc(nbWorkers * sizeof(ZSTD_CCtx*), cMem); |
393 | 0 | if (!cctxPool->cctxs) { |
394 | 0 | ZSTDMT_freeCCtxPool(cctxPool); |
395 | 0 | return NULL; |
396 | 0 | } |
397 | 0 | cctxPool->cMem = cMem; |
398 | 0 | cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem); |
399 | 0 | if (!cctxPool->cctxs[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } |
400 | 0 | cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ |
401 | 0 | DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers); |
402 | 0 | return cctxPool; |
403 | 0 | } |
404 | | |
405 | | static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool, |
406 | | int nbWorkers) |
407 | 0 | { |
408 | 0 | if (srcPool==NULL) return NULL; |
409 | 0 | if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */ |
410 | | /* need a larger cctx pool */ |
411 | 0 | { ZSTD_customMem const cMem = srcPool->cMem; |
412 | 0 | ZSTDMT_freeCCtxPool(srcPool); |
413 | 0 | return ZSTDMT_createCCtxPool(nbWorkers, cMem); |
414 | 0 | } |
415 | 0 | } |
416 | | |
417 | | /* only works during initialization phase, not during compression */ |
418 | | static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) |
419 | 0 | { |
420 | 0 | ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); |
421 | 0 | { unsigned const nbWorkers = cctxPool->totalCCtx; |
422 | 0 | size_t const poolSize = sizeof(*cctxPool); |
423 | 0 | size_t const arraySize = cctxPool->totalCCtx * sizeof(ZSTD_CCtx*); |
424 | 0 | size_t totalCCtxSize = 0; |
425 | 0 | unsigned u; |
426 | 0 | for (u=0; u<nbWorkers; u++) { |
427 | 0 | totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]); |
428 | 0 | } |
429 | 0 | ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); |
430 | 0 | assert(nbWorkers > 0); |
431 | 0 | return poolSize + arraySize + totalCCtxSize; |
432 | 0 | } |
433 | 0 | } |
434 | | |
435 | | static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) |
436 | 0 | { |
437 | 0 | DEBUGLOG(5, "ZSTDMT_getCCtx"); |
438 | 0 | ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); |
439 | 0 | if (cctxPool->availCCtx) { |
440 | 0 | cctxPool->availCCtx--; |
441 | 0 | { ZSTD_CCtx* const cctx = cctxPool->cctxs[cctxPool->availCCtx]; |
442 | 0 | ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); |
443 | 0 | return cctx; |
444 | 0 | } } |
445 | 0 | ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); |
446 | 0 | DEBUGLOG(5, "create one more CCtx"); |
447 | 0 | return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */ |
448 | 0 | } |
449 | | |
450 | | static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) |
451 | 0 | { |
452 | 0 | if (cctx==NULL) return; /* compatibility with release on NULL */ |
453 | 0 | ZSTD_pthread_mutex_lock(&pool->poolMutex); |
454 | 0 | if (pool->availCCtx < pool->totalCCtx) |
455 | 0 | pool->cctxs[pool->availCCtx++] = cctx; |
456 | 0 | else { |
457 | | /* pool overflow : should not happen, since totalCCtx==nbWorkers */ |
458 | 0 | DEBUGLOG(4, "CCtx pool overflow : free cctx"); |
459 | 0 | ZSTD_freeCCtx(cctx); |
460 | 0 | } |
461 | 0 | ZSTD_pthread_mutex_unlock(&pool->poolMutex); |
462 | 0 | } |
463 | | |
464 | | /* ==== Serial State ==== */ |
465 | | |
466 | | typedef struct { |
467 | | void const* start; |
468 | | size_t size; |
469 | | } Range; |
470 | | |
471 | | typedef struct { |
472 | | /* All variables in the struct are protected by mutex. */ |
473 | | ZSTD_pthread_mutex_t mutex; |
474 | | ZSTD_pthread_cond_t cond; |
475 | | ZSTD_CCtx_params params; |
476 | | ldmState_t ldmState; |
477 | | XXH64_state_t xxhState; |
478 | | unsigned nextJobID; |
479 | | /* Protects ldmWindow. |
480 | | * Must be acquired after the main mutex when acquiring both. |
481 | | */ |
482 | | ZSTD_pthread_mutex_t ldmWindowMutex; |
483 | | ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */ |
484 | | ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */ |
485 | | } SerialState; |
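Editorial note: the sketch below is not part of the original file. It illustrates the lock ordering documented above: when both locks are needed, serialState->mutex is taken first, then ldmWindowMutex, as ZSTDMT_serialState_ensureFinished() does further down. exampleReadLdmWindow is a hypothetical helper.

static ZSTD_window_t exampleReadLdmWindow(SerialState* serialState)
{
    ZSTD_window_t snapshot;
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);           /* main mutex first */
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);  /* then the ldmWindow mutex */
    snapshot = serialState->ldmWindow;
    ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
    ZSTD_pthread_mutex_unlock(&serialState->mutex);
    return snapshot;
}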
486 | | |
487 | | static int |
488 | | ZSTDMT_serialState_reset(SerialState* serialState, |
489 | | ZSTDMT_seqPool* seqPool, |
490 | | ZSTD_CCtx_params params, |
491 | | size_t jobSize, |
492 | | const void* dict, size_t const dictSize, |
493 | | ZSTD_dictContentType_e dictContentType) |
494 | 0 | { |
495 | | /* Adjust parameters */ |
496 | 0 | if (params.ldmParams.enableLdm == ZSTD_ps_enable) { |
497 | 0 | DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10); |
498 | 0 | ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams); |
499 | 0 | assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); |
500 | 0 | assert(params.ldmParams.hashRateLog < 32); |
501 | 0 | } else { |
502 | 0 | ZSTD_memset(¶ms.ldmParams, 0, sizeof(params.ldmParams)); |
503 | 0 | } |
504 | 0 | serialState->nextJobID = 0; |
505 | 0 | if (params.fParams.checksumFlag) |
506 | 0 | XXH64_reset(&serialState->xxhState, 0); |
507 | 0 | if (params.ldmParams.enableLdm == ZSTD_ps_enable) { |
508 | 0 | ZSTD_customMem cMem = params.customMem; |
509 | 0 | unsigned const hashLog = params.ldmParams.hashLog; |
510 | 0 | size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t); |
511 | 0 | unsigned const bucketLog = |
512 | 0 | params.ldmParams.hashLog - params.ldmParams.bucketSizeLog; |
513 | 0 | unsigned const prevBucketLog = |
514 | 0 | serialState->params.ldmParams.hashLog - |
515 | 0 | serialState->params.ldmParams.bucketSizeLog; |
516 | 0 | size_t const numBuckets = (size_t)1 << bucketLog; |
517 | | /* Size the seq pool tables */ |
518 | 0 | ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize)); |
519 | | /* Reset the window */ |
520 | 0 | ZSTD_window_init(&serialState->ldmState.window); |
521 | | /* Resize tables and output space if necessary. */ |
522 | 0 | if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) { |
523 | 0 | ZSTD_customFree(serialState->ldmState.hashTable, cMem); |
524 | 0 | serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem); |
525 | 0 | } |
526 | 0 | if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) { |
527 | 0 | ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); |
528 | 0 | serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem); |
529 | 0 | } |
530 | 0 | if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets) |
531 | 0 | return 1; |
532 | | /* Zero the tables */ |
533 | 0 | ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize); |
534 | 0 | ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets); |
535 | | |
536 | | /* Update window state and fill hash table with dict */ |
537 | 0 | serialState->ldmState.loadedDictEnd = 0; |
538 | 0 | if (dictSize > 0) { |
539 | 0 | if (dictContentType == ZSTD_dct_rawContent) { |
540 | 0 | BYTE const* const dictEnd = (const BYTE*)dict + dictSize; |
541 | 0 | ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0); |
542 | 0 | ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, ¶ms.ldmParams); |
543 | 0 | serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base); |
544 | 0 | } else { |
545 | | /* don't even load anything */ |
546 | 0 | } |
547 | 0 | } |
548 | | |
549 | | /* Initialize serialState's copy of ldmWindow. */ |
550 | 0 | serialState->ldmWindow = serialState->ldmState.window; |
551 | 0 | } |
552 | | |
553 | 0 | serialState->params = params; |
554 | 0 | serialState->params.jobSize = (U32)jobSize; |
555 | 0 | return 0; |
556 | 0 | } |
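Editorial note: a worked example (not part of the original file) of the LDM table sizing performed in ZSTDMT_serialState_reset() above; exampleLdmTableSizes is a hypothetical name.

static void exampleLdmTableSizes(void)
{
    /* e.g. hashLog = 20, bucketSizeLog = 3 :
     * hash table    : (1 << 20) entries of sizeof(ldmEntry_t) bytes each
     * bucketOffsets : (1 << (20-3)) = 128 K single-byte entries */
    unsigned const hashLog = 20, bucketSizeLog = 3;
    size_t const hashSize   = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
    size_t const numBuckets = (size_t)1 << (hashLog - bucketSizeLog);
    assert(numBuckets == (size_t)128 * 1024);
    (void)hashSize;
}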
557 | | |
558 | | static int ZSTDMT_serialState_init(SerialState* serialState) |
559 | 0 | { |
560 | 0 | int initError = 0; |
561 | 0 | ZSTD_memset(serialState, 0, sizeof(*serialState)); |
562 | 0 | initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL); |
563 | 0 | initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL); |
564 | 0 | initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL); |
565 | 0 | initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL); |
566 | 0 | return initError; |
567 | 0 | } |
568 | | |
569 | | static void ZSTDMT_serialState_free(SerialState* serialState) |
570 | 0 | { |
571 | 0 | ZSTD_customMem cMem = serialState->params.customMem; |
572 | 0 | ZSTD_pthread_mutex_destroy(&serialState->mutex); |
573 | 0 | ZSTD_pthread_cond_destroy(&serialState->cond); |
574 | 0 | ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex); |
575 | 0 | ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond); |
576 | 0 | ZSTD_customFree(serialState->ldmState.hashTable, cMem); |
577 | 0 | ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem); |
578 | 0 | } |
579 | | |
580 | | static void |
581 | | ZSTDMT_serialState_genSequences(SerialState* serialState, |
582 | | RawSeqStore_t* seqStore, |
583 | | Range src, unsigned jobID) |
584 | 0 | { |
585 | | /* Wait for our turn */ |
586 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); |
587 | 0 | while (serialState->nextJobID < jobID) { |
588 | 0 | DEBUGLOG(5, "wait for serialState->cond"); |
589 | 0 | ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex); |
590 | 0 | } |
591 | | /* A future job may error and skip our job */ |
592 | 0 | if (serialState->nextJobID == jobID) { |
593 | | /* It is now our turn, do any processing necessary */ |
594 | 0 | if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) { |
595 | 0 | size_t error; |
596 | 0 | DEBUGLOG(6, "ZSTDMT_serialState_genSequences: LDM update"); |
597 | 0 | assert(seqStore->seq != NULL && seqStore->pos == 0 && |
598 | 0 | seqStore->size == 0 && seqStore->capacity > 0); |
599 | 0 | assert(src.size <= serialState->params.jobSize); |
600 | 0 | ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0); |
601 | 0 | error = ZSTD_ldm_generateSequences( |
602 | 0 | &serialState->ldmState, seqStore, |
603 | 0 | &serialState->params.ldmParams, src.start, src.size); |
604 | | /* We provide a large enough buffer to never fail. */ |
605 | 0 | assert(!ZSTD_isError(error)); (void)error; |
606 | | /* Update ldmWindow to match the ldmState.window and signal the main |
607 | | * thread if it is waiting for a buffer. |
608 | | */ |
609 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); |
610 | 0 | serialState->ldmWindow = serialState->ldmState.window; |
611 | 0 | ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); |
612 | 0 | ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); |
613 | 0 | } |
614 | 0 | if (serialState->params.fParams.checksumFlag && src.size > 0) |
615 | 0 | XXH64_update(&serialState->xxhState, src.start, src.size); |
616 | 0 | } |
617 | | /* Now it is the next job's turn */ |
618 | 0 | serialState->nextJobID++; |
619 | 0 | ZSTD_pthread_cond_broadcast(&serialState->cond); |
620 | 0 | ZSTD_pthread_mutex_unlock(&serialState->mutex); |
621 | 0 | } |
622 | | |
623 | | static void |
624 | | ZSTDMT_serialState_applySequences(const SerialState* serialState, /* just for an assert() check */ |
625 | | ZSTD_CCtx* jobCCtx, |
626 | | const RawSeqStore_t* seqStore) |
627 | 0 | { |
628 | 0 | if (seqStore->size > 0) { |
629 | 0 | DEBUGLOG(5, "ZSTDMT_serialState_applySequences: uploading %u external sequences", (unsigned)seqStore->size); |
630 | 0 | assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable); (void)serialState; |
631 | 0 | assert(jobCCtx); |
632 | 0 | ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size); |
633 | 0 | } |
634 | 0 | } |
635 | | |
636 | | static void ZSTDMT_serialState_ensureFinished(SerialState* serialState, |
637 | | unsigned jobID, size_t cSize) |
638 | 0 | { |
639 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); |
640 | 0 | if (serialState->nextJobID <= jobID) { |
641 | 0 | assert(ZSTD_isError(cSize)); (void)cSize; |
642 | 0 | DEBUGLOG(5, "Skipping past job %u because of error", jobID); |
643 | 0 | serialState->nextJobID = jobID + 1; |
644 | 0 | ZSTD_pthread_cond_broadcast(&serialState->cond); |
645 | |
646 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); |
647 | 0 | ZSTD_window_clear(&serialState->ldmWindow); |
648 | 0 | ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); |
649 | 0 | ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); |
650 | 0 | } |
651 | 0 | ZSTD_pthread_mutex_unlock(&serialState->mutex); |
652 | |
653 | 0 | } |
654 | | |
655 | | |
656 | | /* ------------------------------------------ */ |
657 | | /* ===== Worker thread ===== */ |
658 | | /* ------------------------------------------ */ |
659 | | |
660 | | static const Range kNullRange = { NULL, 0 }; |
661 | | |
662 | | typedef struct { |
663 | | size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ |
664 | | size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ |
665 | | ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ |
666 | | ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ |
667 | | ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ |
668 | | ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ |
669 | | ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ |
670 | | SerialState* serial; /* Thread-safe - used by mtctx and (all) workers */ |
671 | | Buffer dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ |
672 | | Range prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ |
673 | | Range src; /* set by mtctx, then read by worker & mtctx => no barrier */ |
674 | | unsigned jobID; /* set by mtctx, then read by worker => no barrier */ |
675 | | unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ |
676 | | unsigned lastJob; /* set by mtctx, then read by worker => no barrier */ |
677 | | ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ |
678 | | const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ |
679 | | unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ |
680 | | size_t dstFlushed; /* used only by mtctx */ |
681 | | unsigned frameChecksumNeeded; /* used only by mtctx */ |
682 | | } ZSTDMT_jobDescription; |
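Editorial note: per the field comments above, the SHARED fields (consumed, cSize) must be read under job_mutex from the mtctx side. The sketch below (not part of the original file) shows that pattern, which ZSTDMT_getFrameProgression() uses further down; exampleJobProgress is a hypothetical helper.

static size_t exampleJobProgress(ZSTDMT_jobDescription* job)
{
    size_t consumed;
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
    consumed = job->consumed;    /* written by the worker thread, read safely here */
    ZSTD_pthread_mutex_unlock(&job->job_mutex);
    return consumed;
}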
683 | | |
684 | | #define JOB_ERROR(e) \ |
685 | 0 | do { \ |
686 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ |
687 | 0 | job->cSize = e; \ |
688 | 0 | ZSTD_pthread_mutex_unlock(&job->job_mutex); \ |
689 | 0 | goto _endJob; \ |
690 | 0 | } while (0) |
691 | | |
692 | | /* ZSTDMT_compressionJob() is a POOL_function type */ |
693 | | static void ZSTDMT_compressionJob(void* jobDescription) |
694 | 0 | { |
695 | 0 | ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; |
696 | 0 | ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! copy it, modify the copy */ |
697 | 0 | ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool); |
698 | 0 | RawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); |
699 | 0 | Buffer dstBuff = job->dstBuff; |
700 | 0 | size_t lastCBlockSize = 0; |
701 | |
702 | 0 | DEBUGLOG(5, "ZSTDMT_compressionJob: job %u", job->jobID); |
703 | | /* resources */ |
704 | 0 | if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation)); |
705 | 0 | if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */ |
706 | 0 | dstBuff = ZSTDMT_getBuffer(job->bufPool); |
707 | 0 | if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation)); |
708 | 0 | job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */ |
709 | 0 | } |
710 | 0 | if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL) |
711 | 0 | JOB_ERROR(ERROR(memory_allocation)); |
712 | | |
713 | | /* Don't compute the checksum for chunks, since we compute it externally, |
714 | | * but write it in the header. |
715 | | */ |
716 | 0 | if (job->jobID != 0) jobParams.fParams.checksumFlag = 0; |
717 | | /* Don't run LDM for the chunks, since we handle it externally */ |
718 | 0 | jobParams.ldmParams.enableLdm = ZSTD_ps_disable; |
719 | | /* Correct nbWorkers to 0. */ |
720 | 0 | jobParams.nbWorkers = 0; |
721 | | |
722 | | |
723 | | /* init */ |
724 | | |
725 | | /* Perform serial step as early as possible */ |
726 | 0 | ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID); |
727 | |
728 | 0 | if (job->cdict) { |
729 | 0 | size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize); |
730 | 0 | assert(job->firstJob); /* only allowed for first job */ |
731 | 0 | if (ZSTD_isError(initError)) JOB_ERROR(initError); |
732 | 0 | } else { |
733 | 0 | U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size; |
734 | 0 | { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob); |
735 | 0 | if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError); |
736 | 0 | } |
737 | 0 | if (!job->firstJob) { |
738 | 0 | size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0); |
739 | 0 | if (ZSTD_isError(err)) JOB_ERROR(err); |
740 | 0 | } |
741 | 0 | DEBUGLOG(6, "ZSTDMT_compressionJob: job %u: loading prefix of size %zu", job->jobID, job->prefix.size); |
742 | 0 | { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, |
743 | 0 | job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, |
744 | 0 | ZSTD_dtlm_fast, |
745 | 0 | NULL, /*cdict*/ |
746 | 0 | &jobParams, pledgedSrcSize); |
747 | 0 | if (ZSTD_isError(initError)) JOB_ERROR(initError); |
748 | 0 | } } |
749 | | |
750 | | /* External Sequences can only be applied after CCtx initialization */ |
751 | 0 | ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore); |
752 | |
753 | 0 | if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */ |
754 | 0 | size_t const hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); |
755 | 0 | if (ZSTD_isError(hSize)) JOB_ERROR(hSize); |
756 | 0 | DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize); |
757 | 0 | ZSTD_invalidateRepCodes(cctx); |
758 | 0 | } |
759 | | |
760 | | /* compress the entire job by smaller chunks, for better granularity */ |
761 | 0 | { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX; |
762 | 0 | int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize); |
763 | 0 | const BYTE* ip = (const BYTE*) job->src.start; |
764 | 0 | BYTE* const ostart = (BYTE*)dstBuff.start; |
765 | 0 | BYTE* op = ostart; |
766 | 0 | BYTE* oend = op + dstBuff.capacity; |
767 | 0 | int chunkNb; |
768 | 0 | if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */ |
769 | 0 | DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks); |
770 | 0 | assert(job->cSize == 0); |
771 | 0 | for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { |
772 | 0 | size_t const cSize = ZSTD_compressContinue_public(cctx, op, oend-op, ip, chunkSize); |
773 | 0 | if (ZSTD_isError(cSize)) JOB_ERROR(cSize); |
774 | 0 | ip += chunkSize; |
775 | 0 | op += cSize; assert(op < oend); |
776 | | /* stats */ |
777 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); |
778 | 0 | job->cSize += cSize; |
779 | 0 | job->consumed = chunkSize * chunkNb; |
780 | 0 | DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)", |
781 | 0 | (U32)cSize, (U32)job->cSize); |
782 | 0 | ZSTD_pthread_cond_signal(&job->job_cond); /* warns some more data is ready to be flushed */ |
783 | 0 | ZSTD_pthread_mutex_unlock(&job->job_mutex); |
784 | 0 | } |
785 | | /* last block */ |
786 | 0 | assert(chunkSize > 0); |
787 | 0 | assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */ |
788 | 0 | if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) { |
789 | 0 | size_t const lastBlockSize1 = job->src.size & (chunkSize-1); |
790 | 0 | size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1; |
791 | 0 | size_t const cSize = (job->lastJob) ? |
792 | 0 | ZSTD_compressEnd_public(cctx, op, oend-op, ip, lastBlockSize) : |
793 | 0 | ZSTD_compressContinue_public(cctx, op, oend-op, ip, lastBlockSize); |
794 | 0 | if (ZSTD_isError(cSize)) JOB_ERROR(cSize); |
795 | 0 | lastCBlockSize = cSize; |
796 | 0 | } } |
797 | 0 | if (!job->firstJob) { |
798 | | /* Double check that we don't have an ext-dict, because then our |
799 | | * repcode invalidation doesn't work. |
800 | | */ |
801 | 0 | assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); |
802 | 0 | } |
803 | 0 | ZSTD_CCtx_trace(cctx, 0); |
804 | |
805 | 0 | _endJob: |
806 | 0 | ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); |
807 | 0 | if (job->prefix.size > 0) |
808 | 0 | DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start); |
809 | 0 | DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start); |
810 | | /* release resources */ |
811 | 0 | ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); |
812 | 0 | ZSTDMT_releaseCCtx(job->cctxPool, cctx); |
813 | | /* report */ |
814 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); |
815 | 0 | if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0); |
816 | 0 | job->cSize += lastCBlockSize; |
817 | 0 | job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */ |
818 | 0 | ZSTD_pthread_cond_signal(&job->job_cond); |
819 | 0 | ZSTD_pthread_mutex_unlock(&job->job_mutex); |
820 | 0 | } |
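Editorial note: a worked example (not part of the original file) of the last-chunk arithmetic used in ZSTDMT_compressionJob() above; exampleLastChunkSize is a hypothetical helper.

static size_t exampleLastChunkSize(size_t srcSize)
{
    /* chunkSize is a power of 2, so (srcSize & (chunkSize-1)) == srcSize % chunkSize.
     * When the remainder is 0 but srcSize >= chunkSize, the last chunk is a full chunkSize. */
    size_t const chunkSize = 4 * (size_t)ZSTD_BLOCKSIZE_MAX;   /* 512 KB */
    size_t const remainder = srcSize & (chunkSize - 1);
    return ((remainder == 0) && (srcSize >= chunkSize)) ? chunkSize : remainder;
}
/* e.g. exampleLastChunkSize(100) == 100,
 *      exampleLastChunkSize(512 KB) == 512 KB,
 *      exampleLastChunkSize(512 KB + 100) == 100 */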
821 | | |
822 | | |
823 | | /* ------------------------------------------ */ |
824 | | /* ===== Multi-threaded compression ===== */ |
825 | | /* ------------------------------------------ */ |
826 | | |
827 | | typedef struct { |
828 | | Range prefix; /* read-only non-owned prefix buffer */ |
829 | | Buffer buffer; |
830 | | size_t filled; |
831 | | } InBuff_t; |
832 | | |
833 | | typedef struct { |
834 | | BYTE* buffer; /* The round input buffer. All jobs get references |
835 | | * to pieces of the buffer. ZSTDMT_tryGetInputRange() |
836 | | * handles handing out job input buffers, and makes |
837 | | * sure it doesn't overlap with any pieces still in use. |
838 | | */ |
839 | | size_t capacity; /* The capacity of buffer. */ |
840 | | size_t pos; /* The position of the current inBuff in the round |
841 | | * buffer. Updated past the end of the inBuff once |
842 | | * the inBuff is sent to the worker thread. |
843 | | * pos <= capacity. |
844 | | */ |
845 | | } RoundBuff_t; |
846 | | |
847 | | static const RoundBuff_t kNullRoundBuff = {NULL, 0, 0}; |
848 | | |
849 | 0 | #define RSYNC_LENGTH 32 |
850 | | /* Don't create chunks smaller than the zstd block size. |
851 | | * This stops us from regressing compression ratio too much, |
852 | | * and ensures our output fits in ZSTD_compressBound(). |
853 | | * |
854 | | * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then |
855 | | * ZSTD_COMPRESSBOUND() will need to be updated. |
856 | | */ |
857 | 0 | #define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX |
858 | 0 | #define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG) |
859 | | |
860 | | typedef struct { |
861 | | U64 hash; |
862 | | U64 hitMask; |
863 | | U64 primePower; |
864 | | } RSyncState_t; |
865 | | |
866 | | struct ZSTDMT_CCtx_s { |
867 | | POOL_ctx* factory; |
868 | | ZSTDMT_jobDescription* jobs; |
869 | | ZSTDMT_bufferPool* bufPool; |
870 | | ZSTDMT_CCtxPool* cctxPool; |
871 | | ZSTDMT_seqPool* seqPool; |
872 | | ZSTD_CCtx_params params; |
873 | | size_t targetSectionSize; |
874 | | size_t targetPrefixSize; |
875 | | int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */ |
876 | | InBuff_t inBuff; |
877 | | RoundBuff_t roundBuff; |
878 | | SerialState serial; |
879 | | RSyncState_t rsync; |
880 | | unsigned jobIDMask; |
881 | | unsigned doneJobID; |
882 | | unsigned nextJobID; |
883 | | unsigned frameEnded; |
884 | | unsigned allJobsCompleted; |
885 | | unsigned long long frameContentSize; |
886 | | unsigned long long consumed; |
887 | | unsigned long long produced; |
888 | | ZSTD_customMem cMem; |
889 | | ZSTD_CDict* cdictLocal; |
890 | | const ZSTD_CDict* cdict; |
891 | | unsigned providedFactory: 1; |
892 | | }; |
893 | | |
894 | | static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem) |
895 | 0 | { |
896 | 0 | U32 jobNb; |
897 | 0 | if (jobTable == NULL) return; |
898 | 0 | for (jobNb=0; jobNb<nbJobs; jobNb++) { |
899 | 0 | ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex); |
900 | 0 | ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond); |
901 | 0 | } |
902 | 0 | ZSTD_customFree(jobTable, cMem); |
903 | 0 | } |
904 | | |
905 | | /* ZSTDMT_createJobsTable() : |
906 | | * allocate and init a job table. |
907 | | * updates *nbJobsPtr to the next power of 2, which is used as the table size */ |
908 | | static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem) |
909 | 0 | { |
910 | 0 | U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1; |
911 | 0 | U32 const nbJobs = 1 << nbJobsLog2; |
912 | 0 | U32 jobNb; |
913 | 0 | ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*) |
914 | 0 | ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem); |
915 | 0 | int initError = 0; |
916 | 0 | if (jobTable==NULL) return NULL; |
917 | 0 | *nbJobsPtr = nbJobs; |
918 | 0 | for (jobNb=0; jobNb<nbJobs; jobNb++) { |
919 | 0 | initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL); |
920 | 0 | initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL); |
921 | 0 | } |
922 | 0 | if (initError != 0) { |
923 | 0 | ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem); |
924 | 0 | return NULL; |
925 | 0 | } |
926 | 0 | return jobTable; |
927 | 0 | } |
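Editorial note: a worked example (not part of the original file) of the power-of-2 rounding above. ZSTD_highbit32(n) returns the index of the highest set bit, so the result is the smallest power of 2 strictly greater than the request, and (result - 1) serves as a wrap-around mask for job IDs; exampleJobsTableSize is a hypothetical helper.

static U32 exampleJobsTableSize(U32 requestedNbJobs)   /* requires requestedNbJobs >= 1 */
{
    U32 const nbJobsLog2 = ZSTD_highbit32(requestedNbJobs) + 1;
    return (U32)1 << nbJobsLog2;
}
/* e.g. exampleJobsTableSize(6) == 8, exampleJobsTableSize(8) == 16 */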
928 | | |
929 | 0 | static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) { |
930 | 0 | U32 nbJobs = nbWorkers + 2; |
931 | 0 | if (nbJobs > mtctx->jobIDMask+1) { /* need more job capacity */ |
932 | 0 | ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); |
933 | 0 | mtctx->jobIDMask = 0; |
934 | 0 | mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem); |
935 | 0 | if (mtctx->jobs==NULL) return ERROR(memory_allocation); |
936 | 0 | assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */ |
937 | 0 | mtctx->jobIDMask = nbJobs - 1; |
938 | 0 | } |
939 | 0 | return 0; |
940 | 0 | } |
941 | | |
942 | | |
943 | | /* ZSTDMT_CCtxParam_setNbWorkers(): |
944 | | * Internal use only */ |
945 | | static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers) |
946 | 0 | { |
947 | 0 | return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers); |
948 | 0 | } |
949 | | |
950 | | MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool) |
951 | 0 | { |
952 | 0 | ZSTDMT_CCtx* mtctx; |
953 | 0 | U32 nbJobs = nbWorkers + 2; |
954 | 0 | int initError; |
955 | 0 | DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers); |
956 | |
957 | 0 | if (nbWorkers < 1) return NULL; |
958 | 0 | nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX); |
959 | 0 | if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL)) |
960 | | /* invalid custom allocator */ |
961 | 0 | return NULL; |
962 | | |
963 | 0 | mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem); |
964 | 0 | if (!mtctx) return NULL; |
965 | 0 | ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); |
966 | 0 | mtctx->cMem = cMem; |
967 | 0 | mtctx->allJobsCompleted = 1; |
968 | 0 | if (pool != NULL) { |
969 | 0 | mtctx->factory = pool; |
970 | 0 | mtctx->providedFactory = 1; |
971 | 0 | } |
972 | 0 | else { |
973 | 0 | mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); |
974 | 0 | mtctx->providedFactory = 0; |
975 | 0 | } |
976 | 0 | mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); |
977 | 0 | assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */ |
978 | 0 | mtctx->jobIDMask = nbJobs - 1; |
979 | 0 | mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem); |
980 | 0 | mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem); |
981 | 0 | mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); |
982 | 0 | initError = ZSTDMT_serialState_init(&mtctx->serial); |
983 | 0 | mtctx->roundBuff = kNullRoundBuff; |
984 | 0 | if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) { |
985 | 0 | ZSTDMT_freeCCtx(mtctx); |
986 | 0 | return NULL; |
987 | 0 | } |
988 | 0 | DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers); |
989 | 0 | return mtctx; |
990 | 0 | } |
991 | | |
992 | | ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool) |
993 | 0 | { |
994 | 0 | #ifdef ZSTD_MULTITHREAD |
995 | 0 | return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool); |
996 | | #else |
997 | | (void)nbWorkers; |
998 | | (void)cMem; |
999 | | (void)pool; |
1000 | | return NULL; |
1001 | | #endif |
1002 | 0 | } |
1003 | | |
1004 | | |
1005 | | /* ZSTDMT_releaseAllJobResources() : |
1006 | | * note : ensure all workers are killed first ! */ |
1007 | | static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) |
1008 | 0 | { |
1009 | 0 | unsigned jobID; |
1010 | 0 | DEBUGLOG(3, "ZSTDMT_releaseAllJobResources"); |
1011 | 0 | for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { |
1012 | | /* Copy the mutex/cond out */ |
1013 | 0 | ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex; |
1014 | 0 | ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond; |
1015 | |
1016 | 0 | DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); |
1017 | 0 | ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); |
1018 | | |
1019 | | /* Clear the job description, but keep the mutex/cond */ |
1020 | 0 | ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID])); |
1021 | 0 | mtctx->jobs[jobID].job_mutex = mutex; |
1022 | 0 | mtctx->jobs[jobID].job_cond = cond; |
1023 | 0 | } |
1024 | 0 | mtctx->inBuff.buffer = g_nullBuffer; |
1025 | 0 | mtctx->inBuff.filled = 0; |
1026 | 0 | mtctx->allJobsCompleted = 1; |
1027 | 0 | } |
1028 | | |
1029 | | static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx) |
1030 | 0 | { |
1031 | 0 | DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted"); |
1032 | 0 | while (mtctx->doneJobID < mtctx->nextJobID) { |
1033 | 0 | unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask; |
1034 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex); |
1035 | 0 | while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { |
1036 | 0 | DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */ |
1037 | 0 | ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex); |
1038 | 0 | } |
1039 | 0 | ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex); |
1040 | 0 | mtctx->doneJobID++; |
1041 | 0 | } |
1042 | 0 | } |
1043 | | |
1044 | | size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) |
1045 | 0 | { |
1046 | 0 | if (mtctx==NULL) return 0; /* compatible with free on NULL */ |
1047 | 0 | if (!mtctx->providedFactory) |
1048 | 0 | POOL_free(mtctx->factory); /* stop and free worker threads */ |
1049 | 0 | ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */ |
1050 | 0 | ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); |
1051 | 0 | ZSTDMT_freeBufferPool(mtctx->bufPool); |
1052 | 0 | ZSTDMT_freeCCtxPool(mtctx->cctxPool); |
1053 | 0 | ZSTDMT_freeSeqPool(mtctx->seqPool); |
1054 | 0 | ZSTDMT_serialState_free(&mtctx->serial); |
1055 | 0 | ZSTD_freeCDict(mtctx->cdictLocal); |
1056 | 0 | if (mtctx->roundBuff.buffer) |
1057 | 0 | ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); |
1058 | 0 | ZSTD_customFree(mtctx, mtctx->cMem); |
1059 | 0 | return 0; |
1060 | 0 | } |
1061 | | |
1062 | | size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx) |
1063 | 0 | { |
1064 | 0 | if (mtctx == NULL) return 0; /* supports sizeof NULL */ |
1065 | 0 | return sizeof(*mtctx) |
1066 | 0 | + POOL_sizeof(mtctx->factory) |
1067 | 0 | + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) |
1068 | 0 | + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription) |
1069 | 0 | + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) |
1070 | 0 | + ZSTDMT_sizeof_seqPool(mtctx->seqPool) |
1071 | 0 | + ZSTD_sizeof_CDict(mtctx->cdictLocal) |
1072 | 0 | + mtctx->roundBuff.capacity; |
1073 | 0 | } |
1074 | | |
1075 | | |
1076 | | /* ZSTDMT_resize() : |
1077 | | * @return : error code if fails, 0 on success */ |
1078 | | static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers) |
1079 | 0 | { |
1080 | 0 | if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation); |
1081 | 0 | FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , ""); |
1082 | 0 | mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers)); |
1083 | 0 | if (mtctx->bufPool == NULL) return ERROR(memory_allocation); |
1084 | 0 | mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers); |
1085 | 0 | if (mtctx->cctxPool == NULL) return ERROR(memory_allocation); |
1086 | 0 | mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers); |
1087 | 0 | if (mtctx->seqPool == NULL) return ERROR(memory_allocation); |
1088 | 0 | ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); |
1089 | 0 | return 0; |
1090 | 0 | } |
1091 | | |
1092 | | |
1093 | | /*! ZSTDMT_updateCParams_whileCompressing() : |
1094 | | * Updates a selected set of compression parameters, remaining compatible with currently active frame. |
1095 | | * New parameters will be applied to next compression job. */ |
1096 | | void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams) |
1097 | 0 | { |
1098 | 0 | U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */ |
1099 | 0 | int const compressionLevel = cctxParams->compressionLevel; |
1100 | 0 | DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)", |
1101 | 0 | compressionLevel); |
1102 | 0 | mtctx->params.compressionLevel = compressionLevel; |
1103 | 0 | { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); |
1104 | 0 | cParams.windowLog = saved_wlog; |
1105 | 0 | mtctx->params.cParams = cParams; |
1106 | 0 | } |
1107 | 0 | } |
1108 | | |
1109 | | /* ZSTDMT_getFrameProgression(): |
1110 | | * tells how much data has been consumed (input) and produced (output) for current frame. |
1111 | | * able to count progression inside worker threads. |
1112 | | * Note : mutex will be acquired during statistics collection inside workers. */ |
1113 | | ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx) |
1114 | 0 | { |
1115 | 0 | ZSTD_frameProgression fps; |
1116 | 0 | DEBUGLOG(5, "ZSTDMT_getFrameProgression"); |
1117 | 0 | fps.ingested = mtctx->consumed + mtctx->inBuff.filled; |
1118 | 0 | fps.consumed = mtctx->consumed; |
1119 | 0 | fps.produced = fps.flushed = mtctx->produced; |
1120 | 0 | fps.currentJobID = mtctx->nextJobID; |
1121 | 0 | fps.nbActiveWorkers = 0; |
1122 | 0 | { unsigned jobNb; |
1123 | 0 | unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1); |
1124 | 0 | DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)", |
1125 | 0 | mtctx->doneJobID, lastJobNb, mtctx->jobReady); |
1126 | 0 | for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) { |
1127 | 0 | unsigned const wJobID = jobNb & mtctx->jobIDMask; |
1128 | 0 | ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; |
1129 | 0 | ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); |
1130 | 0 | { size_t const cResult = jobPtr->cSize; |
1131 | 0 | size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; |
1132 | 0 | size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; |
1133 | 0 | assert(flushed <= produced); |
1134 | 0 | fps.ingested += jobPtr->src.size; |
1135 | 0 | fps.consumed += jobPtr->consumed; |
1136 | 0 | fps.produced += produced; |
1137 | 0 | fps.flushed += flushed; |
1138 | 0 | fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size); |
1139 | 0 | } |
1140 | 0 | ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); |
1141 | 0 | } |
1142 | 0 | } |
1143 | 0 | return fps; |
1144 | 0 | } |
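Editorial note: a usage sketch (not part of the original file) of how a caller might poll progression during a long multi-threaded compression; exampleReportProgress is a hypothetical helper. Applications would normally go through the public ZSTD_getFrameProgression() wrapper declared in zstd.h's experimental section rather than call this internal function directly.

static void exampleReportProgress(ZSTDMT_CCtx* mtctx)
{
    ZSTD_frameProgression const fp = ZSTDMT_getFrameProgression(mtctx);
    DEBUGLOG(2, "ingested=%u KB consumed=%u KB produced=%u KB flushed=%u KB activeWorkers=%u",
             (unsigned)(fp.ingested >> 10), (unsigned)(fp.consumed >> 10),
             (unsigned)(fp.produced >> 10), (unsigned)(fp.flushed >> 10),
             fp.nbActiveWorkers);
}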
1145 | | |
1146 | | |
1147 | | size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx) |
1148 | 0 | { |
1149 | 0 | size_t toFlush; |
1150 | 0 | unsigned const jobID = mtctx->doneJobID; |
1151 | 0 | assert(jobID <= mtctx->nextJobID); |
1152 | 0 | if (jobID == mtctx->nextJobID) return 0; /* no active job => nothing to flush */ |
1153 | | |
1154 | | /* look into oldest non-fully-flushed job */ |
1155 | 0 | { unsigned const wJobID = jobID & mtctx->jobIDMask; |
1156 | 0 | ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID]; |
1157 | 0 | ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); |
1158 | 0 | { size_t const cResult = jobPtr->cSize; |
1159 | 0 | size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; |
1160 | 0 | size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; |
1161 | 0 | assert(flushed <= produced); |
1162 | 0 | assert(jobPtr->consumed <= jobPtr->src.size); |
1163 | 0 | toFlush = produced - flushed; |
1164 | | /* if toFlush==0, nothing is available to flush. |
1165 | | * However, jobID is expected to still be active: |
1166 | | * if jobID was already completed and fully flushed, |
1167 | | * ZSTDMT_flushProduced() should have already moved onto next job. |
1168 | | * Therefore, some input has not yet been consumed. */ |
1169 | 0 | if (toFlush==0) { |
1170 | 0 | assert(jobPtr->consumed < jobPtr->src.size); |
1171 | 0 | } |
1172 | 0 | } |
1173 | 0 | ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); |
1174 | 0 | } |
1175 | |
1176 | 0 | return toFlush; |
1177 | 0 | } |
1178 | | |
1179 | | |
1180 | | /* ------------------------------------------ */ |
1181 | | /* ===== Multi-threaded compression ===== */ |
1182 | | /* ------------------------------------------ */ |
1183 | | |
1184 | | static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params) |
1185 | 0 | { |
1186 | 0 | unsigned jobLog; |
1187 | 0 | if (params->ldmParams.enableLdm == ZSTD_ps_enable) { |
1188 | | /* In Long Range Mode, the windowLog is typically oversized, |
1189 | | * in which case it's preferable to determine the jobSize |
1190 | | * based on cycleLog instead. */ |
1191 | 0 | jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3); |
1192 | 0 | } else { |
1193 | 0 | jobLog = MAX(20, params->cParams.windowLog + 2); |
1194 | 0 | } |
1195 | 0 | return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX); |
1196 | 0 | } |
1197 | | |
1198 | | static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) |
1199 | 0 | { |
1200 | 0 | switch(strat) |
1201 | 0 | { |
1202 | 0 | case ZSTD_btultra2: |
1203 | 0 | return 9; |
1204 | 0 | case ZSTD_btultra: |
1205 | 0 | case ZSTD_btopt: |
1206 | 0 | return 8; |
1207 | 0 | case ZSTD_btlazy2: |
1208 | 0 | case ZSTD_lazy2: |
1209 | 0 | return 7; |
1210 | 0 | case ZSTD_lazy: |
1211 | 0 | case ZSTD_greedy: |
1212 | 0 | case ZSTD_dfast: |
1213 | 0 | case ZSTD_fast: |
1214 | 0 | default:; |
1215 | 0 | } |
1216 | 0 | return 6; |
1217 | 0 | } |
1218 | | |
1219 | | static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) |
1220 | 0 | { |
1221 | 0 | assert(0 <= ovlog && ovlog <= 9); |
1222 | 0 | if (ovlog == 0) return ZSTDMT_overlapLog_default(strat); |
1223 | 0 | return ovlog; |
1224 | 0 | } |
1225 | | |
1226 | | static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params) |
1227 | 0 | { |
1228 | 0 | int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy); |
1229 | 0 | int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog); |
1230 | 0 | assert(0 <= overlapRLog && overlapRLog <= 8); |
1231 | 0 | if (params->ldmParams.enableLdm == ZSTD_ps_enable) { |
1232 | | /* In Long Range Mode, the windowLog is typically oversized. |
1233 | | * In which case, it's preferable to determine the jobSize |
1234 | | * based on chainLog instead. |
1235 | | * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */ |
1236 | 0 | ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) |
1237 | 0 | - overlapRLog; |
1238 | 0 | } |
1239 | 0 | assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX); |
1240 | 0 | DEBUGLOG(4, "overlapLog : %i", params->overlapLog); |
1241 | 0 | DEBUGLOG(4, "overlap size : %i", 1 << ovLog); |
1242 | 0 | return (ovLog==0) ? 0 : (size_t)1 << ovLog; |
1243 | 0 | } |
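/* --- Editorial sketch (not part of the measured source) ---------------------
 * A minimal, self-contained illustration of the job-size / overlap arithmetic
 * implemented by ZSTDMT_computeTargetJobLog() and ZSTDMT_computeOverlapSize()
 * above, assuming LDM is disabled. Names prefixed with demo_ are hypothetical;
 * the real code additionally caps jobLog at ZSTDMT_JOBLOG_MAX.
 */
#include <stddef.h>
#include <stdio.h>

static unsigned demo_max_u(unsigned a, unsigned b) { return a > b ? a : b; }

int demo_overlapArithmetic(void)
{
    unsigned const windowLog   = 23;                               /* e.g. an 8 MB window */
    unsigned const overlapLog  = 8;                                /* default for btopt/btultra */
    unsigned const jobLog      = demo_max_u(20, windowLog + 2);    /* non-LDM path above */
    unsigned const overlapRLog = 9 - overlapLog;                   /* "reverse" log, 0..8 */
    size_t const jobSize     = (size_t)1 << jobLog;
    size_t const overlapSize = (overlapRLog >= 8) ? 0 : (size_t)1 << (windowLog - overlapRLog);
    printf("jobSize = %zu MB, overlap = %zu MB\n", jobSize >> 20, overlapSize >> 20);  /* 32 MB, 4 MB */
    return 0;
}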
1244 | | |
1245 | | /* ====================================== */ |
1246 | | /* ======= Streaming API ======= */ |
1247 | | /* ====================================== */ |
1248 | | |
1249 | | size_t ZSTDMT_initCStream_internal( |
1250 | | ZSTDMT_CCtx* mtctx, |
1251 | | const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, |
1252 | | const ZSTD_CDict* cdict, ZSTD_CCtx_params params, |
1253 | | unsigned long long pledgedSrcSize) |
1254 | 0 | { |
1255 | 0 | DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)", |
1256 | 0 | (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx); |
1257 | | |
1258 | | /* params supposed partially validated at this point */
1259 | 0 | assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); |
1260 | 0 | assert(!((dict) && (cdict))); /* either dict or cdict, not both */ |
1261 | | |
1262 | | /* init */ |
1263 | 0 | if (params.nbWorkers != mtctx->params.nbWorkers) |
1264 | 0 | FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, (unsigned)params.nbWorkers) , ""); |
1265 | | |
1266 | 0 | if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN; |
1267 | 0 | if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX; |
1268 | |
|
1269 | 0 | if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */ |
1270 | 0 | ZSTDMT_waitForAllJobsCompleted(mtctx); |
1271 | 0 | ZSTDMT_releaseAllJobResources(mtctx); |
1272 | 0 | mtctx->allJobsCompleted = 1; |
1273 | 0 | } |
1274 | |
|
1275 | 0 | mtctx->params = params; |
1276 | 0 | mtctx->frameContentSize = pledgedSrcSize; |
1277 | 0 | ZSTD_freeCDict(mtctx->cdictLocal); |
1278 | 0 | if (dict) { |
1279 | 0 | mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, |
1280 | 0 | ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */ |
1281 | 0 | params.cParams, mtctx->cMem); |
1282 | 0 | mtctx->cdict = mtctx->cdictLocal; |
1283 | 0 | if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation); |
1284 | 0 | } else { |
1285 | 0 | mtctx->cdictLocal = NULL; |
1286 | 0 | mtctx->cdict = cdict; |
1287 | 0 | } |
1288 | | |
1289 | 0 | mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(¶ms); |
1290 | 0 | DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10)); |
1291 | 0 | mtctx->targetSectionSize = params.jobSize; |
1292 | 0 | if (mtctx->targetSectionSize == 0) { |
1293 | 0 | mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(¶ms); |
1294 | 0 | } |
1295 | 0 | assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX); |
1296 | |
|
1297 | 0 | if (params.rsyncable) { |
1298 | | /* Aim for the targetSectionSize as the average job size. */
1299 | 0 | U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10); |
1300 | 0 | U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10); |
1301 | | /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our |
1302 | | * expected job size is at least 4x larger. */ |
1303 | 0 | assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2); |
1304 | 0 | DEBUGLOG(4, "rsyncLog = %u", rsyncBits); |
1305 | 0 | mtctx->rsync.hash = 0; |
1306 | 0 | mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1; |
1307 | 0 | mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH); |
1308 | 0 | } |
1309 | 0 | if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */ |
1310 | 0 | DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize); |
1311 | 0 | DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10)); |
1312 | 0 | ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); |
1313 | 0 | { |
1314 | | /* If ldm is enabled we need windowSize space. */ |
1315 | 0 | size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0; |
1316 | | /* Two buffers of slack, plus extra space for the overlap |
1317 | | * This is the minimum slack that LDM works with. One extra because |
1318 | | * flush might waste up to targetSectionSize-1 bytes. Another extra |
1319 | | * for the overlap (if > 0), then one to fill which doesn't overlap |
1320 | | * with the LDM window. |
1321 | | */ |
1322 | 0 | size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0); |
1323 | 0 | size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers; |
1324 | | /* Compute the total size, and always have enough slack */ |
1325 | 0 | size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1); |
1326 | 0 | size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers; |
1327 | 0 | size_t const capacity = MAX(windowSize, sectionsSize) + slackSize; |
1328 | 0 | if (mtctx->roundBuff.capacity < capacity) { |
1329 | 0 | if (mtctx->roundBuff.buffer) |
1330 | 0 | ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem); |
1331 | 0 | mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem); |
1332 | 0 | if (mtctx->roundBuff.buffer == NULL) { |
1333 | 0 | mtctx->roundBuff.capacity = 0; |
1334 | 0 | return ERROR(memory_allocation); |
1335 | 0 | } |
1336 | 0 | mtctx->roundBuff.capacity = capacity; |
1337 | 0 | } |
1338 | 0 | } |
1339 | 0 | DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10)); |
1340 | 0 | mtctx->roundBuff.pos = 0; |
1341 | 0 | mtctx->inBuff.buffer = g_nullBuffer; |
1342 | 0 | mtctx->inBuff.filled = 0; |
1343 | 0 | mtctx->inBuff.prefix = kNullRange; |
1344 | 0 | mtctx->doneJobID = 0; |
1345 | 0 | mtctx->nextJobID = 0; |
1346 | 0 | mtctx->frameEnded = 0; |
1347 | 0 | mtctx->allJobsCompleted = 0; |
1348 | 0 | mtctx->consumed = 0; |
1349 | 0 | mtctx->produced = 0; |
1370 | | |
1371 | 0 | if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize, |
1372 | 0 | dict, dictSize, dictContentType)) |
1373 | 0 | return ERROR(memory_allocation); |
1374 | | |
1375 | | |
1376 | 0 | return 0; |
1377 | 0 | } |
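/* --- Editorial sketch (not part of the measured source) ---------------------
 * Illustrates the rsyncable math in ZSTDMT_initCStream_internal() above: with
 * rsyncBits = highbit32(jobSizeKB) + 10, a rolling hash matches hitMask about
 * once every 2^rsyncBits bytes, so the average block size stays close to
 * targetSectionSize. demo_highbit32 is a portable stand-in for ZSTD_highbit32.
 */
#include <stdint.h>

static unsigned demo_highbit32(uint32_t v)   /* position of the highest set bit */
{
    unsigned r = 0;
    while (v >>= 1) r++;
    return r;
}

static uint64_t demo_expectedSyncDistance(size_t targetSectionSize)
{
    uint32_t const jobSizeKB = (uint32_t)(targetSectionSize >> 10);
    unsigned const rsyncBits = demo_highbit32(jobSizeKB) + 10;
    uint64_t const hitMask   = ((uint64_t)1 << rsyncBits) - 1;
    (void)hitMask;   /* a hash h marks a sync point when (h & hitMask) == hitMask */
    return (uint64_t)1 << rsyncBits;   /* expected bytes between sync points (~= targetSectionSize) */
}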
1378 | | |
1379 | | |
1380 | | /* ZSTDMT_writeLastEmptyBlock() |
1381 | | * Write a single empty block with an end-of-frame mark to conclude the frame.
1382 | | * Job must be created from streaming variant. |
1383 | | * This function is always successful if expected conditions are fulfilled. |
1384 | | */ |
1385 | | static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) |
1386 | 0 | { |
1387 | 0 | assert(job->lastJob == 1); |
1388 | 0 | assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */ |
1389 | 0 | assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */ |
1390 | 0 | assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */ |
1391 | 0 | job->dstBuff = ZSTDMT_getBuffer(job->bufPool); |
1392 | 0 | if (job->dstBuff.start == NULL) { |
1393 | 0 | job->cSize = ERROR(memory_allocation); |
1394 | 0 | return; |
1395 | 0 | } |
1396 | 0 | assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */ |
1397 | 0 | job->src = kNullRange; |
1398 | 0 | job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); |
1399 | 0 | assert(!ZSTD_isError(job->cSize)); |
1400 | 0 | assert(job->consumed == 0); |
1401 | 0 | } |
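/* --- Editorial sketch (not part of the measured source) ---------------------
 * What ZSTD_writeLastEmptyBlock() above ultimately emits: a 3-byte block
 * header with the Last_Block bit set, Block_Type = Raw and Block_Size = 0
 * (see RFC 8878, "Blocks"), i.e. the little-endian 24-bit value 1.
 * demo_writeLastEmptyBlock is a hypothetical helper, not a zstd API.
 */
#include <stddef.h>
#include <string.h>

static size_t demo_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
    static const unsigned char lastEmptyRawBlock[3] = { 0x01, 0x00, 0x00 };
    if (dstCapacity < sizeof(lastEmptyRawBlock)) return 0;   /* caller must provide >= 3 bytes */
    memcpy(dst, lastEmptyRawBlock, sizeof(lastEmptyRawBlock));
    return sizeof(lastEmptyRawBlock);
}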
1402 | | |
1403 | | static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp) |
1404 | 0 | { |
1405 | 0 | unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask; |
1406 | 0 | int const endFrame = (endOp == ZSTD_e_end); |
1407 | |
|
1408 | 0 | if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) { |
1409 | 0 | DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full"); |
1410 | 0 | assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask)); |
1411 | 0 | return 0; |
1412 | 0 | } |
1413 | | |
1414 | 0 | if (!mtctx->jobReady) { |
1415 | 0 | BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start; |
1416 | 0 | DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ", |
1417 | 0 | mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size); |
1418 | 0 | mtctx->jobs[jobID].src.start = src; |
1419 | 0 | mtctx->jobs[jobID].src.size = srcSize; |
1420 | 0 | assert(mtctx->inBuff.filled >= srcSize); |
1421 | 0 | mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix; |
1422 | 0 | mtctx->jobs[jobID].consumed = 0; |
1423 | 0 | mtctx->jobs[jobID].cSize = 0; |
1424 | 0 | mtctx->jobs[jobID].params = mtctx->params; |
1425 | 0 | mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL; |
1426 | 0 | mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize; |
1427 | 0 | mtctx->jobs[jobID].dstBuff = g_nullBuffer; |
1428 | 0 | mtctx->jobs[jobID].cctxPool = mtctx->cctxPool; |
1429 | 0 | mtctx->jobs[jobID].bufPool = mtctx->bufPool; |
1430 | 0 | mtctx->jobs[jobID].seqPool = mtctx->seqPool; |
1431 | 0 | mtctx->jobs[jobID].serial = &mtctx->serial; |
1432 | 0 | mtctx->jobs[jobID].jobID = mtctx->nextJobID; |
1433 | 0 | mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0); |
1434 | 0 | mtctx->jobs[jobID].lastJob = endFrame; |
1435 | 0 | mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0); |
1436 | 0 | mtctx->jobs[jobID].dstFlushed = 0; |
1437 | | |
1438 | | /* Update the round buffer pos and clear the input buffer to be reset */ |
1439 | 0 | mtctx->roundBuff.pos += srcSize; |
1440 | 0 | mtctx->inBuff.buffer = g_nullBuffer; |
1441 | 0 | mtctx->inBuff.filled = 0; |
1442 | | /* Set the prefix for next job */ |
1443 | 0 | if (!endFrame) { |
1444 | 0 | size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize); |
1445 | 0 | mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; |
1446 | 0 | mtctx->inBuff.prefix.size = newPrefixSize; |
1447 | 0 | } else { /* endFrame==1 => no need for another input buffer */ |
1448 | 0 | mtctx->inBuff.prefix = kNullRange; |
1449 | 0 | mtctx->frameEnded = endFrame; |
1450 | 0 | if (mtctx->nextJobID == 0) { |
1451 | | /* single job exception : checksum is already calculated directly within worker thread */ |
1452 | 0 | mtctx->params.fParams.checksumFlag = 0; |
1453 | 0 | } } |
1454 | |
|
1455 | 0 | if ( (srcSize == 0) |
1456 | 0 | && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) { |
1457 | 0 | DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame"); |
1458 | 0 | assert(endOp == ZSTD_e_end); /* only possible case : need to end the frame with an empty last block */ |
1459 | 0 | ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID); |
1460 | 0 | mtctx->nextJobID++; |
1461 | 0 | return 0; |
1462 | 0 | } |
1463 | 0 | } |
1464 | | |
1465 | 0 | DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))", |
1466 | 0 | mtctx->nextJobID, |
1467 | 0 | (U32)mtctx->jobs[jobID].src.size, |
1468 | 0 | mtctx->jobs[jobID].lastJob, |
1469 | 0 | mtctx->nextJobID, |
1470 | 0 | jobID); |
1471 | 0 | if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) { |
1472 | 0 | mtctx->nextJobID++; |
1473 | 0 | mtctx->jobReady = 0; |
1474 | 0 | } else { |
1475 | 0 | DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID); |
1476 | 0 | mtctx->jobReady = 1; |
1477 | 0 | } |
1478 | 0 | return 0; |
1479 | 0 | } |
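/* --- Editorial sketch (not part of the measured source) ---------------------
 * The job table is a power-of-two ring: a monotonically increasing job ID maps
 * to a slot with `id & jobIDMask`, and the table is full once
 * nextJobID - doneJobID exceeds jobIDMask, which is exactly the test at the
 * top of ZSTDMT_createCompressionJob(). A toy version with hypothetical names:
 */
#include <assert.h>

typedef struct {
    unsigned nextJobID;    /* next job to create */
    unsigned doneJobID;    /* oldest job not yet fully flushed */
    unsigned jobIDMask;    /* nbJobs - 1, nbJobs being a power of 2 */
} demo_jobRing;

static int demo_ringIsFull(const demo_jobRing* r)
{
    return r->nextJobID > r->doneJobID + r->jobIDMask;
}

static unsigned demo_ringSlot(const demo_jobRing* r, unsigned jobID)
{
    assert((r->jobIDMask & (r->jobIDMask + 1)) == 0);   /* mask must be 2^k - 1 */
    return jobID & r->jobIDMask;
}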
1480 | | |
1481 | | |
1482 | | /*! ZSTDMT_flushProduced() : |
1483 | | * flush whatever data has been produced but not yet flushed in current job. |
1484 | | * move to next job if current one is fully flushed. |
1485 | | * `output` : `pos` will be updated with amount of data flushed . |
1486 | | * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . |
1487 | | * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ |
1488 | | static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end) |
1489 | 0 | { |
1490 | 0 | unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask; |
1491 | 0 | DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)", |
1492 | 0 | blockToFlush, mtctx->doneJobID, mtctx->nextJobID); |
1493 | 0 | assert(output->size >= output->pos); |
1494 | |
|
1495 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); |
1496 | 0 | if ( blockToFlush |
1497 | 0 | && (mtctx->doneJobID < mtctx->nextJobID) ) { |
1498 | 0 | assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); |
1499 | 0 | while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */ |
1500 | 0 | if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) { |
1501 | 0 | DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none", |
1502 | 0 | mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size); |
1503 | 0 | break; |
1504 | 0 | } |
1505 | 0 | DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)", |
1506 | 0 | mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); |
1507 | 0 | ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex); /* block when nothing to flush but some to come */ |
1508 | 0 | } } |
1509 | | |
1510 | | /* try to flush something */ |
1511 | 0 | { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */ |
1512 | 0 | size_t const srcConsumed = mtctx->jobs[wJobID].consumed; /* shared */ |
1513 | 0 | size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */ |
1514 | 0 | ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); |
1515 | 0 | if (ZSTD_isError(cSize)) { |
1516 | 0 | DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s", |
1517 | 0 | mtctx->doneJobID, ZSTD_getErrorName(cSize)); |
1518 | 0 | ZSTDMT_waitForAllJobsCompleted(mtctx); |
1519 | 0 | ZSTDMT_releaseAllJobResources(mtctx); |
1520 | 0 | return cSize; |
1521 | 0 | } |
1522 | | /* add frame checksum if necessary (can only happen once) */ |
1523 | 0 | assert(srcConsumed <= srcSize); |
1524 | 0 | if ( (srcConsumed == srcSize) /* job completed -> worker no longer active */ |
1525 | 0 | && mtctx->jobs[wJobID].frameChecksumNeeded ) { |
1526 | 0 | U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState); |
1527 | 0 | DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum); |
1528 | 0 | MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum); |
1529 | 0 | cSize += 4; |
1530 | 0 | mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */ |
1531 | 0 | mtctx->jobs[wJobID].frameChecksumNeeded = 0; |
1532 | 0 | } |
1533 | |
|
1534 | 0 | if (cSize > 0) { /* compression is ongoing or completed */ |
1535 | 0 | size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos); |
1536 | 0 | DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)", |
1537 | 0 | (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize); |
1538 | 0 | assert(mtctx->doneJobID < mtctx->nextJobID); |
1539 | 0 | assert(cSize >= mtctx->jobs[wJobID].dstFlushed); |
1540 | 0 | assert(mtctx->jobs[wJobID].dstBuff.start != NULL); |
1541 | 0 | if (toFlush > 0) { |
1542 | 0 | ZSTD_memcpy((char*)output->dst + output->pos, |
1543 | 0 | (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, |
1544 | 0 | toFlush); |
1545 | 0 | } |
1546 | 0 | output->pos += toFlush; |
1547 | 0 | mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */ |
1548 | |
|
1549 | 0 | if ( (srcConsumed == srcSize) /* job is completed */ |
1550 | 0 | && (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */ |
1551 | 0 | DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one", |
1552 | 0 | mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); |
1553 | 0 | ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff); |
1554 | 0 | DEBUGLOG(5, "dstBuffer released"); |
1555 | 0 | mtctx->jobs[wJobID].dstBuff = g_nullBuffer; |
1556 | 0 | mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */ |
1557 | 0 | mtctx->consumed += srcSize; |
1558 | 0 | mtctx->produced += cSize; |
1559 | 0 | mtctx->doneJobID++; |
1560 | 0 | } } |
1561 | | |
1562 | | /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */ |
1563 | 0 | if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed); |
1564 | 0 | if (srcSize > srcConsumed) return 1; /* current job not completely compressed */ |
1565 | 0 | } |
1566 | 0 | if (mtctx->doneJobID < mtctx->nextJobID) return 1; /* some more jobs ongoing */ |
1567 | 0 | if (mtctx->jobReady) return 1; /* one job is ready to push, just not yet in the list */ |
1568 | 0 | if (mtctx->inBuff.filled > 0) return 1; /* input is not empty, and still needs to be converted into a job */ |
1569 | 0 | mtctx->allJobsCompleted = mtctx->frameEnded; /* all jobs are entirely flushed => if this one is last one, frame is completed */ |
1570 | 0 | if (end == ZSTD_e_end) return !mtctx->frameEnded; /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */ |
1571 | 0 | return 0; /* internal buffers fully flushed */ |
1572 | 0 | } |
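/* --- Editorial sketch (not part of the measured source) ---------------------
 * The checksum appended in ZSTDMT_flushProduced() above is the low 32 bits of
 * XXH64 (seed 0) over the original input, stored little-endian after the last
 * block. A sketch using the one-shot xxhash entry point bundled with zstd;
 * demo_appendFrameChecksum is a hypothetical helper, not a zstd API.
 */
#include <stddef.h>
#include "xxhash.h"   /* XXH64() */

static void demo_appendFrameChecksum(unsigned char* frameEnd,
                                     const void* src, size_t srcSize)
{
    unsigned const checksum = (unsigned)XXH64(src, srcSize, 0);   /* keep low 32 bits */
    frameEnd[0] = (unsigned char)(checksum      );
    frameEnd[1] = (unsigned char)(checksum >>  8);
    frameEnd[2] = (unsigned char)(checksum >> 16);
    frameEnd[3] = (unsigned char)(checksum >> 24);
}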
1573 | | |
1574 | | /** |
1575 | | * Returns the range of data used by the earliest job that is not yet complete. |
1576 | | * If the data of the first job is broken up into two segments, we cover both |
1577 | | * sections. |
1578 | | */ |
1579 | | static Range ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) |
1580 | 0 | { |
1581 | 0 | unsigned const firstJobID = mtctx->doneJobID; |
1582 | 0 | unsigned const lastJobID = mtctx->nextJobID; |
1583 | 0 | unsigned jobID; |
1584 | | |
1585 | | /* no need to check during first round */ |
1586 | 0 | size_t roundBuffCapacity = mtctx->roundBuff.capacity; |
1587 | 0 | size_t nbJobs1stRoundMin = roundBuffCapacity / mtctx->targetSectionSize; |
1588 | 0 | if (lastJobID < nbJobs1stRoundMin) return kNullRange; |
1589 | | |
1590 | 0 | for (jobID = firstJobID; jobID < lastJobID; ++jobID) { |
1591 | 0 | unsigned const wJobID = jobID & mtctx->jobIDMask; |
1592 | 0 | size_t consumed; |
1593 | |
|
1594 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); |
1595 | 0 | consumed = mtctx->jobs[wJobID].consumed; |
1596 | 0 | ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); |
1597 | |
|
1598 | 0 | if (consumed < mtctx->jobs[wJobID].src.size) { |
1599 | 0 | Range range = mtctx->jobs[wJobID].prefix; |
1600 | 0 | if (range.size == 0) { |
1601 | | /* Empty prefix */ |
1602 | 0 | range = mtctx->jobs[wJobID].src; |
1603 | 0 | } |
1604 | | /* Job source in multiple segments not supported yet */ |
1605 | 0 | assert(range.start <= mtctx->jobs[wJobID].src.start); |
1606 | 0 | return range; |
1607 | 0 | } |
1608 | 0 | } |
1609 | 0 | return kNullRange; |
1610 | 0 | } |
1611 | | |
1612 | | /** |
1613 | | * Returns non-zero iff buffer and range overlap. |
1614 | | */ |
1615 | | static int ZSTDMT_isOverlapped(Buffer buffer, Range range) |
1616 | 0 | { |
1617 | 0 | BYTE const* const bufferStart = (BYTE const*)buffer.start; |
1618 | 0 | BYTE const* const rangeStart = (BYTE const*)range.start; |
1619 | |
|
1620 | 0 | if (rangeStart == NULL || bufferStart == NULL) |
1621 | 0 | return 0; |
1622 | | |
1623 | 0 | { |
1624 | 0 | BYTE const* const bufferEnd = bufferStart + buffer.capacity; |
1625 | 0 | BYTE const* const rangeEnd = rangeStart + range.size; |
1626 | | |
1627 | | /* Empty ranges cannot overlap */ |
1628 | 0 | if (bufferStart == bufferEnd || rangeStart == rangeEnd) |
1629 | 0 | return 0; |
1630 | | |
1631 | 0 | return bufferStart < rangeEnd && rangeStart < bufferEnd; |
1632 | 0 | } |
1633 | 0 | } |
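/* --- Editorial sketch (not part of the measured source) ---------------------
 * ZSTDMT_isOverlapped() above is the standard half-open interval test:
 * [aStart, aEnd) and [bStart, bEnd) intersect iff aStart < bEnd && bStart < aEnd,
 * with NULL or empty ranges never overlapping. A generic pointer-based version:
 */
#include <stddef.h>

static int demo_rangesOverlap(const void* aStart, size_t aSize,
                              const void* bStart, size_t bSize)
{
    const char* const a = (const char*)aStart;
    const char* const b = (const char*)bStart;
    if (a == NULL || b == NULL || aSize == 0 || bSize == 0) return 0;
    return (a < b + bSize) && (b < a + aSize);
}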
1634 | | |
1635 | | static int ZSTDMT_doesOverlapWindow(Buffer buffer, ZSTD_window_t window) |
1636 | 0 | { |
1637 | 0 | Range extDict; |
1638 | 0 | Range prefix; |
1639 | |
|
1640 | 0 | DEBUGLOG(5, "ZSTDMT_doesOverlapWindow"); |
1641 | 0 | extDict.start = window.dictBase + window.lowLimit; |
1642 | 0 | extDict.size = window.dictLimit - window.lowLimit; |
1643 | |
|
1644 | 0 | prefix.start = window.base + window.dictLimit; |
1645 | 0 | prefix.size = window.nextSrc - (window.base + window.dictLimit); |
1646 | 0 | DEBUGLOG(5, "extDict [0x%zx, 0x%zx)", |
1647 | 0 | (size_t)extDict.start, |
1648 | 0 | (size_t)extDict.start + extDict.size); |
1649 | 0 | DEBUGLOG(5, "prefix [0x%zx, 0x%zx)", |
1650 | 0 | (size_t)prefix.start, |
1651 | 0 | (size_t)prefix.start + prefix.size); |
1652 | |
|
1653 | 0 | return ZSTDMT_isOverlapped(buffer, extDict) |
1654 | 0 | || ZSTDMT_isOverlapped(buffer, prefix); |
1655 | 0 | } |
1656 | | |
1657 | | static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, Buffer buffer) |
1658 | 0 | { |
1659 | 0 | if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) { |
1660 | 0 | ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; |
1661 | 0 | DEBUGLOG(5, "ZSTDMT_waitForLdmComplete"); |
1662 | 0 | DEBUGLOG(5, "source [0x%zx, 0x%zx)", |
1663 | 0 | (size_t)buffer.start, |
1664 | 0 | (size_t)buffer.start + buffer.capacity); |
1665 | 0 | ZSTD_PTHREAD_MUTEX_LOCK(mutex); |
1666 | 0 | while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) { |
1667 | 0 | DEBUGLOG(5, "Waiting for LDM to finish..."); |
1668 | 0 | ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex); |
1669 | 0 | } |
1670 | 0 | DEBUGLOG(6, "Done waiting for LDM to finish"); |
1671 | 0 | ZSTD_pthread_mutex_unlock(mutex); |
1672 | 0 | } |
1673 | 0 | } |
1674 | | |
1675 | | /** |
1676 | | * Attempts to set the inBuff to the next section to fill. |
1677 | | * If any part of the new section is still in use we give up. |
1678 | | * Returns non-zero if the buffer is filled. |
1679 | | */ |
1680 | | static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) |
1681 | 0 | { |
1682 | 0 | Range const inUse = ZSTDMT_getInputDataInUse(mtctx); |
1683 | 0 | size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; |
1684 | 0 | size_t const spaceNeeded = mtctx->targetSectionSize; |
1685 | 0 | Buffer buffer; |
1686 | |
|
1687 | 0 | DEBUGLOG(5, "ZSTDMT_tryGetInputRange"); |
1688 | 0 | assert(mtctx->inBuff.buffer.start == NULL); |
1689 | 0 | assert(mtctx->roundBuff.capacity >= spaceNeeded); |
1690 | |
|
1691 | 0 | if (spaceLeft < spaceNeeded) { |
1692 | | /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. |
1693 | | * Simply copy the prefix to the beginning in that case. |
1694 | | */ |
1695 | 0 | BYTE* const start = (BYTE*)mtctx->roundBuff.buffer; |
1696 | 0 | size_t const prefixSize = mtctx->inBuff.prefix.size; |
1697 | |
|
1698 | 0 | buffer.start = start; |
1699 | 0 | buffer.capacity = prefixSize; |
1700 | 0 | if (ZSTDMT_isOverlapped(buffer, inUse)) { |
1701 | 0 | DEBUGLOG(5, "Waiting for buffer..."); |
1702 | 0 | return 0; |
1703 | 0 | } |
1704 | 0 | ZSTDMT_waitForLdmComplete(mtctx, buffer); |
1705 | 0 | ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize); |
1706 | 0 | mtctx->inBuff.prefix.start = start; |
1707 | 0 | mtctx->roundBuff.pos = prefixSize; |
1708 | 0 | } |
1709 | 0 | buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; |
1710 | 0 | buffer.capacity = spaceNeeded; |
1711 | |
|
1712 | 0 | if (ZSTDMT_isOverlapped(buffer, inUse)) { |
1713 | 0 | DEBUGLOG(5, "Waiting for buffer..."); |
1714 | 0 | return 0; |
1715 | 0 | } |
1716 | 0 | assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix)); |
1717 | |
|
1718 | 0 | ZSTDMT_waitForLdmComplete(mtctx, buffer); |
1719 | |
|
1720 | 0 | DEBUGLOG(5, "Using prefix range [%zx, %zx)", |
1721 | 0 | (size_t)mtctx->inBuff.prefix.start, |
1722 | 0 | (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size); |
1723 | 0 | DEBUGLOG(5, "Using source range [%zx, %zx)", |
1724 | 0 | (size_t)buffer.start, |
1725 | 0 | (size_t)buffer.start + buffer.capacity); |
1726 | | |
1727 | |
|
1728 | 0 | mtctx->inBuff.buffer = buffer; |
1729 | 0 | mtctx->inBuff.filled = 0; |
1730 | 0 | assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity); |
1731 | 0 | return 1; |
1732 | 0 | } |
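/* --- Editorial sketch (not part of the measured source) ---------------------
 * The round-buffer wrap performed by ZSTDMT_tryGetInputRange() above, reduced
 * to its core: when the space left after `pos` cannot hold the next section,
 * move the retained prefix (the overlap) back to the start of the buffer and
 * continue from there. All names are hypothetical; the in-use and LDM checks
 * are omitted.
 */
#include <stddef.h>
#include <string.h>

typedef struct { unsigned char* buf; size_t capacity; size_t pos; } demo_roundBuff;

/* Returns a pointer to the next writable section of `sectionSize` bytes.
 * `prefix`/`prefixSize` describe the tail of the previous section that must
 * remain addressable. */
static unsigned char* demo_nextSection(demo_roundBuff* rb,
                                       const unsigned char* prefix, size_t prefixSize,
                                       size_t sectionSize)
{
    if (rb->capacity - rb->pos < sectionSize) {      /* wrap around */
        memmove(rb->buf, prefix, prefixSize);        /* keep the overlap reachable */
        rb->pos = prefixSize;
    }
    return rb->buf + rb->pos;                        /* caller fills sectionSize bytes here */
}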
1733 | | |
1734 | | typedef struct { |
1735 | | size_t toLoad; /* The number of bytes to load from the input. */ |
1736 | | int flush; /* Boolean declaring if we must flush because we found a synchronization point. */ |
1737 | | } SyncPoint; |
1738 | | |
1739 | | /** |
1740 | | * Searches through the input for a synchronization point. If one is found, we |
1741 | | * will instruct the caller to flush, and return the number of bytes to load. |
1742 | | * Otherwise, we will load as many bytes as possible and instruct the caller |
1743 | | * to continue as normal. |
1744 | | */ |
1745 | | static SyncPoint |
1746 | | findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) |
1747 | 0 | { |
1748 | 0 | BYTE const* const istart = (BYTE const*)input.src + input.pos; |
1749 | 0 | U64 const primePower = mtctx->rsync.primePower; |
1750 | 0 | U64 const hitMask = mtctx->rsync.hitMask; |
1751 | |
|
1752 | 0 | SyncPoint syncPoint; |
1753 | 0 | U64 hash; |
1754 | 0 | BYTE const* prev; |
1755 | 0 | size_t pos; |
1756 | |
|
1757 | 0 | syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled); |
1758 | 0 | syncPoint.flush = 0; |
1759 | 0 | if (!mtctx->params.rsyncable) |
1760 | | /* Rsync is disabled. */ |
1761 | 0 | return syncPoint; |
1762 | 0 | if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE) |
1763 | | /* We don't emit synchronization points if it would produce too small blocks. |
1764 | | * We don't have enough input to find a synchronization point, so don't look. |
1765 | | */ |
1766 | 0 | return syncPoint; |
1767 | 0 | if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH) |
1768 | | /* Not enough to compute the hash. |
1769 | | * We will miss any synchronization points in this RSYNC_LENGTH byte |
1770 | | * window. However, since it depends only on the internal buffers, if the
1771 | | * state is already synchronized, we will remain synchronized. |
1772 | | * Additionally, the probability that we miss a synchronization point is |
1773 | | * low: RSYNC_LENGTH / targetSectionSize. |
1774 | | */ |
1775 | 0 | return syncPoint; |
1776 | | /* Initialize the loop variables. */ |
1777 | 0 | if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) { |
1778 | | /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions |
1779 | | * because they can't possibly be a sync point. So we can start |
1780 | | * part way through the input buffer. |
1781 | | */ |
1782 | 0 | pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled; |
1783 | 0 | if (pos >= RSYNC_LENGTH) { |
1784 | 0 | prev = istart + pos - RSYNC_LENGTH; |
1785 | 0 | hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); |
1786 | 0 | } else { |
1787 | 0 | assert(mtctx->inBuff.filled >= RSYNC_LENGTH); |
1788 | 0 | prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; |
1789 | 0 | hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos)); |
1790 | 0 | hash = ZSTD_rollingHash_append(hash, istart, pos); |
1791 | 0 | } |
1792 | 0 | } else { |
1793 | | /* We have enough bytes buffered to initialize the hash, |
1794 | | * and have processed enough bytes to find a sync point. |
1795 | | * Start scanning at the beginning of the input. |
1796 | | */ |
1797 | 0 | assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE); |
1798 | 0 | assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH); |
1799 | 0 | pos = 0; |
1800 | 0 | prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; |
1801 | 0 | hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); |
1802 | 0 | if ((hash & hitMask) == hitMask) { |
1803 | | /* We're already at a sync point so don't load any more until |
1804 | | * we're able to flush this sync point. |
1805 | | * This likely happened because the job table was full so we |
1806 | | * couldn't add our job. |
1807 | | */ |
1808 | 0 | syncPoint.toLoad = 0; |
1809 | 0 | syncPoint.flush = 1; |
1810 | 0 | return syncPoint; |
1811 | 0 | } |
1812 | 0 | } |
1813 | | /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll |
1814 | | * through the input. If we hit a synchronization point, then cut the |
1815 | | * job off, and tell the compressor to flush the job. Otherwise, load |
1816 | | * all the bytes and continue as normal. |
1817 | | * If we go too long without a synchronization point (targetSectionSize) |
1818 | | * then a block will be emitted anyways, but this is okay, since if we |
1819 | | * are already synchronized we will remain synchronized. |
1820 | | */ |
1821 | 0 | assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); |
1822 | 0 | for (; pos < syncPoint.toLoad; ++pos) { |
1823 | 0 | BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH]; |
1824 | | /* This assert is very expensive, and Debian compiles with asserts enabled. |
1825 | | * So disable it for now. We can get similar coverage by checking it at the |
1826 | | * beginning & end of the loop. |
1827 | | * assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); |
1828 | | */ |
1829 | 0 | hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); |
1830 | 0 | assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE); |
1831 | 0 | if ((hash & hitMask) == hitMask) { |
1832 | 0 | syncPoint.toLoad = pos + 1; |
1833 | 0 | syncPoint.flush = 1; |
1834 | 0 | ++pos; /* for assert */ |
1835 | 0 | break; |
1836 | 0 | } |
1837 | 0 | } |
1838 | 0 | assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); |
1839 | 0 | return syncPoint; |
1840 | 0 | } |
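/* --- Editorial sketch (not part of the measured source) ---------------------
 * A simplified Rabin-Karp style rolling-hash scan showing the same cut
 * decision as findSynchronizationPoint() above. The multiplier and window
 * size are arbitrary stand-ins (not zstd's ZSTD_rollingHash constants), and
 * the RSYNC_MIN_BLOCK_SIZE / buffered-prefix handling is omitted.
 */
#include <stddef.h>
#include <stdint.h>

#define DEMO_WINDOW 8                        /* rolling window in bytes (cf. RSYNC_LENGTH) */
#define DEMO_PRIME  0x9E3779B185EBCA87ULL    /* arbitrary odd multiplier */

/* Returns the cut position (offset just past the byte that completed a match),
 * or `len` if no synchronization point was found. */
static size_t demo_findSyncPoint(const uint8_t* src, size_t len, uint64_t hitMask)
{
    uint64_t primePower = 1, hash = 0;
    size_t i;
    if (len < DEMO_WINDOW) return len;
    for (i = 0; i < DEMO_WINDOW - 1; i++) primePower *= DEMO_PRIME;    /* PRIME^(W-1) */
    for (i = 0; i < DEMO_WINDOW; i++) hash = hash * DEMO_PRIME + src[i];
    for (i = DEMO_WINDOW; ; i++) {
        if ((hash & hitMask) == hitMask) return i;   /* low bits all set => cut the job here */
        if (i == len) return len;
        /* roll: drop src[i - W], append src[i] */
        hash = (hash - src[i - DEMO_WINDOW] * primePower) * DEMO_PRIME + src[i];
    }
}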
1841 | | |
1842 | | size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx) |
1843 | 0 | { |
1844 | 0 | size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled; |
1845 | 0 | if (hintInSize==0) hintInSize = mtctx->targetSectionSize; |
1846 | 0 | return hintInSize; |
1847 | 0 | } |
1848 | | |
1849 | | /** ZSTDMT_compressStream_generic() : |
1850 | | * internal use only - exposed to be invoked from zstd_compress.c |
1851 | | * assumption : output and input are valid (pos <= size) |
1852 | | * @return : minimum amount of data remaining to flush, 0 if none */ |
1853 | | size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, |
1854 | | ZSTD_outBuffer* output, |
1855 | | ZSTD_inBuffer* input, |
1856 | | ZSTD_EndDirective endOp) |
1857 | 0 | { |
1858 | 0 | unsigned forwardInputProgress = 0; |
1859 | 0 | DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)", |
1860 | 0 | (U32)endOp, (U32)(input->size - input->pos)); |
1861 | 0 | assert(output->pos <= output->size); |
1862 | 0 | assert(input->pos <= input->size); |
1863 | |
|
1864 | 0 | if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) { |
1865 | | /* current frame being ended. Only flush/end are allowed */ |
1866 | 0 | return ERROR(stage_wrong); |
1867 | 0 | } |
1868 | | |
1869 | | /* fill input buffer */ |
1870 | 0 | if ( (!mtctx->jobReady) |
1871 | 0 | && (input->size > input->pos) ) { /* support NULL input */ |
1872 | 0 | if (mtctx->inBuff.buffer.start == NULL) { |
1873 | 0 | assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */ |
1874 | 0 | if (!ZSTDMT_tryGetInputRange(mtctx)) { |
1875 | | /* It is only possible for this operation to fail if there are |
1876 | | * still compression jobs ongoing. |
1877 | | */ |
1878 | 0 | DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed"); |
1879 | 0 | assert(mtctx->doneJobID != mtctx->nextJobID); |
1880 | 0 | } else |
1881 | 0 | DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start); |
1882 | 0 | } |
1883 | 0 | if (mtctx->inBuff.buffer.start != NULL) { |
1884 | 0 | SyncPoint const syncPoint = findSynchronizationPoint(mtctx, *input); |
1885 | 0 | if (syncPoint.flush && endOp == ZSTD_e_continue) { |
1886 | 0 | endOp = ZSTD_e_flush; |
1887 | 0 | } |
1888 | 0 | assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); |
1889 | 0 | DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u", |
1890 | 0 | (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize); |
1891 | 0 | ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad); |
1892 | 0 | input->pos += syncPoint.toLoad; |
1893 | 0 | mtctx->inBuff.filled += syncPoint.toLoad; |
1894 | 0 | forwardInputProgress = syncPoint.toLoad>0; |
1895 | 0 | } |
1896 | 0 | } |
1897 | 0 | if ((input->pos < input->size) && (endOp == ZSTD_e_end)) { |
1898 | | /* Can't end yet because the input is not fully consumed. |
1899 | | * We are in one of these cases: |
1900 | | * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job. |
1901 | | * - We filled the input buffer: flush this job but don't end the frame. |
1902 | | * - We hit a synchronization point: flush this job but don't end the frame. |
1903 | | */ |
1904 | 0 | assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable); |
1905 | 0 | endOp = ZSTD_e_flush; |
1906 | 0 | } |
1907 | |
|
1908 | 0 | if ( (mtctx->jobReady) |
1909 | 0 | || (mtctx->inBuff.filled >= mtctx->targetSectionSize) /* filled enough : let's compress */ |
1910 | 0 | || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0)) /* something to flush : let's go */ |
1911 | 0 | || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */ |
1912 | 0 | size_t const jobSize = mtctx->inBuff.filled; |
1913 | 0 | assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); |
1914 | 0 | FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , ""); |
1915 | 0 | } |
1916 | | |
1917 | | /* check for potential compressed data ready to be flushed */ |
1918 | 0 | { size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */ |
1919 | 0 | if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */ |
1920 | 0 | DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush); |
1921 | 0 | return remainingToFlush; |
1922 | 0 | } |
1923 | 0 | } |
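/* --- Editorial sketch (not part of the measured source) ---------------------
 * Caller-side view of this path: the public streaming API reaches
 * ZSTDMT_compressStream_generic() once nbWorkers >= 1. A minimal sketch using
 * only documented zstd.h entry points, with error handling reduced to a single
 * failure path; demo_mtStreamCompress is a hypothetical helper.
 */
#include <stdio.h>
#include <stdlib.h>
#include "zstd.h"

static int demo_mtStreamCompress(FILE* fin, FILE* fout)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const inSize  = ZSTD_CStreamInSize();
    size_t const outSize = ZSTD_CStreamOutSize();
    void* const inBuf  = malloc(inSize);
    void* const outBuf = malloc(outSize);
    int ret = 1;
    if (!cctx || !inBuf || !outBuf) goto cleanup;

    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);   /* enables the multi-threaded path */
    /* optionally: ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1); */

    for (;;) {
        size_t const readSize = fread(inBuf, 1, inSize, fin);
        int const lastChunk = (readSize < inSize);
        ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
        ZSTD_inBuffer input = { inBuf, readSize, 0 };
        int finished = 0;
        do {
            ZSTD_outBuffer output = { outBuf, outSize, 0 };
            size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
            if (ZSTD_isError(remaining)) goto cleanup;
            fwrite(outBuf, 1, output.pos, fout);
            /* for ZSTD_e_end, loop until the frame is fully flushed (remaining == 0) */
            finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
        } while (!finished);
        if (lastChunk) break;
    }
    ret = 0;
cleanup:
    ZSTD_freeCCtx(cctx);
    free(inBuf);
    free(outBuf);
    return ret;
}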