Coverage Report

Created: 2025-08-26 06:15

/src/zstd/lib/compress/zstdmt_compress.c
Line | Count | Source
1
/*
2
 * Copyright (c) Meta Platforms, Inc. and affiliates.
3
 * All rights reserved.
4
 *
5
 * This source code is licensed under both the BSD-style license (found in the
6
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7
 * in the COPYING file in the root directory of this source tree).
8
 * You may select, at your option, one of the above-listed licenses.
9
 */
10
11
12
/* ======   Compiler specifics   ====== */
13
#if defined(_MSC_VER)
14
#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
15
#endif
16
17
18
/* ======   Dependencies   ====== */
19
#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
20
#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
21
#include "../common/mem.h"         /* MEM_STATIC */
22
#include "../common/pool.h"        /* threadpool */
23
#include "../common/threading.h"   /* mutex */
24
#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
25
#include "zstd_ldm.h"
26
#include "zstdmt_compress.h"
27
28
/* Guards code to support resizing the SeqPool.
29
 * We will want to resize the SeqPool to save memory in the future.
30
 * Until then, the code is compiled out since it is unused.
31
 */
32
#define ZSTD_RESIZE_SEQPOOL 0
33
34
/* ======   Debug   ====== */
35
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
36
    && !defined(_MSC_VER) \
37
    && !defined(__MINGW32__)
38
39
#  include <stdio.h>
40
#  include <unistd.h>
41
#  include <sys/times.h>
42
43
#  define DEBUG_PRINTHEX(l,p,n)                                       \
44
    do {                                                              \
45
        unsigned debug_u;                                             \
46
        for (debug_u=0; debug_u<(n); debug_u++)                       \
47
            RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
48
        RAWLOG(l, " \n");                                             \
49
    } while (0)
50
51
static unsigned long long GetCurrentClockTimeMicroseconds(void)
52
{
53
   static clock_t _ticksPerSecond = 0;
54
   if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
55
56
   {   struct tms junk; clock_t newTicks = (clock_t) times(&junk);
57
       return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
58
}  }
59
60
#define MUTEX_WAIT_TIME_DLEVEL 6
61
#define ZSTD_PTHREAD_MUTEX_LOCK(mutex)                                                  \
62
    do {                                                                                \
63
        if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {                                     \
64
            unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds();    \
65
            ZSTD_pthread_mutex_lock(mutex);                                             \
66
            {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
67
                unsigned long long const elapsedTime = (afterTime-beforeTime);          \
68
                if (elapsedTime > 1000) {                                               \
69
                    /* or whatever threshold you like; I'm using 1 millisecond here */  \
70
                    DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL,                                    \
71
                        "Thread took %llu microseconds to acquire mutex %s \n",         \
72
                        elapsedTime, #mutex);                                           \
73
            }   }                                                                       \
74
        } else {                                                                        \
75
            ZSTD_pthread_mutex_lock(mutex);                                             \
76
        }                                                                               \
77
    } while (0)
78
79
#else
80
81
21.3M
#  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
82
#  define DEBUG_PRINTHEX(l,p,n) do { } while (0)
83
84
#endif
85
86
87
/* =====   Buffer Pool   ===== */
88
/* a single Buffer Pool can be invoked from multiple threads in parallel */
89
90
typedef struct buffer_s {
91
    void* start;
92
    size_t capacity;
93
} Buffer;
94
95
static const Buffer g_nullBuffer = { NULL, 0 };
96
97
typedef struct ZSTDMT_bufferPool_s {
98
    ZSTD_pthread_mutex_t poolMutex;
99
    size_t bufferSize;
100
    unsigned totalBuffers;
101
    unsigned nbBuffers;
102
    ZSTD_customMem cMem;
103
    Buffer* buffers;
104
} ZSTDMT_bufferPool;
105
106
static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
107
37.4k
{
108
37.4k
    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
109
37.4k
    if (!bufPool) return;   /* compatibility with free on NULL */
110
37.4k
    if (bufPool->buffers) {
111
37.4k
        unsigned u;
112
172k
        for (u=0; u<bufPool->totalBuffers; u++) {
113
134k
            DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->buffers[u].start);
114
134k
            ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem);
115
134k
        }
116
37.4k
        ZSTD_customFree(bufPool->buffers, bufPool->cMem);
117
37.4k
    }
118
37.4k
    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
119
37.4k
    ZSTD_customFree(bufPool, bufPool->cMem);
120
37.4k
}
121
122
static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem)
123
37.4k
{
124
37.4k
    ZSTDMT_bufferPool* const bufPool =
125
37.4k
        (ZSTDMT_bufferPool*)ZSTD_customCalloc(sizeof(ZSTDMT_bufferPool), cMem);
126
37.4k
    if (bufPool==NULL) return NULL;
127
37.4k
    if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
128
0
        ZSTD_customFree(bufPool, cMem);
129
0
        return NULL;
130
0
    }
131
37.4k
    bufPool->buffers = (Buffer*)ZSTD_customCalloc(maxNbBuffers * sizeof(Buffer), cMem);
132
37.4k
    if (bufPool->buffers==NULL) {
133
0
        ZSTDMT_freeBufferPool(bufPool);
134
0
        return NULL;
135
0
    }
136
37.4k
    bufPool->bufferSize = 64 KB;
137
37.4k
    bufPool->totalBuffers = maxNbBuffers;
138
37.4k
    bufPool->nbBuffers = 0;
139
37.4k
    bufPool->cMem = cMem;
140
37.4k
    return bufPool;
141
37.4k
}
142
143
/* only works at initialization, not during compression */
144
static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
145
0
{
146
0
    size_t const poolSize = sizeof(*bufPool);
147
0
    size_t const arraySize = bufPool->totalBuffers * sizeof(Buffer);
148
0
    unsigned u;
149
0
    size_t totalBufferSize = 0;
150
0
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
151
0
    for (u=0; u<bufPool->totalBuffers; u++)
152
0
        totalBufferSize += bufPool->buffers[u].capacity;
153
0
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
154
155
0
    return poolSize + arraySize + totalBufferSize;
156
0
}
157
158
/* ZSTDMT_setBufferSize() :
159
 * all future buffers provided by this buffer pool will have _at least_ this size
160
 * note : it's better for all buffers to have the same size,
161
 * as they become freely interchangeable, reducing malloc/free usage and memory fragmentation */
162
static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
163
68.8k
{
164
68.8k
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
165
68.8k
    DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
166
68.8k
    bufPool->bufferSize = bSize;
167
68.8k
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
168
68.8k
}
169
170
171
static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers)
172
1.02k
{
173
1.02k
    if (srcBufPool==NULL) return NULL;
174
1.02k
    if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
175
588
        return srcBufPool;
176
    /* need a larger buffer pool */
177
436
    {   ZSTD_customMem const cMem = srcBufPool->cMem;
178
436
        size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
179
436
        ZSTDMT_bufferPool* newBufPool;
180
436
        ZSTDMT_freeBufferPool(srcBufPool);
181
436
        newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem);
182
436
        if (newBufPool==NULL) return newBufPool;
183
436
        ZSTDMT_setBufferSize(newBufPool, bSize);
184
436
        return newBufPool;
185
436
    }
186
436
}
187
188
/** ZSTDMT_getBuffer() :
189
 *  assumption : bufPool must be valid
190
 * @return : a buffer, with start pointer and size
191
 *  note: allocation may fail, in which case start==NULL and size==0 */
192
static Buffer ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
193
3.02M
{
194
3.02M
    size_t const bSize = bufPool->bufferSize;
195
3.02M
    DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
196
3.02M
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
197
3.02M
    if (bufPool->nbBuffers) {   /* try to use an existing buffer */
198
2.99M
        Buffer const buf = bufPool->buffers[--(bufPool->nbBuffers)];
199
2.99M
        size_t const availBufferSize = buf.capacity;
200
2.99M
        bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer;
201
2.99M
        if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
202
            /* large enough, but not too much */
203
2.99M
            DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
204
2.99M
                        bufPool->nbBuffers, (U32)buf.capacity);
205
2.99M
            ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
206
2.99M
            return buf;
207
2.99M
        }
208
        /* size conditions not respected : scratch this buffer, create a new one */
209
217
        DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
210
217
        ZSTD_customFree(buf.start, bufPool->cMem);
211
217
    }
212
27.7k
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
213
    /* create new buffer */
214
27.7k
    DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
215
27.7k
    {   Buffer buffer;
216
27.7k
        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
217
27.7k
        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
218
27.7k
        buffer.capacity = (start==NULL) ? 0 : bSize;
219
27.7k
        if (start==NULL) {
220
0
            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
221
27.7k
        } else {
222
27.7k
            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
223
27.7k
        }
224
27.7k
        return buffer;
225
3.02M
    }
226
3.02M
}
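The reuse test above accepts a cached buffer only when its capacity is at least the requested size and at most roughly eight times it, so the pool never hands back a wildly oversized allocation. A small self-contained check of that window (sizes chosen purely for illustration; relies on this file's existing includes):

static int bufferFitsRequest(size_t capacity, size_t bSize)
{
    /* same predicate as above: bSize <= capacity <= ~8*bSize */
    return (int)((capacity >= bSize) & ((capacity >> 3) <= bSize));
}

static void bufferFitsRequestExamples(void)
{
    assert( bufferFitsRequest((size_t)1 << 17, (size_t)1 << 17));  /* exact fit : reused */
    assert( bufferFitsRequest((size_t)1 << 20, (size_t)1 << 17));  /* 8x larger : still reused */
    assert(!bufferFitsRequest((size_t)1 << 16, (size_t)1 << 17));  /* too small : freed, new alloc */
    assert(!bufferFitsRequest((size_t)1 << 21, (size_t)1 << 17));  /* >8x : freed to reclaim memory */
}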
227
228
#if ZSTD_RESIZE_SEQPOOL
229
/** ZSTDMT_resizeBuffer() :
230
 * assumption : bufPool must be valid
231
 * @return : a buffer that is at least the buffer pool buffer size.
232
 *           If a reallocation happens, the data in the input buffer is copied.
233
 */
234
static Buffer ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, Buffer buffer)
235
{
236
    size_t const bSize = bufPool->bufferSize;
237
    if (buffer.capacity < bSize) {
238
        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
239
        Buffer newBuffer;
240
        newBuffer.start = start;
241
        newBuffer.capacity = start == NULL ? 0 : bSize;
242
        if (start != NULL) {
243
            assert(newBuffer.capacity >= buffer.capacity);
244
            ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity);
245
            DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
246
            return newBuffer;
247
        }
248
        DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
249
    }
250
    return buffer;
251
}
252
#endif
253
254
/* store buffer for later re-use, up to pool capacity */
255
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, Buffer buf)
256
4.04M
{
257
4.04M
    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
258
4.04M
    if (buf.start == NULL) return;   /* compatible with release on NULL */
259
3.02M
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
260
3.02M
    if (bufPool->nbBuffers < bufPool->totalBuffers) {
261
3.02M
        bufPool->buffers[bufPool->nbBuffers++] = buf;  /* stored for later use */
262
3.02M
        DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
263
3.02M
                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
264
3.02M
        ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
265
3.02M
        return;
266
3.02M
    }
267
0
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
268
    /* Reached bufferPool capacity (note: should not happen) */
269
0
    DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
270
0
    ZSTD_customFree(buf.start, bufPool->cMem);
271
0
}
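Taken together, the pool functions above follow a strict borrow/return discipline: a Buffer obtained from ZSTDMT_getBuffer() is returned via ZSTDMT_releaseBuffer(), never freed directly, and ZSTDMT_freeBufferPool() reclaims whatever is cached. A hedged usage sketch, assuming it sits in this translation unit where these static functions are visible:

static void bufferPoolRoundTrip(ZSTD_customMem cMem)
{
    ZSTDMT_bufferPool* const pool = ZSTDMT_createBufferPool(4, cMem);
    if (pool == NULL) return;                     /* creation may fail */
    ZSTDMT_setBufferSize(pool, (size_t)1 << 17);  /* future buffers: >= 128 KB */
    {   Buffer b = ZSTDMT_getBuffer(pool);        /* b.start==NULL on allocation failure */
        if (b.start != NULL) {
            ZSTD_memset(b.start, 0, b.capacity);  /* ... use the buffer ... */
            ZSTDMT_releaseBuffer(pool, b);        /* cached for later reuse */
        }
    }
    ZSTDMT_freeBufferPool(pool);                  /* frees all cached buffers */
}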
272
273
/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released.
274
 * The 3 additional buffers are as follows:
275
 *   1 buffer for input loading
276
 *   1 buffer for "next input" when submitting the current one
277
 *   1 buffer stuck in the queue */
278
18.9k
#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3)
279
280
/* After a worker releases its rawSeqStore, it is immediately ready for reuse.
281
 * So we only need one seq buffer per worker. */
282
18.9k
#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers)
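A quick numeric check of the two sizing macros above, with nbWorkers = 4 assumed purely for illustration:

static void poolSizingExample(void)
{
    assert(BUF_POOL_MAX_NB_BUFFERS(4) == 11);  /* 2*4 outputs + loading + next-input + queued */
    assert(SEQ_POOL_MAX_NB_BUFFERS(4) == 4);   /* one rawSeqStore per worker */
}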
283
284
/* =====   Seq Pool Wrapper   ====== */
285
286
typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
287
288
static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
289
0
{
290
0
    return ZSTDMT_sizeof_bufferPool(seqPool);
291
0
}
292
293
static RawSeqStore_t bufferToSeq(Buffer buffer)
294
1.04M
{
295
1.04M
    RawSeqStore_t seq = kNullRawSeqStore;
296
1.04M
    seq.seq = (rawSeq*)buffer.start;
297
1.04M
    seq.capacity = buffer.capacity / sizeof(rawSeq);
298
1.04M
    return seq;
299
1.04M
}
300
301
static Buffer seqToBuffer(RawSeqStore_t seq)
302
1.96M
{
303
1.96M
    Buffer buffer;
304
1.96M
    buffer.start = seq.seq;
305
1.96M
    buffer.capacity = seq.capacity * sizeof(rawSeq);
306
1.96M
    return buffer;
307
1.96M
}
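bufferToSeq()/seqToBuffer() are two views of the same allocation: capacity counts rawSeq elements on one side and bytes on the other, so a round trip is lossless for element-aligned sizes. A hedged sketch (the NULL start stands in for a real pool buffer):

static void seqBufferRoundTripExample(void)
{
    Buffer b;
    b.start = NULL;                    /* illustration only */
    b.capacity = 64 * sizeof(rawSeq);
    {   RawSeqStore_t const s = bufferToSeq(b);   /* room for 64 sequences */
        Buffer const b2 = seqToBuffer(s);
        assert(s.capacity == 64);
        assert(b2.capacity == b.capacity);
        assert(b2.start == b.start);
    }
}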
308
309
static RawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
310
1.96M
{
311
1.96M
    if (seqPool->bufferSize == 0) {
312
919k
        return kNullRawSeqStore;
313
919k
    }
314
1.04M
    return bufferToSeq(ZSTDMT_getBuffer(seqPool));
315
1.96M
}
316
317
#if ZSTD_RESIZE_SEQPOOL
318
static RawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq)
319
{
320
  return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
321
}
322
#endif
323
324
static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, RawSeqStore_t seq)
325
1.96M
{
326
1.96M
  ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
327
1.96M
}
328
329
static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
330
31.9k
{
331
31.9k
  ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
332
31.9k
}
333
334
static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
335
18.4k
{
336
18.4k
    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
337
18.4k
    if (seqPool == NULL) return NULL;
338
18.4k
    ZSTDMT_setNbSeq(seqPool, 0);
339
18.4k
    return seqPool;
340
18.4k
}
341
342
static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
343
18.4k
{
344
18.4k
    ZSTDMT_freeBufferPool(seqPool);
345
18.4k
}
346
347
static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
348
512
{
349
512
    return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers));
350
512
}
351
352
353
/* =====   CCtx Pool   ===== */
354
/* a single CCtx Pool can be invoked from multiple threads in parallel */
355
356
typedef struct {
357
    ZSTD_pthread_mutex_t poolMutex;
358
    int totalCCtx;
359
    int availCCtx;
360
    ZSTD_customMem cMem;
361
    ZSTD_CCtx** cctxs;
362
} ZSTDMT_CCtxPool;
363
364
/* note : all CCtx borrowed from the pool must be reverted back to the pool _before_ freeing the pool */
365
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
366
18.7k
{
367
18.7k
    if (!pool) return;
368
18.7k
    ZSTD_pthread_mutex_destroy(&pool->poolMutex);
369
18.7k
    if (pool->cctxs) {
370
18.7k
        int cid;
371
44.8k
        for (cid=0; cid<pool->totalCCtx; cid++)
372
26.1k
            ZSTD_freeCCtx(pool->cctxs[cid]);  /* free compatible with NULL */
373
18.7k
        ZSTD_customFree(pool->cctxs, pool->cMem);
374
18.7k
    }
375
18.7k
    ZSTD_customFree(pool, pool->cMem);
376
18.7k
}
377
378
/* ZSTDMT_createCCtxPool() :
379
 * implies nbWorkers >= 1, checked by caller ZSTDMT_createCCtx() */
380
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
381
                                              ZSTD_customMem cMem)
382
18.7k
{
383
18.7k
    ZSTDMT_CCtxPool* const cctxPool =
384
18.7k
        (ZSTDMT_CCtxPool*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtxPool), cMem);
385
18.7k
    assert(nbWorkers > 0);
386
18.7k
    if (!cctxPool) return NULL;
387
18.7k
    if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
388
0
        ZSTD_customFree(cctxPool, cMem);
389
0
        return NULL;
390
0
    }
391
18.7k
    cctxPool->totalCCtx = nbWorkers;
392
18.7k
    cctxPool->cctxs = (ZSTD_CCtx**)ZSTD_customCalloc(nbWorkers * sizeof(ZSTD_CCtx*), cMem);
393
18.7k
    if (!cctxPool->cctxs) {
394
0
        ZSTDMT_freeCCtxPool(cctxPool);
395
0
        return NULL;
396
0
    }
397
18.7k
    cctxPool->cMem = cMem;
398
18.7k
    cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem);
399
18.7k
    if (!cctxPool->cctxs[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
400
18.7k
    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
401
18.7k
    DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
402
18.7k
    return cctxPool;
403
18.7k
}
404
405
static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
406
                                              int nbWorkers)
407
512
{
408
512
    if (srcPool==NULL) return NULL;
409
512
    if (nbWorkers <= srcPool->totalCCtx) return srcPool;   /* good enough */
410
    /* need a larger cctx pool */
411
218
    {   ZSTD_customMem const cMem = srcPool->cMem;
412
218
        ZSTDMT_freeCCtxPool(srcPool);
413
218
        return ZSTDMT_createCCtxPool(nbWorkers, cMem);
414
512
    }
415
512
}
416
417
/* only works during initialization phase, not during compression */
418
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
419
0
{
420
0
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
421
0
    {   unsigned const nbWorkers = cctxPool->totalCCtx;
422
0
        size_t const poolSize = sizeof(*cctxPool);
423
0
        size_t const arraySize = cctxPool->totalCCtx * sizeof(ZSTD_CCtx*);
424
0
        size_t totalCCtxSize = 0;
425
0
        unsigned u;
426
0
        for (u=0; u<nbWorkers; u++) {
427
0
            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]);
428
0
        }
429
0
        ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
430
0
        assert(nbWorkers > 0);
431
0
        return poolSize + arraySize + totalCCtxSize;
432
0
    }
433
0
}
434
435
static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
436
1.96M
{
437
1.96M
    DEBUGLOG(5, "ZSTDMT_getCCtx");
438
1.96M
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
439
1.96M
    if (cctxPool->availCCtx) {
440
1.96M
        cctxPool->availCCtx--;
441
1.96M
        {   ZSTD_CCtx* const cctx = cctxPool->cctxs[cctxPool->availCCtx];
442
1.96M
            ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
443
1.96M
            return cctx;
444
1.96M
    }   }
445
159
    ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
446
159
    DEBUGLOG(5, "create one more CCtx");
447
159
    return ZSTD_createCCtx_advanced(cctxPool->cMem);   /* note : can be NULL, when creation fails ! */
448
1.96M
}
449
450
static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
451
1.96M
{
452
1.96M
    if (cctx==NULL) return;   /* compatibility with release on NULL */
453
1.96M
    ZSTD_pthread_mutex_lock(&pool->poolMutex);
454
1.96M
    if (pool->availCCtx < pool->totalCCtx)
455
1.96M
        pool->cctxs[pool->availCCtx++] = cctx;
456
0
    else {
457
        /* pool overflow : should not happen, since totalCCtx==nbWorkers */
458
0
        DEBUGLOG(4, "CCtx pool overflow : free cctx");
459
0
        ZSTD_freeCCtx(cctx);
460
0
    }
461
1.96M
    ZSTD_pthread_mutex_unlock(&pool->poolMutex);
462
1.96M
}
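The cctx pool obeys the same borrow/return rule as the buffer pool, and the note above ZSTDMT_freeCCtxPool() adds an ordering constraint: every borrowed cctx must be back in the pool before the pool is freed. A hedged usage sketch within this translation unit:

static void cctxPoolRoundTrip(ZSTD_customMem cMem)
{
    ZSTDMT_CCtxPool* const pool = ZSTDMT_createCCtxPool(2, cMem);
    if (pool == NULL) return;
    {   ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(pool);  /* may be NULL on failure */
        if (cctx != NULL) {
            /* ... run one compression job with cctx ... */
            ZSTDMT_releaseCCtx(pool, cctx);            /* return it, don't free it */
        }
    }
    ZSTDMT_freeCCtxPool(pool);   /* frees the cctxs still cached in the pool */
}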
463
464
/* ====   Serial State   ==== */
465
466
typedef struct {
467
    void const* start;
468
    size_t size;
469
} Range;
470
471
typedef struct {
472
    /* All variables in the struct are protected by mutex. */
473
    ZSTD_pthread_mutex_t mutex;
474
    ZSTD_pthread_cond_t cond;
475
    ZSTD_CCtx_params params;
476
    ldmState_t ldmState;
477
    XXH64_state_t xxhState;
478
    unsigned nextJobID;
479
    /* Protects ldmWindow.
480
     * Must be acquired after the main mutex when acquiring both.
481
     */
482
    ZSTD_pthread_mutex_t ldmWindowMutex;
483
    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is updated */
484
    ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */
485
} SerialState;
486
487
static int
488
ZSTDMT_serialState_reset(SerialState* serialState,
489
                         ZSTDMT_seqPool* seqPool,
490
                         ZSTD_CCtx_params params,
491
                         size_t jobSize,
492
                         const void* dict, size_t const dictSize,
493
                         ZSTD_dictContentType_e dictContentType)
494
36.4k
{
495
    /* Adjust parameters */
496
36.4k
    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
497
13.4k
        DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
498
13.4k
        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
499
13.4k
        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
500
13.4k
        assert(params.ldmParams.hashRateLog < 32);
501
22.9k
    } else {
502
22.9k
        ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
503
22.9k
    }
504
36.4k
    serialState->nextJobID = 0;
505
36.4k
    if (params.fParams.checksumFlag)
506
8.56k
        XXH64_reset(&serialState->xxhState, 0);
507
36.4k
    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
508
13.4k
        ZSTD_customMem cMem = params.customMem;
509
13.4k
        unsigned const hashLog = params.ldmParams.hashLog;
510
13.4k
        size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
511
13.4k
        unsigned const bucketLog =
512
13.4k
            params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
513
13.4k
        unsigned const prevBucketLog =
514
13.4k
            serialState->params.ldmParams.hashLog -
515
13.4k
            serialState->params.ldmParams.bucketSizeLog;
516
13.4k
        size_t const numBuckets = (size_t)1 << bucketLog;
517
        /* Size the seq pool tables */
518
13.4k
        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
519
        /* Reset the window */
520
13.4k
        ZSTD_window_init(&serialState->ldmState.window);
521
        /* Resize tables and output space if necessary. */
522
13.4k
        if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
523
8.60k
            ZSTD_customFree(serialState->ldmState.hashTable, cMem);
524
8.60k
            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem);
525
8.60k
        }
526
13.4k
        if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
527
8.60k
            ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
528
8.60k
            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem);
529
8.60k
        }
530
13.4k
        if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
531
0
            return 1;
532
        /* Zero the tables */
533
13.4k
        ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
534
13.4k
        ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets);
535
536
        /* Update window state and fill hash table with dict */
537
13.4k
        serialState->ldmState.loadedDictEnd = 0;
538
13.4k
        if (dictSize > 0) {
539
1.55k
            if (dictContentType == ZSTD_dct_rawContent) {
540
794
                BYTE const* const dictEnd = (const BYTE*)dict + dictSize;
541
794
                ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0);
542
794
                ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams);
543
794
                serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base);
544
794
            } else {
545
                /* don't even load anything */
546
765
            }
547
1.55k
        }
548
549
        /* Initialize serialState's copy of ldmWindow. */
550
13.4k
        serialState->ldmWindow = serialState->ldmState.window;
551
13.4k
    }
552
553
36.4k
    serialState->params = params;
554
36.4k
    serialState->params.jobSize = (U32)jobSize;
555
36.4k
    return 0;
556
36.4k
}
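The LDM table sizes above follow directly from the ldm parameters. A worked check with hashLog = 20 and bucketSizeLog = 3 (values assumed for illustration):

static void ldmSizingExample(void)
{
    unsigned const hashLog = 20, bucketSizeLog = 3;   /* assumed values */
    size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
    size_t const numBuckets = (size_t)1 << (hashLog - bucketSizeLog);
    assert(numBuckets == 131072);   /* one BYTE bucket offset each */
    (void)hashSize;                 /* 1M hash entries */
}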
557
558
static int ZSTDMT_serialState_init(SerialState* serialState)
559
18.4k
{
560
18.4k
    int initError = 0;
561
18.4k
    ZSTD_memset(serialState, 0, sizeof(*serialState));
562
18.4k
    initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
563
18.4k
    initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
564
18.4k
    initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
565
18.4k
    initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
566
18.4k
    return initError;
567
18.4k
}
568
569
static void ZSTDMT_serialState_free(SerialState* serialState)
570
18.4k
{
571
18.4k
    ZSTD_customMem cMem = serialState->params.customMem;
572
18.4k
    ZSTD_pthread_mutex_destroy(&serialState->mutex);
573
18.4k
    ZSTD_pthread_cond_destroy(&serialState->cond);
574
18.4k
    ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
575
18.4k
    ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
576
18.4k
    ZSTD_customFree(serialState->ldmState.hashTable, cMem);
577
18.4k
    ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
578
18.4k
}
579
580
static void
581
ZSTDMT_serialState_genSequences(SerialState* serialState,
582
                                RawSeqStore_t* seqStore,
583
                                Range src, unsigned jobID)
584
1.96M
{
585
    /* Wait for our turn */
586
1.96M
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
587
1.96M
    while (serialState->nextJobID < jobID) {
588
0
        DEBUGLOG(5, "wait for serialState->cond");
589
0
        ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
590
0
    }
591
    /* A future job may error and skip our job */
592
1.96M
    if (serialState->nextJobID == jobID) {
593
        /* It is now our turn, do any processing necessary */
594
1.96M
        if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) {
595
1.04M
            size_t error;
596
1.04M
            DEBUGLOG(6, "ZSTDMT_serialState_genSequences: LDM update");
597
1.04M
            assert(seqStore->seq != NULL && seqStore->pos == 0 &&
598
1.04M
                   seqStore->size == 0 && seqStore->capacity > 0);
599
1.04M
            assert(src.size <= serialState->params.jobSize);
600
1.04M
            ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0);
601
1.04M
            error = ZSTD_ldm_generateSequences(
602
1.04M
                &serialState->ldmState, seqStore,
603
1.04M
                &serialState->params.ldmParams, src.start, src.size);
604
            /* We provide a large enough buffer to never fail. */
605
1.04M
            assert(!ZSTD_isError(error)); (void)error;
606
            /* Update ldmWindow to match the ldmState.window and signal the main
607
             * thread if it is waiting for a buffer.
608
             */
609
1.04M
            ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
610
1.04M
            serialState->ldmWindow = serialState->ldmState.window;
611
1.04M
            ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
612
1.04M
            ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
613
1.04M
        }
614
1.96M
        if (serialState->params.fParams.checksumFlag && src.size > 0)
615
639k
            XXH64_update(&serialState->xxhState, src.start, src.size);
616
1.96M
    }
617
    /* Now it is the next job's turn */
618
1.96M
    serialState->nextJobID++;
619
1.96M
    ZSTD_pthread_cond_broadcast(&serialState->cond);
620
1.96M
    ZSTD_pthread_mutex_unlock(&serialState->mutex);
621
1.96M
}
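The wait loop above is a ticket turnstile: a worker blocks until nextJobID equals its own jobID, performs the serial-only work (LDM sequence generation, checksum) while holding the mutex, then increments the ticket and broadcasts. A stripped-down sketch of just that ordering protocol, with hypothetical names, using the same ZSTD_pthread primitives:

typedef struct {
    ZSTD_pthread_mutex_t mutex;
    ZSTD_pthread_cond_t  cond;
    unsigned nextTicket;
} Turnstile;   /* hypothetical; mirrors SerialState's mutex/cond/nextJobID */

static void turnstileEnter(Turnstile* t, unsigned myTicket)
{
    ZSTD_pthread_mutex_lock(&t->mutex);
    while (t->nextTicket < myTicket)                 /* not my turn yet */
        ZSTD_pthread_cond_wait(&t->cond, &t->mutex);
    /* serial-only work runs here, one thread at a time, in jobID order */
    t->nextTicket++;                                 /* pass the baton */
    ZSTD_pthread_cond_broadcast(&t->cond);           /* wake every waiter */
    ZSTD_pthread_mutex_unlock(&t->mutex);
}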
622
623
static void
624
ZSTDMT_serialState_applySequences(const SerialState* serialState, /* just for an assert() check */
625
                                  ZSTD_CCtx* jobCCtx,
626
                                  const RawSeqStore_t* seqStore)
627
1.96M
{
628
1.96M
    if (seqStore->size > 0) {
629
4.63k
        DEBUGLOG(5, "ZSTDMT_serialState_applySequences: uploading %u external sequences", (unsigned)seqStore->size);
630
4.63k
        assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable); (void)serialState;
631
4.63k
        assert(jobCCtx);
632
4.63k
        ZSTD_referenceExternalSequences(jobCCtx, seqStore->seq, seqStore->size);
633
4.63k
    }
634
1.96M
}
635
636
static void ZSTDMT_serialState_ensureFinished(SerialState* serialState,
637
                                              unsigned jobID, size_t cSize)
638
1.96M
{
639
1.96M
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
640
1.96M
    if (serialState->nextJobID <= jobID) {
641
0
        assert(ZSTD_isError(cSize)); (void)cSize;
642
0
        DEBUGLOG(5, "Skipping past job %u because of error", jobID);
643
0
        serialState->nextJobID = jobID + 1;
644
0
        ZSTD_pthread_cond_broadcast(&serialState->cond);
645
646
0
        ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
647
0
        ZSTD_window_clear(&serialState->ldmWindow);
648
0
        ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
649
0
        ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
650
0
    }
651
1.96M
    ZSTD_pthread_mutex_unlock(&serialState->mutex);
652
653
1.96M
}
654
655
656
/* ------------------------------------------ */
657
/* =====          Worker thread         ===== */
658
/* ------------------------------------------ */
659
660
static const Range kNullRange = { NULL, 0 };
661
662
typedef struct {
663
    size_t   consumed;                 /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
664
    size_t   cSize;                    /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
665
    ZSTD_pthread_mutex_t job_mutex;    /* Thread-safe - used by mtctx and worker */
666
    ZSTD_pthread_cond_t job_cond;      /* Thread-safe - used by mtctx and worker */
667
    ZSTDMT_CCtxPool* cctxPool;         /* Thread-safe - used by mtctx and (all) workers */
668
    ZSTDMT_bufferPool* bufPool;        /* Thread-safe - used by mtctx and (all) workers */
669
    ZSTDMT_seqPool* seqPool;           /* Thread-safe - used by mtctx and (all) workers */
670
    SerialState* serial;               /* Thread-safe - used by mtctx and (all) workers */
671
    Buffer dstBuff;                    /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
672
    Range prefix;                      /* set by mtctx, then read by worker & mtctx => no barrier */
673
    Range src;                         /* set by mtctx, then read by worker & mtctx => no barrier */
674
    unsigned jobID;                    /* set by mtctx, then read by worker => no barrier */
675
    unsigned firstJob;                 /* set by mtctx, then read by worker => no barrier */
676
    unsigned lastJob;                  /* set by mtctx, then read by worker => no barrier */
677
    ZSTD_CCtx_params params;           /* set by mtctx, then read by worker => no barrier */
678
    const ZSTD_CDict* cdict;           /* set by mtctx, then read by worker => no barrier */
679
    unsigned long long fullFrameSize;  /* set by mtctx, then read by worker => no barrier */
680
    size_t   dstFlushed;               /* used only by mtctx */
681
    unsigned frameChecksumNeeded;      /* used only by mtctx */
682
} ZSTDMT_jobDescription;
683
684
#define JOB_ERROR(e)                                \
685
0
    do {                                            \
686
0
        ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \
687
0
        job->cSize = e;                             \
688
0
        ZSTD_pthread_mutex_unlock(&job->job_mutex); \
689
0
        goto _endJob;                               \
690
0
    } while (0)
691
692
/* ZSTDMT_compressionJob() is a POOL_function type */
693
static void ZSTDMT_compressionJob(void* jobDescription)
694
1.96M
{
695
1.96M
    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
696
1.96M
    ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
697
1.96M
    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
698
1.96M
    RawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
699
1.96M
    Buffer dstBuff = job->dstBuff;
700
1.96M
    size_t lastCBlockSize = 0;
701
702
1.96M
    DEBUGLOG(5, "ZSTDMT_compressionJob: job %u", job->jobID);
703
    /* resources */
704
1.96M
    if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
705
1.96M
    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
706
1.96M
        dstBuff = ZSTDMT_getBuffer(job->bufPool);
707
1.96M
        if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
708
1.96M
        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
709
1.96M
    }
710
1.96M
    if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL)
711
0
        JOB_ERROR(ERROR(memory_allocation));
712
713
    /* Don't compute the checksum for chunks, since we compute it externally,
714
     * but still write it in the frame header.
715
     */
716
1.96M
    if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
717
    /* Don't run LDM for the chunks, since we handle it externally */
718
1.96M
    jobParams.ldmParams.enableLdm = ZSTD_ps_disable;
719
    /* Correct nbWorkers to 0. */
720
1.96M
    jobParams.nbWorkers = 0;
721
722
723
    /* init */
724
725
    /* Perform serial step as early as possible */
726
1.96M
    ZSTDMT_serialState_genSequences(job->serial, &rawSeqStore, job->src, job->jobID);
727
728
1.96M
    if (job->cdict) {
729
16.1k
        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
730
16.1k
        assert(job->firstJob);  /* only allowed for first job */
731
16.1k
        if (ZSTD_isError(initError)) JOB_ERROR(initError);
732
1.94M
    } else {
733
1.94M
        U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
734
1.94M
        {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
735
1.94M
            if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
736
1.94M
        }
737
1.94M
        if (!job->firstJob) {
738
1.92M
            size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0);
739
1.92M
            if (ZSTD_isError(err)) JOB_ERROR(err);
740
1.92M
        }
741
1.94M
        DEBUGLOG(6, "ZSTDMT_compressionJob: job %u: loading prefix of size %zu", job->jobID, job->prefix.size);
742
1.94M
        {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
743
1.94M
                                        job->prefix.start, job->prefix.size, ZSTD_dct_rawContent,
744
1.94M
                                        ZSTD_dtlm_fast,
745
1.94M
                                        NULL, /*cdict*/
746
1.94M
                                        &jobParams, pledgedSrcSize);
747
1.94M
            if (ZSTD_isError(initError)) JOB_ERROR(initError);
748
1.94M
    }   }
749
750
    /* External Sequences can only be applied after CCtx initialization */
751
1.96M
    ZSTDMT_serialState_applySequences(job->serial, cctx, &rawSeqStore);
752
753
1.96M
    if (!job->firstJob) {  /* flush and overwrite frame header when it's not first job */
754
1.92M
        size_t const hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
755
1.92M
        if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
756
1.92M
        DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
757
1.92M
        ZSTD_invalidateRepCodes(cctx);
758
1.92M
    }
759
760
    /* compress the entire job by smaller chunks, for better granularity */
761
1.96M
    {   size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
762
1.96M
        int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
763
1.96M
        const BYTE* ip = (const BYTE*) job->src.start;
764
1.96M
        BYTE* const ostart = (BYTE*)dstBuff.start;
765
1.96M
        BYTE* op = ostart;
766
1.96M
        BYTE* oend = op + dstBuff.capacity;
767
1.96M
        int chunkNb;
768
1.96M
        if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize);   /* check overflow */
769
1.96M
        DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
770
1.96M
        assert(job->cSize == 0);
771
1.96M
        for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
772
636
            size_t const cSize = ZSTD_compressContinue_public(cctx, op, oend-op, ip, chunkSize);
773
636
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
774
636
            ip += chunkSize;
775
636
            op += cSize; assert(op < oend);
776
            /* stats */
777
636
            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
778
636
            job->cSize += cSize;
779
636
            job->consumed = chunkSize * chunkNb;
780
636
            DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
781
636
                        (U32)cSize, (U32)job->cSize);
782
636
            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */
783
636
            ZSTD_pthread_mutex_unlock(&job->job_mutex);
784
636
        }
785
        /* last block */
786
1.96M
        assert(chunkSize > 0);
787
1.96M
        assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
788
1.96M
        if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
789
1.96M
            size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
790
1.96M
            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
791
1.96M
            size_t const cSize = (job->lastJob) ?
792
25.4k
                 ZSTD_compressEnd_public(cctx, op, oend-op, ip, lastBlockSize) :
793
1.96M
                 ZSTD_compressContinue_public(cctx, op, oend-op, ip, lastBlockSize);
794
1.96M
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
795
1.96M
            lastCBlockSize = cSize;
796
1.96M
    }   }
797
1.96M
    if (!job->firstJob) {
798
        /* Double check that we don't have an ext-dict, because then our
799
         * repcode invalidation doesn't work.
800
         */
801
1.92M
        assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
802
1.92M
    }
803
1.96M
    ZSTD_CCtx_trace(cctx, 0);
804
805
1.96M
_endJob:
806
1.96M
    ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
807
1.96M
    if (job->prefix.size > 0)
808
1.92M
        DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
809
1.96M
    DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
810
    /* release resources */
811
1.96M
    ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
812
1.96M
    ZSTDMT_releaseCCtx(job->cctxPool, cctx);
813
    /* report */
814
1.96M
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
815
1.96M
    if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
816
1.96M
    job->cSize += lastCBlockSize;
817
1.96M
    job->consumed = job->src.size;  /* when job->consumed == job->src.size , compression job is presumed completed */
818
1.96M
    ZSTD_pthread_cond_signal(&job->job_cond);
819
1.96M
    ZSTD_pthread_mutex_unlock(&job->job_mutex);
820
1.96M
}
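Inside the chunk loop above, the last-block computation relies on chunkSize being a power of two, so `job->src.size & (chunkSize-1)` yields the remainder; an exact non-empty multiple still emits one full chunk as the last block. A worked check (sizes assumed for illustration):

static void lastBlockSizeExample(void)
{
    size_t const chunkSize = 4 * ZSTD_BLOCKSIZE_MAX;     /* 512 KB, power of 2 */
    {   size_t const srcSize = chunkSize + 1234;         /* not a multiple */
        assert((srcSize & (chunkSize-1)) == 1234);       /* remainder is the last block */
    }
    {   size_t const srcSize = 3 * chunkSize;            /* exact multiple */
        size_t const r = srcSize & (chunkSize-1);
        size_t const lastBlockSize = ((r==0) & (srcSize>=chunkSize)) ? chunkSize : r;
        assert(lastBlockSize == chunkSize);              /* full chunk, flagged last */
    }
}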
821
822
823
/* ------------------------------------------ */
824
/* =====   Multi-threaded compression   ===== */
825
/* ------------------------------------------ */
826
827
typedef struct {
828
    Range prefix;         /* read-only non-owned prefix buffer */
829
    Buffer buffer;
830
    size_t filled;
831
} InBuff_t;
832
833
typedef struct {
834
  BYTE* buffer;     /* The round input buffer. All jobs get references
835
                     * to pieces of the buffer. ZSTDMT_tryGetInputRange()
836
                     * handles handing out job input buffers, and makes
837
                     * sure it doesn't overlap with any pieces still in use.
838
                     */
839
  size_t capacity;  /* The capacity of buffer. */
840
  size_t pos;       /* The position of the current inBuff in the round
841
                     * buffer. Updated past the end of the inBuff once
842
                     * the inBuff is sent to the worker thread.
843
                     * pos <= capacity.
844
                     */
845
} RoundBuff_t;
846
847
static const RoundBuff_t kNullRoundBuff = {NULL, 0, 0};
848
849
625M
#define RSYNC_LENGTH 32
850
/* Don't create chunks smaller than the zstd block size.
851
 * This stops us from regressing compression ratio too much,
852
 * and ensures our output fits in ZSTD_compressBound().
853
 *
854
 * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then
855
 * ZSTD_COMPRESSBOUND() will need to be updated.
856
 */
857
1.30M
#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX
858
1.30M
#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG)
859
860
typedef struct {
861
  U64 hash;
862
  U64 hitMask;
863
  U64 primePower;
864
} RSyncState_t;
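The rsync state above drives content-defined job boundaries: a rolling hash over the last RSYNC_LENGTH bytes is maintained per input byte, and a boundary is declared whenever `(hash & hitMask) == hitMask` (with k bits set in hitMask, this fires roughly once every 2^k bytes). A minimal sketch of the hit test; note the byte-update rule below is a simplified placeholder, not the Rabin-Karp update this file actually performs via primePower:

typedef struct { U64 hash; U64 hitMask; } MiniRsync;   /* hypothetical */

static int rsyncIsBoundary(const MiniRsync* rs)
{
    return (rs->hash & rs->hitMask) == rs->hitMask;    /* same test as this file */
}

static void rsyncScanExample(MiniRsync* rs, const BYTE* src, size_t size)
{
    size_t n;
    for (n = 0; n < size; n++) {
        rs->hash = (rs->hash << 1) + src[n];   /* placeholder update, not zstd's */
        if (rsyncIsBoundary(rs)) {
            /* a real implementation would end the current job here */
        }
    }
}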
865
866
struct ZSTDMT_CCtx_s {
867
    POOL_ctx* factory;
868
    ZSTDMT_jobDescription* jobs;
869
    ZSTDMT_bufferPool* bufPool;
870
    ZSTDMT_CCtxPool* cctxPool;
871
    ZSTDMT_seqPool* seqPool;
872
    ZSTD_CCtx_params params;
873
    size_t targetSectionSize;
874
    size_t targetPrefixSize;
875
    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
876
    InBuff_t inBuff;
877
    RoundBuff_t roundBuff;
878
    SerialState serial;
879
    RSyncState_t rsync;
880
    unsigned jobIDMask;
881
    unsigned doneJobID;
882
    unsigned nextJobID;
883
    unsigned frameEnded;
884
    unsigned allJobsCompleted;
885
    unsigned long long frameContentSize;
886
    unsigned long long consumed;
887
    unsigned long long produced;
888
    ZSTD_customMem cMem;
889
    ZSTD_CDict* cdictLocal;
890
    const ZSTD_CDict* cdict;
891
    unsigned providedFactory: 1;
892
};
893
894
static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
895
18.4k
{
896
18.4k
    U32 jobNb;
897
18.4k
    if (jobTable == NULL) return;
898
121k
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
899
102k
        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
900
102k
        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
901
102k
    }
902
18.4k
    ZSTD_customFree(jobTable, cMem);
903
18.4k
}
904
905
/* ZSTDMT_createJobsTable()
906
 * allocate and init a job table.
907
 * update *nbJobsPtr to the next power of 2 value, as the size of the table */
908
static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
909
18.4k
{
910
18.4k
    U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
911
18.4k
    U32 const nbJobs = 1 << nbJobsLog2;
912
18.4k
    U32 jobNb;
913
18.4k
    ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
914
18.4k
                ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
915
18.4k
    int initError = 0;
916
18.4k
    if (jobTable==NULL) return NULL;
917
18.4k
    *nbJobsPtr = nbJobs;
918
121k
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
919
102k
        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
920
102k
        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
921
102k
    }
922
18.4k
    if (initError != 0) {
923
0
        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
924
0
        return NULL;
925
0
    }
926
18.4k
    return jobTable;
927
18.4k
}
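Because the table size is rounded up to a power of two, a job slot is located with a mask rather than a modulo: `mtctx->jobs[jobID & jobIDMask]`. A worked example (requested size assumed):

static void jobRingExample(void)
{
    /* requesting 6 jobs: highbit32(6)+1 = 3, so the table is rounded up to 8 */
    U32 const tableSize = 8;
    U32 const jobIDMask = tableSize - 1;   /* 0x7 */
    assert((9u  & jobIDMask) == 1);        /* jobID 9 reuses slot 1 */
    assert((16u & jobIDMask) == 0);        /* jobID 16 wraps to slot 0 */
}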
928
929
512
static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
930
512
    U32 nbJobs = nbWorkers + 2;
931
512
    if (nbJobs > mtctx->jobIDMask+1) {  /* need more job capacity */
932
0
        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
933
0
        mtctx->jobIDMask = 0;
934
0
        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
935
0
        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
936
0
        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));  /* ensure nbJobs is a power of 2 */
937
0
        mtctx->jobIDMask = nbJobs - 1;
938
0
    }
939
512
    return 0;
940
512
}
941
942
943
/* ZSTDMT_CCtxParam_setNbWorkers():
944
 * Internal use only */
945
static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
946
18.9k
{
947
18.9k
    return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
948
18.9k
}
949
950
MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
951
18.4k
{
952
18.4k
    ZSTDMT_CCtx* mtctx;
953
18.4k
    U32 nbJobs = nbWorkers + 2;
954
18.4k
    int initError;
955
18.4k
    DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);
956
957
18.4k
    if (nbWorkers < 1) return NULL;
958
18.4k
    nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
959
18.4k
    if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
960
        /* invalid custom allocator */
961
0
        return NULL;
962
963
18.4k
    mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem);
964
18.4k
    if (!mtctx) return NULL;
965
18.4k
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
966
18.4k
    mtctx->cMem = cMem;
967
18.4k
    mtctx->allJobsCompleted = 1;
968
18.4k
    if (pool != NULL) {
969
0
      mtctx->factory = pool;
970
0
      mtctx->providedFactory = 1;
971
0
    }
972
18.4k
    else {
973
18.4k
      mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
974
18.4k
      mtctx->providedFactory = 0;
975
18.4k
    }
976
18.4k
    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
977
18.4k
    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);  /* ensure nbJobs is a power of 2 */
978
18.4k
    mtctx->jobIDMask = nbJobs - 1;
979
18.4k
    mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
980
18.4k
    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
981
18.4k
    mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
982
18.4k
    initError = ZSTDMT_serialState_init(&mtctx->serial);
983
18.4k
    mtctx->roundBuff = kNullRoundBuff;
984
18.4k
    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
985
0
        ZSTDMT_freeCCtx(mtctx);
986
0
        return NULL;
987
0
    }
988
18.4k
    DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
989
18.4k
    return mtctx;
990
18.4k
}
991
992
ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
993
18.4k
{
994
18.4k
#ifdef ZSTD_MULTITHREAD
995
18.4k
    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool);
996
#else
997
    (void)nbWorkers;
998
    (void)cMem;
999
    (void)pool;
1000
    return NULL;
1001
#endif
1002
18.4k
}
1003
1004
1005
/* ZSTDMT_releaseAllJobResources() :
1006
 * note : ensure all workers are killed first ! */
1007
static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
1008
18.4k
{
1009
18.4k
    unsigned jobID;
1010
18.4k
    DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
1011
18.4k
    if (mtctx->jobs) {
1012
121k
        for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
1013
            /* Copy the mutex/cond out */
1014
102k
            ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
1015
102k
            ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;
1016
            
1017
102k
            DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
1018
102k
            ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
1019
            
1020
            /* Clear the job description, but keep the mutex/cond */
1021
102k
            ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
1022
102k
            mtctx->jobs[jobID].job_mutex = mutex;
1023
102k
            mtctx->jobs[jobID].job_cond = cond;
1024
102k
        }
1025
18.4k
    }
1026
18.4k
    mtctx->inBuff.buffer = g_nullBuffer;
1027
18.4k
    mtctx->inBuff.filled = 0;
1028
18.4k
    mtctx->allJobsCompleted = 1;
1029
18.4k
}
1030
1031
static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
1032
0
{
1033
0
    DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
1034
0
    while (mtctx->doneJobID < mtctx->nextJobID) {
1035
0
        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
1036
0
        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
1037
0
        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
1038
0
            DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
1039
0
            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
1040
0
        }
1041
0
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
1042
0
        mtctx->doneJobID++;
1043
0
    }
1044
0
}
1045
1046
size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
1047
288k
{
1048
288k
    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
1049
18.4k
    if (!mtctx->providedFactory)
1050
18.4k
        POOL_free(mtctx->factory);   /* stop and free worker threads */
1051
18.4k
    ZSTDMT_releaseAllJobResources(mtctx);  /* release job resources into pools first */
1052
18.4k
    ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
1053
18.4k
    ZSTDMT_freeBufferPool(mtctx->bufPool);
1054
18.4k
    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
1055
18.4k
    ZSTDMT_freeSeqPool(mtctx->seqPool);
1056
18.4k
    ZSTDMT_serialState_free(&mtctx->serial);
1057
18.4k
    ZSTD_freeCDict(mtctx->cdictLocal);
1058
18.4k
    if (mtctx->roundBuff.buffer)
1059
18.4k
        ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
1060
18.4k
    ZSTD_customFree(mtctx, mtctx->cMem);
1061
18.4k
    return 0;
1062
288k
}
1063
1064
size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
1065
0
{
1066
0
    if (mtctx == NULL) return 0;   /* supports sizeof NULL */
1067
0
    return sizeof(*mtctx)
1068
0
            + POOL_sizeof(mtctx->factory)
1069
0
            + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
1070
0
            + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
1071
0
            + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
1072
0
            + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
1073
0
            + ZSTD_sizeof_CDict(mtctx->cdictLocal)
1074
0
            + mtctx->roundBuff.capacity;
1075
0
}
1076
1077
1078
/* ZSTDMT_resize() :
1079
 * @return : an error code if it fails, 0 on success */
1080
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
1081
512
{
1082
512
    if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
1083
512
    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , "");
1084
512
    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers));
1085
512
    if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
1086
512
    mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
1087
512
    if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
1088
512
    mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
1089
512
    if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
1090
512
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
1091
512
    return 0;
1092
512
}
1093
1094
1095
/*! ZSTDMT_updateCParams_whileCompressing() :
1096
 *  Updates a selected set of compression parameters, remaining compatible with currently active frame.
1097
 *  New parameters will be applied to next compression job. */
1098
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
1099
0
{
1100
0
    U32 const saved_wlog = mtctx->params.cParams.windowLog;   /* Do not modify windowLog while compressing */
1101
0
    int const compressionLevel = cctxParams->compressionLevel;
1102
0
    DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
1103
0
                compressionLevel);
1104
0
    mtctx->params.compressionLevel = compressionLevel;
1105
0
    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
1106
0
        cParams.windowLog = saved_wlog;
1107
0
        mtctx->params.cParams = cParams;
1108
0
    }
1109
0
}
1110
1111
/* ZSTDMT_getFrameProgression():
1112
 * tells how much data has been consumed (input) and produced (output) for current frame.
1113
 * able to count progression inside worker threads.
1114
 * Note : mutex will be acquired during statistics collection inside workers. */
1115
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
1116
0
{
1117
0
    ZSTD_frameProgression fps;
1118
0
    DEBUGLOG(5, "ZSTDMT_getFrameProgression");
1119
0
    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
1120
0
    fps.consumed = mtctx->consumed;
1121
0
    fps.produced = fps.flushed = mtctx->produced;
1122
0
    fps.currentJobID = mtctx->nextJobID;
1123
0
    fps.nbActiveWorkers = 0;
1124
0
    {   unsigned jobNb;
1125
0
        unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
1126
0
        DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
1127
0
                    mtctx->doneJobID, lastJobNb, mtctx->jobReady);
1128
0
        for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
1129
0
            unsigned const wJobID = jobNb & mtctx->jobIDMask;
1130
0
            ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
1131
0
            ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
1132
0
            {   size_t const cResult = jobPtr->cSize;
1133
0
                size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
1134
0
                size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
1135
0
                assert(flushed <= produced);
1136
0
                fps.ingested += jobPtr->src.size;
1137
0
                fps.consumed += jobPtr->consumed;
1138
0
                fps.produced += produced;
1139
0
                fps.flushed  += flushed;
1140
0
                fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
1141
0
            }
1142
0
            ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
1143
0
        }
1144
0
    }
1145
0
    return fps;
1146
0
}
1147
1148
1149
size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
1150
0
{
1151
0
    size_t toFlush;
1152
0
    unsigned const jobID = mtctx->doneJobID;
1153
0
    assert(jobID <= mtctx->nextJobID);
1154
0
    if (jobID == mtctx->nextJobID) return 0;   /* no active job => nothing to flush */
1155
1156
    /* look into oldest non-fully-flushed job */
1157
0
    {   unsigned const wJobID = jobID & mtctx->jobIDMask;
1158
0
        ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
1159
0
        ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
1160
0
        {   size_t const cResult = jobPtr->cSize;
1161
0
            size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
1162
0
            size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
1163
0
            assert(flushed <= produced);
1164
0
            assert(jobPtr->consumed <= jobPtr->src.size);
1165
0
            toFlush = produced - flushed;
1166
            /* if toFlush==0, nothing is available to flush.
1167
             * However, jobID is expected to still be active:
1168
             * if jobID was already completed and fully flushed,
1169
             * ZSTDMT_flushProduced() should have already moved on to the next job.
1170
             * Therefore, some input has not yet been consumed. */
1171
0
            if (toFlush==0) {
1172
0
                assert(jobPtr->consumed < jobPtr->src.size);
1173
0
            }
1174
0
        }
1175
0
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
1176
0
    }
1177
1178
0
    return toFlush;
1179
0
}
1180
1181
1182
/* ------------------------------------------ */
1183
/* =====   Multi-threaded compression   ===== */
1184
/* ------------------------------------------ */
1185
1186
static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
1187
49.9k
{
1188
49.9k
    unsigned jobLog;
1189
49.9k
    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
1190
        /* In Long Range Mode, the windowLog is typically oversized.
1191
         * In that case, it's preferable to determine the jobSize
1192
         * based on cycleLog instead. */
1193
26.9k
        jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3);
1194
26.9k
    } else {
1195
22.9k
        jobLog = MAX(20, params->cParams.windowLog + 2);
1196
22.9k
    }
1197
49.9k
    return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
1198
49.9k
}
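As a worked example (illustrative figures): with LDM disabled and windowLog = 23, jobLog = MAX(20, 23 + 2) = 25, i.e. a 32 MB target job size, subject to the final ZSTDMT_JOBLOG_MAX cap.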
1199
1200
static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
1201
36.4k
{
1202
36.4k
    switch(strat)
1203
36.4k
    {
1204
3.25k
        case ZSTD_btultra2:
1205
3.25k
            return 9;
1206
1.65k
        case ZSTD_btultra:
1207
4.56k
        case ZSTD_btopt:
1208
4.56k
            return 8;
1209
1.58k
        case ZSTD_btlazy2:
1210
7.57k
        case ZSTD_lazy2:
1211
7.57k
            return 7;
1212
5.60k
        case ZSTD_lazy:
1213
8.52k
        case ZSTD_greedy:
1214
14.6k
        case ZSTD_dfast:
1215
21.0k
        case ZSTD_fast:
1216
21.0k
        default:;
1217
36.4k
    }
1218
21.0k
    return 6;
1219
36.4k
}
1220
1221
static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
1222
36.4k
{
1223
36.4k
    assert(0 <= ovlog && ovlog <= 9);
1224
36.4k
    if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
1225
0
    return ovlog;
1226
36.4k
}
1227
1228
static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
1229
36.4k
{
1230
36.4k
    int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
1231
36.4k
    int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
1232
36.4k
    assert(0 <= overlapRLog && overlapRLog <= 8);
1233
36.4k
    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
1234
        /* In Long Range Mode, the windowLog is typically oversized.
1235
         * In that case, it's preferable to determine the jobSize
1236
         * based on chainLog instead.
1237
         * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
1238
13.4k
        ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
1239
13.4k
                - overlapRLog;
1240
13.4k
    }
1241
36.4k
    assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
1242
36.4k
    DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
1243
36.4k
    DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
1244
36.4k
    return (ovLog==0) ? 0 : (size_t)1 << ovLog;
1245
36.4k
}
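As a worked example (illustrative figures): with overlapLog = 0 and strategy ZSTD_lazy2, the default overlapLog resolves to 7, so overlapRLog = 9 - 7 = 2; with windowLog = 24 and LDM disabled, ovLog = 24 - 2 = 22, i.e. a 4 MB overlap, one quarter of the 16 MB window. In general (LDM off), the overlap is windowSize >> overlapRLog.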
1246
1247
/* ====================================== */
1248
/* =======      Streaming API     ======= */
1249
/* ====================================== */
1250
1251
size_t ZSTDMT_initCStream_internal(
1252
        ZSTDMT_CCtx* mtctx,
1253
        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
1254
        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
1255
        unsigned long long pledgedSrcSize)
1256
36.4k
{
1257
36.4k
    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
1258
36.4k
                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
1259
1260
    /* params are supposed to be fully validated at this point */
1261
36.4k
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
1262
36.4k
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
1263
1264
    /* init */
1265
36.4k
    if (params.nbWorkers != mtctx->params.nbWorkers)
1266
512
        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, (unsigned)params.nbWorkers) , "");
1267
1268
36.4k
    if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
1269
36.4k
    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
1270
1271
36.4k
    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
1272
0
        ZSTDMT_waitForAllJobsCompleted(mtctx);
1273
0
        ZSTDMT_releaseAllJobResources(mtctx); /* Will set allJobsCompleted to 1 */
1274
0
    }
1275
1276
36.4k
    mtctx->params = params;
1277
36.4k
    mtctx->frameContentSize = pledgedSrcSize;
1278
36.4k
    ZSTD_freeCDict(mtctx->cdictLocal);
1279
36.4k
    if (dict) {
1280
3.36k
        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
1281
3.36k
                                                    ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
1282
3.36k
                                                    params.cParams, mtctx->cMem);
1283
3.36k
        mtctx->cdict = mtctx->cdictLocal;
1284
3.36k
        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
1285
33.0k
    } else {
1286
33.0k
        mtctx->cdictLocal = NULL;
1287
33.0k
        mtctx->cdict = cdict;
1288
33.0k
    }
1289
1290
36.4k
    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
1291
36.4k
    DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
1292
36.4k
    mtctx->targetSectionSize = params.jobSize;
1293
36.4k
    if (mtctx->targetSectionSize == 0) {
1294
36.4k
        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
1295
36.4k
    }
1296
36.4k
    assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
1297
1298
36.4k
    if (params.rsyncable) {
1299
        /* Aim for the targetSectionSize as the average job size. */
1300
15.2k
        U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
1301
15.2k
        U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
1302
        /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our
1303
         * expected job size is at least 4x larger. */
1304
0
        assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2);
1305
15.2k
        DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
1306
15.2k
        mtctx->rsync.hash = 0;
1307
15.2k
        mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
1308
15.2k
        mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
1309
15.2k
    }
1310
36.4k
    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;  /* job size must be >= overlap size */
1311
36.4k
    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
1312
36.4k
    DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
1313
36.4k
    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
1314
36.4k
    {
1315
        /* If ldm is enabled we need windowSize space. */
1316
36.4k
        size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0;
1317
        /* Two buffers of slack, plus extra space for the overlap.
1318
         * This is the minimum slack LDM works with: one extra buffer because
1319
         * flushing might waste up to targetSectionSize-1 bytes, another extra
1320
         * for the overlap (if > 0), and one buffer to fill that doesn't overlap
1321
         * with the LDM window.
1322
         */
1323
36.4k
        size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
1324
36.4k
        size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
1325
        /* Compute the total size, and always have enough slack */
1326
36.4k
        size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
1327
36.4k
        size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
1328
36.4k
        size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
1329
36.4k
        if (mtctx->roundBuff.capacity < capacity) {
1330
18.7k
            if (mtctx->roundBuff.buffer)
1331
285
                ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
1332
18.7k
            mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem);
1333
18.7k
            if (mtctx->roundBuff.buffer == NULL) {
1334
0
                mtctx->roundBuff.capacity = 0;
1335
0
                return ERROR(memory_allocation);
1336
0
            }
1337
18.7k
            mtctx->roundBuff.capacity = capacity;
1338
18.7k
        }
1339
36.4k
    }
1340
36.4k
    DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
1341
36.4k
    mtctx->roundBuff.pos = 0;
1342
36.4k
    mtctx->inBuff.buffer = g_nullBuffer;
1343
36.4k
    mtctx->inBuff.filled = 0;
1344
36.4k
    mtctx->inBuff.prefix = kNullRange;
1345
36.4k
    mtctx->doneJobID = 0;
1346
36.4k
    mtctx->nextJobID = 0;
1347
36.4k
    mtctx->frameEnded = 0;
1348
36.4k
    mtctx->allJobsCompleted = 0;
1349
36.4k
    mtctx->consumed = 0;
1350
36.4k
    mtctx->produced = 0;
1351
1352
    /* update dictionary */
1353
36.4k
    ZSTD_freeCDict(mtctx->cdictLocal);
1354
36.4k
    mtctx->cdictLocal = NULL;
1355
36.4k
    mtctx->cdict = NULL;
1356
36.4k
    if (dict) {
1357
3.36k
        if (dictContentType == ZSTD_dct_rawContent) {
1358
1.47k
            mtctx->inBuff.prefix.start = (const BYTE*)dict;
1359
1.47k
            mtctx->inBuff.prefix.size = dictSize;
1360
1.88k
        } else {
1361
            /* note : a loadPrefix becomes an internal CDict */
1362
1.88k
            mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
1363
1.88k
                                                        ZSTD_dlm_byRef, dictContentType,
1364
1.88k
                                                        params.cParams, mtctx->cMem);
1365
1.88k
            mtctx->cdict = mtctx->cdictLocal;
1366
1.88k
            if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
1367
1.88k
        }
1368
33.0k
    } else {
1369
33.0k
        mtctx->cdict = cdict;
1370
33.0k
    }
1371
1372
36.4k
    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize,
1373
36.4k
                                 dict, dictSize, dictContentType))
1374
0
        return ERROR(memory_allocation);
1375
1376
1377
36.4k
    return 0;
1378
36.4k
}
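This internal entry point is normally reached through the public API: setting ZSTD_c_nbWorkers >= 1 routes ZSTD_compressStream2() through the MT path on the first call of a frame. A minimal configuration sketch; the worker count and sizes below are illustrative, not recommendations:

    #include <zstd.h>

    static size_t setupMT(ZSTD_CCtx* cctx)
    {
        size_t ret = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);   /* enable multithreading */
        if (ZSTD_isError(ret)) return ret;
        ret = ZSTD_CCtx_setParameter(cctx, ZSTD_c_jobSize, 8 << 20);      /* feeds targetSectionSize */
        if (ZSTD_isError(ret)) return ret;
        return ZSTD_CCtx_setParameter(cctx, ZSTD_c_overlapLog, 6);        /* feeds targetPrefixSize */
    }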
1379
1380
1381
/* ZSTDMT_writeLastEmptyBlock()
1382
 * Write a single empty block with an end-of-frame marker to finish the frame.
1383
 * The job must be created by the streaming variant.
1384
 * This function always succeeds when its preconditions are fulfilled.
1385
 */
1386
static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
1387
10.9k
{
1388
10.9k
    assert(job->lastJob == 1);
1389
10.9k
    assert(job->src.size == 0);   /* last job is empty -> will be simplified into a last empty block */
1390
10.9k
    assert(job->firstJob == 0);   /* cannot be first job, as it also needs to create frame header */
1391
10.9k
    assert(job->dstBuff.start == NULL);   /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
1392
10.9k
    job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
1393
10.9k
    if (job->dstBuff.start == NULL) {
1394
0
      job->cSize = ERROR(memory_allocation);
1395
0
      return;
1396
0
    }
1397
10.9k
    assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize);   /* no buffer should ever be that small */
1398
10.9k
    job->src = kNullRange;
1399
10.9k
    job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
1400
10.9k
    assert(!ZSTD_isError(job->cSize));
1401
10.9k
    assert(job->consumed == 0);
1402
10.9k
}
1403
1404
static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
1405
2.45M
{
1406
2.45M
    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
1407
2.45M
    int const endFrame = (endOp == ZSTD_e_end);
1408
1409
2.45M
    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
1410
0
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
1411
0
        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
1412
0
        return 0;
1413
0
    }
1414
1415
2.45M
    if (!mtctx->jobReady) {
1416
1.97M
        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
1417
1.97M
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
1418
1.97M
                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
1419
1.97M
        mtctx->jobs[jobID].src.start = src;
1420
1.97M
        mtctx->jobs[jobID].src.size = srcSize;
1421
1.97M
        assert(mtctx->inBuff.filled >= srcSize);
1422
1.97M
        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
1423
1.97M
        mtctx->jobs[jobID].consumed = 0;
1424
1.97M
        mtctx->jobs[jobID].cSize = 0;
1425
1.97M
        mtctx->jobs[jobID].params = mtctx->params;
1426
1.97M
        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
1427
1.97M
        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
1428
1.97M
        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
1429
1.97M
        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
1430
1.97M
        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
1431
1.97M
        mtctx->jobs[jobID].seqPool = mtctx->seqPool;
1432
1.97M
        mtctx->jobs[jobID].serial = &mtctx->serial;
1433
1.97M
        mtctx->jobs[jobID].jobID = mtctx->nextJobID;
1434
1.97M
        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
1435
1.97M
        mtctx->jobs[jobID].lastJob = endFrame;
1436
1.97M
        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
1437
1.97M
        mtctx->jobs[jobID].dstFlushed = 0;
1438
1439
        /* Update the round buffer pos and clear the input buffer to be reset */
1440
1.97M
        mtctx->roundBuff.pos += srcSize;
1441
1.97M
        mtctx->inBuff.buffer = g_nullBuffer;
1442
1.97M
        mtctx->inBuff.filled = 0;
1443
        /* Set the prefix for next job */
1444
1.97M
        if (!endFrame) {
1445
1.93M
            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
1446
1.93M
            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
1447
1.93M
            mtctx->inBuff.prefix.size = newPrefixSize;
1448
1.93M
        } else {   /* endFrame==1 => no need for another input buffer */
1449
36.4k
            mtctx->inBuff.prefix = kNullRange;
1450
36.4k
            mtctx->frameEnded = endFrame;
1451
36.4k
            if (mtctx->nextJobID == 0) {
1452
                /* single job exception : checksum is already calculated directly within worker thread */
1453
4.84k
                mtctx->params.fParams.checksumFlag = 0;
1454
4.84k
        }   }
1455
1456
1.97M
        if ( (srcSize == 0)
1457
1.97M
          && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
1458
10.9k
            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
1459
10.9k
            assert(endOp == ZSTD_e_end);  /* only possible case : need to end the frame with an empty last block */
1460
10.9k
            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
1461
10.9k
            mtctx->nextJobID++;
1462
10.9k
            return 0;
1463
10.9k
        }
1464
1.97M
    }
1465
1466
2.44M
    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u, jobNb == %u (mod:%u))",
1467
2.44M
                mtctx->nextJobID,
1468
2.44M
                (U32)mtctx->jobs[jobID].src.size,
1469
2.44M
                mtctx->jobs[jobID].lastJob,
1470
2.44M
                mtctx->nextJobID,
1471
2.44M
                jobID);
1472
2.44M
    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
1473
1.96M
        mtctx->nextJobID++;
1474
1.96M
        mtctx->jobReady = 0;
1475
1.96M
    } else {
1476
479k
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
1477
479k
        mtctx->jobReady = 1;
1478
479k
    }
1479
2.44M
    return 0;
1480
2.45M
}
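The job table indexing above relies on a power-of-two ring: jobIDMask = nbJobs - 1, a slot is jobID & jobIDMask, and the table is full once nextJobID outruns doneJobID by more than the mask. A standalone sketch of that invariant, with hypothetical names rather than the zstdmt structures:

    /* mask = nbJobs - 1, where nbJobs is a power of 2 */
    typedef struct { unsigned doneID; unsigned nextID; unsigned mask; } JobRing;

    static unsigned ringSlot(const JobRing* r, unsigned id) { return id & r->mask; }
    static int ringFull(const JobRing* r) { return r->nextID > r->doneID + r->mask; }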
1481
1482
1483
/*! ZSTDMT_flushProduced() :
1484
 *  flush whatever data has been produced but not yet flushed in the current job.
1485
 *  move to the next job if the current one is fully flushed.
1486
 * `output` : `pos` will be updated with the amount of data flushed.
1487
 * `blockToFlush` : if >0, the function will block and wait when there is no data available to flush.
1488
 * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
1489
static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
1490
13.3M
{
1491
13.3M
    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
1492
13.3M
    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
1493
13.3M
                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
1494
13.3M
    assert(output->size >= output->pos);
1495
1496
13.3M
    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
1497
13.3M
    if (  blockToFlush
1498
13.3M
      && (mtctx->doneJobID < mtctx->nextJobID) ) {
1499
11.3M
        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
1500
13.2M
        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {  /* nothing to flush */
1501
1.96M
            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
1502
0
                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
1503
0
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
1504
0
                break;
1505
0
            }
1506
1.96M
            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
1507
1.96M
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
1508
1.96M
            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);  /* block when nothing to flush but some to come */
1509
1.96M
    }   }
1510
1511
    /* try to flush something */
1512
13.3M
    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
1513
13.3M
        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
1514
13.3M
        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only, could be done after mutex lock, but no-declaration-after-statement */
1515
13.3M
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
1516
13.3M
        if (ZSTD_isError(cSize)) {
1517
0
            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
1518
0
                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
1519
0
            ZSTDMT_waitForAllJobsCompleted(mtctx);
1520
0
            ZSTDMT_releaseAllJobResources(mtctx);
1521
0
            return cSize;
1522
0
        }
1523
        /* add frame checksum if necessary (can only happen once) */
1524
13.3M
        assert(srcConsumed <= srcSize);
1525
13.3M
        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
1526
13.3M
          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
1527
7.19k
            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
1528
7.19k
            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
1529
7.19k
            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
1530
7.19k
            cSize += 4;
1531
7.19k
            mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as worker is no longer active */
1532
7.19k
            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
1533
7.19k
        }
1534
1535
13.3M
        if (cSize > 0) {   /* compression is ongoing or completed */
1536
11.3M
            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
1537
11.3M
            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
1538
11.3M
                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
1539
11.3M
            assert(mtctx->doneJobID < mtctx->nextJobID);
1540
11.3M
            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
1541
11.3M
            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
1542
11.3M
            if (toFlush > 0) {
1543
11.3M
                ZSTD_memcpy((char*)output->dst + output->pos,
1544
11.3M
                    (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
1545
11.3M
                    toFlush);
1546
11.3M
            }
1547
11.3M
            output->pos += toFlush;
1548
11.3M
            mtctx->jobs[wJobID].dstFlushed += toFlush;  /* can write : this value is only used by mtctx */
1549
1550
11.3M
            if ( (srcConsumed == srcSize)    /* job is completed */
1551
11.3M
              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
1552
1.97M
                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
1553
1.97M
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
1554
1.97M
                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
1555
1.97M
                DEBUGLOG(5, "dstBuffer released");
1556
1.97M
                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
1557
1.97M
                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
1558
1.97M
                mtctx->consumed += srcSize;
1559
1.97M
                mtctx->produced += cSize;
1560
1.97M
                mtctx->doneJobID++;
1561
1.97M
        }   }
1562
1563
        /* return value : how many bytes are left in the buffer ; faked to 1 when unknown but >0 */
1564
13.3M
        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
1565
4.02M
        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
1566
4.02M
    }
1567
2.03M
    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */
1568
2.03M
    if (mtctx->jobReady) return 1;      /* one job is ready to push, just not yet in the list */
1569
2.03M
    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */
1570
1.97M
    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one is last one, frame is completed */
1571
1.97M
    if (end == ZSTD_e_end) return !mtctx->frameEnded;  /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
1572
1.94M
    return 0;   /* internal buffers fully flushed */
1573
1.97M
}
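The return convention (bytes still buffered, with 1 standing in for "unknown but > 0") surfaces through the public streaming API; a caller typically drains it with a loop like the following minimal sketch, where `cctx` is assumed to be mid-frame and error handling is abbreviated:

    #include <zstd.h>

    /* Keep flushing until the remaining-bytes hint reaches 0. */
    static size_t drainAll(ZSTD_CCtx* cctx, ZSTD_outBuffer* out)
    {
        size_t remaining;
        do {
            ZSTD_inBuffer noInput = { NULL, 0, 0 };
            remaining = ZSTD_compressStream2(cctx, out, &noInput, ZSTD_e_flush);
            if (ZSTD_isError(remaining)) return remaining;
            if (out->pos == out->size) break;   /* output full: caller must empty it first */
        } while (remaining != 0);
        return remaining;
    }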
1574
1575
/**
1576
 * Returns the range of data used by the earliest job that is not yet complete.
1577
 * If the data of the first job is broken up into two segments, we cover both
1578
 * sections.
1579
 */
1580
static Range ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
1581
1.96M
{
1582
1.96M
    unsigned const firstJobID = mtctx->doneJobID;
1583
1.96M
    unsigned const lastJobID = mtctx->nextJobID;
1584
1.96M
    unsigned jobID;
1585
1586
    /* no need to check during first round */
1587
1.96M
    size_t roundBuffCapacity = mtctx->roundBuff.capacity;
1588
1.96M
    size_t nbJobs1stRoundMin = roundBuffCapacity / mtctx->targetSectionSize;
1589
1.96M
    if (lastJobID < nbJobs1stRoundMin) return kNullRange;
1590
1591
1.85M
    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
1592
61
        unsigned const wJobID = jobID & mtctx->jobIDMask;
1593
61
        size_t consumed;
1594
1595
61
        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
1596
61
        consumed = mtctx->jobs[wJobID].consumed;
1597
61
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
1598
1599
61
        if (consumed < mtctx->jobs[wJobID].src.size) {
1600
53
            Range range = mtctx->jobs[wJobID].prefix;
1601
53
            if (range.size == 0) {
1602
                /* Empty prefix */
1603
0
                range = mtctx->jobs[wJobID].src;
1604
0
            }
1605
            /* Job source in multiple segments not supported yet */
1606
53
            assert(range.start <= mtctx->jobs[wJobID].src.start);
1607
53
            return range;
1608
53
        }
1609
61
    }
1610
1.85M
    return kNullRange;
1611
1.85M
}
1612
1613
/**
1614
 * Returns non-zero iff buffer and range overlap.
1615
 */
1616
static int ZSTDMT_isOverlapped(Buffer buffer, Range range)
1617
6.01M
{
1618
6.01M
    BYTE const* const bufferStart = (BYTE const*)buffer.start;
1619
6.01M
    BYTE const* const rangeStart = (BYTE const*)range.start;
1620
1621
6.01M
    if (rangeStart == NULL || bufferStart == NULL)
1622
1.99M
        return 0;
1623
1624
4.01M
    {
1625
4.01M
        BYTE const* const bufferEnd = bufferStart + buffer.capacity;
1626
4.01M
        BYTE const* const rangeEnd = rangeStart + range.size;
1627
1628
        /* Empty ranges cannot overlap */
1629
4.01M
        if (bufferStart == bufferEnd || rangeStart == rangeEnd)
1630
1.04M
            return 0;
1631
1632
2.96M
        return bufferStart < rangeEnd && rangeStart < bufferEnd;
1633
4.01M
    }
1634
4.01M
}
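For example, the half-open intervals [0, 10) and [5, 15) overlap since 0 < 15 and 5 < 10, whereas [0, 10) and [10, 20) do not, because rangeStart < bufferEnd fails (10 < 10).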
1635
1636
static int ZSTDMT_doesOverlapWindow(Buffer buffer, ZSTD_window_t window)
1637
1.04M
{
1638
1.04M
    Range extDict;
1639
1.04M
    Range prefix;
1640
1641
1.04M
    DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
1642
1.04M
    extDict.start = window.dictBase + window.lowLimit;
1643
1.04M
    extDict.size = window.dictLimit - window.lowLimit;
1644
1645
1.04M
    prefix.start = window.base + window.dictLimit;
1646
1.04M
    prefix.size = window.nextSrc - (window.base + window.dictLimit);
1647
1.04M
    DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
1648
1.04M
                (size_t)extDict.start,
1649
1.04M
                (size_t)extDict.start + extDict.size);
1650
1.04M
    DEBUGLOG(5, "prefix  [0x%zx, 0x%zx)",
1651
1.04M
                (size_t)prefix.start,
1652
1.04M
                (size_t)prefix.start + prefix.size);
1653
1654
1.04M
    return ZSTDMT_isOverlapped(buffer, extDict)
1655
1.04M
        || ZSTDMT_isOverlapped(buffer, prefix);
1656
1.04M
}
1657
1658
static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, Buffer buffer)
1659
1.96M
{
1660
1.96M
    if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) {
1661
1.04M
        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
1662
1.04M
        DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
1663
1.04M
        DEBUGLOG(5, "source  [0x%zx, 0x%zx)",
1664
1.04M
                    (size_t)buffer.start,
1665
1.04M
                    (size_t)buffer.start + buffer.capacity);
1666
1.04M
        ZSTD_PTHREAD_MUTEX_LOCK(mutex);
1667
1.04M
        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
1668
0
            DEBUGLOG(5, "Waiting for LDM to finish...");
1669
0
            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
1670
0
        }
1671
1.04M
        DEBUGLOG(6, "Done waiting for LDM to finish");
1672
1.04M
        ZSTD_pthread_mutex_unlock(mutex);
1673
1.04M
    }
1674
1.96M
}
1675
1676
/**
1677
 * Attempts to set the inBuff to the next section to fill.
1678
 * If any part of the new section is still in use we give up.
1679
 * Returns non-zero if the buffer is filled.
1680
 */
1681
static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
1682
1.96M
{
1683
1.96M
    Range const inUse = ZSTDMT_getInputDataInUse(mtctx);
1684
1.96M
    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
1685
1.96M
    size_t const spaceNeeded = mtctx->targetSectionSize;
1686
1.96M
    Buffer buffer;
1687
1688
1.96M
    DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
1689
1.96M
    assert(mtctx->inBuff.buffer.start == NULL);
1690
1.96M
    assert(mtctx->roundBuff.capacity >= spaceNeeded);
1691
1692
1.96M
    if (spaceLeft < spaceNeeded) {
1693
        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
1694
         * Simply copy the prefix to the beginning in that case.
1695
         */
1696
0
        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
1697
0
        size_t const prefixSize = mtctx->inBuff.prefix.size;
1698
1699
0
        buffer.start = start;
1700
0
        buffer.capacity = prefixSize;
1701
0
        if (ZSTDMT_isOverlapped(buffer, inUse)) {
1702
0
            DEBUGLOG(5, "Waiting for buffer...");
1703
0
            return 0;
1704
0
        }
1705
0
        ZSTDMT_waitForLdmComplete(mtctx, buffer);
1706
0
        ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize);
1707
0
        mtctx->inBuff.prefix.start = start;
1708
0
        mtctx->roundBuff.pos = prefixSize;
1709
0
    }
1710
1.96M
    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
1711
1.96M
    buffer.capacity = spaceNeeded;
1712
1713
1.96M
    if (ZSTDMT_isOverlapped(buffer, inUse)) {
1714
0
        DEBUGLOG(5, "Waiting for buffer...");
1715
0
        return 0;
1716
0
    }
1717
1.96M
    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
1718
1719
1.96M
    ZSTDMT_waitForLdmComplete(mtctx, buffer);
1720
1721
1.96M
    DEBUGLOG(5, "Using prefix range [%zx, %zx)",
1722
1.96M
                (size_t)mtctx->inBuff.prefix.start,
1723
1.96M
                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
1724
1.96M
    DEBUGLOG(5, "Using source range [%zx, %zx)",
1725
1.96M
                (size_t)buffer.start,
1726
1.96M
                (size_t)buffer.start + buffer.capacity);
1727
1728
1729
1.96M
    mtctx->inBuff.buffer = buffer;
1730
1.96M
    mtctx->inBuff.filled = 0;
1731
1.96M
    assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
1732
1.96M
    return 1;
1733
1.96M
}
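As a worked illustration (figures hypothetical): with roundBuff.capacity = 48 MB, targetSectionSize = 16 MB, and roundBuff.pos = 40 MB, spaceLeft = 8 MB < spaceNeeded, so the prefix is memmoved back to the start of the round buffer, pos restarts at prefixSize, and the next 16 MB section is carved out from there, provided neither region is still in use by an ongoing job.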
1734
1735
typedef struct {
1736
  size_t toLoad;  /* The number of bytes to load from the input. */
1737
  int flush;      /* Boolean indicating whether we must flush because we found a synchronization point. */
1738
} SyncPoint;
1739
1740
/**
1741
 * Searches through the input for a synchronization point. If one is found, we
1742
 * will instruct the caller to flush, and return the number of bytes to load.
1743
 * Otherwise, we will load as many bytes as possible and instruct the caller
1744
 * to continue as normal.
1745
 */
1746
static SyncPoint
1747
findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
1748
2.01M
{
1749
2.01M
    BYTE const* const istart = (BYTE const*)input.src + input.pos;
1750
2.01M
    U64 const primePower = mtctx->rsync.primePower;
1751
2.01M
    U64 const hitMask = mtctx->rsync.hitMask;
1752
1753
2.01M
    SyncPoint syncPoint;
1754
2.01M
    U64 hash;
1755
2.01M
    BYTE const* prev;
1756
2.01M
    size_t pos;
1757
1758
2.01M
    syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
1759
2.01M
    syncPoint.flush = 0;
1760
2.01M
    if (!mtctx->params.rsyncable)
1761
        /* Rsync is disabled. */
1762
710k
        return syncPoint;
1763
1.30M
    if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE)
1764
        /* We don't emit synchronization points if they would produce blocks that are too small.
1765
         * We don't have enough input to find a synchronization point, so don't look.
1766
         */
1767
1.29M
        return syncPoint;
1768
3.27k
    if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
1769
        /* Not enough to compute the hash.
1770
         * We will miss any synchronization points in this RSYNC_LENGTH byte
1771
         * window. However, since it depends only on the internal buffers, if the
1772
         * state is already synchronized, we will remain synchronized.
1773
         * Additionally, the probability that we miss a synchronization point is
1774
         * low: RSYNC_LENGTH / targetSectionSize.
1775
         */
1776
0
        return syncPoint;
1777
    /* Initialize the loop variables. */
1778
3.27k
    if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) {
1779
        /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions
1780
         * because they can't possibly be a sync point. So we can start
1781
         * part way through the input buffer.
1782
         */
1783
1.86k
        pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled;
1784
1.86k
        if (pos >= RSYNC_LENGTH) {
1785
1.84k
            prev = istart + pos - RSYNC_LENGTH;
1786
1.84k
            hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
1787
1.84k
        } else {
1788
23
            assert(mtctx->inBuff.filled >= RSYNC_LENGTH);
1789
23
            prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
1790
23
            hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos));
1791
23
            hash = ZSTD_rollingHash_append(hash, istart, pos);
1792
23
        }
1793
1.86k
    } else {
1794
        /* We have enough bytes buffered to initialize the hash,
1795
         * and have processed enough bytes to find a sync point.
1796
         * Start scanning at the beginning of the input.
1797
         */
1798
1.40k
        assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE);
1799
1.40k
        assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH);
1800
1.40k
        pos = 0;
1801
1.40k
        prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
1802
1.40k
        hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
1803
1.40k
        if ((hash & hitMask) == hitMask) {
1804
            /* We're already at a sync point so don't load any more until
1805
             * we're able to flush this sync point.
1806
             * This likely happened because the job table was full so we
1807
             * couldn't add our job.
1808
             */
1809
0
            syncPoint.toLoad = 0;
1810
0
            syncPoint.flush = 1;
1811
0
            return syncPoint;
1812
0
        }
1813
1.40k
    }
1814
    /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
1815
     * through the input. If we hit a synchronization point, then cut the
1816
     * job off, and tell the compressor to flush the job. Otherwise, load
1817
     * all the bytes and continue as normal.
1818
     * If we go too long without a synchronization point (targetSectionSize)
1819
     * then a block will be emitted anyways, but this is okay, since if we
1820
     * are already synchronized we will remain synchronized.
1821
     */
1822
3.27k
    assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
1823
312M
    for (; pos < syncPoint.toLoad; ++pos) {
1824
312M
        BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
1825
        /* This assert is very expensive, and Debian compiles with asserts enabled.
1826
         * So disable it for now. We can get similar coverage by checking it at the
1827
         * beginning & end of the loop.
1828
         * assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
1829
         */
1830
312M
        hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
1831
312M
        assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE);
1832
312M
        if ((hash & hitMask) == hitMask) {
1833
1.08k
            syncPoint.toLoad = pos + 1;
1834
1.08k
            syncPoint.flush = 1;
1835
1.08k
            ++pos; /* for assert */
1836
1.08k
            break;
1837
1.08k
        }
1838
312M
    }
1839
3.27k
    assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
1840
3.27k
    return syncPoint;
1841
3.27k
}
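The same content-defined-chunking idea in isolation: a toy polynomial rolling hash (not the ZSTD_rollingHash_* implementation; the constant and window length are arbitrary) that cuts when the low bits of the hash are all ones, mirroring the roles of rsync.primePower and rsync.hitMask above:

    #include <stddef.h>

    #define WIN 32   /* rolling window length, playing the role of RSYNC_LENGTH */

    /* Returns the offset just past the first sync point, or len if none is found. */
    static size_t findCut(const unsigned char* p, size_t len, unsigned bits)
    {
        unsigned long long const prime = 0x9E3779B185EBCA87ULL;
        unsigned long long const hitMask = ((unsigned long long)1 << bits) - 1;
        unsigned long long hash = 0, primePow = 1;
        size_t i;
        if (len < WIN) return len;
        for (i = 0; i < WIN; i++) hash = hash * prime + p[i];   /* hash of first window */
        for (i = 1; i < WIN; i++) primePow *= prime;            /* prime^(WIN-1) */
        for (i = WIN; i < len; i++) {
            hash -= p[i - WIN] * primePow;                      /* drop oldest byte */
            hash  = hash * prime + p[i];                        /* add newest byte */
            if ((hash & hitMask) == hitMask) return i + 1;      /* sync point found */
        }
        return len;
    }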
1842
1843
size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
1844
0
{
1845
0
    size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
1846
0
    if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
1847
0
    return hintInSize;
1848
0
}
1849
1850
/** ZSTDMT_compressStream_generic() :
1851
 *  internal use only - exposed to be invoked from zstd_compress.c
1852
 *  assumption : output and input are valid (pos <= size)
1853
 * @return : minimum amount of data remaining to flush, 0 if none */
1854
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
1855
                                     ZSTD_outBuffer* output,
1856
                                     ZSTD_inBuffer* input,
1857
                                     ZSTD_EndDirective endOp)
1858
13.3M
{
1859
13.3M
    unsigned forwardInputProgress = 0;
1860
13.3M
    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
1861
13.3M
                (U32)endOp, (U32)(input->size - input->pos));
1862
13.3M
    assert(output->pos <= output->size);
1863
13.3M
    assert(input->pos  <= input->size);
1864
1865
13.3M
    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
1866
        /* current frame being ended. Only flush/end are allowed */
1867
0
        return ERROR(stage_wrong);
1868
0
    }
1869
1870
    /* fill input buffer */
1871
13.3M
    if ( (!mtctx->jobReady)
1872
13.3M
      && (input->size > input->pos) ) {   /* support NULL input */
1873
2.01M
        if (mtctx->inBuff.buffer.start == NULL) {
1874
1.96M
            assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */
1875
1.96M
            if (!ZSTDMT_tryGetInputRange(mtctx)) {
1876
                /* It is only possible for this operation to fail if there are
1877
                 * still compression jobs ongoing.
1878
                 */
1879
0
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
1880
0
                assert(mtctx->doneJobID != mtctx->nextJobID);
1881
0
            } else
1882
1.96M
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
1883
1.96M
        }
1884
2.01M
        if (mtctx->inBuff.buffer.start != NULL) {
1885
2.01M
            SyncPoint const syncPoint = findSynchronizationPoint(mtctx, *input);
1886
2.01M
            if (syncPoint.flush && endOp == ZSTD_e_continue) {
1887
144
                endOp = ZSTD_e_flush;
1888
144
            }
1889
2.01M
            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
1890
2.01M
            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
1891
2.01M
                        (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
1892
2.01M
            ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
1893
2.01M
            input->pos += syncPoint.toLoad;
1894
2.01M
            mtctx->inBuff.filled += syncPoint.toLoad;
1895
2.01M
            forwardInputProgress = syncPoint.toLoad>0;
1896
2.01M
        }
1897
2.01M
    }
1898
13.3M
    if ((input->pos < input->size) && (endOp == ZSTD_e_end)) {
1899
        /* Can't end yet because the input is not fully consumed.
1900
         * We are in one of these cases:
1901
         * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job.
1902
         * - We filled the input buffer: flush this job but don't end the frame.
1903
         * - We hit a synchronization point: flush this job but don't end the frame.
1904
         */
1905
1.21k
        assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable);
1906
1.21k
        endOp = ZSTD_e_flush;
1907
1.21k
    }
1908
1909
13.3M
    if ( (mtctx->jobReady)
1910
13.3M
      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)  /* filled enough : let's compress */
1911
13.3M
      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))  /* something to flush : let's go */
1912
13.3M
      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
1913
2.45M
        size_t const jobSize = mtctx->inBuff.filled;
1914
2.45M
        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
1915
2.45M
        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , "");
1916
2.45M
    }
1917
1918
    /* check for potential compressed data ready to be flushed */
1919
13.3M
    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */
1920
13.3M
        if (input->pos < input->size) return MAX(remainingToFlush, 1);  /* input not consumed : do not end flush yet */
1921
13.2M
        DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
1922
13.2M
        return remainingToFlush;
1923
13.3M
    }
1924
13.3M
}
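Putting the pieces together from the caller's side: a minimal end-to-end sketch. It assumes a single output buffer large enough for the whole frame (e.g. sized with ZSTD_compressBound); the worker count is illustrative:

    #include <zstd.h>

    static size_t compressAllMT(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
    {
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        size_t remaining;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);   /* route through the MT path */
        do {   /* ZSTD_e_end: consume all input, flush everything, write the epilogue */
            remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
            if (ZSTD_isError(remaining)) return remaining;
        } while (remaining != 0);
        return out.pos;   /* compressed size */
    }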