Coverage Report

Created: 2026-04-30 07:02

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/tdengine/contrib/TSZ/zstd/compress/zstd_compress.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
3
 * All rights reserved.
4
 *
5
 * This source code is licensed under both the BSD-style license (found in the
6
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7
 * in the COPYING file in the root directory of this source tree).
8
 * You may select, at your option, one of the above-listed licenses.
9
 */
10
11
/*-*************************************
12
*  Dependencies
13
***************************************/
14
#include <string.h>         /* memset */
15
#include "cpu.h"
16
#include "mem.h"
17
#include "hist.h"           /* HIST_countFast_wksp */
18
#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
19
#include "fse.h"
20
#define HUF_STATIC_LINKING_ONLY
21
#include "huf.h"
22
#include "zstd_compress_internal.h"
23
#include "zstd_fast.h"
24
#include "zstd_double_fast.h"
25
#include "zstd_lazy.h"
26
#include "zstd_opt.h"
27
#include "zstd_ldm.h"
28
29
30
/*-*************************************
31
*  Helper functions
32
***************************************/
33
0
size_t ZSTD_compressBound(size_t srcSize) {
34
0
    return ZSTD_COMPRESSBOUND(srcSize);
35
0
}
36
37
38
/*-*************************************
39
*  Context memory management
40
***************************************/
41
struct ZSTD_CDict_s {
42
    void* dictBuffer;
43
    const void* dictContent;
44
    size_t dictContentSize;
45
    void* workspace;
46
    size_t workspaceSize;
47
    ZSTD_matchState_t matchState;
48
    ZSTD_compressedBlockState_t cBlockState;
49
    ZSTD_compressionParameters cParams;
50
    ZSTD_customMem customMem;
51
    U32 dictID;
52
};  /* typedef'd to ZSTD_CDict within "zstd.h" */
53
54
ZSTD_CCtx* ZSTD_createCCtx(void)
55
0
{
56
0
    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
57
0
}
58
59
static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
60
0
{
61
0
    assert(cctx != NULL);
62
0
    memset(cctx, 0, sizeof(*cctx));
63
0
    cctx->customMem = memManager;
64
0
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
65
0
    {   size_t const err = ZSTD_CCtx_resetParameters(cctx);
66
0
        assert(!ZSTD_isError(err));
67
0
        (void)err;
68
0
    }
69
0
}
70
71
ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
72
0
{
73
0
    ZSTD_STATIC_ASSERT(zcss_init==0);
74
0
    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
75
0
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
76
0
    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
77
0
        if (!cctx) return NULL;
78
0
        ZSTD_initCCtx(cctx, customMem);
79
0
        return cctx;
80
0
    }
81
0
}
82
83
ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
84
0
{
85
0
    ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
86
0
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
87
0
    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
88
0
    memset(workspace, 0, workspaceSize);   /* may be a bit generous, could memset be smaller ? */
89
0
    cctx->staticSize = workspaceSize;
90
0
    cctx->workSpace = (void*)(cctx+1);
91
0
    cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);
92
93
    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
94
0
    if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL;
95
0
    assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
96
0
    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace;
97
0
    cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1;
98
0
    {
99
0
        void* const ptr = cctx->blockState.nextCBlock + 1;
100
0
        cctx->entropyWorkspace = (U32*)ptr;
101
0
    }
102
0
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
103
0
    return cctx;
104
0
}
105
106
static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
107
0
{
108
0
    assert(cctx != NULL);
109
0
    assert(cctx->staticSize == 0);
110
0
    ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
111
0
    ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
112
#ifdef ZSTD_MULTITHREAD
113
    ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
114
#endif
115
0
}
116
117
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
118
0
{
119
0
    if (cctx==NULL) return 0;   /* support free on NULL */
120
0
    if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
121
0
    ZSTD_freeCCtxContent(cctx);
122
0
    ZSTD_free(cctx, cctx->customMem);
123
0
    return 0;
124
0
}
125
126
127
static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
128
0
{
129
#ifdef ZSTD_MULTITHREAD
130
    return ZSTDMT_sizeof_CCtx(cctx->mtctx);
131
#else
132
0
    (void) cctx;
133
0
    return 0;
134
0
#endif
135
0
}
136
137
138
size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
139
0
{
140
0
    if (cctx==NULL) return 0;   /* support sizeof on NULL */
141
0
    return sizeof(*cctx) + cctx->workSpaceSize
142
0
           + ZSTD_sizeof_CDict(cctx->cdictLocal)
143
0
           + ZSTD_sizeof_mtctx(cctx);
144
0
}
145
146
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
147
0
{
148
0
    return ZSTD_sizeof_CCtx(zcs);  /* same object */
149
0
}
150
151
/* private API call, for dictBuilder only */
152
0
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
153
154
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
155
        ZSTD_compressionParameters cParams)
156
0
{
157
0
    ZSTD_CCtx_params cctxParams;
158
0
    memset(&cctxParams, 0, sizeof(cctxParams));
159
0
    cctxParams.cParams = cParams;
160
0
    cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;  /* should not matter, as all cParams are presumed properly defined */
161
0
    assert(!ZSTD_checkCParams(cParams));
162
0
    cctxParams.fParams.contentSizeFlag = 1;
163
0
    return cctxParams;
164
0
}
165
166
static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
167
        ZSTD_customMem customMem)
168
0
{
169
0
    ZSTD_CCtx_params* params;
170
0
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
171
0
    params = (ZSTD_CCtx_params*)ZSTD_calloc(
172
0
            sizeof(ZSTD_CCtx_params), customMem);
173
0
    if (!params) { return NULL; }
174
0
    params->customMem = customMem;
175
0
    params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
176
0
    params->fParams.contentSizeFlag = 1;
177
0
    return params;
178
0
}
179
180
ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
181
0
{
182
0
    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
183
0
}
184
185
size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
186
0
{
187
0
    if (params == NULL) { return 0; }
188
0
    ZSTD_free(params, params->customMem);
189
0
    return 0;
190
0
}
191
192
size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
193
0
{
194
0
    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
195
0
}
196
197
0
size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
198
0
    if (!cctxParams) { return ERROR(GENERIC); }
199
0
    memset(cctxParams, 0, sizeof(*cctxParams));
200
0
    cctxParams->compressionLevel = compressionLevel;
201
0
    cctxParams->fParams.contentSizeFlag = 1;
202
0
    return 0;
203
0
}
204
205
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
206
0
{
207
0
    if (!cctxParams) { return ERROR(GENERIC); }
208
0
    CHECK_F( ZSTD_checkCParams(params.cParams) );
209
0
    memset(cctxParams, 0, sizeof(*cctxParams));
210
0
    cctxParams->cParams = params.cParams;
211
0
    cctxParams->fParams = params.fParams;
212
0
    cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
213
0
    assert(!ZSTD_checkCParams(params.cParams));
214
0
    return 0;
215
0
}
216
217
/* ZSTD_assignParamsToCCtxParams() :
218
 * params is presumed valid at this stage */
219
static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
220
        ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
221
0
{
222
0
    ZSTD_CCtx_params ret = cctxParams;
223
0
    ret.cParams = params.cParams;
224
0
    ret.fParams = params.fParams;
225
0
    ret.compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
226
0
    assert(!ZSTD_checkCParams(params.cParams));
227
0
    return ret;
228
0
}
229
230
0
#define CLAMPCHECK(val,min,max) {            \
231
0
    if (((val)<(min)) | ((val)>(max))) {     \
232
0
        return ERROR(parameter_outOfBound);  \
233
0
}   }
234
235
236
static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
237
0
{
238
0
    switch(param)
239
0
    {
240
0
    case ZSTD_p_compressionLevel:
241
0
    case ZSTD_p_hashLog:
242
0
    case ZSTD_p_chainLog:
243
0
    case ZSTD_p_searchLog:
244
0
    case ZSTD_p_minMatch:
245
0
    case ZSTD_p_targetLength:
246
0
    case ZSTD_p_compressionStrategy:
247
0
        return 1;
248
249
0
    case ZSTD_p_format:
250
0
    case ZSTD_p_windowLog:
251
0
    case ZSTD_p_contentSizeFlag:
252
0
    case ZSTD_p_checksumFlag:
253
0
    case ZSTD_p_dictIDFlag:
254
0
    case ZSTD_p_forceMaxWindow :
255
0
    case ZSTD_p_nbWorkers:
256
0
    case ZSTD_p_jobSize:
257
0
    case ZSTD_p_overlapSizeLog:
258
0
    case ZSTD_p_enableLongDistanceMatching:
259
0
    case ZSTD_p_ldmHashLog:
260
0
    case ZSTD_p_ldmMinMatch:
261
0
    case ZSTD_p_ldmBucketSizeLog:
262
0
    case ZSTD_p_ldmHashEveryLog:
263
0
    case ZSTD_p_forceAttachDict:
264
0
    default:
265
0
        return 0;
266
0
    }
267
0
}
268
269
size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value)
270
0
{
271
0
    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%u, %u)", (U32)param, value);
272
0
    if (cctx->streamStage != zcss_init) {
273
0
        if (ZSTD_isUpdateAuthorized(param)) {
274
0
            cctx->cParamsChanged = 1;
275
0
        } else {
276
0
            return ERROR(stage_wrong);
277
0
    }   }
278
279
0
    switch(param)
280
0
    {
281
0
    case ZSTD_p_format :
282
0
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
283
284
0
    case ZSTD_p_compressionLevel:
285
0
        if (cctx->cdict) return ERROR(stage_wrong);
286
0
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
287
288
0
    case ZSTD_p_windowLog:
289
0
    case ZSTD_p_hashLog:
290
0
    case ZSTD_p_chainLog:
291
0
    case ZSTD_p_searchLog:
292
0
    case ZSTD_p_minMatch:
293
0
    case ZSTD_p_targetLength:
294
0
    case ZSTD_p_compressionStrategy:
295
0
        if (cctx->cdict) return ERROR(stage_wrong);
296
0
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
297
298
0
    case ZSTD_p_contentSizeFlag:
299
0
    case ZSTD_p_checksumFlag:
300
0
    case ZSTD_p_dictIDFlag:
301
0
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
302
303
0
    case ZSTD_p_forceMaxWindow :  /* Force back-references to remain < windowSize,
304
                                   * even when referencing into Dictionary content.
305
                                   * default : 0 when using a CDict, 1 when using a Prefix */
306
0
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
307
308
0
    case ZSTD_p_forceAttachDict:
309
0
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
310
311
0
    case ZSTD_p_nbWorkers:
312
0
        if ((value>0) && cctx->staticSize) {
313
0
            return ERROR(parameter_unsupported);  /* MT not compatible with static alloc */
314
0
        }
315
0
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
316
317
0
    case ZSTD_p_jobSize:
318
0
    case ZSTD_p_overlapSizeLog:
319
0
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
320
321
0
    case ZSTD_p_enableLongDistanceMatching:
322
0
    case ZSTD_p_ldmHashLog:
323
0
    case ZSTD_p_ldmMinMatch:
324
0
    case ZSTD_p_ldmBucketSizeLog:
325
0
    case ZSTD_p_ldmHashEveryLog:
326
0
        if (cctx->cdict) return ERROR(stage_wrong);
327
0
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
328
329
0
    default: return ERROR(parameter_unsupported);
330
0
    }
331
0
}
332
333
size_t ZSTD_CCtxParam_setParameter(
334
        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned value)
335
0
{
336
0
    DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%u, %u)", (U32)param, value);
337
0
    switch(param)
338
0
    {
339
0
    case ZSTD_p_format :
340
0
        if (value > (unsigned)ZSTD_f_zstd1_magicless)
341
0
            return ERROR(parameter_unsupported);
342
0
        CCtxParams->format = (ZSTD_format_e)value;
343
0
        return (size_t)CCtxParams->format;
344
345
0
    case ZSTD_p_compressionLevel : {
346
0
        int cLevel = (int)value;  /* cast expected to restore negative sign */
347
0
        if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
348
0
        if (cLevel) {  /* 0 : does not change current level */
349
0
            CCtxParams->compressionLevel = cLevel;
350
0
        }
351
0
        if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
352
0
        return 0;  /* return type (size_t) cannot represent negative values */
353
0
    }
354
355
0
    case ZSTD_p_windowLog :
356
0
        if (value>0)   /* 0 => use default */
357
0
            CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
358
0
        CCtxParams->cParams.windowLog = value;
359
0
        return CCtxParams->cParams.windowLog;
360
361
0
    case ZSTD_p_hashLog :
362
0
        if (value>0)   /* 0 => use default */
363
0
            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
364
0
        CCtxParams->cParams.hashLog = value;
365
0
        return CCtxParams->cParams.hashLog;
366
367
0
    case ZSTD_p_chainLog :
368
0
        if (value>0)   /* 0 => use default */
369
0
            CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
370
0
        CCtxParams->cParams.chainLog = value;
371
0
        return CCtxParams->cParams.chainLog;
372
373
0
    case ZSTD_p_searchLog :
374
0
        if (value>0)   /* 0 => use default */
375
0
            CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
376
0
        CCtxParams->cParams.searchLog = value;
377
0
        return value;
378
379
0
    case ZSTD_p_minMatch :
380
0
        if (value>0)   /* 0 => use default */
381
0
            CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
382
0
        CCtxParams->cParams.searchLength = value;
383
0
        return CCtxParams->cParams.searchLength;
384
385
0
    case ZSTD_p_targetLength :
386
        /* all values are valid. 0 => use default */
387
0
        CCtxParams->cParams.targetLength = value;
388
0
        return CCtxParams->cParams.targetLength;
389
390
0
    case ZSTD_p_compressionStrategy :
391
0
        if (value>0)   /* 0 => use default */
392
0
            CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra);
393
0
        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
394
0
        return (size_t)CCtxParams->cParams.strategy;
395
396
0
    case ZSTD_p_contentSizeFlag :
397
        /* Content size written in frame header _when known_ (default:1) */
398
0
        DEBUGLOG(4, "set content size flag = %u", (value>0));
399
0
        CCtxParams->fParams.contentSizeFlag = value > 0;
400
0
        return CCtxParams->fParams.contentSizeFlag;
401
402
0
    case ZSTD_p_checksumFlag :
403
        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
404
0
        CCtxParams->fParams.checksumFlag = value > 0;
405
0
        return CCtxParams->fParams.checksumFlag;
406
407
0
    case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
408
0
        DEBUGLOG(4, "set dictIDFlag = %u", (value>0));
409
0
        CCtxParams->fParams.noDictIDFlag = !value;
410
0
        return !CCtxParams->fParams.noDictIDFlag;
411
412
0
    case ZSTD_p_forceMaxWindow :
413
0
        CCtxParams->forceWindow = (value > 0);
414
0
        return CCtxParams->forceWindow;
415
416
0
    case ZSTD_p_forceAttachDict :
417
0
        CCtxParams->attachDictPref = value ?
418
0
                                    (value > 0 ? ZSTD_dictForceAttach : ZSTD_dictForceCopy) :
419
0
                                     ZSTD_dictDefaultAttach;
420
0
        return CCtxParams->attachDictPref;
421
422
0
    case ZSTD_p_nbWorkers :
423
0
#ifndef ZSTD_MULTITHREAD
424
0
        if (value>0) return ERROR(parameter_unsupported);
425
0
        return 0;
426
#else
427
        return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
428
#endif
429
430
0
    case ZSTD_p_jobSize :
431
0
#ifndef ZSTD_MULTITHREAD
432
0
        return ERROR(parameter_unsupported);
433
#else
434
        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
435
#endif
436
437
0
    case ZSTD_p_overlapSizeLog :
438
0
#ifndef ZSTD_MULTITHREAD
439
0
        return ERROR(parameter_unsupported);
440
#else
441
        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapSectionLog, value);
442
#endif
443
444
0
    case ZSTD_p_enableLongDistanceMatching :
445
0
        CCtxParams->ldmParams.enableLdm = (value>0);
446
0
        return CCtxParams->ldmParams.enableLdm;
447
448
0
    case ZSTD_p_ldmHashLog :
449
0
        if (value>0)   /* 0 ==> auto */
450
0
            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
451
0
        CCtxParams->ldmParams.hashLog = value;
452
0
        return CCtxParams->ldmParams.hashLog;
453
454
0
    case ZSTD_p_ldmMinMatch :
455
0
        if (value>0)   /* 0 ==> default */
456
0
            CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX);
457
0
        CCtxParams->ldmParams.minMatchLength = value;
458
0
        return CCtxParams->ldmParams.minMatchLength;
459
460
0
    case ZSTD_p_ldmBucketSizeLog :
461
0
        if (value > ZSTD_LDM_BUCKETSIZELOG_MAX)
462
0
            return ERROR(parameter_outOfBound);
463
0
        CCtxParams->ldmParams.bucketSizeLog = value;
464
0
        return CCtxParams->ldmParams.bucketSizeLog;
465
466
0
    case ZSTD_p_ldmHashEveryLog :
467
0
        if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
468
0
            return ERROR(parameter_outOfBound);
469
0
        CCtxParams->ldmParams.hashEveryLog = value;
470
0
        return CCtxParams->ldmParams.hashEveryLog;
471
472
0
    default: return ERROR(parameter_unsupported);
473
0
    }
474
0
}
475
476
size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned* value)
477
0
{
478
0
    return ZSTD_CCtxParam_getParameter(&cctx->requestedParams, param, value);
479
0
}
480
481
size_t ZSTD_CCtxParam_getParameter(
482
        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned* value)
483
0
{
484
0
    switch(param)
485
0
    {
486
0
    case ZSTD_p_format :
487
0
        *value = CCtxParams->format;
488
0
        break;
489
0
    case ZSTD_p_compressionLevel :
490
0
        *value = CCtxParams->compressionLevel;
491
0
        break;
492
0
    case ZSTD_p_windowLog :
493
0
        *value = CCtxParams->cParams.windowLog;
494
0
        break;
495
0
    case ZSTD_p_hashLog :
496
0
        *value = CCtxParams->cParams.hashLog;
497
0
        break;
498
0
    case ZSTD_p_chainLog :
499
0
        *value = CCtxParams->cParams.chainLog;
500
0
        break;
501
0
    case ZSTD_p_searchLog :
502
0
        *value = CCtxParams->cParams.searchLog;
503
0
        break;
504
0
    case ZSTD_p_minMatch :
505
0
        *value = CCtxParams->cParams.searchLength;
506
0
        break;
507
0
    case ZSTD_p_targetLength :
508
0
        *value = CCtxParams->cParams.targetLength;
509
0
        break;
510
0
    case ZSTD_p_compressionStrategy :
511
0
        *value = (unsigned)CCtxParams->cParams.strategy;
512
0
        break;
513
0
    case ZSTD_p_contentSizeFlag :
514
0
        *value = CCtxParams->fParams.contentSizeFlag;
515
0
        break;
516
0
    case ZSTD_p_checksumFlag :
517
0
        *value = CCtxParams->fParams.checksumFlag;
518
0
        break;
519
0
    case ZSTD_p_dictIDFlag :
520
0
        *value = !CCtxParams->fParams.noDictIDFlag;
521
0
        break;
522
0
    case ZSTD_p_forceMaxWindow :
523
0
        *value = CCtxParams->forceWindow;
524
0
        break;
525
0
    case ZSTD_p_forceAttachDict :
526
0
        *value = CCtxParams->attachDictPref;
527
0
        break;
528
0
    case ZSTD_p_nbWorkers :
529
0
#ifndef ZSTD_MULTITHREAD
530
0
        assert(CCtxParams->nbWorkers == 0);
531
0
#endif
532
0
        *value = CCtxParams->nbWorkers;
533
0
        break;
534
0
    case ZSTD_p_jobSize :
535
0
#ifndef ZSTD_MULTITHREAD
536
0
        return ERROR(parameter_unsupported);
537
#else
538
        *value = CCtxParams->jobSize;
539
        break;
540
#endif
541
0
    case ZSTD_p_overlapSizeLog :
542
0
#ifndef ZSTD_MULTITHREAD
543
0
        return ERROR(parameter_unsupported);
544
#else
545
        *value = CCtxParams->overlapSizeLog;
546
        break;
547
#endif
548
0
    case ZSTD_p_enableLongDistanceMatching :
549
0
        *value = CCtxParams->ldmParams.enableLdm;
550
0
        break;
551
0
    case ZSTD_p_ldmHashLog :
552
0
        *value = CCtxParams->ldmParams.hashLog;
553
0
        break;
554
0
    case ZSTD_p_ldmMinMatch :
555
0
        *value = CCtxParams->ldmParams.minMatchLength;
556
0
        break;
557
0
    case ZSTD_p_ldmBucketSizeLog :
558
0
        *value = CCtxParams->ldmParams.bucketSizeLog;
559
0
        break;
560
0
    case ZSTD_p_ldmHashEveryLog :
561
0
        *value = CCtxParams->ldmParams.hashEveryLog;
562
0
        break;
563
0
    default: return ERROR(parameter_unsupported);
564
0
    }
565
0
    return 0;
566
0
}
567
568
/** ZSTD_CCtx_setParametersUsingCCtxParams() :
569
 *  just applies `params` into `cctx`
570
 *  no action is performed, parameters are merely stored.
571
 *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
572
 *    This is possible even if a compression is ongoing.
573
 *    In which case, new parameters will be applied on the fly, starting with next compression job.
574
 */
575
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
576
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
577
0
{
578
0
    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
579
0
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
580
0
    if (cctx->cdict) return ERROR(stage_wrong);
581
582
0
    cctx->requestedParams = *params;
583
0
    return 0;
584
0
}
585
586
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
587
0
{
588
0
    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
589
0
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
590
0
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
591
0
    return 0;
592
0
}
593
594
size_t ZSTD_CCtx_loadDictionary_advanced(
595
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
596
        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
597
0
{
598
0
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
599
0
    if (cctx->staticSize) return ERROR(memory_allocation);  /* no malloc for static CCtx */
600
0
    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
601
0
    ZSTD_freeCDict(cctx->cdictLocal);  /* in case one already exists */
602
0
    if (dict==NULL || dictSize==0) {   /* no dictionary mode */
603
0
        cctx->cdictLocal = NULL;
604
0
        cctx->cdict = NULL;
605
0
    } else {
606
0
        ZSTD_compressionParameters const cParams =
607
0
                ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize);
608
0
        cctx->cdictLocal = ZSTD_createCDict_advanced(
609
0
                                dict, dictSize,
610
0
                                dictLoadMethod, dictContentType,
611
0
                                cParams, cctx->customMem);
612
0
        cctx->cdict = cctx->cdictLocal;
613
0
        if (cctx->cdictLocal == NULL)
614
0
            return ERROR(memory_allocation);
615
0
    }
616
0
    return 0;
617
0
}
618
619
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
620
      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
621
0
{
622
0
    return ZSTD_CCtx_loadDictionary_advanced(
623
0
            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
624
0
}
625
626
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
627
0
{
628
0
    return ZSTD_CCtx_loadDictionary_advanced(
629
0
            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
630
0
}
631
632
633
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
634
0
{
635
0
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
636
0
    cctx->cdict = cdict;
637
0
    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* exclusive */
638
0
    return 0;
639
0
}
640
641
size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
642
0
{
643
0
    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
644
0
}
645
646
size_t ZSTD_CCtx_refPrefix_advanced(
647
        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
648
0
{
649
0
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
650
0
    cctx->cdict = NULL;   /* prefix discards any prior cdict */
651
0
    cctx->prefixDict.dict = prefix;
652
0
    cctx->prefixDict.dictSize = prefixSize;
653
0
    cctx->prefixDict.dictContentType = dictContentType;
654
0
    return 0;
655
0
}
656
657
/*! ZSTD_CCtx_reset() :
658
 *  Also dumps dictionary */
659
void ZSTD_CCtx_reset(ZSTD_CCtx* cctx)
660
0
{
661
0
    cctx->streamStage = zcss_init;
662
0
    cctx->pledgedSrcSizePlusOne = 0;
663
0
}
664
665
size_t ZSTD_CCtx_resetParameters(ZSTD_CCtx* cctx)
666
0
{
667
0
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
668
0
    cctx->cdict = NULL;
669
0
    return ZSTD_CCtxParams_reset(&cctx->requestedParams);
670
0
}
671
672
/** ZSTD_checkCParams() :
673
    control CParam values remain within authorized range.
674
    @return : 0, or an error code if one value is beyond authorized range */
675
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
676
0
{
677
0
    CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
678
0
    CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
679
0
    CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
680
0
    CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
681
0
    CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
682
0
    if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
683
0
        return ERROR(parameter_unsupported);
684
0
    return 0;
685
0
}
686
687
/** ZSTD_clampCParams() :
688
 *  make CParam values within valid range.
689
 *  @return : valid CParams */
690
static ZSTD_compressionParameters
691
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
692
0
{
693
0
#   define CLAMP(val,min,max) {      \
694
0
        if (val<min) val=min;        \
695
0
        else if (val>max) val=max;   \
696
0
    }
697
0
    CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
698
0
    CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
699
0
    CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
700
0
    CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
701
0
    CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
702
0
    CLAMP(cParams.strategy, ZSTD_fast, ZSTD_btultra);
703
0
    return cParams;
704
0
}
705
706
/** ZSTD_cycleLog() :
707
 *  condition for correct operation : hashLog > 1 */
708
static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
709
0
{
710
0
    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
711
0
    return hashLog - btScale;
712
0
}
713
714
/** ZSTD_adjustCParams_internal() :
715
    optimize `cPar` for a given input (`srcSize` and `dictSize`).
716
    mostly downsizing to reduce memory consumption and initialization latency.
717
    Both `srcSize` and `dictSize` are optional (use 0 if unknown).
718
    Note : cPar is assumed validated. Use ZSTD_checkCParams() to ensure this condition. */
719
static ZSTD_compressionParameters
720
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
721
                            unsigned long long srcSize,
722
                            size_t dictSize)
723
0
{
724
0
    static const U64 minSrcSize = 513; /* (1<<9) + 1 */
725
0
    static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
726
0
    assert(ZSTD_checkCParams(cPar)==0);
727
728
0
    if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
729
0
        srcSize = minSrcSize;  /* presumed small when there is a dictionary */
730
0
    else if (srcSize == 0)
731
0
        srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */
732
733
    /* resize windowLog if input is small enough, to use less memory */
734
0
    if ( (srcSize < maxWindowResize)
735
0
      && (dictSize < maxWindowResize) )  {
736
0
        U32 const tSize = (U32)(srcSize + dictSize);
737
0
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
738
0
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
739
0
                            ZSTD_highbit32(tSize-1) + 1;
740
0
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
741
0
    }
742
0
    if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
743
0
    {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
744
0
        if (cycleLog > cPar.windowLog)
745
0
            cPar.chainLog -= (cycleLog - cPar.windowLog);
746
0
    }
747
748
0
    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
749
0
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */
750
751
0
    return cPar;
752
0
}
753
754
ZSTD_compressionParameters
755
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
756
                   unsigned long long srcSize,
757
                   size_t dictSize)
758
0
{
759
0
    cPar = ZSTD_clampCParams(cPar);
760
0
    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
761
0
}
762
763
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
764
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
765
0
{
766
0
    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
767
0
    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
768
0
    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
769
0
    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
770
0
    if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
771
0
    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
772
0
    if (CCtxParams->cParams.searchLength) cParams.searchLength = CCtxParams->cParams.searchLength;
773
0
    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
774
0
    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
775
0
    assert(!ZSTD_checkCParams(cParams));
776
0
    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
777
0
}
778
779
static size_t
780
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
781
                       const U32 forCCtx)
782
0
{
783
0
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
784
0
    size_t const hSize = ((size_t)1) << cParams->hashLog;
785
0
    U32    const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
786
0
    size_t const h3Size = ((size_t)1) << hashLog3;
787
0
    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
788
0
    size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
789
0
                          + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
790
0
    size_t const optSpace = (forCCtx && ((cParams->strategy == ZSTD_btopt) ||
791
0
                                         (cParams->strategy == ZSTD_btultra)))
792
0
                                ? optPotentialSpace
793
0
                                : 0;
794
0
    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
795
0
                (U32)chainSize, (U32)hSize, (U32)h3Size);
796
0
    return tableSpace + optSpace;
797
0
}
798
799
/*  ZSTD_estimateCCtxSize_usingCCtxParams() :
 *  Upper-bound estimate of the memory a CCtx will need for these parameters.
 *  The breakdown below must stay consistent with the workspace layout computed
 *  in ZSTD_resetCCtx_internal(), otherwise the estimate becomes wrong. */
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    /* Estimate CCtx size is supported for single-threaded compression only. */
    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
    {   ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
        /* searchLength==3 allows sequences every 3 bytes instead of 4 */
        U32    const divider = (cParams.searchLength==3) ? 3 : 4;
        size_t const maxNbSeq = blockSize / divider;
        /* literals buffer + seqDef/llCode/mlCode/ofCode arrays (11 bytes per sequence) */
        size_t const tokenSpace = blockSize + 11*maxNbSeq;
        size_t const entropySpace = HUF_WORKSPACE_SIZE;
        size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
        size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);

        size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);
        size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq);

        size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace +
                                   matchStateSize + ldmSpace + ldmSeqSpace;

        DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
        DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
        return sizeof(ZSTD_CCtx) + neededSpace;
    }
}
824
825
/*  ZSTD_estimateCCtxSize_usingCParams() :
 *  Convenience wrapper : wrap raw compression parameters into a full
 *  CCtx_params object, then delegate to the CCtxParams-based estimator. */
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params const cctxParams = ZSTD_makeCCtxParamsFromCParams(cParams);
    return ZSTD_estimateCCtxSize_usingCCtxParams(&cctxParams);
}
830
831
/*  ZSTD_estimateCCtxSize_internal() :
 *  Estimate for a single compression level (srcSize and dictSize unknown). */
static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
{
    return ZSTD_estimateCCtxSize_usingCParams(ZSTD_getCParams(compressionLevel, 0, 0));
}
836
837
size_t ZSTD_estimateCCtxSize(int compressionLevel)
838
0
{
839
0
    int level;
840
0
    size_t memBudget = 0;
841
0
    for (level=1; level<=compressionLevel; level++) {
842
0
        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
843
0
        if (newMB > memBudget) memBudget = newMB;
844
0
    }
845
0
    return memBudget;
846
0
}
847
848
/*  ZSTD_estimateCStreamSize_usingCCtxParams() :
 *  Streaming estimate = single-shot CCtx estimate + internal in/out buffers.
 *  NOTE(review): blockSize here is derived from params->cParams.windowLog
 *  directly (not from the adjusted cParams used inside the CCtx estimate) —
 *  presumably callers provide a fully-set windowLog; verify against callers. */
size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    /* streaming estimate only supported for single-threaded compression */
    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
    {   size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
        /* input buffer keeps one full window plus one block */
        size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
        /* +1 byte so a flush can always make progress */
        size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
        size_t const streamingSize = inBuffSize + outBuffSize;

        return CCtxSize + streamingSize;
    }
}
860
861
/*  ZSTD_estimateCStreamSize_usingCParams() :
 *  Convenience wrapper : wrap raw compression parameters into a full
 *  CCtx_params object, then delegate to the CCtxParams-based estimator. */
size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params const cctxParams = ZSTD_makeCCtxParamsFromCParams(cParams);
    return ZSTD_estimateCStreamSize_usingCCtxParams(&cctxParams);
}
866
867
/*  ZSTD_estimateCStreamSize_internal() :
 *  Streaming estimate for a single compression level (sizes unknown). */
static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
{
    return ZSTD_estimateCStreamSize_usingCParams(ZSTD_getCParams(compressionLevel, 0, 0));
}
872
873
size_t ZSTD_estimateCStreamSize(int compressionLevel)
874
0
{
875
0
    int level;
876
0
    size_t memBudget = 0;
877
0
    for (level=1; level<=compressionLevel; level++) {
878
0
        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
879
0
        if (newMB > memBudget) memBudget = newMB;
880
0
    }
881
0
    return memBudget;
882
0
}
883
884
/* ZSTD_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads (non-blocking mode).
 */
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    /* multi-threaded mode : progression is tracked by the MT context */
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_getFrameProgression(cctx->mtctx);
    }
#endif
    {   ZSTD_frameProgression fp;
        /* bytes sitting in the input buffer, received but not yet compressed */
        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
                                cctx->inBuffPos - cctx->inToCompress;
        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
        fp.ingested = cctx->consumedSrcSize + buffered;
        fp.consumed = cctx->consumedSrcSize;
        fp.produced = cctx->producedCSize;
        return fp;
}   }
905
906
907
/*  ZSTD_equivalentCParams() :
 *  Returns 1 when two parameter sets imply identically-sized match-state
 *  tables, so an existing context can be reused without reallocation. */
static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
                                  ZSTD_compressionParameters cParams2)
{
    U32 const sameHashLog  = (cParams1.hashLog  == cParams2.hashLog);
    U32 const sameChainLog = (cParams1.chainLog == cParams2.chainLog);
    U32 const sameStrategy = (cParams1.strategy == cParams2.strategy);   /* opt parser space */
    U32 const sameH3Usage  = ((cParams1.searchLength==3) == (cParams2.searchLength==3));  /* hashlog3 space */
    return sameHashLog & sameChainLog & sameStrategy & sameH3Usage;
}
915
916
/** The parameters are equivalent if ldm is not enabled in both sets or
917
 *  all the parameters are equivalent. */
918
static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
919
                                    ldmParams_t ldmParams2)
920
0
{
921
0
    return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
922
0
           (ldmParams1.enableLdm == ldmParams2.enableLdm &&
923
0
            ldmParams1.hashLog == ldmParams2.hashLog &&
924
0
            ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
925
0
            ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
926
0
            ldmParams1.hashEveryLog == ldmParams2.hashEveryLog);
927
0
}
928
929
/* whether the CCtx allocates and manages internal input/output buffers (streaming mode) */
typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
930
931
/* ZSTD_sufficientBuff() :
 * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
 * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t blockSize1,
                            ZSTD_buffered_policy_e buffPol2,
                            ZSTD_compressionParameters cParams2,
                            U64 pledgedSrcSize)
{
    /* effective window cannot exceed pledgedSrcSize, and is at least 1 */
    size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
    size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
    /* streaming requires window + one block of input buffering; 0 otherwise */
    size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
    DEBUGLOG(4, "ZSTD_sufficientBuff: is windowSize2=%u <= wlog1=%u",
                (U32)windowSize2, cParams2.windowLog);
    DEBUGLOG(4, "ZSTD_sufficientBuff: is blockSize2=%u <= blockSize1=%u",
                (U32)blockSize2, (U32)blockSize1);
    return (blockSize2 <= blockSize1) /* seqStore space depends on blockSize */
         & (neededBufferSize2 <= bufferSize1);
}
949
950
/** Equivalence for resetCCtx purposes :
 *  returns non-zero when the existing context (params1 / buffSize1 / blockSize1)
 *  can be reused in place for the new request (params2 / buffPol2 / pledgedSrcSize)
 *  without reallocating tables or buffers. */
static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
                                 ZSTD_CCtx_params params2,
                                 size_t buffSize1, size_t blockSize1,
                                 ZSTD_buffered_policy_e buffPol2,
                                 U64 pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
    /* all three checks must pass : table sizes, LDM config, buffer capacity */
    return ZSTD_equivalentCParams(params1.cParams, params2.cParams) &&
           ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams) &&
           ZSTD_sufficientBuff(buffSize1, blockSize1, buffPol2, params2.cParams, pledgedSrcSize);
}
962
963
/*  ZSTD_reset_compressedBlockState() :
 *  Restore a block state to its start-of-frame condition :
 *  default repcodes, and no reusable entropy tables. */
static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
{
    int n = 0;
    while (n < ZSTD_REP_NUM) {
        bs->rep[n] = repStartValue[n];
        ++n;
    }
    /* invalidate any previously-built entropy tables */
    bs->entropy.huf.repeatMode = HUF_repeat_none;
    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
}
973
974
/*! ZSTD_invalidateMatchState()
 * Invalidate all the matches in the match finder tables.
 * Requires nextSrc and base to be set (can be NULL).
 */
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
    ZSTD_window_clear(&ms->window);

    /* skip past the (now invalid) indexed region */
    ms->nextToUpdate = ms->window.dictLimit + 1;
    ms->nextToUpdate3 = ms->window.dictLimit + 1;
    ms->loadedDictEnd = 0;
    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
    ms->dictMatchState = NULL;
}
988
989
/*! ZSTD_continueCCtx() :
 *  reuse CCtx without reset (note : requires no dictionary)
 *  Keeps the existing workspace allocation; only re-initializes the logical
 *  frame state (counters, match-state indices, block state, checksum). */
static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
{
    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
    DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place");

    cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
    cctx->appliedParams = params;
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    cctx->consumedSrcSize = 0;
    cctx->producedCSize = 0;
    /* unknown content size : do not write it into the frame header */
    if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
        cctx->appliedParams.fParams.contentSizeFlag = 0;
    DEBUGLOG(4, "pledged content size : %u ; flag : %u",
        (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
    cctx->stage = ZSTDcs_init;
    cctx->dictID = 0;
    if (params.ldmParams.enableLdm)
        ZSTD_window_clear(&cctx->ldmState.window);
    /* drop any externally-referenced sequences from the previous frame */
    ZSTD_referenceExternalSequences(cctx, NULL, 0);
    ZSTD_invalidateMatchState(&cctx->blockState.matchState);
    ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
    XXH64_reset(&cctx->xxhState, 0);
    return 0;
}
1016
1017
/* ZSTDcrp_continue : try to reuse the context in place;
 * ZSTDcrp_noMemset : reset, but skip zeroing the match-state tables */
typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
1018
1019
/*  ZSTD_reset_matchState() :
 *  Carve the match-state tables out of the workspace starting at `ptr`,
 *  reset the match state, and return the first free byte after the tables.
 *  The layout (opt-parser buffers, then hashTable / chainTable / hashTable3)
 *  must stay consistent with ZSTD_sizeof_matchState(). */
static void*
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
                      void* ptr,
                const ZSTD_compressionParameters* cParams,
                      ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
{
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = ((size_t)1) << hashLog3;
    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);

    assert(((size_t)ptr & 3) == 0);

    ms->hashLog3 = hashLog3;
    memset(&ms->window, 0, sizeof(ms->window));
    ZSTD_invalidateMatchState(ms);

    /* opt parser space */
    if (forCCtx && ((cParams->strategy == ZSTD_btopt) | (cParams->strategy == ZSTD_btultra))) {
        DEBUGLOG(4, "reserving optimal parser space");
        /* frequency tables laid out back-to-back, followed by match/price tables */
        ms->opt.litFreq = (U32*)ptr;
        ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
        ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
        ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
        ptr = ms->opt.offCodeFreq + (MaxOff+1);
        ms->opt.matchTable = (ZSTD_match_t*)ptr;
        ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
        ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
        ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
    }

    /* table Space */
    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
    assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
    if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
    ms->hashTable = (U32*)(ptr);
    ms->chainTable = ms->hashTable + hSize;
    ms->hashTable3 = ms->chainTable + chainSize;
    ptr = ms->hashTable3 + h3Size;

    assert(((size_t)ptr & 3) == 0);
    return ptr;
}
1063
1064
0
/* Workspace shrink heuristic, used by ZSTD_resetCCtx_internal() */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128  /* when workspace is continuously too large
                                         * during at least this number of times,
                                         * context's memory usage is considered wasteful,
                                         * because it's sized to handle a worst case scenario which rarely happens.
                                         * In which case, resize it down to free some memory */
1070
1071
/*! ZSTD_resetCCtx_internal() :
    note : `params` are assumed fully validated at this stage.
    Re-initializes the compression context for a new frame :
    either reuses the existing context in place (continue mode),
    or re-partitions (and possibly reallocates) the workspace.
    Workspace layout, in order : block states + entropy workspace,
    ldm hash table + ldm sequences, match state, sequence storage,
    ldm bucketOffsets, streaming in/out buffers.
    This layout must stay in sync with ZSTD_estimateCCtxSize_usingCCtxParams(). */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
                                      ZSTD_CCtx_params params,
                                      U64 pledgedSrcSize,
                                      ZSTD_compResetPolicy_e const crp,
                                      ZSTD_buffered_policy_e const zbuff)
{
    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
                (U32)pledgedSrcSize, params.cParams.windowLog);
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));

    if (crp == ZSTDcrp_continue) {
        if (ZSTD_equivalentParams(zc->appliedParams, params,
                                zc->inBuffSize, zc->blockSize,
                                zbuff, pledgedSrcSize)) {
            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)",
                        zc->appliedParams.cParams.windowLog, zc->blockSize);
            zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0);   /* if it was too large, it still is */
            /* reuse in place, unless the workspace has been oversized too long :
             * then fall through to a full reset, which resizes it down */
            if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION)
                return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
    }   }
    DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");

    if (params.ldmParams.enableLdm) {
        /* Adjust long distance matching parameters */
        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
        assert(params.ldmParams.hashEveryLog < 32);
        zc->ldmState.hashPower = ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
    }

    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
        /* searchLength==3 allows sequences every 3 bytes instead of 4 */
        U32    const divider = (params.cParams.searchLength==3) ? 3 : 4;
        size_t const maxNbSeq = blockSize / divider;
        /* literals buffer + seqDef/llCode/mlCode/ofCode arrays (11 bytes per sequence) */
        size_t const tokenSpace = blockSize + 11*maxNbSeq;
        size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
        size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
        size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
        void* ptr;   /* used to partition workSpace */

        /* Check if workSpace is large enough, alloc a new one if needed */
        {   size_t const entropySpace = HUF_WORKSPACE_SIZE;
            size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
            size_t const bufferSpace = buffInSize + buffOutSize;
            size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);
            size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq);

            size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
                                       ldmSeqSpace + matchStateSize + tokenSpace +
                                       bufferSpace;

            int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
            int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
            /* wasteful only after being oversized for MAXDURATION consecutive resets */
            int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
            zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;

            DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
                        neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);

            if (workSpaceTooSmall || workSpaceWasteful) {
                DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
                            zc->workSpaceSize >> 10,
                            neededSpace >> 10);
                /* static cctx : no resize, error out */
                if (zc->staticSize) return ERROR(memory_allocation);

                /* size zeroed first, so the context stays consistent if malloc fails */
                zc->workSpaceSize = 0;
                ZSTD_free(zc->workSpace, zc->customMem);
                zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
                if (zc->workSpace == NULL) return ERROR(memory_allocation);
                zc->workSpaceSize = neededSpace;
                zc->workSpaceOversizedDuration = 0;
                ptr = zc->workSpace;

                /* Statically sized space.
                 * entropyWorkspace never moves,
                 * though prev/next block swap places */
                assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
                assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
                zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1;
                ptr = zc->blockState.nextCBlock + 1;
                zc->entropyWorkspace = (U32*)ptr;
        }   }

        /* init params */
        zc->appliedParams = params;
        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
        zc->consumedSrcSize = 0;
        zc->producedCSize = 0;
        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            zc->appliedParams.fParams.contentSizeFlag = 0;
        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
            (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
        zc->blockSize = blockSize;

        XXH64_reset(&zc->xxhState, 0);
        zc->stage = ZSTDcs_init;
        zc->dictID = 0;

        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);

        /* partition the rest of the workspace, starting after the entropy workspace */
        ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32;

        /* ldm hash table */
        /* initialize bucketOffsets table later for pointer alignment */
        if (params.ldmParams.enableLdm) {
            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
            memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
            assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
            zc->ldmState.hashTable = (ldmEntry_t*)ptr;
            ptr = zc->ldmState.hashTable + ldmHSize;
            zc->ldmSequences = (rawSeq*)ptr;
            ptr = zc->ldmSequences + maxNbLdmSeq;
            zc->maxNbLdmSequences = maxNbLdmSeq;

            memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
        }
        assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */

        ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);

        /* sequences storage */
        zc->seqStore.sequencesStart = (seqDef*)ptr;
        ptr = zc->seqStore.sequencesStart + maxNbSeq;
        zc->seqStore.llCode = (BYTE*) ptr;
        zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
        zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
        zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
        ptr = zc->seqStore.litStart + blockSize;

        /* ldm bucketOffsets table */
        if (params.ldmParams.enableLdm) {
            size_t const ldmBucketSize =
                  ((size_t)1) << (params.ldmParams.hashLog -
                                  params.ldmParams.bucketSizeLog);
            memset(ptr, 0, ldmBucketSize);
            zc->ldmState.bucketOffsets = (BYTE*)ptr;
            ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
            ZSTD_window_clear(&zc->ldmState.window);
        }
        ZSTD_referenceExternalSequences(zc, NULL, 0);

        /* buffers */
        zc->inBuffSize = buffInSize;
        zc->inBuff = (char*)ptr;
        zc->outBuffSize = buffOutSize;
        zc->outBuff = zc->inBuff + buffInSize;

        return 0;
    }
}
1227
1228
/* ZSTD_invalidateRepCodes() :
1229
 * ensures next compression will not use repcodes from previous block.
1230
 * Note : only works with regular variant;
1231
 *        do not use with extDict variant ! */
1232
0
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
1233
0
    int i;
1234
0
    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
1235
0
    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
1236
0
}
1237
1238
/*  ZSTD_resetCCtx_usingCDict() :
 *  Reset `cctx` for a new frame that uses dictionary `cdict`.
 *  Either attaches the cdict's match state by reference (cheap, preferred for
 *  small inputs) or copies its tables into the working context (better match
 *  quality for large inputs). Returns 0 on success. */
static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            ZSTD_CCtx_params params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    /* We have a choice between copying the dictionary context into the working
     * context, or referencing the dictionary context from the working context
     * in-place. We decide here which strategy to use. */
    /* per-strategy input-size cutoff below which attaching is preferred; indexed by cdict->cParams.strategy */
    const U64 attachDictSizeCutoffs[(unsigned)ZSTD_btultra+1] = {
        8 KB, /* unused */
        8 KB, /* ZSTD_fast */
        16 KB, /* ZSTD_dfast */
        32 KB, /* ZSTD_greedy */
        32 KB, /* ZSTD_lazy */
        32 KB, /* ZSTD_lazy2 */
        32 KB, /* ZSTD_btlazy2 */
        32 KB, /* ZSTD_btopt */
        8 KB /* ZSTD_btultra */
    };
    const int attachDict = ( pledgedSrcSize <= attachDictSizeCutoffs[cdict->cParams.strategy]
                          || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
                          || params.attachDictPref == ZSTD_dictForceAttach )
                        && params.attachDictPref != ZSTD_dictForceCopy
                        && !params.forceWindow /* dictMatchState isn't correctly
                                                * handled in _enforceMaxDist */
                        && ZSTD_equivalentCParams(cctx->appliedParams.cParams,
                                                  cdict->cParams);

    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)", (U32)pledgedSrcSize);


    {   unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Copy only compression parameters related to tables. */
        params.cParams = cdict->cParams;
        params.cParams.windowLog = windowLog;
        /* continue mode keeps existing tables when attaching; noMemset when copying,
         * since the tables are about to be overwritten from the cdict */
        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                attachDict ? ZSTDcrp_continue : ZSTDcrp_noMemset,
                                zbuff);
        assert(cctx->appliedParams.cParams.strategy == cdict->cParams.strategy);
        assert(cctx->appliedParams.cParams.hashLog == cdict->cParams.hashLog);
        assert(cctx->appliedParams.cParams.chainLog == cdict->cParams.chainLog);
    }

    if (attachDict) {
        const U32 cdictLen = (U32)( cdict->matchState.window.nextSrc
                                  - cdict->matchState.window.base);
        if (cdictLen == 0) {
            /* don't even attach dictionaries with no contents */
            DEBUGLOG(4, "skipping attaching empty dictionary");
        } else {
            DEBUGLOG(4, "attaching dictionary into context");
            cctx->blockState.matchState.dictMatchState = &cdict->matchState;

            /* prep working match state so dict matches never have negative indices
             * when they are translated to the working context's index space. */
            if (cctx->blockState.matchState.window.dictLimit < cdictLen) {
                cctx->blockState.matchState.window.nextSrc =
                    cctx->blockState.matchState.window.base + cdictLen;
                ZSTD_window_clear(&cctx->blockState.matchState.window);
            }
            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
        }
    } else {
        DEBUGLOG(4, "copying dictionary into context");
        /* copy tables */
        {   size_t const chainSize = (cdict->cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict->cParams.chainLog);
            size_t const hSize =  (size_t)1 << cdict->cParams.hashLog;
            size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
            assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
            assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
            assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize);  /* chainTable must follow hashTable */
            assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
            memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
        }

        /* Zero the hashTable3, since the cdict never fills it */
        {   size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
            assert(cdict->matchState.hashLog3 == 0);
            memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
        }

        /* copy dictionary offsets */
        {
            ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
            ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
            dstMatchState->window       = srcMatchState->window;
            dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
            dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
            dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
        }
    }

    cctx->dictID = cdict->dictID;

    /* copy block state */
    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}
1339
1340
/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
 * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    /* copying is only legal before srcCCtx has consumed any data */
    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);

    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        params.fParams = fParams;
        /* size dstCCtx tables like srcCCtx's; skip the memset since the
         * tables are fully overwritten by the memcpy below */
        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
                                ZSTDcrp_noMemset, zbuff);
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    /* copy tables : hashTable, chainTable and hashTable3 are copied with a
     * single memcpy, which presumes they are laid out contiguously in that
     * order (the asserts below verify this layout) */
    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3;
        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
        assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize);
        memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
    }

    /* copy dictionary offsets */
    {
        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;

    /* copy block state (entropy tables + repeat offsets) */
    memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}
1396
1397
/*! ZSTD_copyCCtx() :
1398
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
1399
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
1400
 *  pledgedSrcSize==0 means "unknown".
1401
*   @return : 0, or an error code */
1402
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
1403
0
{
1404
0
    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
1405
0
    ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
1406
0
    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
1407
0
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
1408
0
    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
1409
1410
0
    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
1411
0
                                fParams, pledgedSrcSize,
1412
0
                                zbuff);
1413
0
}
1414
1415
1416
0
#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
 *  reduce table indexes by `reducerValue`, or squash to zero.
 *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
 *  It must be set to a clear 0/1 value, to remove branch during inlining.
 *  Presume table size is a multiple of ZSTD_ROWSIZE
 *  to help auto-vectorization */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
{
    int const nbRows = (int)size / ZSTD_ROWSIZE;
    int cellNb = 0;
    int rowNb;
    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
    assert(size < (1U<<31));   /* can be casted to int */
    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
        int column;
        for (column=0; column<ZSTD_ROWSIZE; column++) {
            if (preserveMark) {
                /* pre-add reducerValue to the mark so the subtraction below
                 * restores ZSTD_DUBT_UNSORTED_MARK unchanged (branchless) */
                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
                table[cellNb] += adder;
            }
            /* saturating subtraction : indexes below reducerValue become 0 */
            if (table[cellNb] < reducerValue) table[cellNb] = 0;
            else table[cellNb] -= reducerValue;
            cellNb++;
    }   }
}
1443
1444
/* Plain index reduction : no mark preservation (all strategies except btlazy2). */
static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}

/* Index reduction preserving ZSTD_DUBT_UNSORTED_MARK, required by btlazy2's binary tree. */
static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}
1453
1454
/*! ZSTD_reduceIndex() :
*   rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    /* hash table is present for every strategy */
    {   U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
    }

    /* chain table exists for all strategies except ZSTD_fast */
    if (zc->appliedParams.cParams.strategy != ZSTD_fast) {
        U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog;
        if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2)
            /* btlazy2 needs its "unsorted mark" sentinel preserved */
            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
        else
            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
    }

    /* hashLog3 == 0 means the 3-byte hash table is absent */
    if (ms->hashLog3) {
        U32 const h3Size = (U32)1 << ms->hashLog3;
        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
    }
}
1476
1477
1478
/*-*******************************************************
1479
*  Block entropic compression
1480
*********************************************************/
1481
1482
/* See doc/zstd_compression_format.md for detailed format description */
1483
1484
size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1485
0
{
1486
0
    if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
1487
0
    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
1488
0
    MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
1489
0
    return ZSTD_blockHeaderSize+srcSize;
1490
0
}
1491
1492
1493
/* Store literals uncompressed (set_basic) :
 * writes a 1-3 byte header encoding srcSize, then the raw literals.
 * @return : flSize + srcSize, or an error if dst is too small */
static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;
    /* header needs 1, 2 or 3 bytes depending on the magnitude of srcSize */
    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);

    if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);

    /* bit layout per case : blockType - sizeFormat - regenerated size */
    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not necessary : flSize is {1,2,3} */
            assert(0);
    }

    memcpy(ostart + flSize, src, srcSize);
    return srcSize + flSize;
}
1518
1519
/* Store literals as RLE (set_rle) : a 1-3 byte header encoding srcSize,
 * followed by the single repeated byte.
 * @return : flSize + 1 */
static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;
    /* header needs 1, 2 or 3 bytes depending on the magnitude of srcSize */
    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);

    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */

    /* bit layout per case : blockType - sizeFormat - regenerated size */
    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not necessary : flSize is {1,2,3} */
            assert(0);
    }

    /* the single byte to repeat */
    ostart[flSize] = *(const BYTE*)src;
    return flSize+1;
}
1544
1545
1546
/* ZSTD_minGain() :
1547
 * minimum compression required
1548
 * to generate a compress block or a compressed literals section.
1549
 * note : use same formula for both situations */
1550
static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
1551
0
{
1552
0
    U32 const minlog = (strat==ZSTD_btultra) ? 7 : 6;
1553
0
    return (srcSize >> minlog) + 2;
1554
0
}
1555
1556
/* ZSTD_compressLiterals() :
 * Compress the literals section with Huffman, falling back to raw storage
 * (set_basic) or RLE when compression does not gain enough.
 * @param prevHuf  previous block's Huffman tables (candidate for reuse)
 * @param nextHuf  receives the tables to carry into the next block
 * @param disableLiteralCompression  if non-zero, always store raw
 * @return : total size written into dst (header + literals), or an error code */
static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                                     ZSTD_hufCTables_t* nextHuf,
                                     ZSTD_strategy strategy, int disableLiteralCompression,
                                     void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                                     U32* workspace, const int bmi2)
{
    size_t const minGain = ZSTD_minGain(srcSize, strategy);
    /* literals header is 3, 4 or 5 bytes, depending on srcSize */
    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
    BYTE*  const ostart = (BYTE*)dst;
    U32 singleStream = srcSize < 256;
    symbolEncodingType_e hType = set_compressed;
    size_t cLitSize;

    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
                disableLiteralCompression);

    /* Prepare nextEntropy assuming reusing the existing table */
    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (disableLiteralCompression)
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);

    /* small ? don't even attempt compression (speed opt) */
#   define COMPRESS_LITERALS_SIZE_MIN 63
    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }

    if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
    {   HUF_repeat repeat = prevHuf->repeatMode;
        /* fast strategies on small inputs prefer reusing the previous table */
        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
        if (repeat != HUF_repeat_none) {
            /* reused the existing table */
            hType = set_repeat;
        }
    }

    /* not compressible, or gain too small : revert nextHuf and store raw */
    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }
    /* compressed to a single byte : RLE is cheaper */
    if (cLitSize==1) {
        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
    }

    if (hType == set_compressed) {
        /* using a newly constructed table */
        nextHuf->repeatMode = HUF_repeat_check;
    }

    /* Build header : blockType - sizeFormat - regenerated size - compressed size */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default:  /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    return lhSize+cLitSize;
}
1637
1638
1639
/* ZSTD_seqToCodes() :
 * Translate every stored sequence's litLength / offset / matchLength into its
 * symbol code, filling seqStorePtr's llCode / ofCode / mlCode tables.
 * A "long length" flagged via longLengthID/longLengthPos is forced to the max
 * code (MaxLL / MaxML) so the real length is carried by extra bits. */
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
{
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    BYTE* const llCodeTable = seqStorePtr->llCode;
    BYTE* const ofCodeTable = seqStorePtr->ofCode;
    BYTE* const mlCodeTable = seqStorePtr->mlCode;
    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    U32 u;
    for (u=0; u<nbSeq; u++) {
        U32 const llv = sequences[u].litLength;
        U32 const mlv = sequences[u].matchLength;
        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
        /* offset code = position of the offset's highest set bit */
        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
    }
    /* longLengthID : 1 = long literal length, 2 = long match length */
    if (seqStorePtr->longLengthID==1)
        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
    if (seqStorePtr->longLengthID==2)
        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
1659
1660
1661
/**
 * -log2(x / 256) lookup table for x in [0, 256).
 * If x == 0: Return 0
 * Else: Return floor(-log2(x / 256) * 256)
 * (i.e. the cost in 1/256th-of-a-bit units of a symbol with probability x/256.
 *  Note : the name keeps its historical misspelling of "Probability".)
 */
static unsigned const kInverseProbabiltyLog256[256] = {
    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
    5,    4,    2,    1,
};
1690
1691
1692
/**
1693
 * Returns the cost in bits of encoding the distribution described by count
1694
 * using the entropy bound.
1695
 */
1696
static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
1697
0
{
1698
0
    unsigned cost = 0;
1699
0
    unsigned s;
1700
0
    for (s = 0; s <= max; ++s) {
1701
0
        unsigned norm = (unsigned)((256 * count[s]) / total);
1702
0
        if (count[s] != 0 && norm == 0)
1703
0
            norm = 1;
1704
0
        assert(count[s] < total);
1705
0
        cost += count[s] * kInverseProbabiltyLog256[norm];
1706
0
    }
1707
0
    return cost >> 8;
1708
0
}
1709
1710
1711
/**
 * Returns the cost in bits of encoding the distribution in count using the
 * table described by norm. The max symbol support by norm is assumed >= max.
 * norm must be valid for every symbol with non-zero probability in count.
 */
static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
                                    unsigned const* count, unsigned const max)
{
    /* rescale norm's accuracyLog-scaled probabilities to the table's 256 scale */
    unsigned const shift = 8 - accuracyLog;
    size_t cost = 0;
    unsigned s;
    assert(accuracyLog <= 8);
    for (s = 0; s <= max; ++s) {
        /* norm[s] == -1 denotes a low-probability symbol, costed as probability 1 */
        unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
        unsigned const norm256 = normAcc << shift;
        assert(norm256 > 0);
        assert(norm256 < 256);
        cost += count[s] * kInverseProbabiltyLog256[norm256];
    }
    /* accumulator is in 1/256th-of-a-bit units */
    return cost >> 8;
}
1732
1733
1734
0
/* Read maxSymbolValue out of an FSE_CTable header.
 * Layout assumption : the CTable starts with two U16 fields,
 * the second of which is maxSymbolValue. */
static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
  U16 const* const header = (U16 const*)(void const*)ctable;
  return MEM_read16(header + 1);
}
1740
1741
1742
/**
 * Returns the cost in bits of encoding the distribution in count using ctable.
 * Returns an error if ctable cannot represent all the symbols in count.
 */
static size_t ZSTD_fseBitCost(
    FSE_CTable const* ctable,
    unsigned const* count,
    unsigned const max)
{
    /* costs accumulated with 8 fractional bits of precision */
    unsigned const kAccuracyLog = 8;
    size_t cost = 0;
    unsigned s;
    FSE_CState_t cstate;
    FSE_initCState(&cstate, ctable);
    /* ctable must cover every symbol up to max */
    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
                    ZSTD_getFSEMaxSymbolValue(ctable), max);
        return ERROR(GENERIC);
    }
    for (s = 0; s <= max; ++s) {
        unsigned const tableLog = cstate.stateLog;
        /* bitCost >= badCost signals a symbol with probability 0 in ctable */
        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
        if (count[s] == 0)
            continue;
        if (bitCost >= badCost) {
            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
            return ERROR(GENERIC);
        }
        cost += count[s] * bitCost;
    }
    /* drop the fractional bits */
    return cost >> kAccuracyLog;
}
1775
1776
/**
 * Returns the cost in bytes of encoding the normalized count header.
 * Returns an error if any of the helper functions return an error.
 */
static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
                              size_t const nbSeq, unsigned const FSELog)
{
    /* scratch buffer : only the returned size matters, the bytes are discarded */
    BYTE wksp[FSE_NCOUNTBOUND];
    S16 norm[MaxSeq + 1];
    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
    CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
}
1789
1790
1791
/* Whether the predefined (default) distribution tables are a legal
 * encoding choice for the symbol type being selected. */
typedef enum {
    ZSTD_defaultDisallowed = 0,
    ZSTD_defaultAllowed = 1
} ZSTD_defaultPolicy_e;
1795
1796
/* ZSTD_selectEncodingType() :
 * Choose how one symbol type (literal lengths, offsets, or match lengths)
 * will be encoded : rle, basic (predefined table), repeat (previous table),
 * or compressed (new FSE table).
 * Fast strategies (< ZSTD_lazy) use cheap heuristics; slower strategies
 * estimate the actual bit cost of each option.
 * Updates *repeatMode to reflect the choice made. */
MEM_STATIC symbolEncodingType_e
ZSTD_selectEncodingType(
        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
        FSE_CTable const* prevCTable,
        short const* defaultNorm, U32 defaultNormLog,
        ZSTD_defaultPolicy_e const isDefaultAllowed,
        ZSTD_strategy const strategy)
{
    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
    /* a single symbol value dominates the whole block */
    if (mostFrequent == nbSeq) {
        *repeatMode = FSE_repeat_none;
        if (isDefaultAllowed && nbSeq <= 2) {
            /* Prefer set_basic over set_rle when there are 2 or less symbols,
             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
             * If basic encoding isn't possible, always choose RLE.
             */
            DEBUGLOG(5, "Selected set_basic");
            return set_basic;
        }
        DEBUGLOG(5, "Selected set_rle");
        return set_rle;
    }
    if (strategy < ZSTD_lazy) {
        /* heuristic path : no cost estimation */
        if (isDefaultAllowed) {
            size_t const staticFse_nbSeq_max = 1000;
            size_t const mult = 10 - strategy;
            size_t const baseLog = 3;
            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
            assert(mult <= 9 && mult >= 7);
            if ( (*repeatMode == FSE_repeat_valid)
              && (nbSeq < staticFse_nbSeq_max) ) {
                DEBUGLOG(5, "Selected set_repeat");
                return set_repeat;
            }
            if ( (nbSeq < dynamicFse_nbSeq_min)
              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
                DEBUGLOG(5, "Selected set_basic");
                /* The format allows default tables to be repeated, but it isn't useful.
                 * When using simple heuristics to select encoding type, we don't want
                 * to confuse these tables with dictionaries. When running more careful
                 * analysis, we don't need to waste time checking both repeating tables
                 * and default tables.
                 */
                *repeatMode = FSE_repeat_none;
                return set_basic;
            }
        }
    } else {
        /* cost-estimation path : pick the cheapest of basic / repeat / compressed.
         * Disallowed options are given ERROR() values, which compare larger
         * than any valid cost. */
        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);

        if (isDefaultAllowed) {
            assert(!ZSTD_isError(basicCost));
            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
        }
        assert(!ZSTD_isError(NCountCost));
        assert(compressedCost < ERROR(maxCode));
        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
                    (U32)basicCost, (U32)repeatCost, (U32)compressedCost);
        if (basicCost <= repeatCost && basicCost <= compressedCost) {
            DEBUGLOG(5, "Selected set_basic");
            assert(isDefaultAllowed);
            *repeatMode = FSE_repeat_none;
            return set_basic;
        }
        if (repeatCost <= compressedCost) {
            DEBUGLOG(5, "Selected set_repeat");
            assert(!ZSTD_isError(repeatCost));
            return set_repeat;
        }
        assert(compressedCost < basicCost && compressedCost < repeatCost);
    }
    DEBUGLOG(5, "Selected set_compressed");
    *repeatMode = FSE_repeat_check;
    return set_compressed;
}
1876
1877
/* ZSTD_buildCTable() :
 * Build nextCTable according to the chosen encoding `type`, writing the table
 * description (NCount header) into dst when type==set_compressed.
 * @return : number of bytes written into dst (0 for repeat/basic, 1 for rle),
 *           or an error code */
MEM_STATIC size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
                U32* count, U32 max,
                const BYTE* codeTable, size_t nbSeq,
                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                const FSE_CTable* prevCTable, size_t prevCTableSize,
                void* workspace, size_t workspaceSize)
{
    BYTE* op = (BYTE*)dst;
    const BYTE* const oend = op + dstCapacity;

    switch (type) {
    case set_rle:
        /* single repeated symbol : emit it as one byte */
        *op = codeTable[0];
        CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
        return 1;
    case set_repeat:
        /* reuse previous block's table */
        memcpy(nextCTable, prevCTable, prevCTableSize);
        return 0;
    case set_basic:
        CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
        return 0;
    case set_compressed: {
        S16 norm[MaxSeq + 1];
        size_t nbSeq_1 = nbSeq;
        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
        /* exclude the last symbol from the stats when possible :
         * it is encoded with the initial state, so normalization works on nbSeq-1 */
        if (count[codeTable[nbSeq-1]] > 1) {
            count[codeTable[nbSeq-1]]--;
            nbSeq_1--;
        }
        assert(nbSeq_1 > 1);
        CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
            if (FSE_isError(NCountSize)) return NCountSize;
            CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
            return NCountSize;
        }
    }
    default: return assert(0), ERROR(GENERIC);
    }
}
1919
1920
/* ZSTD_encodeSequences_body() :
 * FSE-encode all sequences into a backward bitstream, interleaving the three
 * FSE states (LL / ML / OF) with the raw extra bits of each field.
 * Sequences are emitted last-to-first (the decoder reads the stream backwards).
 * The margin comments track worst-case bits accumulated between flushes,
 * for 32-bit and 64-bit accumulators.
 * `longOffsets` : split offsets wider than the accumulator into two writes.
 * @return : size of the bitstream, or an error code */
FORCE_INLINE_TEMPLATE size_t
ZSTD_encodeSequences_body(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    BIT_CStream_t blockStream;
    FSE_CState_t  stateMatchLength;
    FSE_CState_t  stateOffsetBits;
    FSE_CState_t  stateLitLength;

    CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */

    /* first symbols : initialize each FSE state from the last sequence,
     * then write its extra bits directly */
    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    if (longOffsets) {
        /* offset may exceed the accumulator : emit low extra bits first, flush,
         * then the remaining high bits */
        U32 const ofBits = ofCodeTable[nbSeq-1];
        int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
        if (extraBits) {
            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
            BIT_flushBits(&blockStream);
        }
        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
                    ofBits - extraBits);
    } else {
        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
    }
    BIT_flushBits(&blockStream);

    {   size_t n;
        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
            BYTE const llCode = llCodeTable[n];
            BYTE const ofCode = ofCodeTable[n];
            BYTE const mlCode = mlCodeTable[n];
            U32  const llBits = LL_bits[llCode];
            U32  const ofBits = ofCode;
            U32  const mlBits = ML_bits[mlCode];
            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                        sequences[n].litLength,
                        sequences[n].matchLength + MINMATCH,
                        sequences[n].offset);
                                                                            /* 32b*/  /* 64b*/
                                                                            /* (7)*/  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
                BIT_flushBits(&blockStream);                                /* (7)*/
            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
            if (longOffsets) {
                int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                if (extraBits) {
                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
                    BIT_flushBits(&blockStream);                            /* (7)*/
                }
                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
                            ofBits - extraBits);                            /* 31 */
            } else {
                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
            }
            BIT_flushBits(&blockStream);                                    /* (7)*/
    }   }

    /* flush the three FSE states so the decoder can bootstrap them */
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
    FSE_flushCState(&blockStream, &stateMatchLength);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
    FSE_flushCState(&blockStream, &stateOffsetBits);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
    FSE_flushCState(&blockStream, &stateLitLength);

    {   size_t const streamSize = BIT_closeCStream(&blockStream);
        if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
        return streamSize;
    }
}
2007
2008
static size_t
2009
ZSTD_encodeSequences_default(
2010
            void* dst, size_t dstCapacity,
2011
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
2012
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
2013
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
2014
            seqDef const* sequences, size_t nbSeq, int longOffsets)
2015
0
{
2016
0
    return ZSTD_encodeSequences_body(dst, dstCapacity,
2017
0
                                    CTable_MatchLength, mlCodeTable,
2018
0
                                    CTable_OffsetBits, ofCodeTable,
2019
0
                                    CTable_LitLength, llCodeTable,
2020
0
                                    sequences, nbSeq, longOffsets);
2021
0
}
2022
2023
2024
#if DYNAMIC_BMI2

/* ZSTD_encodeSequences_bmi2() :
 * BMI2-targeted instantiation of the shared sequence-encoding body.
 * Identical logic to the default variant; the target attribute lets the
 * compiler emit BMI2 instructions for the bit-manipulation hot path. */
static TARGET_ATTRIBUTE("bmi2") size_t
ZSTD_encodeSequences_bmi2(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    return ZSTD_encodeSequences_body(dst, dstCapacity,
                CTable_MatchLength, mlCodeTable,
                CTable_OffsetBits, ofCodeTable,
                CTable_LitLength, llCodeTable,
                sequences, nbSeq, longOffsets);
}

#endif
2042
2043
size_t ZSTD_encodeSequences(
2044
            void* dst, size_t dstCapacity,
2045
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
2046
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
2047
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
2048
            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
2049
0
{
2050
0
#if DYNAMIC_BMI2
2051
0
    if (bmi2) {
2052
0
        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
2053
0
                                         CTable_MatchLength, mlCodeTable,
2054
0
                                         CTable_OffsetBits, ofCodeTable,
2055
0
                                         CTable_LitLength, llCodeTable,
2056
0
                                         sequences, nbSeq, longOffsets);
2057
0
    }
2058
0
#endif
2059
0
    (void)bmi2;
2060
0
    return ZSTD_encodeSequences_default(dst, dstCapacity,
2061
0
                                        CTable_MatchLength, mlCodeTable,
2062
0
                                        CTable_OffsetBits, ofCodeTable,
2063
0
                                        CTable_LitLength, llCodeTable,
2064
0
                                        sequences, nbSeq, longOffsets);
2065
0
}
2066
2067
/* ZSTD_compressSequences_internal() :
 * Entropy-encodes the content of a seqStore into `dst` :
 * 1. compresses the literals section,
 * 2. writes the sequences header (nbSeq + encoding-type flags),
 * 3. builds/serializes the three FSE tables (LL, OF, ML),
 * 4. encodes the sequence bitstream.
 * `workspace` must hold at least HUF_WORKSPACE_SIZE bytes.
 * @return : compressed size written into `dst`,
 *           0 to request a raw uncompressed block (decoder-compat workaround),
 *           or an error code (note: dstSize_tooSmall may be handled by caller). */
MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
                              ZSTD_entropyCTables_t const* prevEntropy,
                              ZSTD_entropyCTables_t* nextEntropy,
                              ZSTD_CCtx_params const* cctxParams,
                              void* dst, size_t dstCapacity, U32* workspace,
                              const int bmi2)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    U32 count[MaxSeq+1];
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
    BYTE* seqHead;
    /* tracks the position of the last serialized FSE normalized-count table,
     * needed for the <=1.3.4 decoder-compatibility check at the end */
    BYTE* lastNCount = NULL;

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const litSize = seqStorePtr->lit - literals;
        /* (strategy==ZSTD_fast && targetLength>0) is the negotiated signal for
         * "skip literal compression entirely" */
        int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
                                    cctxParams->cParams.strategy, disableLiteralCompression,
                                    op, dstCapacity,
                                    literals, litSize,
                                    workspace, bmi2);
        if (ZSTD_isError(cSize))
          return cSize;
        assert(cSize <= dstCapacity);
        op += cSize;
    }

    /* Sequences Header : nbSeq uses a 1-3 byte variable-length encoding */
    if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
    if (nbSeq < 0x7F)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    if (nbSeq==0) {
        /* Copy the old tables over as if we repeated them */
        memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
        return op - ostart;
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;

    /* convert length/distances into codes */
    ZSTD_seqToCodes(seqStorePtr);
    /* build CTable for Literal Lengths */
    {   U32 max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);   /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, count, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->fse.litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
                                                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                                    prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
                                                    workspace, HUF_WORKSPACE_SIZE);
            if (ZSTD_isError(countSize)) return countSize;
            if (LLtype == set_compressed)
                lastNCount = op;
            op += countSize;
    }   }
    /* build CTable for Offsets */
    {   U32 max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, count, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->fse.offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy);
        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
                                                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                                    prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
                                                    workspace, HUF_WORKSPACE_SIZE);
            if (ZSTD_isError(countSize)) return countSize;
            if (Offtype == set_compressed)
                lastNCount = op;
            op += countSize;
    }   }
    /* build CTable for MatchLengths */
    {   U32 max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);   /* can't fail */
        DEBUGLOG(5, "Building ML table");
        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, count, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->fse.matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy);
        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
                                                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
                                                    prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
                                                    workspace, HUF_WORKSPACE_SIZE);
            if (ZSTD_isError(countSize)) return countSize;
            if (MLtype == set_compressed)
                lastNCount = op;
            op += countSize;
    }   }

    /* 2 bits per symbol-type flag; lowest 2 bits of the byte stay 0 */
    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, oend - op,
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
        op += bitstreamSize;
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() recieves a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
        if (lastNCount && (op - lastNCount) < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(op - lastNCount == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
    }

    return op - ostart;
}
2211
2212
/* ZSTD_compressSequences() :
 * Wraps ZSTD_compressSequences_internal() with compressibility policy :
 * converts a recoverable dstSize_tooSmall into "store this block raw",
 * rejects blocks that don't compress by at least the minimum gain,
 * and downgrades dictionary offset-code trust after the first block.
 * @return : compressed size, 0 meaning "emit a raw block", or an error code */
MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
                        const ZSTD_entropyCTables_t* prevEntropy,
                              ZSTD_entropyCTables_t* nextEntropy,
                        const ZSTD_CCtx_params* cctxParams,
                              void* dst, size_t dstCapacity,
                              size_t srcSize, U32* workspace, int bmi2)
{
    size_t const result = ZSTD_compressSequences_internal(seqStorePtr,
                              prevEntropy, nextEntropy, cctxParams,
                              dst, dstCapacity, workspace, bmi2);
    if (result == 0) return 0;

    /* When srcSize <= dstCapacity there is always room for a raw uncompressed
     * block, so running out of space just means "not compressible" : report 0
     * (raw block) instead of propagating the error. */
    if ((srcSize <= dstCapacity) && (result == ERROR(dstSize_tooSmall)))
        return 0;
    if (ZSTD_isError(result)) return result;

    /* Require a minimum size gain, otherwise a raw block is preferable */
    if (result >= srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy))
        return 0;  /* block not compressed */

    /* Dictionary offset codes are only guaranteed valid for the first block;
     * afterwards, the offcode table must be re-validated against actual data. */
    if (nextEntropy->fse.offcode_repeatMode == FSE_repeat_valid)
        nextEntropy->fse.offcode_repeatMode = FSE_repeat_check;

    return result;
}
2244
2245
/* ZSTD_selectBlockCompressor() :
 * Not static, but internal use only (used by long distance matcher)
 * assumption : strat is a valid strategy */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
{
    /* Dispatch table indexed [dictMode][strategy].
     * Rows : 0 = no dictionary, 1 = external dictionary (extDict),
     *        2 = attached dictionary (dictMatchState).
     * Columns : one entry per ZSTD_strategy value up to ZSTD_btultra;
     * column 0 duplicates the fast compressor as a default. */
    static const ZSTD_blockCompressor blockCompressor[3][(unsigned)ZSTD_btultra+1] = {
        { ZSTD_compressBlock_fast  /* default for 0 */,
          ZSTD_compressBlock_fast,
          ZSTD_compressBlock_doubleFast,
          ZSTD_compressBlock_greedy,
          ZSTD_compressBlock_lazy,
          ZSTD_compressBlock_lazy2,
          ZSTD_compressBlock_btlazy2,
          ZSTD_compressBlock_btopt,
          ZSTD_compressBlock_btultra },
        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
          ZSTD_compressBlock_fast_extDict,
          ZSTD_compressBlock_doubleFast_extDict,
          ZSTD_compressBlock_greedy_extDict,
          ZSTD_compressBlock_lazy_extDict,
          ZSTD_compressBlock_lazy2_extDict,
          ZSTD_compressBlock_btlazy2_extDict,
          ZSTD_compressBlock_btopt_extDict,
          ZSTD_compressBlock_btultra_extDict },
        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
          ZSTD_compressBlock_fast_dictMatchState,
          ZSTD_compressBlock_doubleFast_dictMatchState,
          ZSTD_compressBlock_greedy_dictMatchState,
          ZSTD_compressBlock_lazy_dictMatchState,
          ZSTD_compressBlock_lazy2_dictMatchState,
          ZSTD_compressBlock_btlazy2_dictMatchState,
          ZSTD_compressBlock_btopt_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState }
    };
    ZSTD_blockCompressor selectedCompressor;
    /* table layout relies on ZSTD_fast being strategy index 1 */
    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);

    assert((U32)strat >= (U32)ZSTD_fast);
    assert((U32)strat <= (U32)ZSTD_btultra);
    selectedCompressor = blockCompressor[(int)dictMode][(U32)strat];
    assert(selectedCompressor != NULL);
    return selectedCompressor;
}
2288
2289
static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
2290
                                   const BYTE* anchor, size_t lastLLSize)
2291
0
{
2292
0
    memcpy(seqStorePtr->lit, anchor, lastLLSize);
2293
0
    seqStorePtr->lit += lastLLSize;
2294
0
}
2295
2296
/* ZSTD_resetSeqStore() :
 * Rewinds the sequence store so that a new block can be accumulated :
 * literal and sequence cursors return to their buffer starts, and any
 * pending long-length escape is cleared. */
void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
    ssPtr->longLengthID = 0;
    ssPtr->lit = ssPtr->litStart;
    ssPtr->sequences = ssPtr->sequencesStart;
}
2302
2303
/* ZSTD_compressBlock_internal() :
 * Compresses a single block : selects sequences (via external sequences,
 * the long-distance matcher, or a strategy-specific block compressor),
 * then entropy-encodes them with ZSTD_compressSequences().
 * @return : compressed size, 0 meaning "store this block raw", or an error */
static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                                        void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%zu, dictLimit=%u, nextToUpdate=%u)",
                dstCapacity, ms->window.dictLimit, ms->nextToUpdate);

    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
        /* external sequences covering this input must still be consumed */
        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.searchLength);
        return 0;   /* don't even attempt compression below a certain srcSize */
    }
    ZSTD_resetSeqStore(&(zc->seqStore));
    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;   /* required for optimal parser to read stats from dictionary */

    /* a gap between an attached dict and the current window is not safe,
     * they must remain adjacent, and when that stops being the case, the dict
     * must be unset */
    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);

    /* limited update after a very long match :
     * cap how far nextToUpdate lags behind current, so table catch-up
     * work stays bounded per block */
    {   const BYTE* const base = ms->window.base;
        const BYTE* const istart = (const BYTE*)src;
        const U32 current = (U32)(istart-base);
        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
        if (current > ms->nextToUpdate + 384)
            ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
    }

    /* select and store sequences */
    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
        size_t lastLLSize;
        /* seed the next block's repcodes from the previous block's */
        {   int i;
            for (i = 0; i < ZSTD_REP_NUM; ++i)
                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
        }
        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
            /* caller-provided sequences take priority; incompatible with internal LDM */
            assert(!zc->appliedParams.ldmParams.enableLdm);
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       &zc->appliedParams.cParams,
                                       src, srcSize);
            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
        } else if (zc->appliedParams.ldmParams.enableLdm) {
            rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};

            ldmSeqStore.seq = zc->ldmSequences;
            ldmSeqStore.capacity = zc->maxNbLdmSequences;
            /* Updates ldmSeqStore.size */
            CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                               &zc->appliedParams.ldmParams,
                                               src, srcSize));
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&ldmSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       &zc->appliedParams.cParams,
                                       src, srcSize);
            assert(ldmSeqStore.pos == ldmSeqStore.size);
        } else {   /* not long range mode */
            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, &zc->appliedParams.cParams, src, srcSize);
        }
        /* the block compressor returns the size of unmatched trailing literals */
        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
    }   }

    /* encode sequences and literals */
    {   size_t const cSize = ZSTD_compressSequences(&zc->seqStore,
                                &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
                                &zc->appliedParams,
                                dst, dstCapacity,
                                srcSize, zc->entropyWorkspace, zc->bmi2);
        if (ZSTD_isError(cSize) || cSize == 0) return cSize;
        /* confirm repcodes and entropy tables :
         * swap prev/next only on success, so a raw block keeps prior stats */
        {   ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
            zc->blockState.prevCBlock = zc->blockState.nextCBlock;
            zc->blockState.nextCBlock = tmp;
        }
        return cSize;
    }
}
2389
2390
2391
/*! ZSTD_compress_frameChunk() :
*   Compress a chunk of data into one or multiple blocks.
*   All blocks will be terminated, all input will be consumed.
*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
*   Frame is supposed already started (header already produced)
*   @return : compressed size, or an error code
*/
static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                                     U32 lastFrameChunk)
{
    size_t blockSize = cctx->blockSize;
    size_t remaining = srcSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
    assert(cctx->appliedParams.cParams.windowLog <= 31);

    DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize);
    /* frame checksum accumulates over the raw input, not the compressed output */
    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
        XXH64_update(&cctx->xxhState, src, srcSize);

    while (remaining) {
        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
        /* last-block flag only when this is the frame's final chunk AND final block */
        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);

        if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
            return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
        if (remaining < blockSize) blockSize = remaining;

        /* when 32-bit indexes are close to overflow, rescale them downward */
        if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);

            ZSTD_reduceIndex(cctx, correction);
            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
            else ms->nextToUpdate -= correction;
            /* after correction, any attached dictionary is no longer adjacent : drop it */
            ms->loadedDictEnd = 0;
            ms->dictMatchState = NULL;
        }
        ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;

        {   size_t cSize = ZSTD_compressBlock_internal(cctx,
                                op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                                ip, blockSize);
            if (ZSTD_isError(cSize)) return cSize;

            if (cSize == 0) {  /* block is not compressible */
                /* raw block : 3-byte header (lastBlock flag, type, size) + verbatim copy */
                U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
                if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
                MEM_writeLE32(op, cBlockHeader24);   /* 4th byte will be overwritten */
                memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
                cSize = ZSTD_blockHeaderSize + blockSize;
            } else {
                U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                MEM_writeLE24(op, cBlockHeader24);
                cSize += ZSTD_blockHeaderSize;
            }

            ip += blockSize;
            assert(remaining >= blockSize);
            remaining -= blockSize;
            op += cSize;
            assert(dstCapacity >= cSize);
            dstCapacity -= cSize;
            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                        (U32)cSize);
    }   }

    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
    return op-ostart;
}
2469
2470
2471
/* ZSTD_writeFrameHeader() :
 * Serializes the frame header into `dst` : optional magic number,
 * frame-header descriptor byte, optional window descriptor, optional
 * dictionary ID (1/2/4 bytes) and optional frame content size (1/2/4/8 bytes),
 * as laid out by the Zstandard frame format.
 * @return : number of bytes written, or dstSize_tooSmall */
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
                                    ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
{   BYTE* const op = (BYTE*)dst;
    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
    U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
    U32   const checksumFlag = params.fParams.checksumFlag>0;
    U32   const windowSize = (U32)1 << params.cParams.windowLog;
    /* single-segment : whole content fits in one window, so the window
     * descriptor byte can be omitted */
    U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
    BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
    U32   const fcsCode = params.fParams.contentSizeFlag ?
                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
    /* descriptor layout : bits 0-1 dictID size code, bit 2 checksum flag,
     * bit 5 single-segment flag, bits 6-7 frame-content-size code */
    BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
    size_t pos=0;

    assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);

    /* magic number is skipped in magicless (ZSTD_f_zstd1_magicless) format */
    if (params.format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDecriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    switch(dictIDSizeCode)
    {
        default:  assert(0); /* impossible */
        case 0 : break;
        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    }
    switch(fcsCode)
    {
        default:  assert(0); /* impossible */
        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    }
    return pos;
}
2514
2515
/* ZSTD_writeLastEmptyBlock() :
2516
 * output an empty Block with end-of-frame mark to complete a frame
2517
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
2518
 *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
2519
 */
2520
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
2521
0
{
2522
0
    if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
2523
0
    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
2524
0
        MEM_writeLE24(dst, cBlockHeader24);
2525
0
        return ZSTD_blockHeaderSize;
2526
0
    }
2527
0
}
2528
2529
/* ZSTD_referenceExternalSequences() :
 * Registers caller-provided sequences to be used for the next compression
 * job instead of running the internal match finders.
 * Must be called before any data is consumed (ZSTDcs_init stage) and is
 * incompatible with the internal long-distance matcher.
 * @return : 0 on success, or an error code (stage_wrong / parameter_unsupported) */
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
    if (cctx->stage != ZSTDcs_init) return ERROR(stage_wrong);
    if (cctx->appliedParams.ldmParams.enableLdm) return ERROR(parameter_unsupported);
    {   rawSeqStore_t* const store = &cctx->externSeqStore;
        store->seq = seq;
        store->size = nbSeq;
        store->capacity = nbSeq;
        store->pos = 0;
    }
    return 0;
}
2541
2542
2543
/* ZSTD_compressContinue_internal() :
 * Shared worker for streaming frame compression and single-block compression.
 * `frame` != 0 : operate in frame mode (writes the frame header on the first
 *                call, tracks consumed size against the pledged source size);
 * `lastFrameChunk` != 0 : mark the final block of the frame.
 * @return : nb of bytes written into `dst` (may include a frame header),
 *           or an error code. */
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                               U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* ms = &cctx->blockState.matchState;
    size_t fhSize = 0;   /* frame header size, 0 unless written below */

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (U32)srcSize);
    if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */

    /* first call in frame mode : emit the frame header ahead of the payload */
    if (frame && (cctx->stage==ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        if (ZSTD_isError(fhSize)) return fhSize;
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
        /* input not contiguous with previous data : restart insertion from dictLimit */
        ms->nextToUpdate = ms->window.dictLimit;
    }
    /* LDM keeps its own window over the same input */
    if (cctx->appliedParams.ldmParams.enableLdm)
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize);
    {   size_t const cSize = frame ?
                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
        if (ZSTD_isError(cSize)) return cSize;
        cctx->consumedSrcSize += srcSize;
        cctx->producedCSize += (cSize + fhSize);
        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
            /* mid-frame check : consumed so far must not exceed the pledge */
            if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
                DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
                    (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
                return ERROR(srcSize_wrong);
            }
        }
        return cSize + fhSize;
    }
}
2591
2592
/* Streaming entry point : compress one more chunk of `src` into `dst`,
 * in frame mode, without closing the frame. */
size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize)
{
    U32 const frameMode = 1;
    U32 const lastChunk = 0;
    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (U32)srcSize);
    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity,
                                          src, srcSize, frameMode, lastChunk);
}
2599
2600
2601
size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
2602
0
{
2603
0
    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
2604
0
    assert(!ZSTD_checkCParams(cParams));
2605
0
    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
2606
0
}
2607
2608
/* Compress `src` as a single block (no frame header, no epilogue).
 * `srcSize` must not exceed ZSTD_getBlockSize(cctx). */
size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    if (srcSize > ZSTD_getBlockSize(cctx))
        return ERROR(srcSize_wrong);
    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity,
                                          src, srcSize,
                                          0 /* no frame */, 0 /* not last */);
}
2614
2615
/*! ZSTD_loadDictionaryContent() :
 *  Feed dictionary content into the match state so future matches can
 *  reference it, filling the strategy-specific search tables along the way.
 *  @return : 0, or an error code
 */
static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
                                         ZSTD_CCtx_params const* params,
                                         const void* src, size_t srcSize,
                                         ZSTD_dictTableLoadMethod_e dtlm)
{
    const BYTE* const ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;
    ZSTD_compressionParameters const* cParams = &params->cParams;

    ZSTD_window_update(&ms->window, src, srcSize);
    /* forceWindow disables dictionary-end tracking : dictionary treated as regular data */
    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);

    /* content too small to seed any match finder */
    if (srcSize <= HASH_READ_SIZE) return 0;

    /* each strategy maintains its own table(s) : fill the one in use */
    switch(params->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, cParams, iend, dtlm);
        break;
    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, cParams, iend, dtlm);
        break;

    case ZSTD_greedy:   /* fallthrough : lazy strategies share the chain table */
    case ZSTD_lazy:
    case ZSTD_lazy2:
        /* always true here (guarded by early return above) — kept for safety */
        if (srcSize >= HASH_READ_SIZE)
            ZSTD_insertAndFindFirstIndex(ms, cParams, iend-HASH_READ_SIZE);
        break;

    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
    case ZSTD_btopt:
    case ZSTD_btultra:
        if (srcSize >= HASH_READ_SIZE)
            ZSTD_updateTree(ms, cParams, iend-HASH_READ_SIZE, iend);
        break;

    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    /* everything up to iend is now indexed */
    ms->nextToUpdate = (U32)(iend - ms->window.base);
    return 0;
}
2662
2663
2664
/* Dictionaries that assign zero probability to symbols that show up causes problems
2665
   when FSE encoding.  Refuse dictionaries that assign zero probability to symbols
2666
   that we may encounter during compression.
2667
   NOTE: This behavior is not standard and could be improved in the future. */
2668
0
static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
2669
0
    U32 s;
2670
0
    if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
2671
0
    for (s = 0; s <= maxSymbolValue; ++s) {
2672
0
        if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
2673
0
    }
2674
0
    return 0;
2675
0
}
2676
2677
2678
/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * Parse a structured (magic-prefixed) zstd dictionary : load its entropy
 * tables (Huffman literals table + 3 FSE tables), its starting repcodes,
 * then its raw content, validating each section along the way.
 * @return : dictID, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed > 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    size_t dictID;

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize > 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictPtr += 4;   /* skip magic number */
    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
    dictPtr += 4;

    /* literals : Huffman table */
    {   unsigned maxSymbolValue = 255;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
        if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
        /* table must cover every possible byte value */
        if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
        dictPtr += hufHeaderSize;
    }

    /* offset codes : FSE table */
    {   unsigned offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
        /* fill all offset symbols to avoid garbage at end of table */
        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable, offcodeNCount, MaxOff, offcodeLog, workspace, HUF_WORKSPACE_SIZE),
                 dictionary_corrupted);
        dictPtr += offcodeHeaderSize;
    }

    /* match lengths : FSE table */
    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
        /* Every match length code must have non-zero probability */
        CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE),
                 dictionary_corrupted);
        dictPtr += matchlengthHeaderSize;
    }

    /* literal lengths : FSE table */
    {   short litlengthNCount[MaxLL+1];
        unsigned litlengthMaxValue = MaxLL, litlengthLog;
        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
        /* Every literal length code must have non-zero probability */
        CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE),
                 dictionary_corrupted);
        dictPtr += litlengthHeaderSize;
    }

    /* starting repcodes : 3 x 4-byte little-endian values */
    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
    bs->rep[0] = MEM_readLE32(dictPtr+0);
    bs->rep[1] = MEM_readLE32(dictPtr+4);
    bs->rep[2] = MEM_readLE32(dictPtr+8);
    dictPtr += 12;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        U32 offcodeMax = MaxOff;
        if (dictContentSize <= ((U32)-1) - 128 KB) {
            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
        }
        /* All offset values <= dictContentSize + 128 KB must be representable */
        CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
        /* All repCodes must be <= dictContentSize and != 0*/
        {   U32 u;
            for (u=0; u<3; u++) {
                if (bs->rep[u] == 0) return ERROR(dictionary_corrupted);
                if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
        }   }

        /* all entropy tables were loaded successfully : mark them reusable */
        bs->entropy.huf.repeatMode = HUF_repeat_valid;
        bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
        CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
        return dictID;
    }
}
2779
2780
/** ZSTD_compress_insertDictionary() :
2781
*   @return : dictID, or an error code */
2782
static size_t
2783
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
2784
                               ZSTD_matchState_t* ms,
2785
                         const ZSTD_CCtx_params* params,
2786
                         const void* dict, size_t dictSize,
2787
                               ZSTD_dictContentType_e dictContentType,
2788
                               ZSTD_dictTableLoadMethod_e dtlm,
2789
                               void* workspace)
2790
0
{
2791
0
    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
2792
0
    if ((dict==NULL) || (dictSize<=8)) return 0;
2793
2794
0
    ZSTD_reset_compressedBlockState(bs);
2795
2796
    /* dict restricted modes */
2797
0
    if (dictContentType == ZSTD_dct_rawContent)
2798
0
        return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
2799
2800
0
    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
2801
0
        if (dictContentType == ZSTD_dct_auto) {
2802
0
            DEBUGLOG(4, "raw content dictionary detected");
2803
0
            return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
2804
0
        }
2805
0
        if (dictContentType == ZSTD_dct_fullDict)
2806
0
            return ERROR(dictionary_wrong);
2807
0
        assert(0);   /* impossible */
2808
0
    }
2809
2810
    /* dict as full zstd dictionary */
2811
0
    return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
2812
0
}
2813
2814
/*! ZSTD_compressBegin_internal() :
 *  Initialize `cctx` for a new frame : either from a digested dictionary
 *  (`cdict`), from raw dictionary bytes (`dict`), or from nothing.
 *  `params` must already be validated by the caller.
 * @return : 0, or an error code */
size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                             const void* dict, size_t dictSize,
                             ZSTD_dictContentType_e dictContentType,
                             ZSTD_dictTableLoadMethod_e dtlm,
                             const ZSTD_CDict* cdict,
                             ZSTD_CCtx_params params, U64 pledgedSrcSize,
                             ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    /* non-empty digested dictionary : delegate to the CDict fast path */
    if (cdict && cdict->dictContentSize>0) {
        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
    }

    CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                     ZSTDcrp_continue, zbuff) );
    {
        /* load the (possibly NULL/empty) raw dictionary into the fresh state */
        size_t const dictID = ZSTD_compress_insertDictionary(
                cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
        if (ZSTD_isError(dictID)) return dictID;
        assert(dictID <= (size_t)(U32)-1);
        cctx->dictID = (U32)dictID;
    }
    return 0;
}
2845
2846
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
2847
                                    const void* dict, size_t dictSize,
2848
                                    ZSTD_dictContentType_e dictContentType,
2849
                                    ZSTD_dictTableLoadMethod_e dtlm,
2850
                                    const ZSTD_CDict* cdict,
2851
                                    ZSTD_CCtx_params params,
2852
                                    unsigned long long pledgedSrcSize)
2853
0
{
2854
0
    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
2855
    /* compression parameters verification and optimization */
2856
0
    CHECK_F( ZSTD_checkCParams(params.cParams) );
2857
0
    return ZSTD_compressBegin_internal(cctx,
2858
0
                                       dict, dictSize, dictContentType, dtlm,
2859
0
                                       cdict,
2860
0
                                       params, pledgedSrcSize,
2861
0
                                       ZSTDb_not_buffered);
2862
0
}
2863
2864
/*! ZSTD_compressBegin_advanced() :
2865
*   @return : 0, or an error code */
2866
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
2867
                             const void* dict, size_t dictSize,
2868
                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
2869
0
{
2870
0
    ZSTD_CCtx_params const cctxParams =
2871
0
            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2872
0
    return ZSTD_compressBegin_advanced_internal(cctx,
2873
0
                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
2874
0
                                            NULL /*cdict*/,
2875
0
                                            cctxParams, pledgedSrcSize);
2876
0
}
2877
2878
/* Start a frame at `compressionLevel`, loading `dict` with auto-detected
 * content type (raw vs structured zstd dictionary). */
size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
    ZSTD_CCtx_params const internalParams =
            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize);
    return ZSTD_compressBegin_internal(cctx,
                                       dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                       NULL /* no cdict */,
                                       internalParams, ZSTD_CONTENTSIZE_UNKNOWN,
                                       ZSTDb_not_buffered);
}
2887
2888
/* Begin a new frame at `compressionLevel`, without any dictionary. */
size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
    return ZSTD_compressBegin_usingDict(cctx, NULL /* no dict */, 0, compressionLevel);
}
2892
2893
2894
/*! ZSTD_writeEpilogue() :
 *  Ends a frame : writes the frame header if nothing was written yet
 *  (empty frame), a final empty block if no block was marked "last",
 *  and the optional xxh64 content checksum.
 *  @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    size_t fhSize = 0;

    DEBUGLOG(4, "ZSTD_writeEpilogue");
    if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */

    /* special case : empty frame */
    if (cctx->stage == ZSTDcs_init) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
        if (ZSTD_isError(fhSize)) return fhSize;
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        /* NOTE(review): MEM_writeLE32 stores 4 bytes (capacity checked for 4),
         * but only the 3-byte block header is accounted; the 4th byte is slack
         * or gets overwritten by the checksum below — confirm against upstream. */
        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }

    /* optional 4-byte content checksum (low 32 bits of xxh64) */
    if (cctx->appliedParams.fParams.checksumFlag) {
        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }

    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
    return op-ostart;
}
2935
2936
/* ZSTD_compressEnd() :
 * Compress the last chunk of `src`, close the frame (epilogue), and verify
 * the total consumed size against the pledged source size.
 * @return : total nb of bytes written into `dst`, or an error code */
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize)
{
    size_t endResult;
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    if (ZSTD_isError(cSize)) return cSize;
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    if (ZSTD_isError(endResult)) return endResult;
    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
        DEBUGLOG(4, "end of frame : controlling src size");
        /* at end of frame, consumed size must match the pledge exactly */
        if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
            DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
                (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
            return ERROR(srcSize_wrong);
    }   }
    return cSize + endResult;
}
2958
2959
2960
static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
2961
                                      void* dst, size_t dstCapacity,
2962
                                const void* src, size_t srcSize,
2963
                                const void* dict,size_t dictSize,
2964
                                      ZSTD_parameters params)
2965
0
{
2966
0
    ZSTD_CCtx_params const cctxParams =
2967
0
            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2968
0
    DEBUGLOG(4, "ZSTD_compress_internal");
2969
0
    return ZSTD_compress_advanced_internal(cctx,
2970
0
                                           dst, dstCapacity,
2971
0
                                           src, srcSize,
2972
0
                                           dict, dictSize,
2973
0
                                           cctxParams);
2974
0
}
2975
2976
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
2977
                               void* dst, size_t dstCapacity,
2978
                         const void* src, size_t srcSize,
2979
                         const void* dict,size_t dictSize,
2980
                               ZSTD_parameters params)
2981
0
{
2982
0
    DEBUGLOG(4, "ZSTD_compress_advanced");
2983
0
    CHECK_F(ZSTD_checkCParams(params.cParams));
2984
0
    return ZSTD_compress_internal(cctx,
2985
0
                                  dst, dstCapacity,
2986
0
                                  src, srcSize,
2987
0
                                  dict, dictSize,
2988
0
                                  params);
2989
0
}
2990
2991
/* Internal */
2992
size_t ZSTD_compress_advanced_internal(
2993
        ZSTD_CCtx* cctx,
2994
        void* dst, size_t dstCapacity,
2995
        const void* src, size_t srcSize,
2996
        const void* dict,size_t dictSize,
2997
        ZSTD_CCtx_params params)
2998
0
{
2999
0
    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (U32)srcSize);
3000
0
    CHECK_F( ZSTD_compressBegin_internal(cctx,
3001
0
                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
3002
0
                         params, srcSize, ZSTDb_not_buffered) );
3003
0
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
3004
0
}
3005
3006
/* Single-pass compression with a raw dictionary buffer and a compression level. */
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                         const void* dict, size_t dictSize,
                               int compressionLevel)
{
    size_t const sizeHint = srcSize + (!srcSize);            /* map 0 to 1 for parameter selection */
    size_t const effDictSize = (dict != NULL) ? dictSize : 0;
    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, sizeHint, effDictSize);
    ZSTD_CCtx_params const internalParams =
            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
    assert(params.fParams.contentSizeFlag == 1);
    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity,
                                           src, srcSize, dict, dictSize,
                                           internalParams);
}
3017
3018
/* One-shot compression using an existing context, no dictionary. */
size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize,
                         int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize);
    assert(cctx != NULL);
    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize,
                                   NULL /* no dict */, 0, compressionLevel);
}
3027
3028
/* One-shot convenience API : builds a transient context on the stack,
 * compresses, and releases the context's heap content before returning. */
size_t ZSTD_compress(void* dst, size_t dstCapacity,
               const void* src, size_t srcSize,
                     int compressionLevel)
{
    ZSTD_CCtx ctxBody;   /* transient context, lives on the stack */
    size_t result;
    ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
    result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
    /* can't free ctxBody itself, as it's on stack; free only heap content */
    ZSTD_freeCCtxContent(&ctxBody);
    return result;
}
3039
3040
3041
/* =====  Dictionary API  ===== */
3042
3043
/*! ZSTD_estimateCDictSize_advanced() :
3044
 *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
3045
size_t ZSTD_estimateCDictSize_advanced(
3046
        size_t dictSize, ZSTD_compressionParameters cParams,
3047
        ZSTD_dictLoadMethod_e dictLoadMethod)
3048
0
{
3049
0
    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
3050
0
    return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
3051
0
           + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
3052
0
}
3053
3054
/* Estimate CDict memory for `compressionLevel`, assuming the dictionary
 * content is copied (ZSTD_dlm_byCopy). */
size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters const cParams =
            ZSTD_getCParams(compressionLevel, 0 /* unknown srcSize */, dictSize);
    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}
3059
3060
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
3061
0
{
3062
0
    if (cdict==NULL) return 0;   /* support sizeof on NULL */
3063
0
    DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
3064
0
    return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
3065
0
}
3066
3067
/* ZSTD_initCDict_internal() :
 * Fill an already-allocated CDict : (optionally) copy the dictionary bytes,
 * reset match/block state inside the CDict's workspace, then digest the
 * dictionary content.
 * @return : 0, or an error code */
static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_compressionParameters cParams)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (U32)dictContentType);
    assert(!ZSTD_checkCParams(cParams));
    cdict->cParams = cParams;
    /* byRef (or empty) : reference caller's buffer; otherwise take a private copy */
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        cdict->dictBuffer = NULL;   /* NULL dictBuffer marks "not owned" */
        cdict->dictContent = dictBuffer;
    } else {
        void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
        cdict->dictBuffer = internalBuffer;
        cdict->dictContent = internalBuffer;
        if (!internalBuffer) return ERROR(memory_allocation);
        memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;

    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    /* match state lives in the workspace, after the HUF workspace region */
    {   void* const end = ZSTD_reset_matchState(
                &cdict->matchState,
                (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
                &cParams, ZSTDcrp_continue, /* forCCtx */ 0);
        assert(end == (char*)cdict->workspace + cdict->workspaceSize);
        (void)end;
    }
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is <= 8 bytes.
     */
    {   ZSTD_CCtx_params params;
        memset(&params, 0, sizeof(params));
        params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        params.cParams = cParams;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, &params,
                    cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, cdict->workspace);
            if (ZSTD_isError(dictID)) return dictID;
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }

    return 0;
}
3118
3119
/* ZSTD_createCDict_advanced() :
 * Allocate and initialize a digested dictionary using `customMem`.
 * @return : the new CDict, or NULL on allocation/initialization failure. */
ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
{
    DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictContentType);
    /* custom allocator must provide both alloc and free, or neither */
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
        size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
        void* const workspace = ZSTD_malloc(workspaceSize, customMem);

        /* partial failure : free whichever allocation succeeded (ZSTD_free handles NULL) */
        if (!cdict || !workspace) {
            ZSTD_free(cdict, customMem);
            ZSTD_free(workspace, customMem);
            return NULL;
        }
        /* customMem must be set before init, which may allocate the dict copy */
        cdict->customMem = customMem;
        cdict->workspace = workspace;
        cdict->workspaceSize = workspaceSize;
        if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                        dictBuffer, dictSize,
                                        dictLoadMethod, dictContentType,
                                        cParams) )) {
            ZSTD_freeCDict(cdict);
            return NULL;
        }

        return cdict;
    }
}
3150
3151
/* Create a digested dictionary, copying `dict` internally
 * (caller's buffer may be released after this call). */
ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters const cParams =
            ZSTD_getCParams(compressionLevel, 0 /* unknown srcSize */, dictSize);
    return ZSTD_createCDict_advanced(dict, dictSize,
                                     ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                     cParams, ZSTD_defaultCMem);
}
3158
3159
/* Create a digested dictionary referencing `dict` in place :
 * the caller's buffer must outlive the returned CDict. */
ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters const cParams =
            ZSTD_getCParams(compressionLevel, 0 /* unknown srcSize */, dictSize);
    return ZSTD_createCDict_advanced(dict, dictSize,
                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
                                     cParams, ZSTD_defaultCMem);
}
3166
3167
/* Release a CDict and everything it owns (workspace + dictionary copy). */
size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
    ZSTD_customMem cMem;
    if (cdict==NULL) return 0;   /* support free on NULL */
    cMem = cdict->customMem;     /* copy before freeing the struct that holds it */
    ZSTD_free(cdict->workspace, cMem);
    ZSTD_free(cdict->dictBuffer, cMem);   /* NULL when dictionary was byRef */
    ZSTD_free(cdict, cMem);
    return 0;
}
3177
3178
/*! ZSTD_initStaticCDict_advanced() :
 *  Generate a digested dictionary in provided memory area.
 *  workspace: The memory area to emplace the dictionary into.
 *             Provided pointer must 8-bytes aligned.
 *             It must outlive dictionary usage.
 *  workspaceSize: Use ZSTD_estimateCDictSize()
 *                 to determine how large workspace must be.
 *  cParams : use ZSTD_getCParams() to transform a compression level
 *            into its relevants cParams.
 * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
 *  Note : there is no corresponding "free" function.
 *         Since workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
                                 void* workspace, size_t workspaceSize,
                           const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType,
                                 ZSTD_compressionParameters cParams)
{
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
    /* layout : [ZSTD_CDict struct][optional dict copy][HUF workspace + match state] */
    size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
                            + HUF_WORKSPACE_SIZE + matchStateSize;
    ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
    void* ptr;   /* start of the internal workspace region */
    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;

    if (dictLoadMethod == ZSTD_dlm_byCopy) {
        /* copy the dictionary right after the struct, then reference that copy */
        memcpy(cdict+1, dict, dictSize);
        dict = cdict+1;
        ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
    } else {
        ptr = cdict+1;
    }
    cdict->workspace = ptr;
    cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;

    /* always byRef here : the copy (if any) already lives inside `workspace` */
    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                              dict, dictSize,
                                              ZSTD_dlm_byRef, dictContentType,
                                              cParams) ))
        return NULL;

    return cdict;
}
3226
3227
/*! ZSTD_getCParamsFromCDict() :
 *  Read back the compression parameters the CDict was built with.
 *  cdict must be non-NULL (only checked by assert in debug builds). */
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
    assert(cdict != NULL);
    return cdict->cParams;
}
3232
3233
/* ZSTD_compressBegin_usingCDict_advanced() :
 * Start a frame using a digested dictionary, with explicit frame parameters
 * and (optionally) a known source size.
 * cdict must be != NULL
 * @return : 0 on success, or an error code (notably dictionary_wrong if cdict==NULL) */
size_t ZSTD_compressBegin_usingCDict_advanced(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
    if (cdict==NULL) return ERROR(dictionary_wrong);
    {   ZSTD_CCtx_params params = cctx->requestedParams;
        params.cParams = ZSTD_getCParamsFromCDict(cdict);
        /* Increase window log to fit the entire dictionary and source if the
         * source size is known. Limit the increase to 19, which is the
         * window log for compression level 1 with the largest source size.
         */
        if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
            U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
            /* ceil(log2(limitedSrcSize)), clamped to a minimum of 1 */
            U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
            params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
        }
        params.fParams = fParams;
        /* no raw dict content here : dictionary comes from cdict */
        return ZSTD_compressBegin_internal(cctx,
                                           NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                           cdict,
                                           params, pledgedSrcSize,
                                           ZSTDb_not_buffered);
    }
}
3260
3261
/* ZSTD_compressBegin_usingCDict() :
3262
 * pledgedSrcSize=0 means "unknown"
3263
 * if pledgedSrcSize>0, it will enable contentSizeFlag */
3264
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
3265
0
{
3266
0
    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
3267
0
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
3268
0
    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
3269
0
}
3270
3271
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
3272
                                void* dst, size_t dstCapacity,
3273
                                const void* src, size_t srcSize,
3274
                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
3275
0
{
3276
0
    CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
3277
0
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
3278
0
}
3279
3280
/*! ZSTD_compress_usingCDict() :
3281
 *  Compression using a digested Dictionary.
3282
 *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
3283
 *  Note that compression parameters are decided at CDict creation time
3284
 *  while frame parameters are hardcoded */
3285
size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
3286
                                void* dst, size_t dstCapacity,
3287
                                const void* src, size_t srcSize,
3288
                                const ZSTD_CDict* cdict)
3289
0
{
3290
0
    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
3291
0
    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
3292
0
}
3293
3294
3295
3296
/* ******************************************************************
3297
*  Streaming
3298
********************************************************************/
3299
3300
ZSTD_CStream* ZSTD_createCStream(void)
3301
0
{
3302
0
    DEBUGLOG(3, "ZSTD_createCStream");
3303
0
    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
3304
0
}
3305
3306
/*! ZSTD_initStaticCStream() :
 *  A CStream is the same object as a CCtx; delegate to the static CCtx initializer. */
ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
{
    return ZSTD_initStaticCCtx(workspace, workspaceSize);
}
3310
3311
/*! ZSTD_createCStream_advanced() :
 *  CStream and CCtx are now the same object; forward to the CCtx constructor. */
ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
{
    return ZSTD_createCCtx_advanced(customMem);
}
3315
3316
/*! ZSTD_freeCStream() :
 *  A CStream is just a CCtx; ZSTD_freeCCtx() also handles NULL. */
size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
{
    return ZSTD_freeCCtx(zcs);   /* same object */
}
3320
3321
3322
3323
/*======   Initialization   ======*/
3324
3325
0
/* ZSTD_CStreamInSize() :
 * recommended input buffer size for streaming compression : one full block. */
size_t ZSTD_CStreamInSize(void)
{
    return ZSTD_BLOCKSIZE_MAX;
}
3326
3327
size_t ZSTD_CStreamOutSize(void)
3328
0
{
3329
0
    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
3330
0
}
3331
3332
/* ZSTD_resetCStream_internal() :
 * (Re)start a buffered streaming compression session on cctx.
 * params must be fully validated; at most one of dict/cdict may be set.
 * Resets the internal in/out buffer cursors and moves the state machine to zcss_load.
 * @return : 0 on success, or an error code from ZSTD_compressBegin_internal() */
static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
                    const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
                    const ZSTD_CDict* const cdict,
                    ZSTD_CCtx_params const params, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_resetCStream_internal");
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    CHECK_F( ZSTD_compressBegin_internal(cctx,
                                         dict, dictSize, dictContentType, ZSTD_dtlm_fast,
                                         cdict,
                                         params, pledgedSrcSize,
                                         ZSTDb_buffered) );

    /* reset streaming buffer state */
    cctx->inToCompress = 0;
    cctx->inBuffPos = 0;
    cctx->inBuffTarget = cctx->blockSize
                      + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */
    cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
    cctx->streamStage = zcss_load;
    cctx->frameEnded = 0;
    return 0;   /* ready to go */
}
3357
3358
/* ZSTD_resetCStream():
3359
 * pledgedSrcSize == 0 means "unknown" */
3360
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
3361
0
{
3362
0
    ZSTD_CCtx_params params = zcs->requestedParams;
3363
0
    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
3364
0
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
3365
0
    params.fParams.contentSizeFlag = 1;
3366
0
    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, 0);
3367
0
    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
3368
0
}
3369
3370
/*! ZSTD_initCStream_internal() :
 *  Note : for lib/compress only. Used by zstdmt_compress.c.
 *  Assumption 1 : params are valid
 *  Assumption 2 : either dict, or cdict, is defined, not both
 *  When a raw dict (>= 8 bytes) is supplied, an internal CDict is (re)built
 *  and owned by zcs (zcs->cdictLocal); otherwise the caller's cdict (or none)
 *  is referenced. Any previously-owned local CDict is released either way. */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_internal");
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    if (dict && dictSize >= 8) {
        DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize);
        if (zcs->staticSize) {   /* static CCtx : never uses malloc */
            /* incompatible with internal cdict creation */
            return ERROR(memory_allocation);
        }
        ZSTD_freeCDict(zcs->cdictLocal);
        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
                                            ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                            params.cParams, zcs->customMem);
        zcs->cdict = zcs->cdictLocal;
        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
    } else {
        if (cdict) {
            params.cParams = ZSTD_getCParamsFromCDict(cdict);  /* cParams are enforced from cdict; it includes windowLog */
        }
        /* drop any previously-owned local CDict; caller's cdict (possibly NULL) takes over */
        ZSTD_freeCDict(zcs->cdictLocal);
        zcs->cdictLocal = NULL;
        zcs->cdict = cdict;
    }

    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
}
3405
3406
/* ZSTD_initCStream_usingCDict_advanced() :
3407
 * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
3408
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
3409
                                            const ZSTD_CDict* cdict,
3410
                                            ZSTD_frameParameters fParams,
3411
                                            unsigned long long pledgedSrcSize)
3412
0
{
3413
0
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
3414
0
    if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
3415
0
    {   ZSTD_CCtx_params params = zcs->requestedParams;
3416
0
        params.cParams = ZSTD_getCParamsFromCDict(cdict);
3417
0
        params.fParams = fParams;
3418
0
        return ZSTD_initCStream_internal(zcs,
3419
0
                                NULL, 0, cdict,
3420
0
                                params, pledgedSrcSize);
3421
0
    }
3422
0
}
3423
3424
/* note : cdict must outlive compression session */
3425
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
3426
0
{
3427
0
    ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
3428
0
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
3429
0
    return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);  /* note : will check that cdict != NULL */
3430
0
}
3431
3432
3433
/* ZSTD_initCStream_advanced() :
3434
 * pledgedSrcSize must be exact.
3435
 * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
3436
 * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
3437
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
3438
                                 const void* dict, size_t dictSize,
3439
                                 ZSTD_parameters params, unsigned long long pledgedSrcSize)
3440
0
{
3441
0
    DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
3442
0
                (U32)pledgedSrcSize, params.fParams.contentSizeFlag);
3443
0
    CHECK_F( ZSTD_checkCParams(params.cParams) );
3444
0
    if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
3445
0
    {   ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
3446
0
        return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, cctxParams, pledgedSrcSize);
3447
0
    }
3448
0
}
3449
3450
/*! ZSTD_initCStream_usingDict() :
 *  Start a streaming session at the given level, loading a raw dictionary. */
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_parameters const zparams = ZSTD_getParams(compressionLevel, 0, dictSize);
    ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, zparams);
    return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
}
3457
3458
/*! ZSTD_initCStream_srcSize() :
 *  Start a streaming session with a known (pledged) source size. */
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
    /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
    ZSTD_parameters const zparams = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
    ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, zparams);
    return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize);
}
3465
3466
/*! ZSTD_initCStream() :
 *  Simplest streaming init : compression level only, source size unknown. */
size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_initCStream");
    return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
}
3471
3472
/*======   Compression   ======*/
3473
3474
/*! ZSTD_limitCopy() :
 *  Copy as much of src as fits in dst.
 * @return : number of bytes actually copied (min of both sizes) */
MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
{
    size_t const copied = (dstCapacity < srcSize) ? dstCapacity : srcSize;
    if (copied > 0) memcpy(dst, src, copied);
    return copied;
}
3481
3482
/** ZSTD_compressStream_generic():
 *  internal function for all *compressStream*() variants and *compress_generic()
 *  non-static, because can be called from zstdmt_compress.c
 *  Drives the zcss_load -> zcss_flush state machine until it runs out of
 *  input, output space, or work for the requested flushMode.
 * @return : hint size for next input (0 once the frame has ended) */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                                   ZSTD_outBuffer* output,
                                   ZSTD_inBuffer* input,
                                   ZSTD_EndDirective const flushMode)
{
    const char* const istart = (const char*)input->src;
    const char* const iend = istart + input->size;
    const char* ip = istart + input->pos;
    char* const ostart = (char*)output->dst;
    char* const oend = ostart + output->size;
    char* op = ostart + output->pos;
    U32 someMoreWork = 1;

    /* check expectations */
    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
    assert(zcs->inBuff != NULL);
    assert(zcs->inBuffSize > 0);
    assert(zcs->outBuff !=  NULL);
    assert(zcs->outBuffSize > 0);
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);

    while (someMoreWork) {
        switch(zcs->streamStage)
        {
        case zcss_init:
            /* call ZSTD_initCStream() first ! */
            return ERROR(init_missing);

        case zcss_load:
            if ( (flushMode == ZSTD_e_end)
              && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */
              && (zcs->inBuffPos == 0) ) {
                /* shortcut to compression pass directly into output buffer */
                size_t const cSize = ZSTD_compressEnd(zcs,
                                                op, oend-op, ip, iend-ip);
                DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize);
                if (ZSTD_isError(cSize)) return cSize;
                ip = iend;
                op += cSize;
                zcs->frameEnded = 1;
                ZSTD_CCtx_reset(zcs);
                someMoreWork = 0; break;
            }
            /* complete loading into inBuffer */
            {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
                size_t const loaded = ZSTD_limitCopy(
                                        zcs->inBuff + zcs->inBuffPos, toLoad,
                                        ip, iend-ip);
                zcs->inBuffPos += loaded;
                ip += loaded;
                if ( (flushMode == ZSTD_e_continue)
                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
                    /* not enough input to fill full block : stop here */
                    someMoreWork = 0; break;
                }
                if ( (flushMode == ZSTD_e_flush)
                  && (zcs->inBuffPos == zcs->inToCompress) ) {
                    /* empty */
                    someMoreWork = 0; break;
                }
            }
            /* compress current block (note : this stage cannot be stopped in the middle) */
            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
            {   void* cDst;
                size_t cSize;
                size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
                size_t oSize = oend-op;
                unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
                if (oSize >= ZSTD_compressBound(iSize))
                    cDst = op;   /* compress into output buffer, to skip flush stage */
                else
                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
                cSize = lastBlock ?
                        ZSTD_compressEnd(zcs, cDst, oSize,
                                    zcs->inBuff + zcs->inToCompress, iSize) :
                        ZSTD_compressContinue(zcs, cDst, oSize,
                                    zcs->inBuff + zcs->inToCompress, iSize);
                if (ZSTD_isError(cSize)) return cSize;
                zcs->frameEnded = lastBlock;
                /* prepare next block */
                zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
                if (zcs->inBuffTarget > zcs->inBuffSize)
                    /* wrap input buffer : next block restarts at position 0 */
                    zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
                DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
                         (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
                if (!lastBlock)
                    assert(zcs->inBuffTarget <= zcs->inBuffSize);
                zcs->inToCompress = zcs->inBuffPos;
                if (cDst == op) {  /* no need to flush */
                    op += cSize;
                    if (zcs->frameEnded) {
                        DEBUGLOG(5, "Frame completed directly in outBuffer");
                        someMoreWork = 0;
                        ZSTD_CCtx_reset(zcs);
                    }
                    break;
                }
                zcs->outBuffContentSize = cSize;
                zcs->outBuffFlushedSize = 0;
                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
            }
	    /* fall-through */
        case zcss_flush:
            DEBUGLOG(5, "flush stage");
            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
                size_t const flushed = ZSTD_limitCopy(op, oend-op,
                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
                            (U32)toFlush, (U32)(oend-op), (U32)flushed);
                op += flushed;
                zcs->outBuffFlushedSize += flushed;
                if (toFlush!=flushed) {
                    /* flush not fully completed, presumably because dst is too small */
                    assert(op==oend);
                    someMoreWork = 0;
                    break;
                }
                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
                if (zcs->frameEnded) {
                    DEBUGLOG(5, "Frame completed on flush");
                    someMoreWork = 0;
                    ZSTD_CCtx_reset(zcs);
                    break;
                }
                zcs->streamStage = zcss_load;
                break;
            }

        default: /* impossible */
            assert(0);
        }
    }

    /* publish progress back to the caller's buffers */
    input->pos = ip - istart;
    output->pos = op - ostart;
    if (zcs->frameEnded) return 0;
    {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
        if (hintInSize==0) hintInSize = zcs->blockSize;
        return hintInSize;
    }
}
3628
3629
/*! ZSTD_compressStream() :
 *  Classic streaming entry point : equivalent to the generic driver with ZSTD_e_continue. */
size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    /* validate caller-provided cursor positions before doing any work */
    if (output->pos > output->size) return ERROR(GENERIC);
    if (input->pos > input->size) return ERROR(GENERIC);
    return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue);
}
3637
3638
3639
/*! ZSTD_compress_generic() :
 *  Main streaming entry point for the advanced API.
 *  Performs a transparent (re)initialization on first use of the context,
 *  then dispatches to the single-threaded driver, or (when compiled with
 *  ZSTD_MULTITHREAD and nbWorkers > 0) to the ZSTDMT path.
 * @return : remaining bytes to flush (0 when frame complete), or an error code */
size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
                              ZSTD_outBuffer* output,
                              ZSTD_inBuffer* input,
                              ZSTD_EndDirective endOp)
{
    DEBUGLOG(5, "ZSTD_compress_generic, endOp=%u ", (U32)endOp);
    /* check conditions */
    if (output->pos > output->size) return ERROR(GENERIC);
    if (input->pos  > input->size)  return ERROR(GENERIC);
    assert(cctx!=NULL);

    /* transparent initialization stage */
    if (cctx->streamStage == zcss_init) {
        ZSTD_CCtx_params params = cctx->requestedParams;
        ZSTD_prefixDict const prefixDict = cctx->prefixDict;
        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* single usage */
        assert(prefixDict.dict==NULL || cctx->cdict==NULL);   /* only one can be set */
        DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage");
        if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);

#ifdef ZSTD_MULTITHREAD
        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
            params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
        }
        if (params.nbWorkers > 0) {
            /* mt context creation */
            if (cctx->mtctx == NULL) {
                DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbWorkers=%u",
                            params.nbWorkers);
                cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
                if (cctx->mtctx == NULL) return ERROR(memory_allocation);
            }
            /* mt compression */
            DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
            CHECK_F( ZSTDMT_initCStream_internal(
                        cctx->mtctx,
                        prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
                        cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
            cctx->streamStage = zcss_load;
            cctx->appliedParams.nbWorkers = params.nbWorkers;
        } else
#endif
        {   CHECK_F( ZSTD_resetCStream_internal(cctx,
                            prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                            cctx->cdict,
                            params, cctx->pledgedSrcSizePlusOne-1) );
            assert(cctx->streamStage == zcss_load);
            assert(cctx->appliedParams.nbWorkers == 0);
    }   }

    /* compression stage */
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        if (cctx->cParamsChanged) {
            /* propagate parameter updates requested mid-stream */
            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
            cctx->cParamsChanged = 0;
        }
        {   size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
            if ( ZSTD_isError(flushMin)
              || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
                ZSTD_CCtx_reset(cctx);
            }
            return flushMin;
    }   }
#endif
    CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
    DEBUGLOG(5, "completed ZSTD_compress_generic");
    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
3711
3712
size_t ZSTD_compress_generic_simpleArgs (
3713
                            ZSTD_CCtx* cctx,
3714
                            void* dst, size_t dstCapacity, size_t* dstPos,
3715
                      const void* src, size_t srcSize, size_t* srcPos,
3716
                            ZSTD_EndDirective endOp)
3717
0
{
3718
0
    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
3719
0
    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
3720
    /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
3721
0
    size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp);
3722
0
    *dstPos = output.pos;
3723
0
    *srcPos = input.pos;
3724
0
    return cErr;
3725
0
}
3726
3727
3728
/*======   Finalize   ======*/
3729
3730
/*! ZSTD_flushStream() :
3731
 * @return : amount of data remaining to flush */
3732
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
3733
0
{
3734
0
    ZSTD_inBuffer input = { NULL, 0, 0 };
3735
0
    if (output->pos > output->size) return ERROR(GENERIC);
3736
0
    CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) );
3737
0
    return zcs->outBuffContentSize - zcs->outBuffFlushedSize;  /* remaining to flush */
3738
0
}
3739
3740
3741
/*! ZSTD_endStream() :
 *  Flush remaining data and write the frame epilogue.
 * @return : number of bytes still waiting to be flushed (0 when the frame is done);
 *           estimate includes the last-block header and checksum if not yet emitted */
size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
    ZSTD_inBuffer emptyInput = { NULL, 0, 0 };
    if (output->pos > output->size) return ERROR(GENERIC);
    {   size_t const err = ZSTD_compressStream_generic(zcs, output, &emptyInput, ZSTD_e_end);
        if (ZSTD_isError(err)) return err;
    }
    {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
        size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
        size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize;
        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush);
        return toFlush;
    }
}
3753
3754
3755
/*-=====  Pre-defined compression levels  =====-*/
3756
3757
0
/* highest advertised regular compression level; also sizes the parameter tables below */
#define ZSTD_MAX_CLEVEL     22
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
3759
3760
/* Pre-tuned compression parameter sets, indexed as [tableID][row] :
 *  - tableID selects a size class (see ZSTD_getCParams) :
 *      0 = default (large/unknown srcSize), 1 = srcSize <= 256 KB,
 *      2 = srcSize <= 128 KB, 3 = srcSize <= 16 KB ;
 *  - row is the (clamped) compression level, row 0 being the baseline
 *    that negative "fast" levels start from.
 * Column letters presumably map to ZSTD_compressionParameters fields in
 * declaration order (W=windowLog, C=chainLog, H=hashLog, S=searchLog,
 * L=searchLength, TL/T=targetLength, strat=strategy) — confirm against zstd.h. */
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{   /* "default" - guarantees a monotonically increasing memory budget */
    /* W,  C,  H,  S,  L, TL, strat */
    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
    { 19, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
    { 20, 16, 17,  1,  5,  1, ZSTD_dfast   },  /* level  3 */
    { 20, 18, 18,  1,  5,  1, ZSTD_dfast   },  /* level  4 */
    { 20, 18, 18,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
    { 21, 18, 19,  2,  5,  4, ZSTD_lazy    },  /* level  6 */
    { 21, 18, 19,  3,  5,  8, ZSTD_lazy2   },  /* level  7 */
    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
    { 21, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
    { 21, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
    { 22, 20, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
    { 22, 21, 22,  4,  5, 32, ZSTD_btlazy2 },  /* level 13 */
    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
    { 22, 22, 22,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
    { 22, 21, 22,  4,  5, 48, ZSTD_btopt   },  /* level 16 */
    { 23, 22, 22,  4,  4, 64, ZSTD_btopt   },  /* level 17 */
    { 23, 23, 22,  6,  3,256, ZSTD_btopt   },  /* level 18 */
    { 23, 24, 22,  7,  3,256, ZSTD_btultra },  /* level 19 */
    { 25, 25, 23,  7,  3,256, ZSTD_btultra },  /* level 20 */
    { 26, 26, 24,  7,  3,512, ZSTD_btultra },  /* level 21 */
    { 27, 27, 25,  9,  3,999, ZSTD_btultra },  /* level 22 */
},
{   /* for srcSize <= 256 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 18, 14, 14,  1,  5,  1, ZSTD_dfast   },  /* level  2 */
    { 18, 16, 16,  1,  4,  1, ZSTD_dfast   },  /* level  3 */
    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 18, 18, 19,  5,  4, 16, ZSTD_btlazy2 },  /* level 11.*/
    { 18, 19, 19,  6,  4, 16, ZSTD_btlazy2 },  /* level 12.*/
    { 18, 19, 19,  8,  4, 16, ZSTD_btlazy2 },  /* level 13 */
    { 18, 18, 19,  4,  4, 24, ZSTD_btopt   },  /* level 14.*/
    { 18, 18, 19,  4,  3, 24, ZSTD_btopt   },  /* level 15.*/
    { 18, 19, 19,  6,  3, 64, ZSTD_btopt   },  /* level 16.*/
    { 18, 19, 19,  8,  3,128, ZSTD_btopt   },  /* level 17.*/
    { 18, 19, 19, 10,  3,256, ZSTD_btopt   },  /* level 18.*/
    { 18, 19, 19, 10,  3,256, ZSTD_btultra },  /* level 19.*/
    { 18, 19, 19, 11,  3,512, ZSTD_btultra },  /* level 20.*/
    { 18, 19, 19, 12,  3,512, ZSTD_btultra },  /* level 21.*/
    { 18, 19, 19, 13,  3,999, ZSTD_btultra },  /* level 22.*/
},
{   /* for srcSize <= 128 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
    { 17, 15, 16,  2,  5,  1, ZSTD_dfast   },  /* level  3 */
    { 17, 17, 17,  2,  4,  1, ZSTD_dfast   },  /* level  4 */
    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 17, 17, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 11 */
    { 17, 18, 17,  6,  4, 16, ZSTD_btlazy2 },  /* level 12 */
    { 17, 18, 17,  8,  4, 16, ZSTD_btlazy2 },  /* level 13.*/
    { 17, 18, 17,  4,  4, 32, ZSTD_btopt   },  /* level 14.*/
    { 17, 18, 17,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
    { 17, 18, 17,  7,  3,128, ZSTD_btopt   },  /* level 16.*/
    { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 17.*/
    { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 18.*/
    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 19.*/
    { 17, 18, 17,  9,  3,256, ZSTD_btultra },  /* level 20.*/
    { 17, 18, 17, 10,  3,256, ZSTD_btultra },  /* level 21.*/
    { 17, 18, 17, 11,  3,512, ZSTD_btultra },  /* level 22.*/
},
{   /* for srcSize <= 16 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
    { 14, 14, 14,  2,  4,  1, ZSTD_dfast   },  /* level  3.*/
    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4.*/
    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
    { 14, 15, 14,  6,  3, 16, ZSTD_btopt   },  /* level 12.*/
    { 14, 15, 14,  6,  3, 24, ZSTD_btopt   },  /* level 13.*/
    { 14, 15, 15,  6,  3, 48, ZSTD_btopt   },  /* level 14.*/
    { 14, 15, 15,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
    { 14, 15, 15,  6,  3, 96, ZSTD_btopt   },  /* level 16.*/
    { 14, 15, 15,  6,  3,128, ZSTD_btopt   },  /* level 17.*/
    { 14, 15, 15,  8,  3,256, ZSTD_btopt   },  /* level 18.*/
    { 14, 15, 15,  6,  3,256, ZSTD_btultra },  /* level 19.*/
    { 14, 15, 15,  8,  3,256, ZSTD_btultra },  /* level 20.*/
    { 14, 15, 15,  9,  3,256, ZSTD_btultra },  /* level 21.*/
    { 14, 15, 15, 10,  3,512, ZSTD_btultra },  /* level 22.*/
},
};
3866
3867
/*! ZSTD_getCParams() :
3868
*  @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
3869
*   Size values are optional, provide 0 if not known or unused */
3870
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
3871
0
{
3872
0
    size_t const addedSize = srcSizeHint ? 0 : 500;
3873
0
    U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
3874
0
    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
3875
0
    int row = compressionLevel;
3876
0
    DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
3877
0
    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
3878
0
    if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
3879
0
    if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
3880
0
    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
3881
0
        if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel);   /* acceleration factor */
3882
0
        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); }
3883
3884
0
}
3885
3886
/*! ZSTD_getParams() :
3887
*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
3888
*   All fields of `ZSTD_frameParameters` are set to default (0) */
3889
0
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
3890
0
    ZSTD_parameters params;
3891
0
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
3892
0
    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
3893
0
    memset(&params, 0, sizeof(params));
3894
0
    params.cParams = cParams;
3895
0
    params.fParams.contentSizeFlag = 1;
3896
0
    return params;
3897
0
}