Coverage Report

Created: 2025-08-09 07:02

/src/zstd/lib/common/zstd_internal.h
Line
Count
Source
1
/*
2
 * Copyright (c) Meta Platforms, Inc. and affiliates.
3
 * All rights reserved.
4
 *
5
 * This source code is licensed under both the BSD-style license (found in the
6
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7
 * in the COPYING file in the root directory of this source tree).
8
 * You may select, at your option, one of the above-listed licenses.
9
 */
10
11
#ifndef ZSTD_CCOMMON_H_MODULE
12
#define ZSTD_CCOMMON_H_MODULE
13
14
/* this module contains definitions which must be identical
15
 * across compression, decompression and dictBuilder.
16
 * It also contains a few functions useful to at least 2 of them
17
 * and which benefit from being inlined */
18
19
/*-*************************************
20
*  Dependencies
21
***************************************/
22
#include "compiler.h"
23
#include "cpu.h"
24
#include "mem.h"
25
#include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
26
#include "error_private.h"
27
#define ZSTD_STATIC_LINKING_ONLY
28
#include "../zstd.h"
29
#define FSE_STATIC_LINKING_ONLY
30
#include "fse.h"
31
#include "huf.h"
32
#ifndef XXH_STATIC_LINKING_ONLY
33
#  define XXH_STATIC_LINKING_ONLY  /* XXH64_state_t */
34
#endif
35
#include "xxhash.h"                /* XXH_reset, update, digest */
36
#ifndef ZSTD_NO_TRACE
37
#  include "zstd_trace.h"
38
#else
39
#  define ZSTD_TRACE 0
40
#endif
41
42
/* ---- static assert (debug) --- */
43
183M
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
44
9.64M
#define ZSTD_isError ERR_isError   /* for inlining */
45
0
#define FSE_isError  ERR_isError
46
0
#define HUF_isError  ERR_isError
47
48
49
/*-*************************************
50
*  shared macros
51
***************************************/
52
#undef MIN
53
#undef MAX
54
337M
#define MIN(a,b) ((a)<(b) ? (a) : (b))
55
28.1M
#define MAX(a,b) ((a)>(b) ? (a) : (b))
56
2.25M
#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))
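Note that MIN, MAX and BOUNDED are plain textual macros, so each argument may be evaluated more than once; they are only used with side-effect-free expressions. A minimal usage sketch, assuming the definitions above (not part of the header):

    int const requested = 40;
    int const clamped = BOUNDED(10, requested, 31);   /* MAX(10, MIN(40, 31)) == 31 */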
57
58
59
/*-*************************************
60
*  Common constants
61
***************************************/
62
41.8M
#define ZSTD_OPT_NUM    (1<<12)
63
64
469M
#define ZSTD_REP_NUM      3                 /* number of repcodes */
65
static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
66
67
1.94M
#define KB *(1 <<10)
68
520k
#define MB *(1 <<20)
69
0
#define GB *(1U<<30)
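KB, MB and GB are postfix multipliers: they expand to `*(1 <<10)`, `*(1 <<20)` and `*(1U<<30)` respectively, so they are written after a number. A one-line usage sketch (illustrative):

    size_t const chunkSize = 512 KB;   /* expands to 512 *(1 <<10), i.e. 524288 bytes */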
70
71
#define BIT7 128
72
#define BIT6  64
73
#define BIT5  32
74
#define BIT4  16
75
#define BIT1   2
76
#define BIT0   1
77
78
460k
#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
79
static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
80
static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
81
82
0
#define ZSTD_FRAMEIDSIZE 4   /* magic number size */
83
84
0
#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
85
static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
86
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
87
88
#define ZSTD_FRAMECHECKSUMSIZE 4
89
90
#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
91
626k
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */)   /* for a non-null block */
92
#define MIN_LITERALS_FOR_4_STREAMS 6
93
94
typedef enum { set_basic, set_rle, set_compressed, set_repeat } SymbolEncodingType_e;
95
96
141k
#define LONGNBSEQ 0x7F00
97
98
483M
#define MINMATCH 3
99
100
173k
#define Litbits  8
101
353k
#define LitHufLog 11
102
72.7k
#define MaxLit ((1<<Litbits) - 1)
103
2.03M
#define MaxML   52
104
1.69M
#define MaxLL   35
105
1.10M
#define DefaultMaxOff 28
106
1.11M
#define MaxOff  31
107
1.46M
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
108
38.2M
#define MLFSELog    9
109
38.2M
#define LLFSELog    9
110
38.2M
#define OffFSELog   8
111
#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
112
#define MaxMLBits 16
113
#define MaxLLBits 16
114
115
#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
116
/* Each table cannot take more than #symbols * FSELog bits */
117
#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
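With the constants above, this bound evaluates to ((52+1)*9 + (35+1)*9 + (31+1)*8 + 7) / 8 = (477 + 324 + 256 + 7) / 8 = 133 bytes.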
118
119
static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
120
     0, 0, 0, 0, 0, 0, 0, 0,
121
     0, 0, 0, 0, 0, 0, 0, 0,
122
     1, 1, 1, 1, 2, 2, 3, 3,
123
     4, 6, 7, 8, 9,10,11,12,
124
    13,14,15,16
125
};
126
static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
127
     4, 3, 2, 2, 2, 2, 2, 2,
128
     2, 2, 2, 2, 2, 1, 1, 1,
129
     2, 2, 2, 2, 2, 2, 2, 2,
130
     2, 3, 2, 1, 1, 1, 1, 1,
131
    -1,-1,-1,-1
132
};
133
#define LL_DEFAULTNORMLOG 6  /* for static allocation */
134
static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
135
136
static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
137
     0, 0, 0, 0, 0, 0, 0, 0,
138
     0, 0, 0, 0, 0, 0, 0, 0,
139
     0, 0, 0, 0, 0, 0, 0, 0,
140
     0, 0, 0, 0, 0, 0, 0, 0,
141
     1, 1, 1, 1, 2, 2, 3, 3,
142
     4, 4, 5, 7, 8, 9,10,11,
143
    12,13,14,15,16
144
};
145
static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
146
     1, 4, 3, 2, 2, 2, 2, 2,
147
     2, 1, 1, 1, 1, 1, 1, 1,
148
     1, 1, 1, 1, 1, 1, 1, 1,
149
     1, 1, 1, 1, 1, 1, 1, 1,
150
     1, 1, 1, 1, 1, 1, 1, 1,
151
     1, 1, 1, 1, 1, 1,-1,-1,
152
    -1,-1,-1,-1,-1
153
};
154
#define ML_DEFAULTNORMLOG 6  /* for static allocation */
155
static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
156
157
static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
158
     1, 1, 1, 1, 1, 1, 2, 2,
159
     2, 1, 1, 1, 1, 1, 1, 1,
160
     1, 1, 1, 1, 1, 1, 1, 1,
161
    -1,-1,-1,-1,-1
162
};
163
#define OF_DEFAULTNORMLOG 5  /* for static allocation */
164
static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
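The three default distributions above are normalized FSE counts: a -1 entry marks a low-probability symbol and counts as 1, and the entries must sum to 1<<tableLog (64 for LL and ML, 32 for offsets). A minimal sanity-check sketch, assuming only the definitions above (illustrative, not part of the library):

    /* Returns 1 if a default distribution satisfies the FSE invariant:
     * counts sum to 1<<tableLog, with -1 entries counted as 1. */
    static int defaultNormIsValid(const S16* norm, unsigned maxSymbolValue, unsigned tableLog)
    {
        int sum = 0;
        unsigned s;
        for (s = 0; s <= maxSymbolValue; s++)
            sum += (norm[s] == -1) ? 1 : norm[s];
        return sum == (1 << tableLog);
    }

    /* e.g. defaultNormIsValid(LL_defaultNorm, MaxLL, LL_DEFAULTNORMLOG) == 1 */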
165
166
167
/*-*******************************************
168
*  Shared functions to include for inlining
169
*********************************************/
170
7.58M
static void ZSTD_copy8(void* dst, const void* src) {
171
#if defined(ZSTD_ARCH_ARM_NEON) && !defined(__aarch64__)
172
    vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
173
#else
174
7.58M
    ZSTD_memcpy(dst, src, 8);
175
7.58M
#endif
176
7.58M
}
Unexecuted instantiation: sequence_producer.c:ZSTD_copy8
Unexecuted instantiation: zstd_common.c:ZSTD_copy8
Unexecuted instantiation: zstd_compress.c:ZSTD_copy8
Unexecuted instantiation: zstd_compress_literals.c:ZSTD_copy8
Unexecuted instantiation: zstd_compress_sequences.c:ZSTD_copy8
Unexecuted instantiation: zstd_compress_superblock.c:ZSTD_copy8
Unexecuted instantiation: zstd_double_fast.c:ZSTD_copy8
Unexecuted instantiation: zstd_fast.c:ZSTD_copy8
Unexecuted instantiation: zstd_lazy.c:ZSTD_copy8
Unexecuted instantiation: zstd_ldm.c:ZSTD_copy8
Unexecuted instantiation: zstd_opt.c:ZSTD_copy8
Unexecuted instantiation: zstd_preSplit.c:ZSTD_copy8
Unexecuted instantiation: zstdmt_compress.c:ZSTD_copy8
Unexecuted instantiation: huf_decompress.c:ZSTD_copy8
Unexecuted instantiation: zstd_ddict.c:ZSTD_copy8
Unexecuted instantiation: zstd_decompress.c:ZSTD_copy8
Executed instantiation: zstd_decompress_block.c:ZSTD_copy8 (7.58M executions; per-line counts match the shared listing above)
Unexecuted instantiation: cover.c:ZSTD_copy8
Unexecuted instantiation: fastcover.c:ZSTD_copy8
Unexecuted instantiation: zdict.c:ZSTD_copy8
177
6.53M
#define COPY8(d,s) do { ZSTD_copy8(d,s); d+=8; s+=8; } while (0)
178
179
/* Need to use memmove here since the literal buffer can now be located within
180
   the dst buffer. In circumstances where the op "catches up" to where the
181
   literal buffer is, there can be partial overlaps in this call on the final
182
   copy if the literal is being shifted by less than 16 bytes. */
183
122M
static void ZSTD_copy16(void* dst, const void* src) {
184
#if defined(ZSTD_ARCH_ARM_NEON)
185
    vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
186
#elif defined(ZSTD_ARCH_X86_SSE2)
187
    _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
188
#elif defined(__clang__)
189
    ZSTD_memmove(dst, src, 16);
190
#else
191
    /* ZSTD_memmove is not inlined properly by gcc */
192
    BYTE copy16_buf[16];
193
    ZSTD_memcpy(copy16_buf, src, 16);
194
    ZSTD_memcpy(dst, copy16_buf, 16);
195
#endif
196
122M
}
Unexecuted instantiation: sequence_producer.c:ZSTD_copy16
Unexecuted instantiation: zstd_common.c:ZSTD_copy16
Executed instantiation: zstd_compress.c:ZSTD_copy16 (36.1M executions)
Unexecuted instantiation: zstd_compress_literals.c:ZSTD_copy16
Unexecuted instantiation: zstd_compress_sequences.c:ZSTD_copy16
Unexecuted instantiation: zstd_compress_superblock.c:ZSTD_copy16
Executed instantiation: zstd_double_fast.c:ZSTD_copy16 (1.40M executions)
Executed instantiation: zstd_fast.c:ZSTD_copy16 (1.71M executions)
Executed instantiation: zstd_lazy.c:ZSTD_copy16 (3.72M executions)
Executed instantiation: zstd_ldm.c:ZSTD_copy16 (1.72M executions)
Executed instantiation: zstd_opt.c:ZSTD_copy16 (9.73M executions)
Unexecuted instantiation: zstd_preSplit.c:ZSTD_copy16
Unexecuted instantiation: zstdmt_compress.c:ZSTD_copy16
Unexecuted instantiation: huf_decompress.c:ZSTD_copy16
Unexecuted instantiation: zstd_ddict.c:ZSTD_copy16
Unexecuted instantiation: zstd_decompress.c:ZSTD_copy16
Executed instantiation: zstd_decompress_block.c:ZSTD_copy16 (68.1M executions)
Unexecuted instantiation: cover.c:ZSTD_copy16
Unexecuted instantiation: fastcover.c:ZSTD_copy16
Unexecuted instantiation: zdict.c:ZSTD_copy16
197
31.8M
#define COPY16(d,s) do { ZSTD_copy16(d,s); d+=16; s+=16; } while (0)
198
199
72.1M
#define WILDCOPY_OVERLENGTH 32
200
1.32M
#define WILDCOPY_VECLEN 16
201
202
typedef enum {
203
    ZSTD_no_overlap,
204
    ZSTD_overlap_src_before_dst
205
    /*  ZSTD_overlap_dst_before_src, */
206
} ZSTD_overlap_e;
207
208
/*! ZSTD_wildcopy() :
209
 *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
210
 *  @param ovtype controls the overlap detection
211
 *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
212
 *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
213
 *           The src buffer must be before the dst buffer.
214
 */
215
MEM_STATIC FORCE_INLINE_ATTR
216
void ZSTD_wildcopy(void* dst, const void* src, size_t length, ZSTD_overlap_e const ovtype)
217
22.6M
{
218
22.6M
    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
219
22.6M
    const BYTE* ip = (const BYTE*)src;
220
22.6M
    BYTE* op = (BYTE*)dst;
221
22.6M
    BYTE* const oend = op + length;
222
223
22.6M
    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
224
        /* Handle short offset copies. */
225
6.53M
        do {
226
6.53M
            COPY8(op, ip);
227
6.53M
        } while (op < oend);
228
21.2M
    } else {
229
21.2M
        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
230
        /* Separate out the first COPY16() call because the copy length is
231
         * almost certain to be short, so the branches have different
232
         * probabilities. Since it is almost certain to be short, only do
233
         * one COPY16() in the first call. Then, do two calls per loop since
234
         * at that point it is more likely to have a high trip count.
235
         */
236
21.2M
        ZSTD_copy16(op, ip);
237
21.2M
        if (16 >= length) return;
238
4.40M
        op += 16;
239
4.40M
        ip += 16;
240
15.9M
        do {
241
15.9M
            COPY16(op, ip);
242
15.9M
            COPY16(op, ip);
243
15.9M
        }
244
15.9M
        while (op < oend);
245
4.40M
    }
246
22.6M
}
Unexecuted instantiation: sequence_producer.c:ZSTD_wildcopy
Unexecuted instantiation: zstd_common.c:ZSTD_wildcopy
Executed instantiation: zstd_compress.c:ZSTD_wildcopy (1.28M calls; short-offset COPY8 path: 0; 704k calls continue past the first COPY16; 3.62M double-COPY16 loop iterations)
Unexecuted instantiation: zstd_compress_literals.c:ZSTD_wildcopy
Unexecuted instantiation: zstd_compress_sequences.c:ZSTD_wildcopy
Unexecuted instantiation: zstd_compress_superblock.c:ZSTD_wildcopy
Executed instantiation: zstd_double_fast.c:ZSTD_wildcopy (124k calls; COPY8 path: 0; 53.2k continue past the first COPY16; 182k loop iterations)
Executed instantiation: zstd_fast.c:ZSTD_wildcopy (97.0k calls; COPY8 path: 0; 70.4k continue past the first COPY16; 508k loop iterations)
Executed instantiation: zstd_lazy.c:ZSTD_wildcopy (176k calls; COPY8 path: 0; 88.9k continue past the first COPY16; 604k loop iterations)
Executed instantiation: zstd_ldm.c:ZSTD_wildcopy (93.0k calls; COPY8 path: 0; 35.4k continue past the first COPY16; 123k loop iterations)
Executed instantiation: zstd_opt.c:ZSTD_wildcopy (181k calls; COPY8 path: 0; 110k continue past the first COPY16; 375k loop iterations)
Unexecuted instantiation: zstd_preSplit.c:ZSTD_wildcopy
Unexecuted instantiation: zstdmt_compress.c:ZSTD_wildcopy
Unexecuted instantiation: huf_decompress.c:ZSTD_wildcopy
Unexecuted instantiation: zstd_ddict.c:ZSTD_wildcopy
Unexecuted instantiation: zstd_decompress.c:ZSTD_wildcopy
Executed instantiation: zstd_decompress_block.c:ZSTD_wildcopy (20.6M calls; short-offset COPY8 loop: 6.53M iterations; 19.3M calls take the vector path, 3.33M continue past the first COPY16, 10.5M double-COPY16 loop iterations)
Unexecuted instantiation: cover.c:ZSTD_wildcopy
Unexecuted instantiation: fastcover.c:ZSTD_wildcopy
Unexecuted instantiation: zdict.c:ZSTD_wildcopy
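As the comment above states, ZSTD_wildcopy may read and write up to WILDCOPY_OVERLENGTH bytes beyond `length`, so the destination must have that much slack, and in ZSTD_no_overlap mode src and dst must be at least WILDCOPY_VECLEN bytes apart. A minimal caller sketch under those assumptions (illustrative, not from the library):

    /* op must have litLength + WILDCOPY_OVERLENGTH writable bytes,
     * and lit must be at least WILDCOPY_VECLEN bytes away from op. */
    static void copyLiterals_sketch(BYTE* op, const BYTE* lit, size_t litLength)
    {
        ZSTD_wildcopy(op, lit, litLength, ZSTD_no_overlap);
        /* only the first litLength bytes at op are meaningful; the rest is scratch */
    }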
247
248
MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
249
0
{
250
0
    size_t const length = MIN(dstCapacity, srcSize);
251
0
    if (length > 0) {
252
0
        ZSTD_memcpy(dst, src, length);
253
0
    }
254
0
    return length;
255
0
}
Unexecuted instantiation: sequence_producer.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_common.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_compress.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_compress_literals.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_compress_sequences.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_compress_superblock.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_double_fast.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_fast.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_lazy.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_ldm.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_opt.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_preSplit.c:ZSTD_limitCopy
Unexecuted instantiation: zstdmt_compress.c:ZSTD_limitCopy
Unexecuted instantiation: huf_decompress.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_ddict.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_decompress.c:ZSTD_limitCopy
Unexecuted instantiation: zstd_decompress_block.c:ZSTD_limitCopy
Unexecuted instantiation: cover.c:ZSTD_limitCopy
Unexecuted instantiation: fastcover.c:ZSTD_limitCopy
Unexecuted instantiation: zdict.c:ZSTD_limitCopy
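ZSTD_limitCopy copies MIN(dstCapacity, srcSize) bytes and returns the amount copied, which suits flushing a staging buffer into a possibly smaller output window. A minimal sketch (illustrative):

    /* Copy as much staged data as fits and return the advanced output cursor. */
    static BYTE* flush_sketch(BYTE* op, BYTE* const oend, const void* staged, size_t stagedSize)
    {
        size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend - op), staged, stagedSize);
        return op + flushed;
    }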
256
257
/* define "workspace is too large" as this number of times larger than needed */
258
166k
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
259
260
/* when workspace is continuously too large
261
 * during at least this number of times,
262
 * context's memory usage is considered wasteful,
263
 * because it's sized to handle a worst case scenario which rarely happens.
264
 * In which case, resize it down to free some memory */
265
0
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
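Together these two constants drive a shrink heuristic in the compression context: the workspace is resized down only after it has been oversized by more than the factor for many consecutive uses. A rough sketch of that bookkeeping (illustrative; the real logic lives in the compression workspace code and may differ in detail):

    static int workspaceShouldShrink_sketch(size_t wsSize, size_t neededSize, int* oversizedRuns)
    {
        if (wsSize > neededSize * ZSTD_WORKSPACETOOLARGE_FACTOR)
            (*oversizedRuns)++;
        else
            *oversizedRuns = 0;   /* reset as soon as the size is justified again */
        return *oversizedRuns >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
    }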
266
267
/* Controls whether the input/output buffer is buffered or stable. */
268
typedef enum {
269
    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
270
    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
271
} ZSTD_bufferMode_e;
272
273
274
/*-*******************************************
275
*  Private declarations
276
*********************************************/
277
278
/**
279
 * Contains the compressed frame size and an upper-bound for the decompressed frame size.
280
 * Note: before using `compressedSize`, check for errors using ZSTD_isError().
281
 *       similarly, before using `decompressedBound`, check for errors using:
282
 *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
283
 */
284
typedef struct {
285
    size_t nbBlocks;
286
    size_t compressedSize;
287
    unsigned long long decompressedBound;
288
} ZSTD_frameSizeInfo;   /* decompress & legacy */
289
290
/* ZSTD_invalidateRepCodes() :
291
 * ensures next compression will not use repcodes from previous block.
292
 * Note : only works with regular variant;
293
 *        do not use with extDict variant ! */
294
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
295
296
297
typedef struct {
298
    blockType_e blockType;
299
    U32 lastBlock;
300
    U32 origSize;
301
} blockProperties_t;   /* declared here for decompress and fullbench */
302
303
/*! ZSTD_getcBlockSize() :
304
 *  Provides the size of compressed block from block header `src` */
305
/*  Used by: decompress, fullbench */
306
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
307
                          blockProperties_t* bpPtr);
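A zstd block header is a 3-byte little-endian value: bit 0 is the last-block flag, bits 1-2 hold the blockType_e, and bits 3-23 hold the block size (for bt_rle blocks this field is the regenerated size and the compressed payload is a single byte). A simplified parse sketch, assuming an already bounds-checked input (the real ZSTD_getcBlockSize also validates srcSize and rejects bt_reserved):

    static size_t parseBlockHeader_sketch(const void* src, blockProperties_t* bpPtr)
    {
        U32 const cBlockHeader = MEM_readLE24(src);
        U32 const cSize = cBlockHeader >> 3;
        bpPtr->lastBlock = cBlockHeader & 1;
        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
        bpPtr->origSize  = cSize;
        return (bpPtr->blockType == bt_rle) ? 1 : cSize;
    }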
308
309
/*! ZSTD_decodeSeqHeaders() :
310
 *  decode sequence header from src */
311
/*  Used by: zstd_decompress_block, fullbench */
312
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
313
                       const void* src, size_t srcSize);
314
315
/**
316
 * @returns true iff the CPU supports dynamic BMI2 dispatch.
317
 */
318
MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
319
83.0k
{
320
83.0k
    ZSTD_cpuid_t cpuid = ZSTD_cpuid();
321
83.0k
    return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
322
83.0k
}
Unexecuted instantiation: sequence_producer.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_common.c:ZSTD_cpuSupportsBmi2
Executed instantiation: zstd_compress.c:ZSTD_cpuSupportsBmi2 (31.7k executions)
Unexecuted instantiation: zstd_compress_literals.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_compress_sequences.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_compress_superblock.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_double_fast.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_fast.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_lazy.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_ldm.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_opt.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_preSplit.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstdmt_compress.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: huf_decompress.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zstd_ddict.c:ZSTD_cpuSupportsBmi2
Executed instantiation: zstd_decompress.c:ZSTD_cpuSupportsBmi2 (51.2k executions)
Unexecuted instantiation: zstd_decompress_block.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: cover.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: fastcover.c:ZSTD_cpuSupportsBmi2
Unexecuted instantiation: zdict.c:ZSTD_cpuSupportsBmi2
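The counts above show this helper being reached from zstd_compress.c and zstd_decompress.c, which typically call it once when a context is set up and cache the result so the cpuid probe is not repeated on every operation. A minimal sketch of that pattern (illustrative; the struct and function names are made up):

    typedef struct { int bmi2; } ExampleCtx_sketch;
    static void ExampleCtx_init_sketch(ExampleCtx_sketch* ctx)
    {
        ctx->bmi2 = ZSTD_cpuSupportsBmi2();   /* 1 only if both BMI1 and BMI2 are available */
    }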
323
324
#endif   /* ZSTD_CCOMMON_H_MODULE */