Coverage Report

Created: 2026-01-17 06:30

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/zstd/lib/compress/zstd_preSplit.c
Line
Count
Source
1
/*
2
 * Copyright (c) Meta Platforms, Inc. and affiliates.
3
 * All rights reserved.
4
 *
5
 * This source code is licensed under both the BSD-style license (found in the
6
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7
 * in the COPYING file in the root directory of this source tree).
8
 * You may select, at your option, one of the above-listed licenses.
9
 */
10
11
#include "../common/compiler.h" /* ZSTD_ALIGNOF */
12
#include "../common/mem.h" /* S64 */
13
#include "../common/zstd_deps.h" /* ZSTD_memset */
14
#include "../common/zstd_internal.h" /* ZSTD_STATIC_ASSERT */
15
#include "hist.h" /* HIST_add */
16
#include "zstd_preSplit.h"
17
18
19
#define BLOCKSIZE_MIN 3500
20
328k
#define THRESHOLD_PENALTY_RATE 16
21
164k
#define THRESHOLD_BASE (THRESHOLD_PENALTY_RATE - 2)
22
57.0k
#define THRESHOLD_PENALTY 3
23
24
204k
#define HASHLENGTH 2
25
95.9M
#define HASHLOG_MAX 10
26
95.9M
#define HASHTABLESIZE (1 << HASHLOG_MAX)
27
#define HASHMASK (HASHTABLESIZE - 1)
28
371M
#define KNUTH 0x9e3779b9
29
30
/* for hashLog > 8, hash 2 bytes.
31
 * for hashLog == 8, just take the byte, no hashing.
32
 * The speed of this method relies on compile-time constant propagation */
33
FORCE_INLINE_TEMPLATE unsigned hash2(const void *p, unsigned hashLog)
34
392M
{
35
392M
    assert(hashLog >= 8);
36
392M
    if (hashLog == 8) return (U32)((const BYTE*)p)[0];
37
392M
    assert(hashLog <= HASHLOG_MAX);
38
371M
    return (U32)(MEM_read16(p)) * KNUTH >> (32 - hashLog);
39
371M
}
40
41
42
typedef struct {
43
  unsigned events[HASHTABLESIZE];
44
  size_t nbEvents;
45
} Fingerprint;
46
typedef struct {
47
    Fingerprint pastEvents;
48
    Fingerprint newEvents;
49
} FPStats;
50
51
static void initStats(FPStats* fpstats)
52
74.3k
{
53
74.3k
    ZSTD_memset(fpstats, 0, sizeof(FPStats));
54
74.3k
}
55
56
FORCE_INLINE_TEMPLATE void
57
addEvents_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
58
204k
{
59
204k
    const char* p = (const char*)src;
60
204k
    size_t limit = srcSize - HASHLENGTH + 1;
61
204k
    size_t n;
62
204k
    assert(srcSize >= HASHLENGTH);
63
392M
    for (n = 0; n < limit; n+=samplingRate) {
64
392M
        fp->events[hash2(p+n, hashLog)]++;
65
392M
    }
66
204k
    fp->nbEvents += limit/samplingRate;
67
204k
}
68
69
FORCE_INLINE_TEMPLATE void
70
recordFingerprint_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog)
71
204k
{
72
204k
    ZSTD_memset(fp, 0, sizeof(unsigned) * ((size_t)1 << hashLog));
73
204k
    fp->nbEvents = 0;
74
204k
    addEvents_generic(fp, src, srcSize, samplingRate, hashLog);
75
204k
}
76
77
typedef void (*RecordEvents_f)(Fingerprint* fp, const void* src, size_t srcSize);
78
79
228k
#define FP_RECORD(_rate) ZSTD_recordFingerprint_##_rate
80
81
#define ZSTD_GEN_RECORD_FINGERPRINT(_rate, _hSize)                                 \
82
    static void FP_RECORD(_rate)(Fingerprint* fp, const void* src, size_t srcSize) \
83
204k
    {                                                                              \
84
204k
        recordFingerprint_generic(fp, src, srcSize, _rate, _hSize);                \
85
204k
    }
zstd_preSplit.c:ZSTD_recordFingerprint_43
Line
Count
Source
83
110k
    {                                                                              \
84
110k
        recordFingerprint_generic(fp, src, srcSize, _rate, _hSize);                \
85
110k
    }
zstd_preSplit.c:ZSTD_recordFingerprint_11
Line
Count
Source
83
22.3k
    {                                                                              \
84
22.3k
        recordFingerprint_generic(fp, src, srcSize, _rate, _hSize);                \
85
22.3k
    }
zstd_preSplit.c:ZSTD_recordFingerprint_5
Line
Count
Source
83
34.8k
    {                                                                              \
84
34.8k
        recordFingerprint_generic(fp, src, srcSize, _rate, _hSize);                \
85
34.8k
    }
zstd_preSplit.c:ZSTD_recordFingerprint_1
Line
Count
Source
83
36.3k
    {                                                                              \
84
36.3k
        recordFingerprint_generic(fp, src, srcSize, _rate, _hSize);                \
85
36.3k
    }
86
87
ZSTD_GEN_RECORD_FINGERPRINT(1, 10)
88
ZSTD_GEN_RECORD_FINGERPRINT(5, 10)
89
ZSTD_GEN_RECORD_FINGERPRINT(11, 9)
90
ZSTD_GEN_RECORD_FINGERPRINT(43, 8)
91
92
93
94.6M
static U64 abs64(S64 s64) { return (U64)((s64 < 0) ? -s64 : s64); }
94
95
static U64 fpDistance(const Fingerprint* fp1, const Fingerprint* fp2, unsigned hashLog)
96
188k
{
97
188k
    U64 distance = 0;
98
188k
    size_t n;
99
188k
    assert(hashLog <= HASHLOG_MAX);
100
94.8M
    for (n = 0; n < ((size_t)1 << hashLog); n++) {
101
94.6M
        distance +=
102
94.6M
            abs64((S64)fp1->events[n] * (S64)fp2->nbEvents - (S64)fp2->events[n] * (S64)fp1->nbEvents);
103
94.6M
    }
104
188k
    return distance;
105
188k
}
106
107
/* Compare newEvents with pastEvents
108
 * return 1 when considered "too different"
109
 */
110
static int compareFingerprints(const Fingerprint* ref,
111
                            const Fingerprint* newfp,
112
                            int penalty,
113
                            unsigned hashLog)
114
164k
{
115
164k
    assert(ref->nbEvents > 0);
116
164k
    assert(newfp->nbEvents > 0);
117
164k
    {   U64 p50 = (U64)ref->nbEvents * (U64)newfp->nbEvents;
118
164k
        U64 deviation = fpDistance(ref, newfp, hashLog);
119
164k
        U64 threshold = p50 * (U64)(THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE;
120
164k
        return deviation >= threshold;
121
164k
    }
122
164k
}
123
124
static void mergeEvents(Fingerprint* acc, const Fingerprint* newfp)
125
93.6k
{
126
93.6k
    size_t n;
127
95.9M
    for (n = 0; n < HASHTABLESIZE; n++) {
128
95.8M
        acc->events[n] += newfp->events[n];
129
95.8M
    }
130
93.6k
    acc->nbEvents += newfp->nbEvents;
131
93.6k
}
132
133
static void flushEvents(FPStats* fpstats)
134
0
{
135
0
    size_t n;
136
0
    for (n = 0; n < HASHTABLESIZE; n++) {
137
0
        fpstats->pastEvents.events[n] = fpstats->newEvents.events[n];
138
0
    }
139
0
    fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents;
140
0
    ZSTD_memset(&fpstats->newEvents, 0, sizeof(fpstats->newEvents));
141
0
}
142
143
static void removeEvents(Fingerprint* acc, const Fingerprint* slice)
144
0
{
145
0
    size_t n;
146
0
    for (n = 0; n < HASHTABLESIZE; n++) {
147
0
        assert(acc->events[n] >= slice->events[n]);
148
0
        acc->events[n] -= slice->events[n];
149
0
    }
150
0
    acc->nbEvents -= slice->nbEvents;
151
0
}
152
153
505k
#define CHUNKSIZE (8 << 10)
154
static size_t ZSTD_splitBlock_byChunks(const void* blockStart, size_t blockSize,
155
                        int level,
156
                        void* workspace, size_t wkspSize)
157
57.0k
{
158
57.0k
    static const RecordEvents_f records_fs[] = {
159
57.0k
        FP_RECORD(43), FP_RECORD(11), FP_RECORD(5), FP_RECORD(1)
160
57.0k
    };
161
57.0k
    static const unsigned hashParams[] = { 8, 9, 10, 10 };
162
57.0k
    const RecordEvents_f record_f = (assert(0<=level && level<=3), records_fs[level]);
163
57.0k
    FPStats* const fpstats = (FPStats*)workspace;
164
57.0k
    const char* p = (const char*)blockStart;
165
57.0k
    int penalty = THRESHOLD_PENALTY;
166
57.0k
    size_t pos = 0;
167
57.0k
    assert(blockSize == (128 << 10));
168
57.0k
    assert(workspace != NULL);
169
57.0k
    assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
170
57.0k
    ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
171
57.0k
    assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;
172
173
57.0k
    initStats(fpstats);
174
57.0k
    record_f(&fpstats->pastEvents, p, CHUNKSIZE);
175
150k
    for (pos = CHUNKSIZE; pos <= blockSize - CHUNKSIZE; pos += CHUNKSIZE) {
176
147k
        record_f(&fpstats->newEvents, p + pos, CHUNKSIZE);
177
147k
        if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level])) {
178
53.4k
            return pos;
179
93.6k
        } else {
180
93.6k
            mergeEvents(&fpstats->pastEvents, &fpstats->newEvents);
181
93.6k
            if (penalty > 0) penalty--;
182
93.6k
        }
183
147k
    }
184
57.0k
    assert(pos == blockSize);
185
3.64k
    return blockSize;
186
0
    (void)flushEvents; (void)removeEvents;
187
0
}
188
189
/* ZSTD_splitBlock_fromBorders(): very fast strategy :
190
 * compare fingerprint from beginning and end of the block,
191
 * derive from their difference if it's preferable to split in the middle,
192
 * repeat the process a second time, for finer grained decision.
193
 * 3 times did not bring improvements, so I stopped at 2.
194
 * Benefits are good enough for a cheap heuristic.
195
 * More accurate splitting saves more, but speed impact is also more perceptible.
196
 * For better accuracy, use more elaborate variant *_byChunks.
197
 */
198
static size_t ZSTD_splitBlock_fromBorders(const void* blockStart, size_t blockSize,
199
                        void* workspace, size_t wkspSize)
200
17.3k
{
201
129k
#define SEGMENT_SIZE 512
202
17.3k
    FPStats* const fpstats = (FPStats*)workspace;
203
17.3k
    Fingerprint* middleEvents = (Fingerprint*)(void*)((char*)workspace + 512 * sizeof(unsigned));
204
17.3k
    assert(blockSize == (128 << 10));
205
17.3k
    assert(workspace != NULL);
206
17.3k
    assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0);
207
17.3k
    ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats));
208
17.3k
    assert(wkspSize >= sizeof(FPStats)); (void)wkspSize;
209
210
17.3k
    initStats(fpstats);
211
17.3k
    HIST_add(fpstats->pastEvents.events, blockStart, SEGMENT_SIZE);
212
17.3k
    HIST_add(fpstats->newEvents.events, (const char*)blockStart + blockSize - SEGMENT_SIZE, SEGMENT_SIZE);
213
17.3k
    fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = SEGMENT_SIZE;
214
17.3k
    if (!compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8))
215
5.25k
        return blockSize;
216
217
12.0k
    HIST_add(middleEvents->events, (const char*)blockStart + blockSize/2 - SEGMENT_SIZE/2, SEGMENT_SIZE);
218
12.0k
    middleEvents->nbEvents = SEGMENT_SIZE;
219
12.0k
    {   U64 const distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8);
220
12.0k
        U64 const distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8);
221
12.0k
        U64 const minDistance = SEGMENT_SIZE * SEGMENT_SIZE / 3;
222
12.0k
        if (abs64((S64)distFromBegin - (S64)distFromEnd) < minDistance)
223
4.55k
            return 64 KB;
224
7.51k
        return (distFromBegin > distFromEnd) ? 32 KB : 96 KB;
225
12.0k
    }
226
12.0k
}
227
228
size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize,
229
                    int level,
230
                    void* workspace, size_t wkspSize)
231
74.3k
{
232
74.3k
    DEBUGLOG(6, "ZSTD_splitBlock (level=%i)", level);
233
74.3k
    assert(0<=level && level<=4);
234
74.3k
    if (level == 0)
235
17.3k
        return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize);
236
    /* level >= 1*/
237
57.0k
    return ZSTD_splitBlock_byChunks(blockStart, blockSize, level-1, workspace, wkspSize);
238
74.3k
}