/src/zstd/lib/compress/huf_compress.c
Line | Count | Source |
1 | | /* ****************************************************************** |
2 | | * Huffman encoder, part of New Generation Entropy library |
3 | | * Copyright (c) Meta Platforms, Inc. and affiliates. |
4 | | * |
5 | | * You can contact the author at : |
6 | | * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy |
7 | | * - Public forum : https://groups.google.com/forum/#!forum/lz4c |
8 | | * |
9 | | * This source code is licensed under both the BSD-style license (found in the |
10 | | * LICENSE file in the root directory of this source tree) and the GPLv2 (found |
11 | | * in the COPYING file in the root directory of this source tree). |
12 | | * You may select, at your option, one of the above-listed licenses. |
13 | | ****************************************************************** */ |
14 | | |
15 | | /* ************************************************************** |
16 | | * Compiler specifics |
17 | | ****************************************************************/ |
18 | | #ifdef _MSC_VER /* Visual Studio */ |
19 | | # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ |
20 | | #endif |
21 | | |
22 | | |
23 | | /* ************************************************************** |
24 | | * Includes |
25 | | ****************************************************************/ |
26 | | #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */ |
27 | | #include "../common/compiler.h" |
28 | | #include "../common/bitstream.h" |
29 | | #include "hist.h" |
30 | | #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ |
31 | | #include "../common/fse.h" /* header compression */ |
32 | | #include "../common/huf.h" |
33 | | #include "../common/error_private.h" |
34 | | #include "../common/bits.h" /* ZSTD_highbit32 */ |
35 | | |
36 | | |
37 | | /* ************************************************************** |
38 | | * Error Management |
39 | | ****************************************************************/ |
40 | 0 | #define HUF_isError ERR_isError |
41 | 0 | #define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ |
42 | | |
43 | | |
44 | | /* ************************************************************** |
45 | | * Required declarations |
46 | | ****************************************************************/ |
47 | | typedef struct nodeElt_s { |
48 | | U32 count; |
49 | | U16 parent; |
50 | | BYTE byte; |
51 | | BYTE nbBits; |
52 | | } nodeElt; |
53 | | |
54 | | |
55 | | /* ************************************************************** |
56 | | * Debug Traces |
57 | | ****************************************************************/ |
58 | | |
59 | | #if DEBUGLEVEL >= 2 |
60 | | |
61 | | static size_t showU32(const U32* arr, size_t size) |
62 | | { |
63 | | size_t u; |
64 | | for (u=0; u<size; u++) { |
65 | | RAWLOG(6, " %u", arr[u]); (void)arr; |
66 | | } |
67 | | RAWLOG(6, " \n"); |
68 | | return size; |
69 | | } |
70 | | |
71 | | static size_t HUF_getNbBits(HUF_CElt elt); |
72 | | |
73 | | static size_t showCTableBits(const HUF_CElt* ctable, size_t size) |
74 | | { |
75 | | size_t u; |
76 | | for (u=0; u<size; u++) { |
77 | | RAWLOG(6, " %zu", HUF_getNbBits(ctable[u])); (void)ctable; |
78 | | } |
79 | | RAWLOG(6, " \n"); |
80 | | return size; |
81 | | |
82 | | } |
83 | | |
84 | | static size_t showHNodeSymbols(const nodeElt* hnode, size_t size) |
85 | | { |
86 | | size_t u; |
87 | | for (u=0; u<size; u++) { |
88 | | RAWLOG(6, " %u", hnode[u].byte); (void)hnode; |
89 | | } |
90 | | RAWLOG(6, " \n"); |
91 | | return size; |
92 | | } |
93 | | |
94 | | static size_t showHNodeBits(const nodeElt* hnode, size_t size) |
95 | | { |
96 | | size_t u; |
97 | | for (u=0; u<size; u++) { |
98 | | RAWLOG(6, " %u", hnode[u].nbBits); (void)hnode; |
99 | | } |
100 | | RAWLOG(6, " \n"); |
101 | | return size; |
102 | | } |
103 | | |
104 | | #endif |
105 | | |
106 | | |
107 | | /* ******************************************************* |
108 | | * HUF : Huffman block compression |
109 | | *********************************************************/ |
110 | | #define HUF_WORKSPACE_MAX_ALIGNMENT 8 |
111 | | |
112 | | static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align) |
113 | 0 | { |
114 | 0 | size_t const mask = align - 1; |
115 | 0 | size_t const rem = (size_t)workspace & mask; |
116 | 0 | size_t const add = (align - rem) & mask; |
117 | 0 | BYTE* const aligned = (BYTE*)workspace + add; |
118 | 0 | assert((align & (align - 1)) == 0); /* pow 2 */ |
119 | 0 | assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT); |
120 | 0 | if (*workspaceSizePtr >= add) { |
121 | 0 | assert(add < align); |
122 | 0 | assert(((size_t)aligned & mask) == 0); |
123 | 0 | *workspaceSizePtr -= add; |
124 | 0 | return aligned; |
125 | 0 | } else { |
126 | 0 | *workspaceSizePtr = 0; |
127 | 0 | return NULL; |
128 | 0 | } |
129 | 0 | } |
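 | | /* Worked example of the alignment math above (illustrative sketch, not part of
 | |  * the original source) : with workspace == 0x1003 and align == 8,
 | |  * mask = 7, rem = 0x1003 & 7 = 3, add = (8-3) & 7 = 5, aligned = 0x1008,
 | |  * and *workspaceSizePtr shrinks by 5 bytes.
 | |  * An already-aligned workspace gives rem = 0, add = 0, and no size loss. */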
130 | | |
131 | | |
132 | | /* HUF_compressWeights() : |
133 | | * Same as FSE_compress(), but dedicated to huff0's weights compression. |
134 | | * The use case needs much less stack memory. |
135 | | * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. |
136 | | */ |
137 | 0 | #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 |
138 | | |
139 | | typedef struct { |
140 | | FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; |
141 | | U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)]; |
142 | | unsigned count[HUF_TABLELOG_MAX+1]; |
143 | | S16 norm[HUF_TABLELOG_MAX+1]; |
144 | | } HUF_CompressWeightsWksp; |
145 | | |
146 | | static size_t |
147 | | HUF_compressWeights(void* dst, size_t dstSize, |
148 | | const void* weightTable, size_t wtSize, |
149 | | void* workspace, size_t workspaceSize) |
150 | 0 | { |
151 | 0 | BYTE* const ostart = (BYTE*) dst; |
152 | 0 | BYTE* op = ostart; |
153 | 0 | BYTE* const oend = ostart + dstSize; |
154 | |
155 | 0 | unsigned maxSymbolValue = HUF_TABLELOG_MAX; |
156 | 0 | U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; |
157 | 0 | HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); |
158 | |
159 | 0 | if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC); |
160 | | |
161 | | /* init conditions */ |
162 | 0 | if (wtSize <= 1) return 0; /* Not compressible */ |
163 | | |
164 | | /* Scan input and build symbol stats */ |
165 | 0 | { unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */ |
166 | 0 | if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */ |
167 | 0 | if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ |
168 | 0 | } |
169 | | |
170 | 0 | tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); |
171 | 0 | CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) ); |
172 | | |
173 | | /* Write table description header */ |
174 | 0 | { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) ); |
175 | 0 | op += hSize; |
176 | 0 | } |
177 | | |
178 | | /* Compress */ |
179 | 0 | CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) ); |
180 | 0 | { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) ); |
181 | 0 | if (cSize == 0) return 0; /* not enough space for compressed data */ |
182 | 0 | op += cSize; |
183 | 0 | } |
184 | | |
185 | 0 | return (size_t)(op-ostart); |
186 | 0 | } |
187 | | |
188 | | static size_t HUF_getNbBits(HUF_CElt elt) |
189 | 0 | { |
190 | 0 | return elt & 0xFF; |
191 | 0 | } |
192 | | |
193 | | static size_t HUF_getNbBitsFast(HUF_CElt elt) |
194 | 0 | { |
195 | 0 | return elt; |
196 | 0 | } |
197 | | |
198 | | static size_t HUF_getValue(HUF_CElt elt) |
199 | 0 | { |
200 | 0 | return elt & ~(size_t)0xFF; |
201 | 0 | } |
202 | | |
203 | | static size_t HUF_getValueFast(HUF_CElt elt) |
204 | 0 | { |
205 | 0 | return elt; |
206 | 0 | } |
207 | | |
208 | | static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits) |
209 | 0 | { |
210 | 0 | assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX); |
211 | 0 | *elt = nbBits; |
212 | 0 | } |
213 | | |
214 | | static void HUF_setValue(HUF_CElt* elt, size_t value) |
215 | 0 | { |
216 | 0 | size_t const nbBits = HUF_getNbBits(*elt); |
217 | 0 | if (nbBits > 0) { |
218 | 0 | assert((value >> nbBits) == 0); |
219 | 0 | *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits); |
220 | 0 | } |
221 | 0 | } |
222 | | |
223 | | HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable) |
224 | 0 | { |
225 | 0 | HUF_CTableHeader header; |
226 | 0 | ZSTD_memcpy(&header, ctable, sizeof(header)); |
227 | 0 | return header; |
228 | 0 | } |
229 | | |
230 | | static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue) |
231 | 0 | { |
232 | 0 | HUF_CTableHeader header; |
233 | 0 | HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header)); |
234 | 0 | ZSTD_memset(&header, 0, sizeof(header)); |
235 | 0 | assert(tableLog < 256); |
236 | 0 | header.tableLog = (BYTE)tableLog; |
237 | 0 | assert(maxSymbolValue < 256); |
238 | 0 | header.maxSymbolValue = (BYTE)maxSymbolValue; |
239 | 0 | ZSTD_memcpy(ctable, &header, sizeof(header)); |
240 | 0 | } |
241 | | |
242 | | typedef struct { |
243 | | HUF_CompressWeightsWksp wksp; |
244 | | BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ |
245 | | BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; |
246 | | } HUF_WriteCTableWksp; |
247 | | |
248 | | size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, |
249 | | const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, |
250 | | void* workspace, size_t workspaceSize) |
251 | 0 | { |
252 | 0 | HUF_CElt const* const ct = CTable + 1; |
253 | 0 | BYTE* op = (BYTE*)dst; |
254 | 0 | U32 n; |
255 | 0 | HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); |
256 | |
257 | 0 | HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp)); |
258 | |
259 | 0 | assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue); |
260 | 0 | assert(HUF_readCTableHeader(CTable).tableLog == huffLog); |
261 | | |
262 | | /* check conditions */ |
263 | 0 | if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); |
264 | 0 | if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); |
265 | | |
266 | | /* convert to weight */ |
267 | 0 | wksp->bitsToWeight[0] = 0; |
268 | 0 | for (n=1; n<huffLog+1; n++) |
269 | 0 | wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n); |
270 | 0 | for (n=0; n<maxSymbolValue; n++) |
271 | 0 | wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])]; |
272 | | |
273 | | /* attempt weights compression by FSE */ |
274 | 0 | if (maxDstSize < 1) return ERROR(dstSize_tooSmall); |
275 | 0 | { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) ); |
276 | 0 | if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ |
277 | 0 | op[0] = (BYTE)hSize; |
278 | 0 | return hSize+1; |
279 | 0 | } } |
280 | | |
281 | | /* write raw values as 4-bits (max : 15) */ |
282 | 0 | if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ |
283 | 0 | if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ |
284 | 0 | op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); |
285 | 0 | wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ |
286 | 0 | for (n=0; n<maxSymbolValue; n+=2) |
287 | 0 | op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]); |
288 | 0 | return ((maxSymbolValue+1)/2) + 1; |
289 | 0 | } |
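 | | /* Header byte convention implemented above (sketch, inferred from the encoding
 | |  * logic of this function) :
 | |  * - op[0] < 128  : op[0] bytes of FSE-compressed weights follow.
 | |  * - op[0] >= 128 : raw 4-bit weights follow, two per byte; the number of
 | |  *   weights is op[0] - 127 (= maxSymbolValue), the last symbol's weight
 | |  *   being implied by tree completeness.
 | |  * e.g. maxSymbolValue == 6 in raw mode : op[0] = 128 + 5 = 133, 3 payload
 | |  * bytes, total = ((6+1)/2) + 1 = 4 bytes. */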
290 | | |
291 | | |
292 | | size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights) |
293 | 0 | { |
294 | 0 | BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */ |
295 | 0 | U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ |
296 | 0 | U32 tableLog = 0; |
297 | 0 | U32 nbSymbols = 0; |
298 | 0 | HUF_CElt* const ct = CTable + 1; |
299 | | |
300 | | /* get symbol weights */ |
301 | 0 | CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); |
302 | 0 | *hasZeroWeights = (rankVal[0] > 0); |
303 | | |
304 | | /* check result */ |
305 | 0 | if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); |
306 | 0 | if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); |
307 | | |
308 | 0 | *maxSymbolValuePtr = nbSymbols - 1; |
309 | |
310 | 0 | HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr); |
311 | | |
312 | | /* Prepare base value per rank */ |
313 | 0 | { U32 n, nextRankStart = 0; |
314 | 0 | for (n=1; n<=tableLog; n++) { |
315 | 0 | U32 curr = nextRankStart; |
316 | 0 | nextRankStart += (rankVal[n] << (n-1)); |
317 | 0 | rankVal[n] = curr; |
318 | 0 | } } |
319 | | |
320 | | /* fill nbBits */ |
321 | 0 | { U32 n; for (n=0; n<nbSymbols; n++) { |
322 | 0 | const U32 w = huffWeight[n]; |
323 | 0 | HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0)); |
324 | 0 | } } |
325 | | |
326 | | /* fill val */ |
327 | 0 | { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ |
328 | 0 | U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; |
329 | 0 | { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; } |
330 | | /* determine starting value per rank */ |
331 | 0 | valPerRank[tableLog+1] = 0; /* for w==0 */ |
332 | 0 | { U16 min = 0; |
333 | 0 | U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */ |
334 | 0 | valPerRank[n] = min; /* get starting value within each rank */ |
335 | 0 | min += nbPerRank[n]; |
336 | 0 | min >>= 1; |
337 | 0 | } } |
338 | | /* assign value within rank, symbol order */ |
339 | 0 | { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); } |
340 | 0 | } |
341 | |
342 | 0 | return readSize; |
343 | 0 | } |
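 | | /* Weight <-> code-length relation used above (illustrative) : for tableLog = 6,
 | |  * weight 6 -> 1 bit, weight 1 -> 6 bits (nbBits = tableLog + 1 - weight),
 | |  * and weight 0 marks a symbol that does not appear (nbBits = 0). */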
344 | | |
345 | | U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue) |
346 | 0 | { |
347 | 0 | const HUF_CElt* const ct = CTable + 1; |
348 | 0 | assert(symbolValue <= HUF_SYMBOLVALUE_MAX); |
349 | 0 | if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue) |
350 | 0 | return 0; |
351 | 0 | return (U32)HUF_getNbBits(ct[symbolValue]); |
352 | 0 | } |
353 | | |
354 | | |
355 | | /** |
356 | | * HUF_setMaxHeight(): |
357 | | * Try to enforce @targetNbBits on the Huffman tree described in @huffNode. |
358 | | * |
359 | | * It attempts to convert all nodes with nbBits > @targetNbBits |
360 | | * to employ @targetNbBits instead. Then it adjusts the tree |
361 | | * so that it remains a valid canonical Huffman tree. |
362 | | * |
363 | | * @pre The sum of the ranks of each symbol == 2^largestBits, |
364 | | * where largestBits == huffNode[lastNonNull].nbBits. |
365 | | * @post The sum of the ranks of each symbol == 2^largestBits, |
366 | | * where largestBits is the return value (expected <= targetNbBits). |
367 | | * |
368 | | * @param huffNode The Huffman tree modified in place to enforce targetNbBits. |
369 | | * It's presumed sorted, from most frequent to rarest symbol. |
370 | | * @param lastNonNull The symbol with the lowest count in the Huffman tree. |
371 | | * @param targetNbBits The allowed number of bits, which the Huffman tree |
372 | | * may not respect. After this function the Huffman tree will |
373 | | * respect targetNbBits. |
374 | | * @return The maximum number of bits of the Huffman tree after adjustment. |
375 | | */ |
376 | | static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits) |
377 | 0 | { |
378 | 0 | const U32 largestBits = huffNode[lastNonNull].nbBits; |
379 | | /* early exit : no elt > targetNbBits, so the tree is already valid. */ |
380 | 0 | if (largestBits <= targetNbBits) return largestBits; |
381 | | |
382 | 0 | DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits); |
383 | | |
384 | | /* there are several too large elements (at least >= 2) */ |
385 | 0 | { int totalCost = 0; |
386 | 0 | const U32 baseCost = 1 << (largestBits - targetNbBits); |
387 | 0 | int n = (int)lastNonNull; |
388 | | |
389 | | /* Adjust any ranks > targetNbBits to targetNbBits. |
390 | | * Compute totalCost, which is how far the sum of the ranks is |
391 | | * over 2^largestBits after adjusting the offending ranks. |
392 | | */ |
393 | 0 | while (huffNode[n].nbBits > targetNbBits) { |
394 | 0 | totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); |
395 | 0 | huffNode[n].nbBits = (BYTE)targetNbBits; |
396 | 0 | n--; |
397 | 0 | } |
398 | | /* n stops at huffNode[n].nbBits <= targetNbBits */ |
399 | 0 | assert(huffNode[n].nbBits <= targetNbBits); |
400 | | /* n ends at the index of the smallest symbol using < targetNbBits */ |
401 | 0 | while (huffNode[n].nbBits == targetNbBits) --n; |
402 | | |
403 | | /* renorm totalCost from 2^largestBits to 2^targetNbBits |
404 | | * note : totalCost is necessarily a multiple of baseCost */ |
405 | 0 | assert(((U32)totalCost & (baseCost - 1)) == 0); |
406 | 0 | totalCost >>= (largestBits - targetNbBits); |
407 | 0 | assert(totalCost > 0); |
408 | | |
409 | | /* repay normalized cost */ |
410 | 0 | { U32 const noSymbol = 0xF0F0F0F0; |
411 | 0 | U32 rankLast[HUF_TABLELOG_MAX+2]; |
412 | | |
413 | | /* Get pos of last (smallest = lowest cum. count) symbol per rank */ |
414 | 0 | ZSTD_memset(rankLast, 0xF0, sizeof(rankLast)); |
415 | 0 | { U32 currentNbBits = targetNbBits; |
416 | 0 | int pos; |
417 | 0 | for (pos=n ; pos >= 0; pos--) { |
418 | 0 | if (huffNode[pos].nbBits >= currentNbBits) continue; |
419 | 0 | currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */ |
420 | 0 | rankLast[targetNbBits-currentNbBits] = (U32)pos; |
421 | 0 | } } |
422 | |
423 | 0 | while (totalCost > 0) { |
424 | | /* Try to reduce the next power of 2 above totalCost because we |
425 | | * gain back half the rank. |
426 | | */ |
427 | 0 | U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1; |
428 | 0 | for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { |
429 | 0 | U32 const highPos = rankLast[nBitsToDecrease]; |
430 | 0 | U32 const lowPos = rankLast[nBitsToDecrease-1]; |
431 | 0 | if (highPos == noSymbol) continue; |
432 | | /* Decrease highPos if no symbols of lowPos or if it is |
433 | | * not cheaper to remove 2 lowPos than highPos. |
434 | | */ |
435 | 0 | if (lowPos == noSymbol) break; |
436 | 0 | { U32 const highTotal = huffNode[highPos].count; |
437 | 0 | U32 const lowTotal = 2 * huffNode[lowPos].count; |
438 | 0 | if (highTotal <= lowTotal) break; |
439 | 0 | } } |
440 | | /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */ |
441 | 0 | assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1); |
442 | | /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ |
443 | 0 | while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) |
444 | 0 | nBitsToDecrease++; |
445 | 0 | assert(rankLast[nBitsToDecrease] != noSymbol); |
446 | | /* Increase the number of bits to gain back half the rank cost. */ |
447 | 0 | totalCost -= 1 << (nBitsToDecrease-1); |
448 | 0 | huffNode[rankLast[nBitsToDecrease]].nbBits++; |
449 | | |
450 | | /* Fix up the new rank. |
451 | | * If the new rank was empty, this symbol is now its smallest. |
452 | | * Otherwise, this symbol will be the largest in the new rank so no adjustment. |
453 | | */ |
454 | 0 | if (rankLast[nBitsToDecrease-1] == noSymbol) |
455 | 0 | rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; |
456 | | /* Fix up the old rank. |
457 | | * If the symbol was at position 0, meaning it was the highest weight symbol in the tree, |
458 | | * it must be the only symbol in its rank, so the old rank now has no symbols. |
459 | | * Otherwise, since the Huffman nodes are sorted by count, the previous position is now |
460 | | * the smallest node in the rank. If the previous position belongs to a different rank, |
461 | | * then the rank is now empty. |
462 | | */ |
463 | 0 | if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ |
464 | 0 | rankLast[nBitsToDecrease] = noSymbol; |
465 | 0 | else { |
466 | 0 | rankLast[nBitsToDecrease]--; |
467 | 0 | if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease) |
468 | 0 | rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ |
469 | 0 | } |
470 | 0 | } /* while (totalCost > 0) */ |
471 | | |
472 | | /* If we've removed too much weight, then we have to add it back. |
473 | | * To avoid overshooting again, we only adjust the smallest rank. |
474 | | * We take the largest nodes from the lowest rank 0 and move them |
475 | | * to rank 1. There's guaranteed to be enough rank 0 symbols because |
476 | | * TODO. |
477 | | */ |
478 | 0 | while (totalCost < 0) { /* Sometimes, cost correction overshoot */ |
479 | | /* special case : no rank 1 symbol (using targetNbBits-1); |
480 | | * let's create one from largest rank 0 (using targetNbBits). |
481 | | */ |
482 | 0 | if (rankLast[1] == noSymbol) { |
483 | 0 | while (huffNode[n].nbBits == targetNbBits) n--; |
484 | 0 | huffNode[n+1].nbBits--; |
485 | 0 | assert(n >= 0); |
486 | 0 | rankLast[1] = (U32)(n+1); |
487 | 0 | totalCost++; |
488 | 0 | continue; |
489 | 0 | } |
490 | 0 | huffNode[ rankLast[1] + 1 ].nbBits--; |
491 | 0 | rankLast[1]++; |
492 | 0 | totalCost ++; |
493 | 0 | } |
494 | 0 | } /* repay normalized cost */ |
495 | 0 | } /* there are several too large elements (at least >= 2) */ |
496 | |
497 | 0 | return targetNbBits; |
498 | 0 | } |
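 | | /* Numeric sketch of the cost bookkeeping above (illustrative) :
 | |  * largestBits = 13, targetNbBits = 11  =>  baseCost = 1 << 2 = 4.
 | |  * Truncating a 13-bit symbol to 11 bits adds 4 - 1 = 3 to totalCost,
 | |  * truncating a 12-bit symbol adds 4 - 2 = 2 ; two 13-bit symbols plus one
 | |  * 12-bit symbol give totalCost = 8, renormalized to 8 >> 2 = 2.
 | |  * Repayment : demoting one 9-bit symbol to 10 bits (nBitsToDecrease = 2)
 | |  * repays 1 << 1 = 2, or two 10-bit symbols demoted to 11 bits repay 1 each. */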
499 | | |
500 | | typedef struct { |
501 | | U16 base; |
502 | | U16 curr; |
503 | | } rankPos; |
504 | | |
505 | | typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)]; |
506 | | |
507 | | /* Number of buckets available for HUF_sort() */ |
508 | 0 | #define RANK_POSITION_TABLE_SIZE 192 |
509 | | |
510 | | typedef struct { |
511 | | huffNodeTable huffNodeTbl; |
512 | | rankPos rankPosition[RANK_POSITION_TABLE_SIZE]; |
513 | | } HUF_buildCTable_wksp_tables; |
514 | | |
515 | | /* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing. |
516 | | * Strategy is to use as many buckets as possible for representing distinct |
517 | | * counts while using the remainder to represent all "large" counts. |
518 | | * |
519 | | * To satisfy this requirement for 192 buckets, we can do the following: |
520 | | * Let buckets 0-166 represent distinct counts of [0, 166] |
521 | | * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing. |
522 | | */ |
523 | 0 | #define RANK_POSITION_MAX_COUNT_LOG 32 |
524 | 0 | #define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */) |
525 | 0 | #define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */) |
526 | | |
527 | | /* Return the appropriate bucket index for a given count. See definition of |
528 | | * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. |
529 | | */ |
530 | 0 | static U32 HUF_getIndex(U32 const count) { |
531 | 0 | return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF) |
532 | 0 | ? count |
533 | 0 | : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; |
534 | 0 | } |
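 | | /* Examples, assuming the macro values above (illustrative) :
 | |  * HUF_getIndex(7)    == 7                 (distinct-count bucket)
 | |  * HUF_getIndex(1000) == 9 + 158 == 167    (log2 bucket, since ZSTD_highbit32(1000) == 9)
 | |  * Large counts sharing a log2 bucket are sorted later by HUF_simpleQuickSort()
 | |  * inside HUF_sort(). */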
535 | | |
536 | | /* Helper swap function for HUF_quickSortPartition() */ |
537 | 0 | static void HUF_swapNodes(nodeElt* a, nodeElt* b) { |
538 | 0 | nodeElt tmp = *a; |
539 | 0 | *a = *b; |
540 | 0 | *b = tmp; |
541 | 0 | } |
542 | | |
543 | | /* Returns 0 if the huffNode array is not sorted by descending count */ |
544 | 0 | MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) { |
545 | 0 | U32 i; |
546 | 0 | for (i = 1; i < maxSymbolValue1; ++i) { |
547 | 0 | if (huffNode[i].count > huffNode[i-1].count) { |
548 | 0 | return 0; |
549 | 0 | } |
550 | 0 | } |
551 | 0 | return 1; |
552 | 0 | } |
553 | | |
554 | | /* Insertion sort by descending order */ |
555 | 0 | HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) { |
556 | 0 | int i; |
557 | 0 | int const size = high-low+1; |
558 | 0 | huffNode += low; |
559 | 0 | for (i = 1; i < size; ++i) { |
560 | 0 | nodeElt const key = huffNode[i]; |
561 | 0 | int j = i - 1; |
562 | 0 | while (j >= 0 && huffNode[j].count < key.count) { |
563 | 0 | huffNode[j + 1] = huffNode[j]; |
564 | 0 | j--; |
565 | 0 | } |
566 | 0 | huffNode[j + 1] = key; |
567 | 0 | } |
568 | 0 | } |
569 | | |
570 | | /* Pivot helper function for quicksort. */ |
571 | 0 | static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) { |
572 | | /* Simply select rightmost element as pivot. "Better" selectors like |
573 | | * median-of-three don't experimentally appear to have any benefit. |
574 | | */ |
575 | 0 | U32 const pivot = arr[high].count; |
576 | 0 | int i = low - 1; |
577 | 0 | int j = low; |
578 | 0 | for ( ; j < high; j++) { |
579 | 0 | if (arr[j].count > pivot) { |
580 | 0 | i++; |
581 | 0 | HUF_swapNodes(&arr[i], &arr[j]); |
582 | 0 | } |
583 | 0 | } |
584 | 0 | HUF_swapNodes(&arr[i + 1], &arr[high]); |
585 | 0 | return i + 1; |
586 | 0 | } |
587 | | |
588 | | /* Classic quicksort by descending with partially iterative calls |
589 | | * to reduce worst case callstack size. |
590 | | */ |
591 | 0 | static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) { |
592 | 0 | int const kInsertionSortThreshold = 8; |
593 | 0 | if (high - low < kInsertionSortThreshold) { |
594 | 0 | HUF_insertionSort(arr, low, high); |
595 | 0 | return; |
596 | 0 | } |
597 | 0 | while (low < high) { |
598 | 0 | int const idx = HUF_quickSortPartition(arr, low, high); |
599 | 0 | if (idx - low < high - idx) { |
600 | 0 | HUF_simpleQuickSort(arr, low, idx - 1); |
601 | 0 | low = idx + 1; |
602 | 0 | } else { |
603 | 0 | HUF_simpleQuickSort(arr, idx + 1, high); |
604 | 0 | high = idx - 1; |
605 | 0 | } |
606 | 0 | } |
607 | 0 | } |
608 | | |
609 | | /** |
610 | | * HUF_sort(): |
611 | | * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. |
612 | | * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. |
613 | | * |
614 | | * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. |
615 | | * Must have (maxSymbolValue + 1) entries. |
616 | | * @param[in] count Histogram of the symbols. |
617 | | * @param[in] maxSymbolValue Maximum symbol value. |
618 | | * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. |
619 | | */ |
620 | 0 | static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) { |
621 | 0 | U32 n; |
622 | 0 | U32 const maxSymbolValue1 = maxSymbolValue+1; |
623 | | |
624 | | /* Compute base and set curr to base. |
625 | | * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1. |
626 | | * See HUF_getIndex to see bucketing strategy. |
627 | | * We attribute each symbol to lowerRank's base value, because we want to know where |
628 | | * each rank begins in the output, so for rank R we want to count ranks R+1 and above. |
629 | | */ |
630 | 0 | ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); |
631 | 0 | for (n = 0; n < maxSymbolValue1; ++n) { |
632 | 0 | U32 lowerRank = HUF_getIndex(count[n]); |
633 | 0 | assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1); |
634 | 0 | rankPosition[lowerRank].base++; |
635 | 0 | } |
636 | |
637 | 0 | assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0); |
638 | | /* Set up the rankPosition table */ |
639 | 0 | for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) { |
640 | 0 | rankPosition[n-1].base += rankPosition[n].base; |
641 | 0 | rankPosition[n-1].curr = rankPosition[n-1].base; |
642 | 0 | } |
643 | | |
644 | | /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */ |
645 | 0 | for (n = 0; n < maxSymbolValue1; ++n) { |
646 | 0 | U32 const c = count[n]; |
647 | 0 | U32 const r = HUF_getIndex(c) + 1; |
648 | 0 | U32 const pos = rankPosition[r].curr++; |
649 | 0 | assert(pos < maxSymbolValue1); |
650 | 0 | huffNode[pos].count = c; |
651 | 0 | huffNode[pos].byte = (BYTE)n; |
652 | 0 | } |
653 | | |
654 | | /* Sort each bucket. */ |
655 | 0 | for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) { |
656 | 0 | int const bucketSize = rankPosition[n].curr - rankPosition[n].base; |
657 | 0 | U32 const bucketStartIdx = rankPosition[n].base; |
658 | 0 | if (bucketSize > 1) { |
659 | 0 | assert(bucketStartIdx < maxSymbolValue1); |
660 | 0 | HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1); |
661 | 0 | } |
662 | 0 | } |
663 | |
664 | 0 | assert(HUF_isSorted(huffNode, maxSymbolValue1)); |
665 | 0 | } |
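 | | /* Illustrative trace of the bucketing above, for count = {A:9, B:3, C:3, D:0} :
 | |  * lowerRank = {9, 3, 3, 0} ; after the suffix accumulation,
 | |  * base[k] = number of symbols with index >= k, so base[10..] = 0,
 | |  * base[4..9] = 1, base[1..3] = 3, base[0] = 4.
 | |  * Placement with r = index + 1 puts A at pos 0, B at 1, C at 2, D at 3,
 | |  * i.e. already ordered by decreasing count before the per-bucket sort. */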
666 | | |
667 | | |
668 | | /** HUF_buildCTable_wksp() : |
669 | | * Same as HUF_buildCTable(), but using externally allocated scratch buffer. |
670 | | * `workSpace` must be aligned on 4-byte boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). |
671 | | */ |
672 | 0 | #define STARTNODE (HUF_SYMBOLVALUE_MAX+1) |
673 | | |
674 | | /* HUF_buildTree(): |
675 | | * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree. |
676 | | * |
677 | | * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array. |
678 | | * @param maxSymbolValue The maximum symbol value. |
679 | | * @return The smallest node in the Huffman tree (by count). |
680 | | */ |
681 | | static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) |
682 | 0 | { |
683 | 0 | nodeElt* const huffNode0 = huffNode - 1; |
684 | 0 | int nonNullRank; |
685 | 0 | int lowS, lowN; |
686 | 0 | int nodeNb = STARTNODE; |
687 | 0 | int n, nodeRoot; |
688 | 0 | DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1); |
689 | | /* init for parents */ |
690 | 0 | nonNullRank = (int)maxSymbolValue; |
691 | 0 | while(huffNode[nonNullRank].count == 0) nonNullRank--; |
692 | 0 | lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; |
693 | 0 | huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; |
694 | 0 | huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb; |
695 | 0 | nodeNb++; lowS-=2; |
696 | 0 | for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); |
697 | 0 | huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ |
698 | | |
699 | | /* create parents */ |
700 | 0 | while (nodeNb <= nodeRoot) { |
701 | 0 | int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; |
702 | 0 | int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; |
703 | 0 | huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; |
704 | 0 | huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb; |
705 | 0 | nodeNb++; |
706 | 0 | } |
707 | | |
708 | | /* distribute weights (unlimited tree height) */ |
709 | 0 | huffNode[nodeRoot].nbBits = 0; |
710 | 0 | for (n=nodeRoot-1; n>=STARTNODE; n--) |
711 | 0 | huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; |
712 | 0 | for (n=0; n<=nonNullRank; n++) |
713 | 0 | huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; |
714 | |
715 | 0 | DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1)); |
716 | |
717 | 0 | return nonNullRank; |
718 | 0 | } |
719 | | |
720 | | /** |
721 | | * HUF_buildCTableFromTree(): |
722 | | * Build the CTable given the Huffman tree in huffNode. |
723 | | * |
724 | | * @param[out] CTable The output Huffman CTable. |
725 | | * @param huffNode The Huffman tree. |
726 | | * @param nonNullRank The last and smallest node in the Huffman tree. |
727 | | * @param maxSymbolValue The maximum symbol value. |
728 | | * @param maxNbBits The exact maximum number of bits used in the Huffman tree. |
729 | | */ |
730 | | static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits) |
731 | 0 | { |
732 | 0 | HUF_CElt* const ct = CTable + 1; |
733 | | /* fill result into ctable (val, nbBits) */ |
734 | 0 | int n; |
735 | 0 | U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; |
736 | 0 | U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; |
737 | 0 | int const alphabetSize = (int)(maxSymbolValue + 1); |
738 | 0 | for (n=0; n<=nonNullRank; n++) |
739 | 0 | nbPerRank[huffNode[n].nbBits]++; |
740 | | /* determine starting value per rank */ |
741 | 0 | { U16 min = 0; |
742 | 0 | for (n=(int)maxNbBits; n>0; n--) { |
743 | 0 | valPerRank[n] = min; /* get starting value within each rank */ |
744 | 0 | min += nbPerRank[n]; |
745 | 0 | min >>= 1; |
746 | 0 | } } |
747 | 0 | for (n=0; n<alphabetSize; n++) |
748 | 0 | HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */ |
749 | 0 | for (n=0; n<alphabetSize; n++) |
750 | 0 | HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */ |
751 | |
|
752 | 0 | HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue); |
753 | 0 | } |
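 | | /* Worked example of the canonical value assignment above (illustrative) :
 | |  * maxNbBits = 3, nbPerRank = { [3]=2, [2]=1, [1]=1 } (Kraft sum == 1).
 | |  * The descending loop gives valPerRank[3] = 0, valPerRank[2] = 1, valPerRank[1] = 1,
 | |  * so the two 3-bit symbols get codes 000 and 001, the 2-bit symbol gets 01,
 | |  * and the 1-bit symbol gets 1 : a complete prefix-free code. */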
754 | | |
755 | | size_t |
756 | | HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, |
757 | | void* workSpace, size_t wkspSize) |
758 | 0 | { |
759 | 0 | HUF_buildCTable_wksp_tables* const wksp_tables = |
760 | 0 | (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32)); |
761 | 0 | nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; |
762 | 0 | nodeElt* const huffNode = huffNode0+1; |
763 | 0 | int nonNullRank; |
764 | |
765 | 0 | HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables)); |
766 | |
767 | 0 | DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1); |
768 | | |
769 | | /* safety checks */ |
770 | 0 | if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) |
771 | 0 | return ERROR(workSpace_tooSmall); |
772 | 0 | if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; |
773 | 0 | if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) |
774 | 0 | return ERROR(maxSymbolValue_tooLarge); |
775 | 0 | ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable)); |
776 | | |
777 | | /* sort, decreasing order */ |
778 | 0 | HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition); |
779 | 0 | DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1)); |
780 | | |
781 | | /* build tree */ |
782 | 0 | nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); |
783 | | |
784 | | /* determine and enforce maxTableLog */ |
785 | 0 | maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); |
786 | 0 | if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ |
787 | | |
788 | 0 | HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits); |
789 | |
790 | 0 | return maxNbBits; |
791 | 0 | } |
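 | | /* Minimal usage sketch (not from the original source; assumes constants from
 | |  * huf.h such as HUF_WORKSPACE_SIZE_U64, error handling elided) :
 | |  *
 | |  *   unsigned count[HUF_SYMBOLVALUE_MAX + 1];                  // symbol histogram
 | |  *   HUF_CElt ctable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
 | |  *   U64      wksp[HUF_WORKSPACE_SIZE_U64];                    // scratch space
 | |  *   // ... fill count[], e.g. with HIST_count_simple() ...
 | |  *   size_t const maxNbBits = HUF_buildCTable_wksp(ctable, count,
 | |  *                                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
 | |  *                                wksp, sizeof(wksp));
 | |  *   if (HUF_isError(maxNbBits)) { ... }                       // handle error
 | |  */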
792 | | |
793 | | size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) |
794 | 0 | { |
795 | 0 | HUF_CElt const* ct = CTable + 1; |
796 | 0 | size_t nbBits = 0; |
797 | 0 | int s; |
798 | 0 | for (s = 0; s <= (int)maxSymbolValue; ++s) { |
799 | 0 | nbBits += HUF_getNbBits(ct[s]) * count[s]; |
800 | 0 | } |
801 | 0 | return nbBits >> 3; |
802 | 0 | } |
803 | | |
804 | 0 | int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { |
805 | 0 | HUF_CTableHeader header = HUF_readCTableHeader(CTable); |
806 | 0 | HUF_CElt const* ct = CTable + 1; |
807 | 0 | int bad = 0; |
808 | 0 | int s; |
809 | |
810 | 0 | assert(header.tableLog <= HUF_TABLELOG_ABSOLUTEMAX); |
811 | |
812 | 0 | if (header.maxSymbolValue < maxSymbolValue) |
813 | 0 | return 0; |
814 | | |
815 | 0 | for (s = 0; s <= (int)maxSymbolValue; ++s) { |
816 | 0 | bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0); |
817 | 0 | } |
818 | 0 | return !bad; |
819 | 0 | } |
820 | | |
821 | 0 | size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } |
822 | | |
823 | | /** HUF_CStream_t: |
824 | | * Huffman uses its own BIT_CStream_t implementation. |
825 | | * There are three major differences from BIT_CStream_t: |
826 | | * 1. HUF_addBits() takes a HUF_CElt (size_t) which is |
827 | | * the pair (nbBits, value) in the format: |
828 | | * format: |
829 | | * - Bits [0, 4) = nbBits |
830 | | * - Bits [4, 64 - nbBits) = 0 |
831 | | * - Bits [64 - nbBits, 64) = value |
832 | | * 2. The bitContainer is built from the upper bits and |
833 | | * right shifted. E.g. to add a new value of N bits |
834 | | * you right shift the bitContainer by N, then or in |
835 | | * the new value into the N upper bits. |
836 | | * 3. The bitstream has two bit containers. You can add |
837 | | * bits to the second container and merge them into |
838 | | * the first container. |
839 | | */ |
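 | | /* Worked packing example (illustrative, assuming a 64-bit size_t) :
 | |  * for nbBits = 5 and value = 22 (binary 10110), HUF_setNbBits() stores 5 in the
 | |  * low byte and HUF_setValue() ORs in 22 << (64 - 5), so the code sits in the top
 | |  * 5 bits of the HUF_CElt. HUF_addBits() then shifts the bit container right by 5
 | |  * and ORs in HUF_getValue(elt), appending the code at the container's top. */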
840 | | |
841 | 0 | #define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8) |
842 | | |
843 | | typedef struct { |
844 | | size_t bitContainer[2]; |
845 | | size_t bitPos[2]; |
846 | | |
847 | | BYTE* startPtr; |
848 | | BYTE* ptr; |
849 | | BYTE* endPtr; |
850 | | } HUF_CStream_t; |
851 | | |
852 | | /**! HUF_initCStream(): |
853 | | * Initializes the bitstream. |
854 | | * @returns 0 or an error code. |
855 | | */ |
856 | | static size_t HUF_initCStream(HUF_CStream_t* bitC, |
857 | | void* startPtr, size_t dstCapacity) |
858 | 0 | { |
859 | 0 | ZSTD_memset(bitC, 0, sizeof(*bitC)); |
860 | 0 | bitC->startPtr = (BYTE*)startPtr; |
861 | 0 | bitC->ptr = bitC->startPtr; |
862 | 0 | bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]); |
863 | 0 | if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall); |
864 | 0 | return 0; |
865 | 0 | } |
866 | | |
867 | | /*! HUF_addBits(): |
868 | | * Adds the symbol stored in HUF_CElt elt to the bitstream. |
869 | | * |
870 | | * @param elt The element we're adding. This is a (nbBits, value) pair. |
871 | | * See the HUF_CStream_t docs for the format. |
872 | | * @param idx Insert into the bitstream at this idx. |
873 | | * @param kFast This is a template parameter. If the bitstream is guaranteed |
874 | | * to have at least 4 unused bits after this call it may be 1, |
875 | | * otherwise it must be 0. HUF_addBits() is faster when fast is set. |
876 | | */ |
877 | | FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast) |
878 | 0 | { |
879 | 0 | assert(idx <= 1); |
880 | 0 | assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX); |
881 | | /* This is efficient on x86-64 with BMI2 because shrx |
882 | | * only reads the low 6 bits of the register. The compiler |
883 | | * knows this and elides the mask. When fast is set, |
884 | | * every operation can use the same value loaded from elt. |
885 | | */ |
886 | 0 | bitC->bitContainer[idx] >>= HUF_getNbBits(elt); |
887 | 0 | bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt); |
888 | | /* We only read the low 8 bits of bitC->bitPos[idx] so it |
889 | | * doesn't matter that the high bits have noise from the value. |
890 | | */ |
891 | 0 | bitC->bitPos[idx] += HUF_getNbBitsFast(elt); |
892 | 0 | assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); |
893 | | /* The last 4-bits of elt are dirty if fast is set, |
894 | | * so we must not be overwriting bits that have already been |
895 | | * inserted into the bit container. |
896 | | */ |
897 | | #if DEBUGLEVEL >= 1 |
898 | | { |
899 | | size_t const nbBits = HUF_getNbBits(elt); |
900 | | size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1; |
901 | | (void)dirtyBits; |
902 | | /* Middle bits are 0. */ |
903 | | assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0); |
904 | | /* We didn't overwrite any bits in the bit container. */ |
905 | | assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); |
906 | | (void)dirtyBits; |
907 | | } |
908 | | #endif |
909 | 0 | } |
910 | | |
911 | | FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC) |
912 | 0 | { |
913 | 0 | bitC->bitContainer[1] = 0; |
914 | 0 | bitC->bitPos[1] = 0; |
915 | 0 | } |
916 | | |
917 | | /*! HUF_mergeIndex1() : |
918 | | * Merges the bit container @ index 1 into the bit container @ index 0 |
919 | | * and zeros the bit container @ index 1. |
920 | | */ |
921 | | FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC) |
922 | 0 | { |
923 | 0 | assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER); |
924 | 0 | bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF); |
925 | 0 | bitC->bitContainer[0] |= bitC->bitContainer[1]; |
926 | 0 | bitC->bitPos[0] += bitC->bitPos[1]; |
927 | 0 | assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER); |
928 | 0 | } |
929 | | |
930 | | /*! HUF_flushBits() : |
931 | | * Flushes the bits in the bit container @ index 0. |
932 | | * |
933 | | * @post bitPos will be < 8. |
934 | | * @param kFast If kFast is set then we must know a-priori that |
935 | | * the bit container will not overflow. |
936 | | */ |
937 | | FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast) |
938 | 0 | { |
939 | | /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */ |
940 | 0 | size_t const nbBits = bitC->bitPos[0] & 0xFF; |
941 | 0 | size_t const nbBytes = nbBits >> 3; |
942 | | /* The top nbBits bits of bitContainer are the ones we need. */ |
943 | 0 | size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits); |
944 | | /* Mask bitPos to account for the bytes we consumed. */ |
945 | 0 | bitC->bitPos[0] &= 7; |
946 | 0 | assert(nbBits > 0); |
947 | 0 | assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8); |
948 | 0 | assert(bitC->ptr <= bitC->endPtr); |
949 | 0 | MEM_writeLEST(bitC->ptr, bitContainer); |
950 | 0 | bitC->ptr += nbBytes; |
951 | 0 | assert(!kFast || bitC->ptr <= bitC->endPtr); |
952 | 0 | if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; |
953 | | /* bitContainer doesn't need to be modified because the leftover |
954 | | * bits are already the top bitPos bits. And we don't care about |
955 | | * noise in the lower values. |
956 | | */ |
957 | 0 | } |
958 | | |
959 | | /*! HUF_endMark() |
960 | | * @returns The Huffman stream end mark: A 1-bit value = 1. |
961 | | */ |
962 | | static HUF_CElt HUF_endMark(void) |
963 | 0 | { |
964 | 0 | HUF_CElt endMark; |
965 | 0 | HUF_setNbBits(&endMark, 1); |
966 | 0 | HUF_setValue(&endMark, 1); |
967 | 0 | return endMark; |
968 | 0 | } |
969 | | |
970 | | /*! HUF_closeCStream() : |
971 | | * @return Size of CStream, in bytes, |
972 | | * or 0 if it could not fit into dstBuffer */ |
973 | | static size_t HUF_closeCStream(HUF_CStream_t* bitC) |
974 | 0 | { |
975 | 0 | HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0); |
976 | 0 | HUF_flushBits(bitC, /* kFast */ 0); |
977 | 0 | { |
978 | 0 | size_t const nbBits = bitC->bitPos[0] & 0xFF; |
979 | 0 | if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ |
980 | 0 | return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0); |
981 | 0 | } |
982 | 0 | } |
983 | | |
984 | | FORCE_INLINE_TEMPLATE void |
985 | | HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast) |
986 | 0 | { |
987 | 0 | HUF_addBits(bitCPtr, CTable[symbol], idx, fast); |
988 | 0 | } |
989 | | |
990 | | FORCE_INLINE_TEMPLATE void |
991 | | HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC, |
992 | | const BYTE* ip, size_t srcSize, |
993 | | const HUF_CElt* ct, |
994 | | int kUnroll, int kFastFlush, int kLastFast) |
995 | 0 | { |
996 | | /* Join to kUnroll */ |
997 | 0 | int n = (int)srcSize; |
998 | 0 | int rem = n % kUnroll; |
999 | 0 | if (rem > 0) { |
1000 | 0 | for (; rem > 0; --rem) { |
1001 | 0 | HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0); |
1002 | 0 | } |
1003 | 0 | HUF_flushBits(bitC, kFastFlush); |
1004 | 0 | } |
1005 | 0 | assert(n % kUnroll == 0); |
1006 | | |
1007 | | /* Join to 2 * kUnroll */ |
1008 | 0 | if (n % (2 * kUnroll)) { |
1009 | 0 | int u; |
1010 | 0 | for (u = 1; u < kUnroll; ++u) { |
1011 | 0 | HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1); |
1012 | 0 | } |
1013 | 0 | HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast); |
1014 | 0 | HUF_flushBits(bitC, kFastFlush); |
1015 | 0 | n -= kUnroll; |
1016 | 0 | } |
1017 | 0 | assert(n % (2 * kUnroll) == 0); |
1018 | |
1019 | 0 | for (; n>0; n-= 2 * kUnroll) { |
1020 | | /* Encode kUnroll symbols into the bitstream @ index 0. */ |
1021 | 0 | int u; |
1022 | 0 | for (u = 1; u < kUnroll; ++u) { |
1023 | 0 | HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1); |
1024 | 0 | } |
1025 | 0 | HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast); |
1026 | 0 | HUF_flushBits(bitC, kFastFlush); |
1027 | | /* Encode kUnroll symbols into the bitstream @ index 1. |
1028 | | * This allows us to start filling the bit container |
1029 | | * without any data dependencies. |
1030 | | */ |
1031 | 0 | HUF_zeroIndex1(bitC); |
1032 | 0 | for (u = 1; u < kUnroll; ++u) { |
1033 | 0 | HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1); |
1034 | 0 | } |
1035 | 0 | HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast); |
1036 | | /* Merge bitstream @ index 1 into the bitstream @ index 0 */ |
1037 | 0 | HUF_mergeIndex1(bitC); |
1038 | 0 | HUF_flushBits(bitC, kFastFlush); |
1039 | 0 | } |
1040 | 0 | assert(n == 0); |
1041 | |
1042 | 0 | } |
1043 | | |
1044 | | /** |
1045 | | * Returns a tight upper bound on the output space needed by Huffman |
1046 | | * with an 8-byte buffer to handle over-writes. If the output is at least |
1047 | | * this large we don't need to do bounds checks during Huffman encoding. |
1048 | | */ |
1049 | | static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog) |
1050 | 0 | { |
1051 | 0 | return ((srcSize * tableLog) >> 3) + 8; |
1052 | 0 | } |
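 | | /* Illustrative numbers for the bound above : srcSize = 1000 symbols at
 | |  * tableLog = 11 gives ((1000*11) >> 3) + 8 = 1375 + 8 = 1383 bytes. When
 | |  * dstSize reaches this bound, the kFastFlush encoding paths selected below
 | |  * can skip per-flush bounds checks. */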
1053 | | |
1054 | | |
1055 | | FORCE_INLINE_TEMPLATE size_t |
1056 | | HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, |
1057 | | const void* src, size_t srcSize, |
1058 | | const HUF_CElt* CTable) |
1059 | 0 | { |
1060 | 0 | U32 const tableLog = HUF_readCTableHeader(CTable).tableLog; |
1061 | 0 | HUF_CElt const* ct = CTable + 1; |
1062 | 0 | const BYTE* ip = (const BYTE*) src; |
1063 | 0 | BYTE* const ostart = (BYTE*)dst; |
1064 | 0 | BYTE* const oend = ostart + dstSize; |
1065 | 0 | HUF_CStream_t bitC; |
1066 | | |
1067 | | /* init */ |
1068 | 0 | if (dstSize < 8) return 0; /* not enough space to compress */ |
1069 | 0 | { BYTE* op = ostart; |
1070 | 0 | size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op)); |
1071 | 0 | if (HUF_isError(initErr)) return 0; } |
1072 | | |
1073 | 0 | if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11) |
1074 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0); |
1075 | 0 | else { |
1076 | 0 | if (MEM_32bits()) { |
1077 | 0 | switch (tableLog) { |
1078 | 0 | case 11: |
1079 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0); |
1080 | 0 | break; |
1081 | 0 | case 10: ZSTD_FALLTHROUGH; |
1082 | 0 | case 9: ZSTD_FALLTHROUGH; |
1083 | 0 | case 8: |
1084 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1); |
1085 | 0 | break; |
1086 | 0 | case 7: ZSTD_FALLTHROUGH; |
1087 | 0 | default: |
1088 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1); |
1089 | 0 | break; |
1090 | 0 | } |
1091 | 0 | } else { |
1092 | 0 | switch (tableLog) { |
1093 | 0 | case 11: |
1094 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0); |
1095 | 0 | break; |
1096 | 0 | case 10: |
1097 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1); |
1098 | 0 | break; |
1099 | 0 | case 9: |
1100 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0); |
1101 | 0 | break; |
1102 | 0 | case 8: |
1103 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0); |
1104 | 0 | break; |
1105 | 0 | case 7: |
1106 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0); |
1107 | 0 | break; |
1108 | 0 | case 6: ZSTD_FALLTHROUGH; |
1109 | 0 | default: |
1110 | 0 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1); |
1111 | 0 | break; |
1112 | 0 | } |
1113 | 0 | } |
1114 | 0 | } |
1115 | 0 | assert(bitC.ptr <= bitC.endPtr); |
1116 | |
1117 | 0 | return HUF_closeCStream(&bitC); |
1118 | 0 | } |
1119 | | |
1120 | | #if DYNAMIC_BMI2 |
1121 | | |
1122 | | static BMI2_TARGET_ATTRIBUTE size_t |
1123 | | HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, |
1124 | | const void* src, size_t srcSize, |
1125 | | const HUF_CElt* CTable) |
1126 | 0 | { |
1127 | 0 | return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); |
1128 | 0 | } |
1129 | | |
1130 | | static size_t |
1131 | | HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, |
1132 | | const void* src, size_t srcSize, |
1133 | | const HUF_CElt* CTable) |
1134 | 0 | { |
1135 | 0 | return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); |
1136 | 0 | } |
1137 | | |
1138 | | static size_t |
1139 | | HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, |
1140 | | const void* src, size_t srcSize, |
1141 | | const HUF_CElt* CTable, const int flags) |
1142 | 0 | { |
1143 | 0 | if (flags & HUF_flags_bmi2) { |
1144 | 0 | return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); |
1145 | 0 | } |
1146 | 0 | return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); |
1147 | 0 | } |
1148 | | |
1149 | | #else |
1150 | | |
1151 | | static size_t |
1152 | | HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, |
1153 | | const void* src, size_t srcSize, |
1154 | | const HUF_CElt* CTable, const int flags) |
1155 | | { |
1156 | | (void)flags; |
1157 | | return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); |
1158 | | } |
1159 | | |
1160 | | #endif |
1161 | | |
1162 | | size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) |
1163 | 0 | { |
1164 | 0 | return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); |
1165 | 0 | } |
1166 | | |
1167 | | static size_t |
1168 | | HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, |
1169 | | const void* src, size_t srcSize, |
1170 | | const HUF_CElt* CTable, int flags) |
1171 | 0 | { |
1172 | 0 | size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ |
1173 | 0 | const BYTE* ip = (const BYTE*) src; |
1174 | 0 | const BYTE* const iend = ip + srcSize; |
1175 | 0 | BYTE* const ostart = (BYTE*) dst; |
1176 | 0 | BYTE* const oend = ostart + dstSize; |
1177 | 0 | BYTE* op = ostart; |
1178 | |
1179 | 0 | if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ |
1180 | 0 | if (srcSize < 12) return 0; /* no saving possible : too small input */ |
1181 | 0 | op += 6; /* jumpTable */ |
1182 | |
1183 | 0 | assert(op <= oend); |
1184 | 0 | { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); |
1185 | 0 | if (cSize == 0 || cSize > 65535) return 0; |
1186 | 0 | MEM_writeLE16(ostart, (U16)cSize); |
1187 | 0 | op += cSize; |
1188 | 0 | } |
1189 | | |
1190 | 0 | ip += segmentSize; |
1191 | 0 | assert(op <= oend); |
1192 | 0 | { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); |
1193 | 0 | if (cSize == 0 || cSize > 65535) return 0; |
1194 | 0 | MEM_writeLE16(ostart+2, (U16)cSize); |
1195 | 0 | op += cSize; |
1196 | 0 | } |
1197 | | |
1198 | 0 | ip += segmentSize; |
1199 | 0 | assert(op <= oend); |
1200 | 0 | { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); |
1201 | 0 | if (cSize == 0 || cSize > 65535) return 0; |
1202 | 0 | MEM_writeLE16(ostart+4, (U16)cSize); |
1203 | 0 | op += cSize; |
1204 | 0 | } |
1205 | | |
1206 | 0 | ip += segmentSize; |
1207 | 0 | assert(op <= oend); |
1208 | 0 | assert(ip <= iend); |
1209 | 0 | { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) ); |
1210 | 0 | if (cSize == 0 || cSize > 65535) return 0; |
1211 | 0 | op += cSize; |
1212 | 0 | } |
1213 | | |
1214 | 0 | return (size_t)(op-ostart); |
1215 | 0 | } |
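 | | /* Output layout produced above (sketch, as implied by the writes to ostart) :
 | |  *   [LE16 size1][LE16 size2][LE16 size3][stream 1][stream 2][stream 3][stream 4]
 | |  * Streams 1-3 each encode segmentSize = (srcSize+3)/4 symbols; stream 4 encodes
 | |  * the remainder and its size is implied by the total compressed size. Each of
 | |  * the first three sizes must fit in 16 bits, hence the (cSize > 65535) checks. */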
1216 | | |
1217 | | size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) |
1218 | 0 | { |
1219 | 0 | return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); |
1220 | 0 | } |
1221 | | |
1222 | | typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; |
1223 | | |
1224 | | static size_t HUF_compressCTable_internal( |
1225 | | BYTE* const ostart, BYTE* op, BYTE* const oend, |
1226 | | const void* src, size_t srcSize, |
1227 | | HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags) |
1228 | 0 | { |
1229 | 0 | size_t const cSize = (nbStreams==HUF_singleStream) ? |
1230 | 0 | HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) : |
1231 | 0 | HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags); |
1232 | 0 | if (HUF_isError(cSize)) { return cSize; } |
1233 | 0 | if (cSize==0) { return 0; } /* uncompressible */ |
1234 | 0 | op += cSize; |
1235 | | /* check compressibility */ |
1236 | 0 | assert(op >= ostart); |
1237 | 0 | if ((size_t)(op-ostart) >= srcSize-1) { return 0; } |
1238 | 0 | return (size_t)(op-ostart); |
1239 | 0 | } |
1240 | | |
1241 | | typedef struct { |
1242 | | unsigned count[HUF_SYMBOLVALUE_MAX + 1]; |
1243 | | HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)]; |
1244 | | union { |
1245 | | HUF_buildCTable_wksp_tables buildCTable_wksp; |
1246 | | HUF_WriteCTableWksp writeCTable_wksp; |
1247 | | U32 hist_wksp[HIST_WKSP_SIZE_U32]; |
1248 | | } wksps; |
1249 | | } HUF_compress_tables_t; |
1250 | | |
1251 | 0 | #define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 |
1252 | 0 | #define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ |
1253 | | |
1254 | | unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue) |
1255 | 0 | { |
1256 | 0 | unsigned cardinality = 0; |
1257 | 0 | unsigned i; |
1258 | |
1259 | 0 | for (i = 0; i < maxSymbolValue + 1; i++) { |
1260 | 0 | if (count[i] != 0) cardinality += 1; |
1261 | 0 | } |
1262 | |
1263 | 0 | return cardinality; |
1264 | 0 | } |
1265 | | |
1266 | | unsigned HUF_minTableLog(unsigned symbolCardinality) |
1267 | 0 | { |
1268 | 0 | U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1; |
1269 | 0 | return minBitsSymbols; |
1270 | 0 | } |
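 | | /* Illustrative : for symbolCardinality = 40, ZSTD_highbit32(40) = 5, so
 | |  * HUF_minTableLog() returns 6 ; a depth-5 tree holds at most 32 leaves,
 | |  * so 40 distinct symbols indeed need at least 6 bits. */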
1271 | | |
1272 | | unsigned HUF_optimalTableLog( |
1273 | | unsigned maxTableLog, |
1274 | | size_t srcSize, |
1275 | | unsigned maxSymbolValue, |
1276 | | void* workSpace, size_t wkspSize, |
1277 | | HUF_CElt* table, |
1278 | | const unsigned* count, |
1279 | | int flags) |
1280 | 0 | { |
1281 | 0 | assert(srcSize > 1); /* Not supported, RLE should be used instead */ |
1282 | 0 | assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables)); |
1283 | |
1284 | 0 | if (!(flags & HUF_flags_optimalDepth)) { |
1285 | | /* cheap evaluation, based on FSE */ |
1286 | 0 | return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); |
1287 | 0 | } |
1288 | | |
1289 | 0 | { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp); |
1290 | 0 | size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp); |
1291 | 0 | size_t hSize, newSize; |
1292 | 0 | const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue); |
1293 | 0 | const unsigned minTableLog = HUF_minTableLog(symbolCardinality); |
1294 | 0 | size_t optSize = ((size_t) ~0) - 1; |
1295 | 0 | unsigned optLog = maxTableLog, optLogGuess; |
1296 | |
1297 | 0 | DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize); |
1298 | | |
1299 | | /* Search until size increases */ |
1300 | 0 | for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { |
1301 | 0 | DEBUGLOG(7, "checking for huffLog=%u", optLogGuess); |
1302 | |
1303 | 0 | { size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize); |
1304 | 0 | if (ERR_isError(maxBits)) continue; |
1305 | | |
1306 | 0 | if (maxBits < optLogGuess && optLogGuess > minTableLog) break; |
1307 | | |
1308 | 0 | hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize); |
1309 | 0 | } |
1310 | | |
1311 | 0 | if (ERR_isError(hSize)) continue; |
1312 | | |
1313 | 0 | newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize; |
1314 | |
1315 | 0 | if (newSize > optSize + 1) { |
1316 | 0 | break; |
1317 | 0 | } |
1318 | | |
1319 | 0 | if (newSize < optSize) { |
1320 | 0 | optSize = newSize; |
1321 | 0 | optLog = optLogGuess; |
1322 | 0 | } |
1323 | 0 | } |
1324 | 0 | assert(optLog <= HUF_TABLELOG_MAX); |
1325 | 0 | return optLog; |
1326 | 0 | } |
1327 | 0 | } |
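/* Reader-added illustration (sizes invented) of the depth search above when
 * HUF_flags_optimalDepth is set: each candidate depth is scored as serialized
 * header size plus estimated payload size, the smallest total wins, and the
 * loop stops once a candidate is more than one byte worse than the best so far.
 *   optLogGuess = 5 : hSize 18 + payload 4210 = 4228  -> new best, optLog = 5
 *   optLogGuess = 6 : hSize 24 + payload 4180 = 4204  -> new best, optLog = 6
 *   optLogGuess = 7 : hSize 33 + payload 4179 = 4212  -> 4212 > 4204 + 1, stop
 *   returns 6 */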
1328 | | |
1329 | | /* HUF_compress_internal() :
1330 | | * `workSpace` is re-aligned internally (to ZSTD_ALIGNOF(size_t)),
1331 | | * and a buffer of HUF_WORKSPACE_SIZE bytes is always large enough */
1332 | | static size_t |
1333 | | HUF_compress_internal (void* dst, size_t dstSize, |
1334 | | const void* src, size_t srcSize, |
1335 | | unsigned maxSymbolValue, unsigned huffLog, |
1336 | | HUF_nbStreams_e nbStreams, |
1337 | | void* workSpace, size_t wkspSize, |
1338 | | HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags) |
1339 | 0 | { |
1340 | 0 | HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t)); |
1341 | 0 | BYTE* const ostart = (BYTE*)dst; |
1342 | 0 | BYTE* const oend = ostart + dstSize; |
1343 | 0 | BYTE* op = ostart; |
1344 | |
1345 | 0 | DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize); |
1346 | 0 | HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE); |
1347 | | |
1348 | | /* checks & inits */ |
1349 | 0 | if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall); |
1350 | 0 | if (!srcSize) return 0; /* Uncompressed */ |
1351 | 0 | if (!dstSize) return 0; /* cannot fit anything within dst budget */ |
1352 | 0 | if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ |
1353 | 0 | if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); |
1354 | 0 | if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); |
1355 | 0 | if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; |
1356 | 0 | if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; |
1357 | | |
1358 | | /* Heuristic : If old table is valid, use it for small inputs */ |
1359 | 0 | if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) { |
1360 | 0 | return HUF_compressCTable_internal(ostart, op, oend, |
1361 | 0 | src, srcSize, |
1362 | 0 | nbStreams, oldHufTable, flags); |
1363 | 0 | } |
1364 | | |
1365 | | /* If uncompressible data is suspected, do a smaller sampling first */ |
1366 | 0 | DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); |
1367 | 0 | if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { |
1368 | 0 | size_t largestTotal = 0; |
1369 | 0 | DEBUGLOG(5, "input suspected incompressible : sampling to check"); |
1370 | 0 | { unsigned maxSymbolValueBegin = maxSymbolValue; |
1371 | 0 | CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); |
1372 | 0 | largestTotal += largestBegin; |
1373 | 0 | } |
1374 | 0 | { unsigned maxSymbolValueEnd = maxSymbolValue; |
1375 | 0 | CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); |
1376 | 0 | largestTotal += largestEnd; |
1377 | 0 | } |
1378 | 0 | if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */ |
1379 | 0 | } |
1380 | | |
1381 | | /* Scan input and build symbol stats */ |
1382 | 0 | { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) ); |
1383 | 0 | if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ |
1384 | 0 | if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ |
1385 | 0 | } |
1386 | 0 | DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1)); |
1387 | | |
1388 | | /* Check validity of previous table */ |
1389 | 0 | if ( repeat |
1390 | 0 | && *repeat == HUF_repeat_check |
1391 | 0 | && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) { |
1392 | 0 | *repeat = HUF_repeat_none; |
1393 | 0 | } |
1394 | | /* Heuristic : use existing table for small inputs */ |
1395 | 0 | if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) { |
1396 | 0 | return HUF_compressCTable_internal(ostart, op, oend, |
1397 | 0 | src, srcSize, |
1398 | 0 | nbStreams, oldHufTable, flags); |
1399 | 0 | } |
1400 | | |
1401 | | /* Build Huffman Tree */ |
1402 | 0 | huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags); |
1403 | 0 | { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, |
1404 | 0 | maxSymbolValue, huffLog, |
1405 | 0 | &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); |
1406 | 0 | CHECK_F(maxBits); |
1407 | 0 | huffLog = (U32)maxBits; |
1408 | 0 | DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1)); |
1409 | 0 | } |
1410 | | |
1411 | | /* Write table description header */ |
1412 | 0 | { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog, |
1413 | 0 | &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) ); |
1414 | | /* Check if using previous huffman table is beneficial */ |
1415 | 0 | if (repeat && *repeat != HUF_repeat_none) { |
1416 | 0 | size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); |
1417 | 0 | size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue); |
1418 | 0 | if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { |
1419 | 0 | return HUF_compressCTable_internal(ostart, op, oend, |
1420 | 0 | src, srcSize, |
1421 | 0 | nbStreams, oldHufTable, flags); |
1422 | 0 | } } |
1423 | | |
1424 | | /* Use the new huffman table */ |
1425 | 0 | if (hSize + 12ul >= srcSize) { return 0; } |
1426 | 0 | op += hSize; |
1427 | 0 | if (repeat) { *repeat = HUF_repeat_none; } |
1428 | 0 | if (oldHufTable) |
1429 | 0 | ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ |
1430 | 0 | } |
1431 | 0 | return HUF_compressCTable_internal(ostart, op, oend, |
1432 | 0 | src, srcSize, |
1433 | 0 | nbStreams, table->CTable, flags); |
1434 | 0 | } |
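/* Reader-added arithmetic for the two early-exit heuristics used above; both
 * compare the most frequent symbol's count against roughly 1/128 of the bytes
 * examined, plus a small constant.
 *   Sampling check (HUF_flags_suspectUncompressible, srcSize >= 40960):
 *     two 4096-byte samples are histogrammed; compression is abandoned when
 *     largestTotal <= ((2*4096) >> 7) + 4 == 68, i.e. the top symbol of each
 *     sample covers well under 1% of it.
 *   Full-histogram check:
 *     largest == srcSize            -> the block is a single repeated byte,
 *                                      signalled by returning 1;
 *     largest <= (srcSize >> 7) + 4 -> e.g. srcSize = 20000 gives a threshold
 *                                      of 160, so a top symbol seen 160 times
 *                                      or fewer marks the block incompressible. */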
1435 | | |
1436 | | size_t HUF_compress1X_repeat (void* dst, size_t dstSize, |
1437 | | const void* src, size_t srcSize, |
1438 | | unsigned maxSymbolValue, unsigned huffLog, |
1439 | | void* workSpace, size_t wkspSize, |
1440 | | HUF_CElt* hufTable, HUF_repeat* repeat, int flags) |
1441 | 0 | { |
1442 | 0 | DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize); |
1443 | 0 | return HUF_compress_internal(dst, dstSize, src, srcSize, |
1444 | 0 | maxSymbolValue, huffLog, HUF_singleStream, |
1445 | 0 | workSpace, wkspSize, hufTable, |
1446 | 0 | repeat, flags); |
1447 | 0 | } |
1448 | | |
1449 | | /* HUF_compress4X_repeat():
1450 | | * compress input using 4 streams.
1451 | | * quickly checks whether the input looks compressible, and gives up early if not.
1452 | | * reuses an existing huffman compression table when one is provided and still valid */
1453 | | size_t HUF_compress4X_repeat (void* dst, size_t dstSize, |
1454 | | const void* src, size_t srcSize, |
1455 | | unsigned maxSymbolValue, unsigned huffLog, |
1456 | | void* workSpace, size_t wkspSize, |
1457 | | HUF_CElt* hufTable, HUF_repeat* repeat, int flags) |
1458 | 0 | { |
1459 | 0 | DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize); |
1460 | 0 | return HUF_compress_internal(dst, dstSize, src, srcSize, |
1461 | 0 | maxSymbolValue, huffLog, HUF_fourStreams, |
1462 | 0 | workSpace, wkspSize, |
1463 | 0 | hufTable, repeat, flags); |
1464 | 0 | } |
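/* Reader-added usage sketch for HUF_compress4X_repeat(), assuming the internal
 * header lib/common/huf.h is reachable; buffer sizes, contents and the static
 * (non thread-safe) scratch storage are illustrative only. */
#include "../common/huf.h"

size_t exampleCompressBlock(void* dst, size_t dstCapacity,
                            const void* src, size_t srcSize)
{
    /* HUF_WORKSPACE_SIZE bytes is always a sufficient workspace (see the
     * comment on HUF_compress_internal() above). */
    static size_t workSpace[HUF_WORKSPACE_SIZE / sizeof(size_t)];
    static HUF_CElt hufTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
    HUF_repeat repeat = HUF_repeat_none;   /* no previous table to reuse */

    size_t const cSize = HUF_compress4X_repeat(dst, dstCapacity, src, srcSize,
                                               HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                                               workSpace, sizeof(workSpace),
                                               hufTable, &repeat, /* flags */ 0);
    if (HUF_isError(cSize)) return cSize;  /* genuine error */
    if (cSize == 0) return 0;              /* not compressible: caller stores the block raw */
    /* On success, hufTable holds the table that was actually used, which a
     * caller may elect to offer for reuse on a following block. */
    return cSize;
}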