/src/zstd/lib/compress/zstd_fast.c
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"

static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
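    /* Editor's note (an assumption, based on how ZSTD_writeTaggedIndex and
     * ZSTD_comparePackedTags are used in this file): CDict tables use a
     * "short cache" layout, where each entry packs a position index together
     * with the low ZSTD_SHORT_CACHE_TAG_BITS bits of its hash, so later
     * lookups can reject most non-matching candidates from the tag alone,
     * without dereferencing likely-cold dictionary content. */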
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
     * Feel free to remove this assert if there's a good reason! */
    assert(dtlm == ZSTD_dtlm_full);

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        {   size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
            ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr);   }

        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {  /* not yet filled */
                    ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
    }   }   }   }
}

static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
     * Feel free to remove this assert if there's a good reason! */
    assert(dtlm == ZSTD_dtlm_fast);

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
        hashTable[hash0] = curr;
        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hash] == 0) {  /* not yet filled */
                    hashTable[hash] = curr + p;
    }   }   }   }
}

void ZSTD_fillHashTable(ZSTD_MatchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm,
                        ZSTD_tableFillPurpose_e tfp)
{
    if (tfp == ZSTD_tfp_forCDict) {
        ZSTD_fillHashTableForCDict(ms, end, dtlm);
    } else {
        ZSTD_fillHashTableForCCtx(ms, end, dtlm);
    }
}


typedef int (*ZSTD_match4Found) (const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit);
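
/* Two interchangeable implementations of the "does the 4-byte candidate match"
 * check follow: a branchless variant (cmov) and a branching one.
 * ZSTD_compressBlock_fast() chooses between them through the useCmov
 * parameter, based on how predictable the "candidate within range" test is
 * expected to be (see the windowLog heuristic there). */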

static int
ZSTD_match4Found_cmov(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit)
{
    /* Array of ~random data, should have low probability of matching data.
     * Load from here if the index is invalid.
     * Used to avoid unpredictable branches. */
    static const BYTE dummy[] = {0x12,0x34,0x56,0x78};

    /* matchIdx >= idxLowLimit is a (somewhat) unpredictable branch.
     * However, the expression below compiles into a conditional move.
     */
    const BYTE* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy);
    /* Note: this used to be written as : return test1 && test2;
     * Unfortunately, once inlined, these tests become branches,
     * in which case it becomes critical that they are executed in the right order (test1 then test2).
     * So we have to write these tests in a specific manner to ensure their ordering.
     */
    if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) return 0;
    /* force ordering of these tests, which matters once the function is inlined, as they become branches */
#if defined(__GNUC__)
    __asm__("");
#endif
    return matchIdx >= idxLowLimit;
}

static int
ZSTD_match4Found_branch(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit)
{
    /* using a branch instead of a cmov,
     * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true,
     * aka almost all candidates are within range */
    U32 mval;
    if (matchIdx >= idxLowLimit) {
        mval = MEM_read32(matchAddress);
    } else {
        mval = MEM_read32(currentPtr) ^ 1; /* guaranteed to not match. */
    }

    return (MEM_read32(currentPtr) == mval);
}


/**
 * If you squint hard enough (and ignore repcodes), the search operation at any
 * given position is broken into 4 stages:
 *
 * 1. Hash   (map position to hash value via input read)
 * 2. Lookup (map hash val to index via hashtable read)
 * 3. Load   (map index to value at that position via input read)
 * 4. Compare
 *
 * Each of these steps involves a memory read at an address which is computed
 * from the previous step. This means these steps must be sequenced and their
 * latencies are cumulative.
 *
 * Rather than do 1->2->3->4 sequentially for a single position before moving
 * onto the next, this implementation interleaves these operations across the
 * next few positions:
 *
 * R = Repcode Read & Compare
 * H = Hash
 * T = Table Lookup
 * M = Match Read & Compare
 *
 * Pos | Time -->
 * ----+-------------------
 * N   | ... M
 * N+1 | ...   TM
 * N+2 |    R H   T M
 * N+3 |      H      TM
 * N+4 |    R H         T M
 * N+5 |      H            ...
 * N+6 |    R               ...
 *
 * This is very much analogous to the pipelining of execution in a CPU. And just
 * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
 * branch).
 *
 * When this happens, we throw away our current state, and do the following prep
 * to re-enter the loop:
 *
 * Pos | Time -->
 * ----+-------------------
 * N   | H T
 * N+1 |   H
 *
 * This is also the work we do at the beginning to enter the loop initially.
 */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_noDict_generic(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls, int useCmov)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; /* min 2 */
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;

    const BYTE* anchor = istart;
    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;

    U32 rep_offset1 = rep[0];
    U32 rep_offset2 = rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 matchIdx; /* match idx for ip0 */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;

    /* ip0 and ip1 are always adjacent. The targetLength skipping and
     * uncompressibility acceleration are applied to every other position,
     * matching the behavior of #1562. step therefore represents the gap
     * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));
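    /* Note: step starts at stepSize and grows by 1 each time the scan
     * advances kStepIncr bytes without finding a match (see "calculate step"
     * below), so incompressible regions are skipped at an increasing rate;
     * step resets to stepSize at _start after every match. */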
    const ZSTD_match4Found matchFound = useCmov ? ZSTD_match4Found_cmov : ZSTD_match4Found_branch;

    DEBUGLOG(5, "ZSTD_compressBlock_fast_noDict_generic");
    ip0 += (ip0 == prefixStart);
    {   U32 const curr = (U32)(ip0 - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
        U32 const maxRep = curr - windowLow;
        if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
        if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    matchIdx = hashTable[hash0];

    do {
        /* load repcode match for ip[2] */
        const U32 rval = MEM_read32(ip2 - rep_offset1);

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        /* check repcode at ip[2] */
        if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
            ip0 = ip2;
            match0 = ip0 - rep_offset1;
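            /* Branchless backward extension by at most one byte: mLength is 1
             * if the byte just before the match also matches, 0 otherwise;
             * the 4 verified bytes are added below. */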
            mLength = ip0[-1] == match0[-1];
            ip0 -= mLength;
            match0 -= mLength;
            offcode = REPCODE1_TO_OFFBASE;
            mLength += 4;

            /* Write next hash table entry: it's already calculated.
             * This write is known to be safe because ip1 is before the
             * repcode (ip2). */
            hashTable[hash1] = (U32)(ip1 - base);

            goto _match;
        }

        if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
            /* Write next hash table entry (it's already calculated).
             * This write is known to be safe because ip1 == ip0 + 1,
             * so searching will resume after ip1 */
            hashTable[hash1] = (U32)(ip1 - base);

            goto _offset;
        }

        /* lookup ip[1] */
        matchIdx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) {
            /* Write next hash table entry, since it's already calculated */
            if (step <= 4) {
                /* Avoid writing an index if it's >= position where search will resume.
                 * The minimum possible match has length 4, so search can resume at ip0 + 4.
                 */
                hashTable[hash1] = (U32)(ip1 - base);
            }
            goto _offset;
        }

        /* lookup ip[1] */
        matchIdx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions one could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* When the repcodes are outside of the prefix, we set them to zero before the loop.
     * When the offsets are still zero, we need to restore them after the block to have a correct
     * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
     * offsets were invalid. We need to figure out which offset to refill with.
     *     - If both offsets are zero they are in the same order.
     *     - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
     *     - If only one is zero, we need to decide which offset to restore.
     *         - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
     *         - It is impossible for rep_offset2 to be non-zero.
     *
     * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
     * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
     */
    offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;

    /* save reps for next block */
    rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
    rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, matchIdx */

    /* Compute the offset code. */
    match0 = base + matchIdx;
    rep_offset2 = rep_offset1;
    rep_offset1 = (U32)(ip0-match0);
    offcode = OFFSET_TO_OFFBASE(rep_offset1);
    mLength = 4;

    /* Count the backwards match length. */
    while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
        ip0--;
        match0--;
        mLength++;
    }

_match: /* Requires: ip0, match0, offcode */

    /* Count the forward length. */
    mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
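        /* Note: only two interior positions get indexed here (current0+2 and
         * ip0-2), keeping table maintenance cheap after long matches. */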

        if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
            while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
                { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += rLength;
                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
                anchor = ip0;
                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
    }   }   }

    goto _start;
}


#define ZSTD_GEN_FAST_FN(dictMode, mml, cmov)                                                            \
    static size_t ZSTD_compressBlock_fast_##dictMode##_##mml##_##cmov(                                   \
            ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
            void const* src, size_t srcSize)                                                             \
    {                                                                                                    \
        return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mml, cmov); \
    }

ZSTD_GEN_FAST_FN(noDict, 4, 1)
ZSTD_GEN_FAST_FN(noDict, 5, 1)
ZSTD_GEN_FAST_FN(noDict, 6, 1)
ZSTD_GEN_FAST_FN(noDict, 7, 1)

ZSTD_GEN_FAST_FN(noDict, 4, 0)
ZSTD_GEN_FAST_FN(noDict, 5, 0)
ZSTD_GEN_FAST_FN(noDict, 6, 0)
ZSTD_GEN_FAST_FN(noDict, 7, 0)

size_t ZSTD_compressBlock_fast(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mml = ms->cParams.minMatch;
    /* use cmov when "candidate in range" branch is likely unpredictable */
    int const useCmov = ms->cParams.windowLog < 19;
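    /* Heuristic rationale: with a small window, candidate indexes fall out of
     * range more often, making the range check hard to predict and favoring
     * the branchless variant; with a large window, almost all candidates are
     * in range, so the plain branch is cheaper (see ZSTD_match4Found_branch). */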
    assert(ms->dictMatchState == NULL);
    if (useCmov) {
        switch(mml)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
        }
    } else {
        /* use a branch instead */
        switch(mml)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
        }
    }
}

FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
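    /* note: !(targetLength) clamps stepSize to a minimum of 1; unlike the
     * noDict variant above, there is no extra "+ 1" here. */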
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip0 = istart;
    const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
    const BYTE* anchor = istart;
    const U32 prefixStartIndex = ms->window.dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];

    const ZSTD_MatchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashTable = dms->hashTable;
    const U32 dictStartIndex = dms->window.dictLimit;
    const BYTE* const dictBase = dms->window.base;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const BYTE* const dictEnd = dms->window.nextSrc;
    const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
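    /* dictIndexDelta translates dict-table indexes into this window's index
     * space: a dict match at dictMatchIndex behaves as if it sat at local
     * index (dictMatchIndex + dictIndexDelta), just before the prefix. */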
    const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart);
    const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;

    /* if a dictionary is still attached, it necessarily means that
     * it is within window size. So we just check it. */
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    assert(endIndex - prefixStartIndex <= maxDistance);
    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    /* ensure there will be no underflow
     * when translating a dict index into a local index */
    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

    if (ms->prefetchCDictTables) {
        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
        PREFETCH_AREA(dictHashTable, hashTableBytes);
    }

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
    ip0 += (dictAndPrefixLength == 0);
    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Outer search loop */
    assert(stepSize >= 1);
    while (ip1 <= ilimit) {   /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
        size_t mLength;
        size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);

        size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
        U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
        int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);
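        /* dictTagsMatch is a cheap pre-filter: if the stored tag disagrees
         * with this position's hash tag, the dict candidate is rejected
         * without touching (likely cold) dictionary content. */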

        U32 matchIndex = hashTable[hash0];
        U32 curr = (U32)(ip0 - base);
        size_t step = stepSize;
        const size_t kStepIncr = 1 << kSearchStrength;
        const BYTE* nextStep = ip0 + kStepIncr;

        /* Inner search loop */
        while (1) {
            const BYTE* match = base + matchIndex;
            const U32 repIndex = curr + 1 - offset_1;
            const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
            const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
            size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
            hashTable[hash0] = curr;   /* update hash table */

            if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex))
                && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
                const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
                ip0++;
                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
                break;
            }

            if (dictTagsMatch) {
                /* Found a possible dict match */
                const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
                const BYTE* dictMatch = dictBase + dictMatchIndex;
                if (dictMatchIndex > dictStartIndex &&
                    MEM_read32(dictMatch) == MEM_read32(ip0)) {
                    /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
                    if (matchIndex <= prefixStartIndex) {
                        U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
                        mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
                        while (((ip0 > anchor) & (dictMatch > dictStart))
                             && (ip0[-1] == dictMatch[-1])) {
                            ip0--;
                            dictMatch--;
                            mLength++;
                        } /* catch up */
                        offset_2 = offset_1;
                        offset_1 = offset;
                        ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                        break;
                    }
                }
            }

            if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex)) {
                /* found a regular match of size >= 4 */
                U32 const offset = (U32) (ip0 - match);
                mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
                while (((ip0 > anchor) & (match > prefixStart))
                     && (ip0[-1] == match[-1])) {
                    ip0--;
                    match--;
                    mLength++;
                } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                break;
            }

            /* Prepare for next iteration */
            dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
            dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
            matchIndex = hashTable[hash1];

            if (ip1 >= nextStep) {
                step++;
                nextStep += kStepIncr;
            }
            ip0 = ip1;
            ip1 = ip1 + step;
            if (ip1 > ilimit) goto _cleanup;

            curr = (U32)(ip0 - base);
            hash0 = hash1;
        }   /* end inner search loop */

        /* match found */
        assert(mLength);
        ip0 += mLength;
        anchor = ip0;

        if (ip0 <= ilimit) {
            /* Fill Table */
            assert(base+curr+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

            /* check immediate repcode */
            while (ip0 <= ilimit) {
                U32 const current2 = (U32)(ip0-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                        dictBase - dictIndexDelta + repIndex2 :
                        base + repIndex2;
                if ( (ZSTD_index_overlap_check(prefixStartIndex, repIndex2))
                   && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
                    ip0 += repLength2;
                    anchor = ip0;
                    continue;
                }
                break;
            }
        }

        /* Prepare for next iteration */
        assert(ip0 == anchor);
        ip1 = ip0 + stepSize;
    }

_cleanup:
    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)

size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState != NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
    }
}


static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;   /* min 2 */
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* anchor = istart;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;


    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 idx; /* match idx for ip0 */
    const BYTE* idxBase; /* base pointer for idx */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;
    const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */

    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);

    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);

    {   U32 const curr = (U32)(ip0 - base);
        U32 const maxRep = curr - dictStartIndex;
        if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
        if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    idx = hashTable[hash0];
    idxBase = idx < prefixStartIndex ? dictBase : base;
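    /* idx may point into the external-dict segment or the current prefix;
     * idxBase selects whichever base makes idxBase + idx dereferenceable. */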

    do {
        {   /* load repcode match for ip[2] */
            U32 const current2 = (U32)(ip2 - base);
            U32 const repIndex = current2 - offset_1;
            const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
            U32 rval;
            if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
               & (offset_1 > 0) ) {
                rval = MEM_read32(repBase + repIndex);
            } else {
                rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
            }

            /* write back hash table entry */
            current0 = (U32)(ip0 - base);
            hashTable[hash0] = current0;

            /* check repcode at ip[2] */
            if (MEM_read32(ip2) == rval) {
                ip0 = ip2;
                match0 = repBase + repIndex;
                matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                assert((match0 != prefixStart) & (match0 != dictStart));
                mLength = ip0[-1] == match0[-1];
                ip0 -= mLength;
                match0 -= mLength;
                offcode = REPCODE1_TO_OFFBASE;
                mLength += 4;
                goto _match;
        }   }

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions we could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
     * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
    offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved1;
    rep[1] = offset_2 ? offset_2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, idx, idxBase */

    /* Compute the offset code. */
    {   U32 const offset = current0 - idx;
        const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
        matchEnd = idx < prefixStartIndex ? dictEnd : iend;
        match0 = idxBase + idx;
        offset_2 = offset_1;
        offset_1 = offset;
        offcode = OFFSET_TO_OFFBASE(offset);
        mLength = 4;

        /* Count the backwards match length. */
        while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
            ip0--;
            match0--;
            mLength++;
    }   }

_match: /* Requires: ip0, match0, offcode, matchEnd */

    /* Count the forward length. */
    assert(matchEnd != 0);
    mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* write next hash table entry */
    if (ip1 < ip0) {
        hashTable[hash1] = (U32)(ip1 - base);
    }

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

        while (ip0 <= ilimit) {
            U32 const repIndex2 = (U32)(ip0-base) - offset_2;
            const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
            if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) & (offset_2 > 0))
               && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
                const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }   /* swap offset_2 <=> offset_1 */
                ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += repLength2;
                anchor = ip0;
                continue;
            }
            break;
    }   }

    goto _start;
}

ZSTD_GEN_FAST_FN(extDict, 4, 0)
ZSTD_GEN_FAST_FN(extDict, 5, 0)
ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)

size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
    }
}