/src/zstd/lib/compress/zstd_double_fast.c
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"

#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR

static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms,
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashLarge = ms->hashTable;
    U32  const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
    U32  const mls = cParams->minMatch;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash tables.
     * Insert the other positions into the large hash table if their entry
     * is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls);
            size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8);
            if (i == 0) {
                ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i);
            }
            if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {
                ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i);
            }
            /* Only load extra positions for ZSTD_dtlm_full */
            if (dtlm == ZSTD_dtlm_fast)
                break;
    }   }
}
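
/* Rough sketch of the tagged-index layout used by the CDict path above
 * (the actual helpers live in zstd_compress_internal.h): each table entry
 * packs the position index into the upper bits and a short hash tag into the
 * low ZSTD_SHORT_CACHE_TAG_BITS bits, roughly:
 *
 *     entry = (index << ZSTD_SHORT_CACHE_TAG_BITS) | (hashAndTag & tagMask);
 *
 * (tagMask being the low-bit mask), so later lookups can reject most false
 * candidates by comparing tags (ZSTD_comparePackedTags) before touching cold
 * dictionary memory. */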

static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms,
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashLarge = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash tables.
     * Insert the other positions into the large hash table if their entry
     * is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
            if (i == 0)
                hashSmall[smHash] = curr + i;
            if (i == 0 || hashLarge[lgHash] == 0)
                hashLarge[lgHash] = curr + i;
            /* Only load extra positions for ZSTD_dtlm_full */
            if (dtlm == ZSTD_dtlm_fast)
                break;
    }   }
}

void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm,
                        ZSTD_tableFillPurpose_e tfp)
{
    if (tfp == ZSTD_tfp_forCDict) {
        ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm);
    } else {
        ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm);
    }
}
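
/* The two fill variants above produce different table layouts: tables built
 * for a CDict hold tagged indices (hence the hashLog + ZSTD_SHORT_CACHE_TAG_BITS
 * hash width), while tables built for a CCtx hold plain position indices. */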


FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    /* presumes that, if there is a dictionary, it must be using Attach mode */
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    size_t mLength;
    U32 offset;
    U32 curr;

    /* how many positions to search before increasing step size */
    const size_t kStepIncr = 1 << kSearchStrength;
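    /* note: kSearchStrength is defined in zstd_compress_internal.h; assuming
     * its usual value of 8, kStepIncr == 256, i.e. after ~256 positions
     * searched without finding a match, the stride between probes grows. */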
    /* the position at which to increment the step size if no match is found */
    const BYTE* nextStep;
    size_t step; /* the current step size */

    size_t hl0; /* the long hash at ip */
    size_t hl1; /* the long hash at ip1 */

    U32 idxl0; /* the long match index for ip */
    U32 idxl1; /* the long match index for ip1 */

    const BYTE* matchl0; /* the long match for ip */
    const BYTE* matchs0; /* the short match for ip */
    const BYTE* matchl1; /* the long match for ip1 */
    const BYTE* matchs0_safe; /* matchs0 or safe address */

    const BYTE* ip = istart; /* the current position */
    const BYTE* ip1; /* the next position */
    /* Array of ~random data; it should have a low probability of matching
     * actual input. When matchl0/matchl1 are invalid indices, we load from
     * here instead of from the tables, to avoid unpredictable branches. */
    const BYTE dummy[] = {0x12,0x34,0x56,0x78,0x9a,0xbc,0xde,0xf0,0xe2,0xb4};

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");

    /* init */
    ip += ((ip - prefixLowest) == 0);
    {
        U32 const current = (U32)(ip - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
        U32 const maxRep = current - windowLow;
        if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
    }

    /* Outer Loop: one iteration per match found and stored */
    while (1) {
        step = 1;
        nextStep = ip + kStepIncr;
        ip1 = ip + step;

        if (ip1 > ilimit) {
            goto _cleanup;
        }

        hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
        idxl0 = hashLong[hl0];
        matchl0 = base + idxl0;

        /* Inner Loop: one iteration per search / position */
        do {
            const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
            const U32 idxs0 = hashSmall[hs0];
            curr = (U32)(ip-base);
            matchs0 = base + idxs0;

            hashLong[hl0] = hashSmall[hs0] = curr;   /* update hash tables */

            /* check noDict repcode */
            if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
                mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
                ip++;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
                goto _match_stored;
            }

            hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);

            /* idxl0 > prefixLowestIndex is a (somewhat) unpredictable branch.
             * However, the expression below compiles into a conditional move.
             * Since a match is unlikely, and we only *branch* on
             * idxl0 > prefixLowestIndex when there is a match, all branches
             * become predictable. */
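            /* For reference, ZSTD_selectAddr(idx, lowLimit, candidate, backup)
             * (defined in zstd_compress_internal.h) selects `candidate` when
             * idx is above lowLimit, and `backup` otherwise; a rough sketch of
             * the intent, assuming the portable fallback path:
             *
             *     return (idx > lowLimit) ? candidate : backup;  // as a cmov
             */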
            {   const BYTE* const matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]);

                /* check prefix long match */
                if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) {
                    mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
                    offset = (U32)(ip-matchl0);
                    while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
                    goto _match_found;
            }   }

            idxl1 = hashLong[hl1];
            matchl1 = base + idxl1;

            /* Same optimization as matchl0 above */
            matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]);

            /* check prefix short match */
            if (MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) {
                goto _search_next_long;
            }

            if (ip1 >= nextStep) {
                PREFETCH_L1(ip1 + 64);
                PREFETCH_L1(ip1 + 128);
                step++;
                nextStep += kStepIncr;
            }
            ip = ip1;
            ip1 += step;

            hl0 = hl1;
            idxl0 = idxl1;
            matchl0 = matchl1;
#if defined(__aarch64__)
            PREFETCH_L1(ip+256);
#endif
        } while (ip1 <= ilimit);

_cleanup:
        /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
         * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
        offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;

        /* save reps for next block */
        rep[0] = offset_1 ? offset_1 : offsetSaved1;
        rep[1] = offset_2 ? offset_2 : offsetSaved2;

        /* Return the last literals size */
        return (size_t)(iend - anchor);

_search_next_long:

        /* short match found: let's check for a longer one */
        mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
        offset = (U32)(ip - matchs0);

        /* check long match at +1 position */
        if ((idxl1 > prefixLowestIndex) && (MEM_read64(matchl1) == MEM_read64(ip1))) {
            size_t const l1len = ZSTD_count(ip1+8, matchl1+8, iend) + 8;
            if (l1len > mLength) {
                /* use the long match instead */
                ip = ip1;
                mLength = l1len;
                offset = (U32)(ip-matchl1);
                matchs0 = matchl1;
            }
        }

        while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* complete backward */

        /* fall-through */

_match_found: /* requires ip, offset, mLength */
        offset_2 = offset_1;
        offset_1 = offset;

        if (step < 4) {
            /* It is unsafe to write this value back to the hashtable when ip1 is
             * greater than or equal to the new ip we will have after we're done
             * processing this match. Rather than perform that test directly
             * (ip1 >= ip + mLength), which costs speed in practice, we do a
             * simpler, more predictable test. The minimum match length is 4 bytes
             * even if we take a short match, so as long as step, the (initial)
             * distance between ip and ip1, is less than 4, we know ip1 < new ip. */
            hashLong[hl1] = (U32)(ip1 - base);
        }

        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

_match_stored:
        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && ( (offset_2>0)
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;   /* swap offset_2 <=> offset_1 */
                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
                ip += rLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
        }
    }
}


FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    /* presumes that, if there is a dictionary, it must be using Attach mode */
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];

    const ZSTD_MatchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashLong  = dms->hashTable;
    const U32* const dictHashSmall = dms->chainTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    const U32 dictIndexDelta       = prefixLowestIndex - (U32)(dictEnd - dictBase);
    const U32 dictHBitsL           = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
    const U32 dictHBitsS           = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
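    /* Note: dictIndexDelta maps dictionary-local indices into the current
     * index space: a dictionary match at dictMatchIndex corresponds to
     * overall index dictMatchIndex + dictIndexDelta, which is why offsets
     * below are computed as curr - dictMatchIndex - dictIndexDelta. */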

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");

    /* if a dictionary is attached, it must be within window range */
    assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);

    if (ms->prefetchCDictTables) {
        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
        size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32);
        PREFETCH_AREA(dictHashLong, hashTableBytes);
        PREFETCH_AREA(dictHashSmall, chainTableBytes);
    }

    /* init */
    ip += (dictAndPrefixLength == 0);

    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        U32 offset;
        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
        size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8);
        size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls);
        U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS];
        U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS];
        int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL);
        int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS);
        U32 const curr = (U32)(ip-base);
        U32 const matchIndexL = hashLong[h2];
        U32 matchIndexS = hashSmall[h];
        const BYTE* matchLong = base + matchIndexL;
        const BYTE* match = base + matchIndexS;
        const U32 repIndex = curr + 1 - offset_1;
        const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */

        /* check repcode */
        if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
            goto _match_stored;
        }

        if ((matchIndexL >= prefixLowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
            /* check prefix long match */
            mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
            offset = (U32)(ip-matchLong);
            while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
            goto _match_found;
        } else if (dictTagsMatchL) {
            /* check dictMatchState long match */
            U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS;
            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
            assert(dictMatchL < dictEnd);

            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
                goto _match_found;
        }   }

        if (matchIndexS > prefixLowestIndex) {
            /* short match candidate */
            if (MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
            }
        } else if (dictTagsMatchS) {
            /* check dictMatchState short match */
            U32 const dictMatchIndexS = dictMatchIndexAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS;
            match = dictBase + dictMatchIndexS;
            matchIndexS = dictMatchIndexS + dictIndexDelta;

            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
        }   }

        ip += ((ip-anchor) >> kSearchStrength) + 1;
#if defined(__aarch64__)
        PREFETCH_L1(ip+256);
#endif
        continue;

_search_next_long:
        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
            size_t const dictHashAndTagL3 = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
            U32 const matchIndexL3 = hashLong[hl3];
            U32 const dictMatchIndexAndTagL3 = dictHashLong[dictHashAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS];
            int const dictTagsMatchL3 = ZSTD_comparePackedTags(dictMatchIndexAndTagL3, dictHashAndTagL3);
            const BYTE* matchL3 = base + matchIndexL3;
            hashLong[hl3] = curr + 1;

            /* check prefix long +1 match */
            if ((matchIndexL3 >= prefixLowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1))) {
                mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                ip++;
                offset = (U32)(ip-matchL3);
                while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
                goto _match_found;
            } else if (dictTagsMatchL3) {
                /* check dict long +1 match */
                U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS;
                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
                assert(dictMatchL3 < dictEnd);
                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
                    ip++;
                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
                    goto _match_found;
        }   }   }

        /* if no long +1 match, explore the short match we found */
        if (matchIndexS < prefixLowestIndex) {
            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
            offset = (U32)(curr - matchIndexS);
            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
        } else {
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            offset = (U32)(ip - match);
            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
        }

_match_found:
        offset_2 = offset_1;
        offset_1 = offset;

        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

_match_stored:
        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
                        dictBase + repIndex2 - dictIndexDelta :
                        base + repIndex2;
                if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2))
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }   /* while (ip < ilimit) */

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

#define ZSTD_GEN_DFAST_FN(dictMode, mls)                                                                 \
    static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls(                                      \
            ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
            void const* src, size_t srcSize)                                                             \
    {                                                                                                    \
        return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
    }
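
/* For illustration, ZSTD_GEN_DFAST_FN(noDict, 4) expands to:
 *
 *     static size_t ZSTD_compressBlock_doubleFast_noDict_4(
 *             ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
 *             void const* src, size_t srcSize)
 *     {
 *         return ZSTD_compressBlock_doubleFast_noDict_generic(ms, seqStore, rep, src, srcSize, 4);
 *     }
 *
 * handing the FORCE_INLINE_TEMPLATE body a compile-time-constant mls, so the
 * compiler can specialize each instantiation. */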

ZSTD_GEN_DFAST_FN(noDict, 4)
ZSTD_GEN_DFAST_FN(noDict, 5)
ZSTD_GEN_DFAST_FN(noDict, 6)
ZSTD_GEN_DFAST_FN(noDict, 7)

ZSTD_GEN_DFAST_FN(dictMatchState, 4)
ZSTD_GEN_DFAST_FN(dictMatchState, 5)
ZSTD_GEN_DFAST_FN(dictMatchState, 6)
ZSTD_GEN_DFAST_FN(dictMatchState, 7)


size_t ZSTD_compressBlock_doubleFast(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
    }
}


size_t ZSTD_compressBlock_doubleFast_dictMatchState(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
    }
}


static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_extDict_generic(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    U32 offset_1=rep[0], offset_2=rep[1];
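    /* Note on the extDict index space: position indices below prefixStartIndex
     * refer to bytes in the "old" segment at dictBase[], while indices at or
     * above it refer to the current segment at base[]; both segments share one
     * contiguous index space, which is why each candidate below selects its
     * base pointer (dictBase vs base) by comparing against prefixStartIndex. */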

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);

    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);

    /* Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
        const U32 matchIndex = hashSmall[hSmall];
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;

        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
        const U32 matchLongIndex = hashLong[hLong];
        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
        const BYTE* matchLong = matchLongBase + matchLongIndex;

        const U32 curr = (U32)(ip-base);
        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        size_t mLength;
        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */

        if (((ZSTD_index_overlap_check(prefixStartIndex, repIndex))
            & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
        } else {
            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
                offset = curr - matchLongIndex;
                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndex3 = hashLong[h3];
                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
                const BYTE* match3 = match3Base + matchIndex3;
                U32 offset;
                hashLong[h3] = curr + 1;
                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
                    ip++;
                    offset = curr+1 - matchIndex3;
                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
                } else {
                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                    offset = curr - matchIndex;
                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                }
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);

            } else {
                ip += ((ip-anchor) >> kSearchStrength) + 1;
                continue;
        }   }

        /* move to next sequence start */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2))
                    & (offset_2 <= current2 - dictStartIndex))
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

ZSTD_GEN_DFAST_FN(extDict, 4)
ZSTD_GEN_DFAST_FN(extDict, 5)
ZSTD_GEN_DFAST_FN(extDict, 6)
ZSTD_GEN_DFAST_FN(extDict, 7)

size_t ZSTD_compressBlock_doubleFast_extDict(
        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
    }
}
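
/* Note: which of the three entry points (noDict / dictMatchState / extDict)
 * gets called for a given block is decided by the block-compressor selection
 * logic in zstd_compress.c (ZSTD_selectBlockCompressor), according to the
 * dictionary mode in effect. */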

#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */