/src/zlib-ng/inffast_tpl.h
Line | Count | Source |
1 | | /* inffast.c -- fast decoding |
2 | | * Copyright (C) 1995-2017 Mark Adler |
3 | | * For conditions of distribution and use, see copyright notice in zlib.h |
4 | | */ |
5 | | |
6 | | #include "zbuild.h" |
7 | | #include "zendian.h" |
8 | | #include "zutil.h" |
9 | | #include "inftrees.h" |
10 | | #include "inflate.h" |
11 | | #include "inflate_p.h" |
12 | | #include "functable.h" |
13 | | |
14 | | /* |
15 | | Decode literal, length, and distance codes and write out the resulting |
16 | | literal and match bytes until either not enough input or output is |
17 | | available, an end-of-block is encountered, or a data error is encountered. |
18 | | When large enough input and output buffers are supplied to inflate(), for |
19 | | example, a 16K input buffer and a 64K output buffer, more than 95% of the |
20 | | inflate execution time is spent in this routine. |
21 | | |
22 | | Entry assumptions: |
23 | | |
24 | | state->mode == LEN |
25 | | strm->avail_in >= INFLATE_FAST_MIN_HAVE |
26 | | strm->avail_out >= INFLATE_FAST_MIN_LEFT |
27 | | start >= strm->avail_out |
28 | | state->bits < 8 |
29 | | |
30 | | On return, state->mode is one of: |
31 | | |
32 | | LEN -- ran out of output space or available input |
33 | | TYPE -- reached end of block code, inflate() to interpret next block |
34 | | BAD -- error in block data |
35 | | |
36 | | Notes: |
37 | | |
38 | | - The maximum input bits used by a length/distance pair is 15 bits for the |
39 | | length code, 5 bits for the length extra, 15 bits for the distance code, |
40 | | and 13 bits for the distance extra. This totals 48 bits, or six bytes. |
41 | | Therefore if strm->avail_in >= 6, then there is enough input to avoid |
42 | | checking for available input while decoding. |
43 | | |
44 | | - On some architectures, it can be significantly faster (e.g. up to 1.2x |
45 | | faster on x86_64) to load from strm->next_in 64 bits, or 8 bytes, at a |
46 | | time, so INFLATE_FAST_MIN_HAVE == 8. |
47 | | |
48 | | - The maximum bytes that a single length/distance pair can output is 258 |
49 | | bytes, which is the maximum length that can be coded. inflate_fast() |
50 | | requires strm->avail_out >= 258 for each loop to avoid checking for |
51 | | output space. |
52 | | */ |
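To make the input budget in the notes above concrete, here is a minimal stand-alone sketch of the worst-case arithmetic; the constant names are illustrative assumptions, not zlib-ng identifiers.

    /* Worst-case bits consumed by one length/distance pair, per the notes
       above. The names below are illustrative, not zlib-ng's own macros. */
    #include <assert.h>
    #include <stdio.h>

    int main(void) {
        const unsigned len_code_bits   = 15;  /* longest length Huffman code   */
        const unsigned len_extra_bits  = 5;   /* extra bits carried by lengths */
        const unsigned dist_code_bits  = 15;  /* longest distance Huffman code */
        const unsigned dist_extra_bits = 13;  /* extra bits for distances      */
        const unsigned total = len_code_bits + len_extra_bits +
                               dist_code_bits + dist_extra_bits;

        assert(total == 48);                  /* 48 bits == 6 bytes of input   */
        printf("worst case per pair: %u bits (%u bytes), max match %u bytes\n",
               total, total / 8, 258u);
        return 0;
    }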
53 | 0 | void Z_INTERNAL INFLATE_FAST(PREFIX3(stream) *strm, uint32_t start) { |
54 | | /* start: inflate()'s starting value for strm->avail_out */ |
55 | 0 | struct inflate_state *state; |
56 | 0 | z_const unsigned char *in; /* local strm->next_in */ |
57 | 0 | const unsigned char *last; /* have enough input while in < last */ |
58 | 0 | unsigned char *out; /* local strm->next_out */ |
59 | 0 | unsigned char *beg; /* inflate()'s initial strm->next_out */ |
60 | 0 | unsigned char *end; /* while out < end, enough space available */ |
61 | 0 | unsigned char *safe; /* can use chunkcopy provided out < safe */ |
62 | 0 | unsigned char *window; /* allocated sliding window, if wsize != 0 */ |
63 | 0 | unsigned wsize; /* window size or zero if not using window */ |
64 | 0 | unsigned whave; /* valid bytes in the window */ |
65 | 0 | unsigned wnext; /* window write index */ |
66 | | |
67 | | /* hold is a local copy of strm->hold. By default, hold satisfies the same |
68 | | invariants that strm->hold does, namely that (hold >> bits) == 0. This |
69 | | invariant is kept by loading bits into hold one byte at a time, like: |
70 | | |
71 | | hold |= next_byte_of_input << bits; in++; bits += 8; |
72 | | |
73 | | If we need to ensure that bits >= 15 then this code snippet is simply |
74 | | repeated. Over one iteration of the outermost do/while loop, this |
75 | | happens up to six times (48 bits of input), as described in the NOTES |
76 | | above. |
77 | | |
78 | | However, on some little endian architectures, it can be significantly |
79 | | faster to load 64 bits once instead of 8 bits six times: |
80 | | |
81 | | if (bits <= 16) { |
82 | | hold |= next_8_bytes_of_input << bits; in += 6; bits += 48; |
83 | | } |
84 | | |
85 | | Unlike the simpler one byte load, shifting the next_8_bytes_of_input |
86 | | by bits will overflow and lose those high bits, up to 2 bytes' worth. |
87 | | The conservative estimate is therefore that we have read only 6 bytes |
88 | | (48 bits). Again, as per the NOTES above, 48 bits is sufficient for the |
89 | | rest of the iteration, and we will not need to load another 8 bytes. |
90 | | |
91 | | Inside this function, we no longer satisfy (hold >> bits) == 0, but |
92 | | this is not problematic, even if that overflow does not land on an 8 bit |
93 | | byte boundary. Those excess bits will eventually shift down lower as the |
94 | | Huffman decoder consumes input, and when new input bits need to be loaded |
95 | | into the bits variable, the same input bits will be or'ed over those |
96 | | existing bits. A bitwise or is idempotent: (a | b | b) equals (a | b). |
97 | | Note that we therefore write that load operation as "hold |= etc" and not |
98 | | "hold += etc". |
99 | | |
100 | | Outside that loop, at the end of the function, hold is bitwise and'ed |
101 | | with (1<<bits)-1 to drop those excess bits so that, on function exit, we |
102 | | keep the invariant that (state->hold >> state->bits) == 0. |
103 | | */ |
104 | 0 | unsigned bits; /* local strm->bits */ |
105 | 0 | uint64_t hold; /* local strm->hold */ |
106 | 0 | unsigned lmask; /* mask for first level of length codes */ |
107 | 0 | unsigned dmask; /* mask for first level of distance codes */ |
108 | 0 | code const *lcode; /* local strm->lencode */ |
109 | 0 | code const *dcode; /* local strm->distcode */ |
110 | 0 | const code *here; /* retrieved table entry */ |
111 | 0 | unsigned op; /* code bits, operation, extra bits, or */ |
112 | | /* window position, window bytes to copy */ |
113 | 0 | unsigned len; /* match length, unused bytes */ |
114 | 0 | unsigned char *from; /* where to copy match from */ |
115 | 0 | unsigned dist; /* match distance */ |
116 | 0 | unsigned extra_safe; /* copy chunks safely in all cases */ |
117 | | |
118 | | /* copy state to local variables */ |
119 | 0 | state = (struct inflate_state *)strm->state; |
120 | 0 | in = strm->next_in; |
121 | 0 | last = in + (strm->avail_in - (INFLATE_FAST_MIN_HAVE - 1)); |
122 | 0 | out = strm->next_out; |
123 | 0 | beg = out - (start - strm->avail_out); |
124 | 0 | end = out + (strm->avail_out - (INFLATE_FAST_MIN_LEFT - 1)); |
125 | 0 | safe = out + strm->avail_out; |
126 | 0 | wsize = state->wsize; |
127 | 0 | whave = state->whave; |
128 | 0 | wnext = state->wnext; |
129 | 0 | window = state->window; |
130 | 0 | hold = state->hold; |
131 | 0 | bits = state->bits; |
132 | 0 | lcode = state->lencode; |
133 | 0 | dcode = state->distcode; |
134 | 0 | lmask = (1U << state->lenbits) - 1; |
135 | 0 | dmask = (1U << state->distbits) - 1; |
136 | | |
137 | | /* Detect if out and window point to the same memory allocation. In this instance it is |
138 | | necessary to use safe chunk copy functions to prevent overwriting the window. If the |
139 | | window is overwritten then future matches with far distances will fail to copy correctly. */ |
140 | 0 | extra_safe = (wsize != 0 && out >= window && out + INFLATE_FAST_MIN_LEFT <= window + state->wbufsize); |
141 | |
142 | 0 | #define REFILL() do { \ |
143 | 0 | hold |= load_64_bits(in, bits); \ |
144 | 0 | in += 7; \ |
145 | 0 | in -= ((bits >> 3) & 7); \ |
146 | 0 | bits |= 56; \ |
147 | 0 | } while (0) |
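The REFILL() macro above implements the wide load described in the long comment before the local declarations. Below is a minimal sketch of the same idea, assuming a little-endian host; load_le64() is a hypothetical stand-in, not zlib-ng's load_64_bits() (which also takes the current bit count).

    /* Stand-alone sketch of the two refill strategies discussed above.
       load_le64() is a hypothetical helper assuming a little-endian host. */
    #include <stdint.h>
    #include <string.h>

    static inline uint64_t load_le64(const unsigned char *p) {
        uint64_t v;
        memcpy(&v, p, sizeof(v));          /* assumes little-endian byte order */
        return v;
    }

    /* Byte-at-a-time refill: preserves the strict (hold >> bits) == 0 invariant. */
    void refill_bytewise(const unsigned char **in, uint64_t *hold, unsigned *bits) {
        while (*bits < 48) {
            *hold |= (uint64_t)*(*in)++ << *bits;
            *bits += 8;
        }
    }

    /* Wide refill: one 8-byte load OR'ed over whatever bits are already held.
       Bits already present are simply re-OR'ed (idempotent), and the
       bookkeeping stays conservative: in advances by 7 - bits/8 bytes and
       bits becomes bits | 56, matching REFILL() above for bits < 64. */
    void refill_wide(const unsigned char **in, uint64_t *hold, unsigned *bits) {
        *hold |= load_le64(*in) << *bits;  /* high bits of the load may be lost */
        *in += 7 - (*bits >> 3);           /* bytes newly accounted for */
        *bits |= 56;                       /* at least 56 valid bits now */
    }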
148 | | |
149 | | /* decode literals and length/distances until end-of-block or not enough |
150 | | input data or output space */ |
151 | 0 | do { |
152 | 0 | REFILL(); |
153 | 0 | here = lcode + (hold & lmask); |
154 | 0 | if (here->op == 0) { |
155 | 0 | *out++ = (unsigned char)(here->val); |
156 | 0 | DROPBITS(here->bits); |
157 | 0 | here = lcode + (hold & lmask); |
158 | 0 | if (here->op == 0) { |
159 | 0 | *out++ = (unsigned char)(here->val); |
160 | 0 | DROPBITS(here->bits); |
161 | 0 | here = lcode + (hold & lmask); |
162 | 0 | } |
163 | 0 | } |
164 | 0 | dolen: |
165 | 0 | DROPBITS(here->bits); |
166 | 0 | op = here->op; |
167 | 0 | if (op == 0) { /* literal */ |
168 | 0 | Tracevv((stderr, here->val >= 0x20 && here->val < 0x7f ? |
169 | 0 | "inflate: literal '%c'\n" : |
170 | 0 | "inflate: literal 0x%02x\n", here->val)); |
171 | 0 | *out++ = (unsigned char)(here->val); |
172 | 0 | } else if (op & 16) { /* length base */ |
173 | 0 | len = here->val; |
174 | 0 | op &= MAX_BITS; /* number of extra bits */ |
175 | 0 | len += BITS(op); |
176 | 0 | DROPBITS(op); |
177 | 0 | Tracevv((stderr, "inflate: length %u\n", len)); |
178 | 0 | here = dcode + (hold & dmask); |
179 | 0 | if (bits < MAX_BITS + MAX_DIST_EXTRA_BITS) { |
180 | 0 | REFILL(); |
181 | 0 | } |
182 | 0 | dodist: |
183 | 0 | DROPBITS(here->bits); |
184 | 0 | op = here->op; |
185 | 0 | if (op & 16) { /* distance base */ |
186 | 0 | dist = here->val; |
187 | 0 | op &= MAX_BITS; /* number of extra bits */ |
188 | 0 | dist += BITS(op); |
189 | | #ifdef INFLATE_STRICT |
190 | | if (dist > state->dmax) { |
191 | | SET_BAD("invalid distance too far back"); |
192 | | break; |
193 | | } |
194 | | #endif |
195 | 0 | DROPBITS(op); |
196 | 0 | Tracevv((stderr, "inflate: distance %u\n", dist)); |
197 | 0 | op = (unsigned)(out - beg); /* max distance in output */ |
198 | 0 | if (dist > op) { /* see if copy from window */ |
199 | 0 | op = dist - op; /* distance back in window */ |
200 | 0 | if (op > whave) { |
201 | | #ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR |
202 | | if (state->sane) { |
203 | | SET_BAD("invalid distance too far back"); |
204 | | break; |
205 | | } |
206 | | if (len <= op - whave) { |
207 | | do { |
208 | | *out++ = 0; |
209 | | } while (--len); |
210 | | continue; |
211 | | } |
212 | | len -= op - whave; |
213 | | do { |
214 | | *out++ = 0; |
215 | | } while (--op > whave); |
216 | | if (op == 0) { |
217 | | from = out - dist; |
218 | | do { |
219 | | *out++ = *from++; |
220 | | } while (--len); |
221 | | continue; |
222 | | } |
223 | | #else |
224 | 0 | SET_BAD("invalid distance too far back"); |
225 | 0 | break; |
226 | 0 | #endif |
227 | 0 | } |
228 | 0 | from = window; |
229 | 0 | if (wnext == 0) { /* very common case */ |
230 | 0 | from += wsize - op; |
231 | 0 | } else if (wnext >= op) { /* contiguous in window */ |
232 | 0 | from += wnext - op; |
233 | 0 | } else { /* wrap around window */ |
234 | 0 | op -= wnext; |
235 | 0 | from += wsize - op; |
236 | 0 | if (op < len) { /* some from end of window */ |
237 | 0 | len -= op; |
238 | 0 | out = CHUNKCOPY_SAFE(out, from, op, safe); |
239 | 0 | from = window; /* more from start of window */ |
240 | 0 | op = wnext; |
241 | | /* This (rare) case can create a situation where |
242 | | the first chunkcopy below must be checked. |
243 | | */ |
244 | 0 | } |
245 | 0 | } |
246 | 0 | if (op < len) { /* still need some from output */ |
247 | 0 | len -= op; |
248 | 0 | if (!extra_safe) { |
249 | 0 | out = CHUNKCOPY_SAFE(out, from, op, safe); |
250 | 0 | out = CHUNKUNROLL(out, &dist, &len); |
251 | 0 | out = CHUNKCOPY_SAFE(out, out - dist, len, safe); |
252 | 0 | } else { |
253 | 0 | out = chunkcopy_safe(out, from, op, safe); |
254 | 0 | out = chunkcopy_safe(out, out - dist, len, safe); |
255 | 0 | } |
256 | 0 | } else { |
257 | | #ifndef HAVE_MASKED_READWRITE |
258 | 0 | if (extra_safe) |
259 | 0 | out = chunkcopy_safe(out, from, len, safe); |
260 | 0 | else |
261 | 0 | #endif |
262 | 0 | out = CHUNKCOPY_SAFE(out, from, len, safe); |
263 | 0 | } |
264 | | #ifndef HAVE_MASKED_READWRITE |
265 | 0 | } else if (extra_safe) { |
266 | | /* Whole reference is in range of current output. */ |
267 | | out = chunkcopy_safe(out, out - dist, len, safe); |
268 | | #endif |
269 | 0 | } else { |
270 | | /* Whole reference is in range of current output. No range checks are |
271 | | necessary because we start with room for at least 258 bytes of output, |
272 | | so unroll and roundoff operations can write beyond `out+len` so long |
273 | | as they stay within 258 bytes of `out`. |
274 | | */ |
275 | 0 | if (dist >= len || dist >= state->chunksize) |
276 | 0 | out = CHUNKCOPY(out, out - dist, len); |
277 | 0 | else |
278 | 0 | out = CHUNKMEMSET(out, out - dist, len); |
279 | 0 | } |
280 | 0 | } else if ((op & 64) == 0) { /* 2nd level distance code */ |
281 | 0 | here = dcode + here->val + BITS(op); |
282 | 0 | goto dodist; |
283 | 0 | } else { |
284 | 0 | SET_BAD("invalid distance code"); |
285 | 0 | break; |
286 | 0 | } |
287 | 0 | } else if ((op & 64) == 0) { /* 2nd level length code */ |
288 | 0 | here = lcode + here->val + BITS(op); |
289 | 0 | goto dolen; |
290 | 0 | } else if (op & 32) { /* end-of-block */ |
291 | 0 | Tracevv((stderr, "inflate: end of block\n")); |
292 | 0 | state->mode = TYPE; |
293 | 0 | break; |
294 | 0 | } else { |
295 | 0 | SET_BAD("invalid literal/length code"); |
296 | 0 | break; |
297 | 0 | } |
298 | 0 | } while (in < last && out < end); |
299 | | |
300 | | /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ |
301 | 0 | len = bits >> 3; |
302 | 0 | in -= len; |
303 | 0 | bits -= len << 3; |
304 | 0 | hold &= (UINT64_C(1) << bits) - 1; |
305 | | |
306 | | /* update state and return */ |
307 | 0 | strm->next_in = in; |
308 | 0 | strm->next_out = out; |
309 | 0 | strm->avail_in = (unsigned)(in < last ? (INFLATE_FAST_MIN_HAVE - 1) + (last - in) |
310 | 0 | : (INFLATE_FAST_MIN_HAVE - 1) - (in - last)); |
311 | 0 | strm->avail_out = (unsigned)(out < end ? (INFLATE_FAST_MIN_LEFT - 1) + (end - out) |
312 | 0 | : (INFLATE_FAST_MIN_LEFT - 1) - (out - end)); |
313 | |
314 | 0 | Assert(bits <= 32, "Remaining bits greater than 32"); |
315 | 0 | state->hold = (uint32_t)hold; |
316 | 0 | state->bits = bits; |
317 | 0 | return; |
318 | 0 | } |
Unexecuted instantiation: inflate_fast_sse2
Unexecuted instantiation: inflate_fast_ssse3
Unexecuted instantiation: inflate_fast_avx2
Unexecuted instantiation: inflate_fast_avx512
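As a cross-check on the avail_in/avail_out restore a few lines above: since last was initialized as next_in + avail_in - (INFLATE_FAST_MIN_HAVE - 1), the identity avail_in == (INFLATE_FAST_MIN_HAVE - 1) + (last - in) holds whether in ends up before or after last; the two-branch form only keeps each pointer difference non-negative. A small worked sketch with made-up numbers (MIN_HAVE of 8 taken from the INFLATE_FAST_MIN_HAVE note above):

    /* Hedged sketch of the avail_in restore, using made-up buffer sizes. */
    #include <stdio.h>

    #define MIN_HAVE 8   /* mirrors INFLATE_FAST_MIN_HAVE == 8 from the notes */

    int main(void) {
        unsigned char buf[100];
        unsigned avail_in = (unsigned)sizeof(buf);
        unsigned char *in   = buf;
        unsigned char *last = in + (avail_in - (MIN_HAVE - 1));  /* buf + 93 */

        in += 97;  /* pretend the loop consumed 97 bytes, so in > last */

        unsigned restored = (unsigned)(in < last
            ? (MIN_HAVE - 1) + (last - in)
            : (MIN_HAVE - 1) - (in - last));
        printf("avail_in restored to %u\n", restored);  /* prints 3 == 100 - 97 */
        return 0;
    }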
319 | | |
320 | | /* |
321 | | inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): |
322 | | - Using bit fields for code structure |
323 | | - Different op definition to avoid & for extra bits (do & for table bits) |
324 | | - Three separate decoding do-loops for direct, window, and wnext == 0 |
325 | | - Special case for distance > 1 copies to do overlapped load and store copy |
326 | | - Explicit branch predictions (based on measured branch probabilities) |
327 | | - Deferring match copy and interspersing it with decoding subsequent codes |
328 | | - Swapping literal/length else |
329 | | - Swapping window/direct else |
330 | | - Larger unrolled copy loops (three is about right) |
331 | | - Moving len -= 3 statement into middle of loop |
332 | | */ |