/rust/registry/src/index.crates.io-6f17d22bba15001f/miniz_oxide-0.7.4/src/inflate/core.rs
Line | Count | Source (jump to first uncovered line) |
1 | | //! Streaming decompression functionality. |
2 | | |
3 | | use super::*; |
4 | | use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER}; |
5 | | use ::core::cell::Cell; |
6 | | |
7 | | use ::core::convert::TryInto; |
8 | | use ::core::{cmp, slice}; |
9 | | |
10 | | use self::output_buffer::OutputBuffer; |
11 | | |
/// Size in bytes of the LZ77 history dictionary/window kept by the decompressor (32 KiB).
pub const TINFL_LZ_DICT_SIZE: usize = 32_768;
13 | | |
/// A struct containing huffman code lengths and the huffman code tree used by the decompressor.
struct HuffmanTable {
    /// Length of the code at each index.
    pub code_size: [u8; MAX_HUFF_SYMBOLS_0],
    /// Fast lookup table for shorter huffman codes.
    ///
    /// See `HuffmanTable::fast_lookup`.
    pub look_up: [i16; FAST_LOOKUP_SIZE as usize],
    /// Full huffman tree.
    ///
    /// Positive values are edge nodes/symbols, negative values are
    /// parent nodes/references to other nodes.
    pub tree: [i16; MAX_HUFF_TREE_SIZE],
}

impl HuffmanTable {
    /// Create a table with every entry zeroed, i.e. no codes loaded yet.
    /// `const` so it can be used when building the decompressor without
    /// runtime initialization.
    const fn new() -> HuffmanTable {
        HuffmanTable {
            code_size: [0; MAX_HUFF_SYMBOLS_0],
            look_up: [0; FAST_LOOKUP_SIZE as usize],
            tree: [0; MAX_HUFF_TREE_SIZE],
        }
    }

    /// Look for a symbol in the fast lookup table.
    /// The symbol is stored in the lower 9 bits, the length in the next 6.
    /// If the returned value is negative, the code wasn't found in the
    /// fast lookup table and the full tree has to be traversed to find the code.
    #[inline]
    fn fast_lookup(&self, bit_buf: BitBuffer) -> i16 {
        // Index with the low FAST_LOOKUP_BITS bits of the bit buffer.
        self.look_up[(bit_buf & BitBuffer::from(FAST_LOOKUP_SIZE - 1)) as usize]
    }

    /// Get the symbol and the code length from the huffman tree.
    #[inline]
    fn tree_lookup(&self, fast_symbol: i32, bit_buf: BitBuffer, mut code_len: u32) -> (i32, u32) {
        let mut symbol = fast_symbol;
        // We step through the tree until we encounter a positive value, which indicates a
        // symbol.
        loop {
            // symbol here indicates the position of the left (0) node, if the next bit is 1
            // we add 1 to the lookup position to get the right node.
            let tree_index = (!symbol + ((bit_buf >> code_len) & 1) as i32) as usize;
            debug_assert!(tree_index < self.tree.len());
            // Defensive check so a corrupt table can't cause an out-of-bounds
            // panic in release builds; debug builds assert above instead.
            if tree_index >= self.tree.len() {
                break;
            }
            symbol = i32::from(self.tree[tree_index]);
            code_len += 1;
            if symbol >= 0 {
                break;
            }
        }
        (symbol, code_len)
    }

    #[inline]
    /// Look up a symbol and code length from the bits in the provided bit buffer.
    ///
    /// Returns Some(symbol, length) on success,
    /// None if the length is 0.
    ///
    /// It's possible we could avoid checking for 0 if we can guarantee a sane table.
    /// TODO: Check if a smaller type for code_len helps performance.
    fn lookup(&self, bit_buf: BitBuffer) -> Option<(i32, u32)> {
        let symbol = self.fast_lookup(bit_buf).into();
        if symbol >= 0 {
            // Fast-table hit: code length lives in bits 9 and up of the entry.
            if (symbol >> 9) as u32 != 0 {
                Some((symbol, (symbol >> 9) as u32))
            } else {
                // Zero-length code.
                None
            }
        } else {
            // We didn't get a symbol from the fast lookup table, so check the tree instead.
            Some(self.tree_lookup(symbol, bit_buf, FAST_LOOKUP_BITS.into()))
        }
    }
}
93 | | |
/// The number of huffman tables used.
const MAX_HUFF_TABLES: usize = 3;
/// The length of the first (literal/length) huffman table.
const MAX_HUFF_SYMBOLS_0: usize = 288;
/// The length of the second (distance) huffman table.
const MAX_HUFF_SYMBOLS_1: usize = 32;
/// The length of the last (huffman code length) huffman table.
const _MAX_HUFF_SYMBOLS_2: usize = 19;
/// The maximum length of a code that can be looked up in the fast lookup table.
const FAST_LOOKUP_BITS: u8 = 10;
/// The size of the fast lookup table.
const FAST_LOOKUP_SIZE: u32 = 1 << FAST_LOOKUP_BITS;
/// Upper bound on the node count of one huffman tree
/// (at most two nodes per symbol of the largest table).
const MAX_HUFF_TREE_SIZE: usize = MAX_HUFF_SYMBOLS_0 * 2;
/// Index of the literal/length table in the decompressor's `tables` array.
const LITLEN_TABLE: usize = 0;
/// Index of the distance table in the decompressor's `tables` array.
const DIST_TABLE: usize = 1;
/// Index of the huffman code-length table in the decompressor's `tables` array.
const HUFFLEN_TABLE: usize = 2;
110 | | |
/// Flags to [`decompress()`] to control how inflation works.
///
/// These define bits for a bitmask argument.
pub mod inflate_flags {
    /// Should we try to parse a zlib header?
    ///
    /// If unset, the function will expect an RFC1951 deflate stream. If set, it will expect a
    /// RFC1950 zlib wrapper around the deflate stream.
    pub const TINFL_FLAG_PARSE_ZLIB_HEADER: u32 = 1;

    /// There will be more input that hasn't been given to the decompressor yet.
    ///
    /// This is useful when you want to decompress what you have so far,
    /// even if you know there is probably more input that hasn't gotten here yet (_e.g._, over a
    /// network connection). When [`decompress()`][super::decompress] reaches the end of the input
    /// without finding the end of the compressed stream, it will return
    /// [`TINFLStatus::NeedsMoreInput`][super::TINFLStatus::NeedsMoreInput] if this is set,
    /// indicating that you should get more data before calling again. If not set, it will return
    /// [`TINFLStatus::FailedCannotMakeProgress`][super::TINFLStatus::FailedCannotMakeProgress]
    /// suggesting the stream is corrupt, since you claimed it was all there.
    pub const TINFL_FLAG_HAS_MORE_INPUT: u32 = 2;

    /// The output buffer should not wrap around.
    ///
    /// If unset, the output buffer is treated as wrapping (circular); the zlib
    /// header check then requires the buffer to be at least as large as the
    /// window size declared in the header (see `validate_zlib_header`).
    pub const TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: u32 = 4;

    /// Calculate the adler32 checksum of the output data even if we're not inflating a zlib stream.
    ///
    /// If [`TINFL_FLAG_IGNORE_ADLER32`] is specified, it will override this.
    ///
    /// NOTE: Enabling/disabling this between calls to decompress will result in an incorrect
    /// checksum.
    pub const TINFL_FLAG_COMPUTE_ADLER32: u32 = 8;

    /// Ignore adler32 checksum even if we are inflating a zlib stream.
    ///
    /// Overrides [`TINFL_FLAG_COMPUTE_ADLER32`] if both are enabled.
    ///
    /// NOTE: This flag does not exist in miniz as it does not support this and is a
    /// custom addition for miniz_oxide.
    ///
    /// NOTE: Should not be changed from enabled to disabled after decompression has started,
    /// this will result in checksum failure (outside the unlikely event where the checksum happens
    /// to match anyway).
    pub const TINFL_FLAG_IGNORE_ADLER32: u32 = 64;
}
156 | | |
157 | | use self::inflate_flags::*; |
158 | | |
/// The minimal number of entries in each of the three huffman tables
/// (litlen, dist and hufflen respectively, matching the `*_TABLE` indices).
const MIN_TABLE_SIZES: [u16; 3] = [257, 1, 4];

/// Type used to buffer bits read from the input stream.
///
/// A full machine word: 64 bits on 64-bit targets so `fill_bit_buffer`
/// can top it up 32 bits at a time.
#[cfg(target_pointer_width = "64")]
type BitBuffer = u64;

/// 32-bit bit buffer for non-64-bit targets (refilled 16 bits at a time).
#[cfg(not(target_pointer_width = "64"))]
type BitBuffer = u32;
166 | | |
/// Main decompression struct.
///
pub struct DecompressorOxide {
    /// Current state of the decompressor.
    state: core::State,
    /// Number of bits in the bit buffer.
    num_bits: u32,
    /// Zlib CMF (compression method and flags) header byte.
    z_header0: u32,
    /// Zlib FLG (flags) header byte.
    z_header1: u32,
    /// Adler32 checksum from the zlib header.
    z_adler32: u32,
    /// 1 if the current block is the last block, 0 otherwise.
    finish: u32,
    /// The type of the current block.
    block_type: u32,
    /// 1 if the adler32 value should be checked.
    check_adler32: u32,
    /// Last match distance.
    dist: u32,
    /// Variable used for match length, symbols, and a number of other things.
    counter: u32,
    /// Number of extra bits for the last length or distance code.
    num_extra: u32,
    /// Number of entries in each huffman table.
    table_sizes: [u32; MAX_HUFF_TABLES],
    /// Buffer of input data.
    bit_buf: BitBuffer,
    /// Huffman tables.
    tables: [HuffmanTable; MAX_HUFF_TABLES],
    /// Raw block header.
    raw_header: [u8; 4],
    /// Huffman length codes.
    // NOTE(review): the extra 137 bytes of space presumably keep repeat-code
    // writes in bounds without per-write checks — confirm against the
    // table-loading states in `decompress`.
    len_codes: [u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1 + 137],
}
203 | | |
204 | | impl DecompressorOxide { |
205 | | /// Create a new tinfl_decompressor with all fields set to 0. |
206 | 0 | pub fn new() -> DecompressorOxide { |
207 | 0 | DecompressorOxide::default() |
208 | 0 | } Unexecuted instantiation: <miniz_oxide::inflate::core::DecompressorOxide>::new Unexecuted instantiation: <miniz_oxide::inflate::core::DecompressorOxide>::new |
209 | | |
210 | | /// Set the current state to `Start`. |
211 | | #[inline] |
212 | 0 | pub fn init(&mut self) { |
213 | 0 | // The rest of the data is reset or overwritten when used. |
214 | 0 | self.state = core::State::Start; |
215 | 0 | } |
216 | | |
217 | | /// Returns the adler32 checksum of the currently decompressed data. |
218 | | /// Note: Will return Some(1) if decompressing zlib but ignoring adler32. |
219 | | #[inline] |
220 | 0 | pub fn adler32(&self) -> Option<u32> { |
221 | 0 | if self.state != State::Start && !self.state.is_failure() && self.z_header0 != 0 { |
222 | 0 | Some(self.check_adler32) |
223 | | } else { |
224 | 0 | None |
225 | | } |
226 | 0 | } |
227 | | |
228 | | /// Returns the adler32 that was read from the zlib header if it exists. |
229 | | #[inline] |
230 | 0 | pub fn adler32_header(&self) -> Option<u32> { |
231 | 0 | if self.state != State::Start && self.state != State::BadZlibHeader && self.z_header0 != 0 { |
232 | 0 | Some(self.z_adler32) |
233 | | } else { |
234 | 0 | None |
235 | | } |
236 | 0 | } |
237 | | } |
238 | | |
239 | | impl Default for DecompressorOxide { |
240 | | /// Create a new tinfl_decompressor with all fields set to 0. |
241 | | #[inline(always)] |
242 | 0 | fn default() -> Self { |
243 | 0 | DecompressorOxide { |
244 | 0 | state: core::State::Start, |
245 | 0 | num_bits: 0, |
246 | 0 | z_header0: 0, |
247 | 0 | z_header1: 0, |
248 | 0 | z_adler32: 0, |
249 | 0 | finish: 0, |
250 | 0 | block_type: 0, |
251 | 0 | check_adler32: 0, |
252 | 0 | dist: 0, |
253 | 0 | counter: 0, |
254 | 0 | num_extra: 0, |
255 | 0 | table_sizes: [0; MAX_HUFF_TABLES], |
256 | 0 | bit_buf: 0, |
257 | 0 | // TODO:(oyvindln) Check that copies here are optimized out in release mode. |
258 | 0 | tables: [ |
259 | 0 | HuffmanTable::new(), |
260 | 0 | HuffmanTable::new(), |
261 | 0 | HuffmanTable::new(), |
262 | 0 | ], |
263 | 0 | raw_header: [0; 4], |
264 | 0 | len_codes: [0; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1 + 137], |
265 | 0 | } |
266 | 0 | } |
267 | | } |
268 | | |
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[non_exhaustive]
enum State {
    Start = 0,
    ReadZlibCmf,
    ReadZlibFlg,
    ReadBlockHeader,
    BlockTypeNoCompression,
    RawHeader,
    RawMemcpy1,
    RawMemcpy2,
    ReadTableSizes,
    ReadHufflenTableCodeSize,
    ReadLitlenDistTablesCodeSize,
    ReadExtraBitsCodeSize,
    DecodeLitlen,
    WriteSymbol,
    ReadExtraBitsLitlen,
    DecodeDistance,
    ReadExtraBitsDistance,
    RawReadFirstByte,
    RawStoreFirstByte,
    WriteLenBytesToEnd,
    BlockDone,
    HuffDecodeOuterLoop1,
    HuffDecodeOuterLoop2,
    ReadAdler32,

    DoneForever,

    // Failure states.
    BlockTypeUnexpected,
    BadCodeSizeSum,
    BadDistOrLiteralTableLength,
    BadTotalSymbols,
    BadZlibHeader,
    DistanceOutOfBounds,
    BadRawLength,
    BadCodeSizeDistPrevLookup,
    InvalidLitlen,
    InvalidDist,
    InvalidCodeLen,
}

impl State {
    /// Returns true if this is one of the terminal failure states.
    ///
    /// BUGFIX(review): `InvalidCodeLen` is declared in the failure-state group
    /// of the enum but was previously missing from this list, which made
    /// queries such as `DecompressorOxide::adler32` treat a stream that failed
    /// with an invalid code length as still healthy. It is now included.
    /// Variants are written with the `State::` prefix so this impl does not
    /// depend on the module-level `use self::State::*;` glob.
    fn is_failure(self) -> bool {
        matches!(
            self,
            State::BlockTypeUnexpected
                | State::BadCodeSizeSum
                | State::BadDistOrLiteralTableLength
                | State::BadTotalSymbols
                | State::BadZlibHeader
                | State::DistanceOutOfBounds
                | State::BadRawLength
                | State::BadCodeSizeDistPrevLookup
                | State::InvalidLitlen
                | State::InvalidDist
                | State::InvalidCodeLen
        )
    }

    /// Transition the state machine to `new_state`.
    #[inline]
    fn begin(&mut self, new_state: State) {
        *self = new_state;
    }
}
335 | | |
336 | | use self::State::*; |
337 | | |
// Not sure why miniz uses 32-bit values for these, maybe alignment/cache again?
// # Optimization
// We add an extra value at the end and make the tables 32 elements long
// so we can use a mask to avoid bounds checks.
// The invalid values are set to something high enough to avoid underflowing
// the match length.
/// Base length for each length code.
///
/// The base is used together with the value of the extra bits to decode the actual
/// length/distance values in a match.
#[rustfmt::skip]
const LENGTH_BASE: [u16; 32] = [
    3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
    35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 512, 512, 512
];

/// Number of extra bits for each length code.
#[rustfmt::skip]
const LENGTH_EXTRA: [u8; 32] = [
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
    3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0, 0
];

/// Base distance for each distance code.
#[rustfmt::skip]
const DIST_BASE: [u16; 32] = [
    1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
    49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
    2049, 3073, 4097, 6145, 8193, 12_289, 16_385, 24_577, 32_768, 32_768
];

/// Number of extra bits for each distance code.
#[rustfmt::skip]
const DIST_EXTRA: [u8; 32] = [
    0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
    7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 13, 13
];

/// The mask used when indexing the base/extra arrays.
const BASE_EXTRA_MASK: usize = 32 - 1;
378 | | |
/// Sets the value of all the elements of the slice to `val`.
#[inline]
fn memset<T: Copy>(slice: &mut [T], val: T) {
    // `T: Copy` implies `T: Clone`, so we can delegate to the standard
    // library's `slice::fill`, which can lower to `memset`/vectorized code
    // instead of a hand-rolled element-by-element loop.
    slice.fill(val);
}
386 | | |
/// Read an le u16 value from the slice iterator.
///
/// # Panics
/// Panics if there are less than two bytes left.
#[inline]
fn read_u16_le(iter: &mut slice::Iter<u8>) -> u16 {
    let remaining = iter.as_slice();
    // Indexing panics (as documented) when fewer than two bytes remain.
    let value = u16::from_le_bytes([remaining[0], remaining[1]]);
    // Step the iterator past the two bytes that were just read.
    iter.nth(1);
    value
}
400 | | |
/// Read an le u32 value from the slice iterator.
///
/// # Panics
/// Panics if there are less than four bytes left.
#[inline(always)]
#[cfg(target_pointer_width = "64")]
fn read_u32_le(iter: &mut slice::Iter<u8>) -> u32 {
    let remaining = iter.as_slice();
    // Indexing panics (as documented) when fewer than four bytes remain.
    let value =
        u32::from_le_bytes([remaining[0], remaining[1], remaining[2], remaining[3]]);
    // Step the iterator past the four bytes that were just read.
    iter.nth(3);
    value
}
415 | | |
416 | | /// Ensure that there is data in the bit buffer. |
417 | | /// |
418 | | /// On 64-bit platform, we use a 64-bit value so this will |
419 | | /// result in there being at least 32 bits in the bit buffer. |
420 | | /// This function assumes that there is at least 4 bytes left in the input buffer. |
421 | | #[inline(always)] |
422 | | #[cfg(target_pointer_width = "64")] |
423 | 0 | fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut slice::Iter<u8>) { |
424 | 0 | // Read four bytes into the buffer at once. |
425 | 0 | if l.num_bits < 30 { |
426 | 0 | l.bit_buf |= BitBuffer::from(read_u32_le(in_iter)) << l.num_bits; |
427 | 0 | l.num_bits += 32; |
428 | 0 | } |
429 | 0 | } |
430 | | |
/// Same as previous, but for non-64-bit platforms.
/// Ensures at least 16 bits are present, requires at least 2 bytes in the in buffer.
#[inline(always)]
#[cfg(not(target_pointer_width = "64"))]
fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut slice::Iter<u8>) {
    // Only refill when there is room in the 32-bit buffer for a full
    // 16-bit read on top of the bits already present.
    if l.num_bits >= 15 {
        return;
    }
    l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits;
    l.num_bits += 16;
}
442 | | |
443 | | /// Check that the zlib header is correct and that there is enough space in the buffer |
444 | | /// for the window size specified in the header. |
445 | | /// |
446 | | /// See https://tools.ietf.org/html/rfc1950 |
447 | | #[inline] |
448 | 0 | fn validate_zlib_header(cmf: u32, flg: u32, flags: u32, mask: usize) -> Action { |
449 | 0 | let mut failed = |
450 | | // cmf + flg should be divisible by 31. |
451 | 0 | (((cmf * 256) + flg) % 31 != 0) || |
452 | | // If this flag is set, a dictionary was used for this zlib compressed data. |
453 | | // This is currently not supported by miniz or miniz-oxide |
454 | 0 | ((flg & 0b0010_0000) != 0) || |
455 | | // Compression method. Only 8(DEFLATE) is defined by the standard. |
456 | 0 | ((cmf & 15) != 8); |
457 | | |
458 | 0 | let window_size = 1 << ((cmf >> 4) + 8); |
459 | 0 | if (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) == 0 { |
460 | 0 | // Bail if the buffer is wrapping and the window size is larger than the buffer. |
461 | 0 | failed |= (mask + 1) < window_size; |
462 | 0 | } |
463 | | |
464 | | // Zlib doesn't allow window sizes above 32 * 1024. |
465 | 0 | failed |= window_size > 32_768; |
466 | 0 |
|
467 | 0 | if failed { |
468 | 0 | Action::Jump(BadZlibHeader) |
469 | | } else { |
470 | 0 | Action::Jump(ReadBlockHeader) |
471 | | } |
472 | 0 | } |
473 | | |
/// Control-flow outcome passed around inside the decompressor state machine.
enum Action {
    /// Nothing to report; the caller keeps going in the current state.
    None,
    /// Transition the state machine to the given state.
    Jump(State),
    /// Stop decompressing and return the given status to the caller.
    End(TINFLStatus),
}
479 | | |
/// Try to decode the next huffman code, and puts it in the counter field of the decompressor
/// if successful.
///
/// # Returns
/// The specified action returned from `f` on success,
/// `Action::End` if there are not enough data left to decode a symbol.
fn decode_huffman_code<F>(
    r: &mut DecompressorOxide,
    l: &mut LocalVars,
    table: usize,
    flags: u32,
    in_iter: &mut slice::Iter<u8>,
    f: F,
) -> Action
where
    F: FnOnce(&mut DecompressorOxide, &mut LocalVars, i32) -> Action,
{
    // As the huffman codes can be up to 15 bits long we need at least 15 bits
    // ready in the bit buffer to start decoding the next huffman code.
    if l.num_bits < 15 {
        // First, make sure there is enough data in the bit buffer to decode a huffman code.
        if in_iter.len() < 2 {
            // If there is less than 2 bytes left in the input buffer, we try to look up
            // the huffman code with what's available, and return if that doesn't succeed.
            // Original explanation in miniz:
            // /* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
            //  * remaining in the input buffer falls below 2. */
            // /* It reads just enough bytes from the input stream that are needed to decode
            //  * the next Huffman code (and absolutely no more). It works by trying to fully
            //  * decode a */
            // /* Huffman code by using whatever bits are currently present in the bit buffer.
            //  * If this fails, it reads another byte, and tries again until it succeeds or
            //  * until the */
            // /* bit buffer contains >=15 bits (deflate's max. Huffman code size). */
            loop {
                let mut temp = i32::from(r.tables[table].fast_lookup(l.bit_buf));

                if temp >= 0 {
                    // Fast-table hit: the entry is only usable if its stored
                    // code length is non-zero and we have that many bits.
                    let code_len = (temp >> 9) as u32;
                    if (code_len != 0) && (l.num_bits >= code_len) {
                        break;
                    }
                } else if l.num_bits > FAST_LOOKUP_BITS.into() {
                    // Negative fast-table entry: walk the full tree with the
                    // bits currently buffered, one bit per level.
                    let mut code_len = u32::from(FAST_LOOKUP_BITS);
                    loop {
                        temp = i32::from(
                            r.tables[table].tree
                                [(!temp + ((l.bit_buf >> code_len) & 1) as i32) as usize],
                        );
                        code_len += 1;
                        if temp >= 0 || l.num_bits < code_len + 1 {
                            break;
                        }
                    }
                    if temp >= 0 {
                        break;
                    }
                }

                // TODO: miniz jumps straight to here after getting here again after failing to read
                // a byte.
                // Doing that lets miniz avoid re-doing the lookup that that was done in the
                // previous call.
                let mut byte = 0;
                if let a @ Action::End(_) = read_byte(in_iter, flags, |b| {
                    byte = b;
                    Action::None
                }) {
                    return a;
                };

                // Do this outside closure for now to avoid borrowing r.
                l.bit_buf |= BitBuffer::from(byte) << l.num_bits;
                l.num_bits += 8;

                if l.num_bits >= 15 {
                    break;
                }
            }
        } else {
            // There is enough data in the input buffer, so read the next two bytes
            // and add them to the bit buffer.
            // Unwrapping here is fine since we just checked that there are at least two
            // bytes left.
            l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits;
            l.num_bits += 16;
        }
    }

    // We now have at least 15 bits in the input buffer.
    let mut symbol = i32::from(r.tables[table].fast_lookup(l.bit_buf));
    let code_len;
    // If the symbol was found in the fast lookup table.
    if symbol >= 0 {
        // Get the length value from the top bits.
        // As we shift down the sign bit, converting to an unsigned value
        // shouldn't overflow.
        code_len = (symbol >> 9) as u32;
        // Mask out the length value.
        symbol &= 511;
    } else {
        // Not in the fast table; resolve symbol and length via the full tree.
        let res = r.tables[table].tree_lookup(symbol, l.bit_buf, u32::from(FAST_LOOKUP_BITS));
        symbol = res.0;
        code_len = res.1;
    };

    if code_len == 0 {
        // A zero-length code signals a corrupt table/stream.
        return Action::Jump(InvalidCodeLen);
    }

    // Consume the bits that made up the decoded code before handing off.
    l.bit_buf >>= code_len;
    l.num_bits -= code_len;
    f(r, l, symbol)
}
594 | | |
595 | | /// Try to read one byte from `in_iter` and call `f` with the read byte as an argument, |
596 | | /// returning the result. |
597 | | /// If reading fails, `Action::End is returned` |
598 | | #[inline] |
599 | 0 | fn read_byte<F>(in_iter: &mut slice::Iter<u8>, flags: u32, f: F) -> Action |
600 | 0 | where |
601 | 0 | F: FnOnce(u8) -> Action, |
602 | 0 | { |
603 | 0 | match in_iter.next() { |
604 | 0 | None => end_of_input(flags), |
605 | 0 | Some(&byte) => f(byte), |
606 | | } |
607 | 0 | } Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::decode_huffman_code<miniz_oxide::inflate::core::decompress::{closure#9}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::decode_huffman_code<miniz_oxide::inflate::core::decompress::{closure#11}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::decode_huffman_code<miniz_oxide::inflate::core::decompress::{closure#13}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::pad_to_bytes<miniz_oxide::inflate::core::decompress::{closure#3}>::{closure#0}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::pad_to_bytes<miniz_oxide::inflate::core::decompress::{closure#15}>::{closure#0}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::decompress::{closure#2}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::decompress::{closure#4}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::decompress::{closure#6}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::decompress::{closure#7}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::decompress::{closure#8}>::{closure#0}>Unexecuted instantiation: 
miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::decompress::{closure#10}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::decompress::{closure#12}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::decompress::{closure#14}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::read_bits<miniz_oxide::inflate::core::decompress::{closure#16}>::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::decompress::{closure#0}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::decompress::{closure#5}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::decompress::{closure#1}>Unexecuted instantiation: miniz_oxide::inflate::core::read_byte::<miniz_oxide::inflate::core::decompress::{closure#17}> |
608 | | |
// TODO: `l: &mut LocalVars` may be slow similar to decompress_fast (even with inline(always))
/// Try to read `amount` number of bits from `in_iter` and call the function `f` with the bits as
/// an argument after reading, returning the result of that function, or `Action::End` if there are
/// not enough bytes left.
#[inline]
#[allow(clippy::while_immutable_condition)]
fn read_bits<F>(
    l: &mut LocalVars,
    amount: u32,
    in_iter: &mut slice::Iter<u8>,
    flags: u32,
    f: F,
) -> Action
where
    F: FnOnce(&mut LocalVars, BitBuffer) -> Action,
{
    // Clippy gives a false positive warning here due to the closure.
    // Read enough bytes from the input iterator to cover the number of bits we want.
    while l.num_bits < amount {
        // The closure mutates `l.num_bits`, so the loop condition does make
        // progress despite what clippy's lint suggests (see allow above).
        let action = read_byte(in_iter, flags, |byte| {
            l.bit_buf |= BitBuffer::from(byte) << l.num_bits;
            l.num_bits += 8;
            Action::None
        });

        // If there are not enough bytes in the input iterator, return and signal that we need more.
        if !matches!(action, Action::None) {
            return action;
        }
    }

    // Extract the requested low bits and consume them from the buffer.
    let bits = l.bit_buf & ((1 << amount) - 1);
    l.bit_buf >>= amount;
    l.num_bits -= amount;
    f(l, bits)
}
645 | | |
646 | | #[inline] |
647 | 0 | fn pad_to_bytes<F>(l: &mut LocalVars, in_iter: &mut slice::Iter<u8>, flags: u32, f: F) -> Action |
648 | 0 | where |
649 | 0 | F: FnOnce(&mut LocalVars) -> Action, |
650 | 0 | { |
651 | 0 | let num_bits = l.num_bits & 7; |
652 | 0 | read_bits(l, num_bits, in_iter, flags, |l, _| f(l)) Unexecuted instantiation: miniz_oxide::inflate::core::pad_to_bytes::<miniz_oxide::inflate::core::decompress::{closure#3}>::{closure#0}Unexecuted instantiation: miniz_oxide::inflate::core::pad_to_bytes::<miniz_oxide::inflate::core::decompress::{closure#15}>::{closure#0} |
653 | 0 | } Unexecuted instantiation: miniz_oxide::inflate::core::pad_to_bytes::<miniz_oxide::inflate::core::decompress::{closure#3}>Unexecuted instantiation: miniz_oxide::inflate::core::pad_to_bytes::<miniz_oxide::inflate::core::decompress::{closure#15}> |
654 | | |
655 | | #[inline] |
656 | 0 | fn end_of_input(flags: u32) -> Action { |
657 | 0 | Action::End(if flags & TINFL_FLAG_HAS_MORE_INPUT != 0 { |
658 | 0 | TINFLStatus::NeedsMoreInput |
659 | | } else { |
660 | 0 | TINFLStatus::FailedCannotMakeProgress |
661 | | }) |
662 | 0 | } |
663 | | |
664 | | #[inline] |
665 | 0 | fn undo_bytes(l: &mut LocalVars, max: u32) -> u32 { |
666 | 0 | let res = cmp::min(l.num_bits >> 3, max); |
667 | 0 | l.num_bits -= res << 3; |
668 | 0 | res |
669 | 0 | } |
670 | | |
671 | 0 | fn start_static_table(r: &mut DecompressorOxide) { |
672 | 0 | r.table_sizes[LITLEN_TABLE] = 288; |
673 | 0 | r.table_sizes[DIST_TABLE] = 32; |
674 | 0 | memset(&mut r.tables[LITLEN_TABLE].code_size[0..144], 8); |
675 | 0 | memset(&mut r.tables[LITLEN_TABLE].code_size[144..256], 9); |
676 | 0 | memset(&mut r.tables[LITLEN_TABLE].code_size[256..280], 7); |
677 | 0 | memset(&mut r.tables[LITLEN_TABLE].code_size[280..288], 8); |
678 | 0 | memset(&mut r.tables[DIST_TABLE].code_size[0..32], 5); |
679 | 0 | } |
680 | | |
681 | | #[cfg(feature = "rustc-dep-of-std")] |
682 | | fn reverse_bits(n: u32) -> u32 { |
683 | | // Lookup is not used when building as part of std to avoid wasting space |
684 | | // for lookup table in every rust binary |
685 | | // as it's only used for backtraces in the cold path |
686 | | // - see #152 |
687 | | n.reverse_bits() |
688 | | } |
689 | | |
690 | | #[cfg(not(feature = "rustc-dep-of-std"))] |
691 | 0 | fn reverse_bits(n: u32) -> u32 { |
692 | 0 | static REVERSED_BITS_LOOKUP: [u32; 512] = { |
693 | 0 | let mut table = [0; 512]; |
694 | 0 |
|
695 | 0 | let mut i = 0; |
696 | 0 | while i < 512 { |
697 | 0 | table[i] = (i as u32).reverse_bits(); |
698 | 0 | i += 1; |
699 | 0 | } |
700 | 0 |
|
701 | 0 | table |
702 | 0 | }; |
703 | 0 |
|
704 | 0 | REVERSED_BITS_LOOKUP[n as usize] |
705 | 0 | } |
706 | | |
707 | 0 | fn init_tree(r: &mut DecompressorOxide, l: &mut LocalVars) -> Option<Action> { |
708 | | loop { |
709 | 0 | let bt = r.block_type as usize; |
710 | 0 | if bt >= r.tables.len() { |
711 | 0 | return None; |
712 | 0 | } |
713 | 0 | let table = &mut r.tables[bt]; |
714 | 0 | let table_size = r.table_sizes[bt] as usize; |
715 | 0 | if table_size > table.code_size.len() { |
716 | 0 | return None; |
717 | 0 | } |
718 | 0 | let mut total_symbols = [0u32; 16]; |
719 | 0 | let mut next_code = [0u32; 17]; |
720 | 0 | memset(&mut table.look_up[..], 0); |
721 | 0 | memset(&mut table.tree[..], 0); |
722 | | |
723 | 0 | for &code_size in &table.code_size[..table_size] { |
724 | 0 | let cs = code_size as usize; |
725 | 0 | if cs >= total_symbols.len() { |
726 | 0 | return None; |
727 | 0 | } |
728 | 0 | total_symbols[cs] += 1; |
729 | | } |
730 | | |
731 | 0 | let mut used_symbols = 0; |
732 | 0 | let mut total = 0; |
733 | 0 | for (ts, next) in total_symbols |
734 | 0 | .iter() |
735 | 0 | .copied() |
736 | 0 | .zip(next_code.iter_mut().skip(1)) |
737 | 0 | .skip(1) |
738 | 0 | { |
739 | 0 | used_symbols += ts; |
740 | 0 | total += ts; |
741 | 0 | total <<= 1; |
742 | 0 | *next = total; |
743 | 0 | } |
744 | | |
745 | 0 | if total != 65_536 && used_symbols > 1 { |
746 | 0 | return Some(Action::Jump(BadTotalSymbols)); |
747 | 0 | } |
748 | 0 |
|
749 | 0 | let mut tree_next = -1; |
750 | 0 | for symbol_index in 0..table_size { |
751 | 0 | let code_size = table.code_size[symbol_index]; |
752 | 0 | if code_size == 0 || usize::from(code_size) >= next_code.len() { |
753 | 0 | continue; |
754 | 0 | } |
755 | 0 |
|
756 | 0 | let cur_code = next_code[code_size as usize]; |
757 | 0 | next_code[code_size as usize] += 1; |
758 | 0 |
|
759 | 0 | let n = cur_code & (u32::MAX >> (32 - code_size)); |
760 | | |
761 | 0 | let mut rev_code = if n < 512 { |
762 | | // Using a lookup table |
763 | | // for a small speedup here, |
764 | | // Seems to only really make a difference on very short |
765 | | // inputs however. |
766 | | // 512 seems to be around a sweet spot. |
767 | 0 | reverse_bits(n) |
768 | | } else { |
769 | 0 | n.reverse_bits() |
770 | 0 | } >> (32 - code_size); |
771 | 0 |
|
772 | 0 | if code_size <= FAST_LOOKUP_BITS { |
773 | 0 | let k = (i16::from(code_size) << 9) | symbol_index as i16; |
774 | 0 | while rev_code < FAST_LOOKUP_SIZE { |
775 | 0 | table.look_up[rev_code as usize] = k; |
776 | 0 | rev_code += 1 << code_size; |
777 | 0 | } |
778 | 0 | continue; |
779 | 0 | } |
780 | 0 |
|
781 | 0 | let mut tree_cur = table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize]; |
782 | 0 | if tree_cur == 0 { |
783 | 0 | table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize] = tree_next; |
784 | 0 | tree_cur = tree_next; |
785 | 0 | tree_next -= 2; |
786 | 0 | } |
787 | | |
788 | 0 | rev_code >>= FAST_LOOKUP_BITS - 1; |
789 | 0 | for _ in FAST_LOOKUP_BITS + 1..code_size { |
790 | 0 | rev_code >>= 1; |
791 | 0 | tree_cur -= (rev_code & 1) as i16; |
792 | 0 | let tree_index = (-tree_cur - 1) as usize; |
793 | 0 | if tree_index >= table.tree.len() { |
794 | 0 | return None; |
795 | 0 | } |
796 | 0 | if table.tree[tree_index] == 0 { |
797 | 0 | table.tree[tree_index] = tree_next; |
798 | 0 | tree_cur = tree_next; |
799 | 0 | tree_next -= 2; |
800 | 0 | } else { |
801 | 0 | tree_cur = table.tree[tree_index]; |
802 | 0 | } |
803 | | } |
804 | | |
805 | 0 | rev_code >>= 1; |
806 | 0 | tree_cur -= (rev_code & 1) as i16; |
807 | 0 | let tree_index = (-tree_cur - 1) as usize; |
808 | 0 | if tree_index >= table.tree.len() { |
809 | 0 | return None; |
810 | 0 | } |
811 | 0 | table.tree[tree_index] = symbol_index as i16; |
812 | | } |
813 | | |
814 | 0 | if r.block_type == 2 { |
815 | 0 | l.counter = 0; |
816 | 0 | return Some(Action::Jump(ReadLitlenDistTablesCodeSize)); |
817 | 0 | } |
818 | 0 |
|
819 | 0 | if r.block_type == 0 { |
820 | 0 | break; |
821 | 0 | } |
822 | 0 | r.block_type -= 1; |
823 | | } |
824 | | |
825 | 0 | l.counter = 0; |
826 | 0 | Some(Action::Jump(DecodeLitlen)) |
827 | 0 | } |
828 | | |
829 | | // A helper macro for generating the state machine. |
830 | | // |
831 | | // As Rust doesn't have fallthrough on matches, we have to return to the match statement |
832 | | // and jump for each state change. (Which would ideally be optimized away, but often isn't.) |
833 | | macro_rules! generate_state { |
834 | | ($state: ident, $state_machine: tt, $f: expr) => { |
835 | | loop { |
836 | | match $f { |
837 | | Action::None => continue, |
838 | | Action::Jump(new_state) => { |
839 | | $state = new_state; |
840 | | continue $state_machine; |
841 | | }, |
842 | | Action::End(result) => break $state_machine result, |
843 | | } |
844 | | } |
845 | | }; |
846 | | } |
847 | | |
848 | | #[derive(Copy, Clone)] |
849 | | struct LocalVars { |
850 | | pub bit_buf: BitBuffer, |
851 | | pub num_bits: u32, |
852 | | pub dist: u32, |
853 | | pub counter: u32, |
854 | | pub num_extra: u32, |
855 | | } |
856 | | |
857 | | #[inline] |
858 | 0 | fn transfer( |
859 | 0 | out_slice: &mut [u8], |
860 | 0 | mut source_pos: usize, |
861 | 0 | mut out_pos: usize, |
862 | 0 | match_len: usize, |
863 | 0 | out_buf_size_mask: usize, |
864 | 0 | ) { |
865 | | // special case that comes up surprisingly often. in the case that `source_pos` |
866 | | // is 1 less than `out_pos`, we can say that the entire range will be the same |
867 | | // value and optimize this to be a simple `memset` |
868 | 0 | let source_diff = if source_pos > out_pos { |
869 | 0 | source_pos - out_pos |
870 | | } else { |
871 | 0 | out_pos - source_pos |
872 | | }; |
873 | 0 | if out_buf_size_mask == usize::MAX && source_diff == 1 && out_pos > source_pos { |
874 | 0 | let init = out_slice[out_pos - 1]; |
875 | 0 | let end = (match_len >> 2) * 4 + out_pos; |
876 | 0 |
|
877 | 0 | out_slice[out_pos..end].fill(init); |
878 | 0 | out_pos = end; |
879 | 0 | source_pos = end - 1; |
880 | 0 | // if the difference between `source_pos` and `out_pos` is greater than 3, we |
881 | 0 | // can do slightly better than the naive case by copying everything at once |
882 | 0 | } else if out_buf_size_mask == usize::MAX && source_diff >= 4 && out_pos > source_pos { |
883 | 0 | for _ in 0..match_len >> 2 { |
884 | 0 | out_slice.copy_within(source_pos..=source_pos + 3, out_pos); |
885 | 0 | source_pos += 4; |
886 | 0 | out_pos += 4; |
887 | 0 | } |
888 | | } else { |
889 | 0 | for _ in 0..match_len >> 2 { |
890 | 0 | out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask]; |
891 | 0 | out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask]; |
892 | 0 | out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask]; |
893 | 0 | out_slice[out_pos + 3] = out_slice[(source_pos + 3) & out_buf_size_mask]; |
894 | 0 | source_pos += 4; |
895 | 0 | out_pos += 4; |
896 | 0 | } |
897 | | } |
898 | | |
899 | 0 | match match_len & 3 { |
900 | 0 | 0 => (), |
901 | 0 | 1 => out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask], |
902 | 0 | 2 => { |
903 | 0 | out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask]; |
904 | 0 | out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask]; |
905 | 0 | } |
906 | 0 | 3 => { |
907 | 0 | out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask]; |
908 | 0 | out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask]; |
909 | 0 | out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask]; |
910 | 0 | } |
911 | 0 | _ => unreachable!(), |
912 | | } |
913 | 0 | } |
914 | | |
915 | | /// Presumes that there is at least match_len bytes in output left. |
916 | | #[inline] |
917 | 0 | fn apply_match( |
918 | 0 | out_slice: &mut [u8], |
919 | 0 | out_pos: usize, |
920 | 0 | dist: usize, |
921 | 0 | match_len: usize, |
922 | 0 | out_buf_size_mask: usize, |
923 | 0 | ) { |
924 | 0 | debug_assert!(out_pos.checked_add(match_len).unwrap() <= out_slice.len()); |
925 | | |
926 | 0 | let source_pos = out_pos.wrapping_sub(dist) & out_buf_size_mask; |
927 | 0 |
|
928 | 0 | if match_len == 3 { |
929 | 0 | let out_slice = Cell::from_mut(out_slice).as_slice_of_cells(); |
930 | 0 | if let Some(dst) = out_slice.get(out_pos..out_pos + 3) { |
931 | | // Moving bounds checks before any memory mutation allows the optimizer |
932 | | // combine them together. |
933 | 0 | let src = out_slice |
934 | 0 | .get(source_pos) |
935 | 0 | .zip(out_slice.get((source_pos + 1) & out_buf_size_mask)) |
936 | 0 | .zip(out_slice.get((source_pos + 2) & out_buf_size_mask)); |
937 | 0 | if let Some(((a, b), c)) = src { |
938 | 0 | // For correctness, the memory reads and writes have to be interleaved. |
939 | 0 | // Cells make it possible for read and write references to overlap. |
940 | 0 | dst[0].set(a.get()); |
941 | 0 | dst[1].set(b.get()); |
942 | 0 | dst[2].set(c.get()); |
943 | 0 | } |
944 | 0 | } |
945 | 0 | return; |
946 | 0 | } |
947 | 0 |
|
948 | 0 | if cfg!(not(any(target_arch = "x86", target_arch = "x86_64"))) { |
949 | | // We are not on x86 so copy manually. |
950 | 0 | transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask); |
951 | 0 | return; |
952 | 0 | } |
953 | 0 |
|
954 | 0 | if source_pos >= out_pos && (source_pos - out_pos) < match_len { |
955 | 0 | transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask); |
956 | 0 | } else if match_len <= dist && source_pos + match_len < out_slice.len() { |
957 | | // Destination and source segments does not intersect and source does not wrap. |
958 | 0 | if source_pos < out_pos { |
959 | 0 | let (from_slice, to_slice) = out_slice.split_at_mut(out_pos); |
960 | 0 | to_slice[..match_len].copy_from_slice(&from_slice[source_pos..source_pos + match_len]); |
961 | 0 | } else { |
962 | 0 | let (to_slice, from_slice) = out_slice.split_at_mut(source_pos); |
963 | 0 | to_slice[out_pos..out_pos + match_len].copy_from_slice(&from_slice[..match_len]); |
964 | 0 | } |
965 | 0 | } else { |
966 | 0 | transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask); |
967 | 0 | } |
968 | 0 | } |
969 | | |
970 | | /// Fast inner decompression loop which is run while there is at least |
971 | | /// 259 bytes left in the output buffer, and at least 6 bytes left in the input buffer |
972 | | /// (The maximum one match would need + 1). |
973 | | /// |
974 | | /// This was inspired by a similar optimization in zlib, which uses this info to do |
975 | | /// faster unchecked copies of multiple bytes at a time. |
976 | | /// Currently we don't do this here, but this function does avoid having to jump through the |
977 | | /// big match loop on each state change(as rust does not have fallthrough or gotos at the moment), |
978 | | /// and already improves decompression speed a fair bit. |
979 | 0 | fn decompress_fast( |
980 | 0 | r: &mut DecompressorOxide, |
981 | 0 | in_iter: &mut slice::Iter<u8>, |
982 | 0 | out_buf: &mut OutputBuffer, |
983 | 0 | flags: u32, |
984 | 0 | local_vars: &mut LocalVars, |
985 | 0 | out_buf_size_mask: usize, |
986 | 0 | ) -> (TINFLStatus, State) { |
987 | 0 | // Make a local copy of the most used variables, to avoid having to update and read from values |
988 | 0 | // in a random memory location and to encourage more register use. |
989 | 0 | let mut l = *local_vars; |
990 | | let mut state; |
991 | | |
992 | 0 | let status: TINFLStatus = 'o: loop { |
993 | 0 | state = State::DecodeLitlen; |
994 | 0 | loop { |
995 | 0 | // This function assumes that there is at least 259 bytes left in the output buffer, |
996 | 0 | // and that there is at least 14 bytes left in the input buffer. 14 input bytes: |
997 | 0 | // 15 (prev lit) + 15 (length) + 5 (length extra) + 15 (dist) |
998 | 0 | // + 29 + 32 (left in bit buf, including last 13 dist extra) = 111 bits < 14 bytes |
999 | 0 | // We need the one extra byte as we may write one length and one full match |
1000 | 0 | // before checking again. |
1001 | 0 | if out_buf.bytes_left() < 259 || in_iter.len() < 14 { |
1002 | 0 | state = State::DecodeLitlen; |
1003 | 0 | break 'o TINFLStatus::Done; |
1004 | 0 | } |
1005 | 0 |
|
1006 | 0 | fill_bit_buffer(&mut l, in_iter); |
1007 | | |
1008 | 0 | if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) { |
1009 | 0 | l.counter = symbol as u32; |
1010 | 0 | l.bit_buf >>= code_len; |
1011 | 0 | l.num_bits -= code_len; |
1012 | 0 |
|
1013 | 0 | if (l.counter & 256) != 0 { |
1014 | | // The symbol is not a literal. |
1015 | 0 | break; |
1016 | | } else { |
1017 | | // If we have a 32-bit buffer we need to read another two bytes now |
1018 | | // to have enough bits to keep going. |
1019 | 0 | if cfg!(not(target_pointer_width = "64")) { |
1020 | 0 | fill_bit_buffer(&mut l, in_iter); |
1021 | 0 | } |
1022 | | |
1023 | 0 | if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) { |
1024 | 0 | l.bit_buf >>= code_len; |
1025 | 0 | l.num_bits -= code_len; |
1026 | 0 | // The previous symbol was a literal, so write it directly and check |
1027 | 0 | // the next one. |
1028 | 0 | out_buf.write_byte(l.counter as u8); |
1029 | 0 | if (symbol & 256) != 0 { |
1030 | 0 | l.counter = symbol as u32; |
1031 | 0 | // The symbol is a length value. |
1032 | 0 | break; |
1033 | 0 | } else { |
1034 | 0 | // The symbol is a literal, so write it directly and continue. |
1035 | 0 | out_buf.write_byte(symbol as u8); |
1036 | 0 | } |
1037 | | } else { |
1038 | 0 | state.begin(InvalidCodeLen); |
1039 | 0 | break 'o TINFLStatus::Failed; |
1040 | | } |
1041 | | } |
1042 | | } else { |
1043 | 0 | state.begin(InvalidCodeLen); |
1044 | 0 | break 'o TINFLStatus::Failed; |
1045 | | } |
1046 | | } |
1047 | | |
1048 | | // Mask the top bits since they may contain length info. |
1049 | 0 | l.counter &= 511; |
1050 | 0 | if l.counter == 256 { |
1051 | | // We hit the end of block symbol. |
1052 | 0 | state.begin(BlockDone); |
1053 | 0 | break 'o TINFLStatus::Done; |
1054 | 0 | } else if l.counter > 285 { |
1055 | | // Invalid code. |
1056 | | // We already verified earlier that the code is > 256. |
1057 | 0 | state.begin(InvalidLitlen); |
1058 | 0 | break 'o TINFLStatus::Failed; |
1059 | | } else { |
1060 | | // The symbol was a length code. |
1061 | | // # Optimization |
1062 | | // Mask the value to avoid bounds checks |
1063 | | // We could use get_unchecked later if can statically verify that |
1064 | | // this will never go out of bounds. |
1065 | 0 | l.num_extra = u32::from(LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK]); |
1066 | 0 | l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]); |
1067 | 0 | // Length and distance codes have a number of extra bits depending on |
1068 | 0 | // the base, which together with the base gives us the exact value. |
1069 | 0 |
|
1070 | 0 | fill_bit_buffer(&mut l, in_iter); |
1071 | 0 | if l.num_extra != 0 { |
1072 | 0 | let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1); |
1073 | 0 | l.bit_buf >>= l.num_extra; |
1074 | 0 | l.num_bits -= l.num_extra; |
1075 | 0 | l.counter += extra_bits as u32; |
1076 | 0 | } |
1077 | | |
1078 | | // We found a length code, so a distance code should follow. |
1079 | | |
1080 | 0 | if cfg!(not(target_pointer_width = "64")) { |
1081 | 0 | fill_bit_buffer(&mut l, in_iter); |
1082 | 0 | } |
1083 | | |
1084 | 0 | if let Some((mut symbol, code_len)) = r.tables[DIST_TABLE].lookup(l.bit_buf) { |
1085 | 0 | symbol &= 511; |
1086 | 0 | l.bit_buf >>= code_len; |
1087 | 0 | l.num_bits -= code_len; |
1088 | 0 | if symbol > 29 { |
1089 | 0 | state.begin(InvalidDist); |
1090 | 0 | break 'o TINFLStatus::Failed; |
1091 | 0 | } |
1092 | 0 |
|
1093 | 0 | l.num_extra = u32::from(DIST_EXTRA[symbol as usize]); |
1094 | 0 | l.dist = u32::from(DIST_BASE[symbol as usize]); |
1095 | | } else { |
1096 | 0 | state.begin(InvalidCodeLen); |
1097 | 0 | break 'o TINFLStatus::Failed; |
1098 | | } |
1099 | | |
1100 | 0 | if l.num_extra != 0 { |
1101 | 0 | fill_bit_buffer(&mut l, in_iter); |
1102 | 0 | let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1); |
1103 | 0 | l.bit_buf >>= l.num_extra; |
1104 | 0 | l.num_bits -= l.num_extra; |
1105 | 0 | l.dist += extra_bits as u32; |
1106 | 0 | } |
1107 | | |
1108 | 0 | let position = out_buf.position(); |
1109 | 0 | if l.dist as usize > out_buf.position() |
1110 | 0 | && (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0) |
1111 | | { |
1112 | | // We encountered a distance that refers a position before |
1113 | | // the start of the decoded data, so we can't continue. |
1114 | 0 | state.begin(DistanceOutOfBounds); |
1115 | 0 | break TINFLStatus::Failed; |
1116 | 0 | } |
1117 | 0 |
|
1118 | 0 | apply_match( |
1119 | 0 | out_buf.get_mut(), |
1120 | 0 | position, |
1121 | 0 | l.dist as usize, |
1122 | 0 | l.counter as usize, |
1123 | 0 | out_buf_size_mask, |
1124 | 0 | ); |
1125 | 0 |
|
1126 | 0 | out_buf.set_position(position + l.counter as usize); |
1127 | | } |
1128 | | }; |
1129 | | |
1130 | 0 | *local_vars = l; |
1131 | 0 | (status, state) |
1132 | 0 | } |
1133 | | |
1134 | | /// Main decompression function. Keeps decompressing data from `in_buf` until the `in_buf` is |
1135 | | /// empty, `out` is full, the end of the deflate stream is hit, or there is an error in the |
1136 | | /// deflate stream. |
1137 | | /// |
1138 | | /// # Arguments |
1139 | | /// |
1140 | | /// `r` is a [`DecompressorOxide`] struct with the state of this stream. |
1141 | | /// |
1142 | | /// `in_buf` is a reference to the compressed data that is to be decompressed. The decompressor will |
1143 | | /// start at the first byte of this buffer. |
1144 | | /// |
1145 | | /// `out` is a reference to the buffer that will store the decompressed data, and that |
1146 | | /// stores previously decompressed data if any. |
1147 | | /// |
1148 | | /// * The offset given by `out_pos` indicates where in the output buffer slice writing should start. |
1149 | | /// * If [`TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF`] is not set, the output buffer is used in a |
1150 | | /// wrapping manner, and it's size is required to be a power of 2. |
1151 | | /// * The decompression function normally needs access to 32KiB of the previously decompressed data |
1152 | | ///(or to the beginning of the decompressed data if less than 32KiB has been decompressed.) |
1153 | | /// - If this data is not available, decompression may fail. |
1154 | | /// - Some deflate compressors allow specifying a window size which limits match distances to |
1155 | | /// less than this, or alternatively an RLE mode where matches will only refer to the previous byte |
1156 | | /// and thus allows a smaller output buffer. The window size can be specified in the zlib |
1157 | | /// header structure, however, the header data should not be relied on to be correct. |
1158 | | /// |
1159 | | /// `flags` indicates settings and status to the decompression function. |
1160 | | /// * The [`TINFL_FLAG_HAS_MORE_INPUT`] has to be specified if more compressed data is to be provided |
1161 | | /// in a subsequent call to this function. |
1162 | | /// * See the the [`inflate_flags`] module for details on other flags. |
1163 | | /// |
1164 | | /// # Returns |
1165 | | /// |
1166 | | /// Returns a tuple containing the status of the compressor, the number of input bytes read, and the |
1167 | | /// number of bytes output to `out`. |
1168 | | /// |
1169 | | /// This function shouldn't panic pending any bugs. |
1170 | 0 | pub fn decompress( |
1171 | 0 | r: &mut DecompressorOxide, |
1172 | 0 | in_buf: &[u8], |
1173 | 0 | out: &mut [u8], |
1174 | 0 | out_pos: usize, |
1175 | 0 | flags: u32, |
1176 | 0 | ) -> (TINFLStatus, usize, usize) { |
1177 | 0 | let out_buf_size_mask = if flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0 { |
1178 | 0 | usize::max_value() |
1179 | | } else { |
1180 | | // In the case of zero len, any attempt to write would produce HasMoreOutput, |
1181 | | // so to gracefully process the case of there really being no output, |
1182 | | // set the mask to all zeros. |
1183 | 0 | out.len().saturating_sub(1) |
1184 | | }; |
1185 | | |
1186 | | // Ensure the output buffer's size is a power of 2, unless the output buffer |
1187 | | // is large enough to hold the entire output file (in which case it doesn't |
1188 | | // matter). |
1189 | | // Also make sure that the output buffer position is not past the end of the output buffer. |
1190 | 0 | if (out_buf_size_mask.wrapping_add(1) & out_buf_size_mask) != 0 || out_pos > out.len() { |
1191 | 0 | return (TINFLStatus::BadParam, 0, 0); |
1192 | 0 | } |
1193 | 0 |
|
1194 | 0 | let mut in_iter = in_buf.iter(); |
1195 | 0 |
|
1196 | 0 | let mut state = r.state; |
1197 | 0 |
|
1198 | 0 | let mut out_buf = OutputBuffer::from_slice_and_pos(out, out_pos); |
1199 | 0 |
|
1200 | 0 | // Make a local copy of the important variables here so we can work with them on the stack. |
1201 | 0 | let mut l = LocalVars { |
1202 | 0 | bit_buf: r.bit_buf, |
1203 | 0 | num_bits: r.num_bits, |
1204 | 0 | dist: r.dist, |
1205 | 0 | counter: r.counter, |
1206 | 0 | num_extra: r.num_extra, |
1207 | 0 | }; |
1208 | | |
1209 | 0 | let mut status = 'state_machine: loop { |
1210 | 0 | match state { |
1211 | 0 | Start => generate_state!(state, 'state_machine, { |
1212 | 0 | l.bit_buf = 0; |
1213 | 0 | l.num_bits = 0; |
1214 | 0 | l.dist = 0; |
1215 | 0 | l.counter = 0; |
1216 | 0 | l.num_extra = 0; |
1217 | 0 | r.z_header0 = 0; |
1218 | 0 | r.z_header1 = 0; |
1219 | 0 | r.z_adler32 = 1; |
1220 | 0 | r.check_adler32 = 1; |
1221 | 0 | if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 { |
1222 | 0 | Action::Jump(State::ReadZlibCmf) |
1223 | | } else { |
1224 | 0 | Action::Jump(State::ReadBlockHeader) |
1225 | | } |
1226 | | }), |
1227 | | |
1228 | 0 | ReadZlibCmf => generate_state!(state, 'state_machine, { |
1229 | 0 | read_byte(&mut in_iter, flags, |cmf| { |
1230 | 0 | r.z_header0 = u32::from(cmf); |
1231 | 0 | Action::Jump(State::ReadZlibFlg) |
1232 | 0 | }) |
1233 | 0 | }), |
1234 | | |
1235 | 0 | ReadZlibFlg => generate_state!(state, 'state_machine, { |
1236 | 0 | read_byte(&mut in_iter, flags, |flg| { |
1237 | 0 | r.z_header1 = u32::from(flg); |
1238 | 0 | validate_zlib_header(r.z_header0, r.z_header1, flags, out_buf_size_mask) |
1239 | 0 | }) |
1240 | 0 | }), |
1241 | | |
1242 | | // Read the block header and jump to the relevant section depending on the block type. |
1243 | 0 | ReadBlockHeader => generate_state!(state, 'state_machine, { |
1244 | 0 | read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| { |
1245 | 0 | r.finish = (bits & 1) as u32; |
1246 | 0 | r.block_type = (bits >> 1) as u32 & 3; |
1247 | 0 | match r.block_type { |
1248 | 0 | 0 => Action::Jump(BlockTypeNoCompression), |
1249 | | 1 => { |
1250 | 0 | start_static_table(r); |
1251 | 0 | init_tree(r, l).unwrap_or(Action::End(TINFLStatus::Failed)) |
1252 | | }, |
1253 | | 2 => { |
1254 | 0 | l.counter = 0; |
1255 | 0 | Action::Jump(ReadTableSizes) |
1256 | | }, |
1257 | 0 | 3 => Action::Jump(BlockTypeUnexpected), |
1258 | 0 | _ => unreachable!() |
1259 | | } |
1260 | 0 | }) |
1261 | 0 | }), |
1262 | | |
1263 | | // Raw/Stored/uncompressed block. |
1264 | 0 | BlockTypeNoCompression => generate_state!(state, 'state_machine, { |
1265 | 0 | pad_to_bytes(&mut l, &mut in_iter, flags, |l| { |
1266 | 0 | l.counter = 0; |
1267 | 0 | Action::Jump(RawHeader) |
1268 | 0 | }) |
1269 | 0 | }), |
1270 | | |
1271 | | // Check that the raw block header is correct. |
1272 | 0 | RawHeader => generate_state!(state, 'state_machine, { |
1273 | 0 | if l.counter < 4 { |
1274 | | // Read block length and block length check. |
1275 | 0 | if l.num_bits != 0 { |
1276 | 0 | read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| { |
1277 | 0 | r.raw_header[l.counter as usize] = bits as u8; |
1278 | 0 | l.counter += 1; |
1279 | 0 | Action::None |
1280 | 0 | }) |
1281 | | } else { |
1282 | 0 | read_byte(&mut in_iter, flags, |byte| { |
1283 | 0 | r.raw_header[l.counter as usize] = byte; |
1284 | 0 | l.counter += 1; |
1285 | 0 | Action::None |
1286 | 0 | }) |
1287 | | } |
1288 | | } else { |
1289 | | // Check if the length value of a raw block is correct. |
1290 | | // The 2 first (2-byte) words in a raw header are the length and the |
1291 | | // ones complement of the length. |
1292 | 0 | let length = u16::from(r.raw_header[0]) | (u16::from(r.raw_header[1]) << 8); |
1293 | 0 | let check = u16::from(r.raw_header[2]) | (u16::from(r.raw_header[3]) << 8); |
1294 | 0 | let valid = length == !check; |
1295 | 0 | l.counter = length.into(); |
1296 | 0 |
|
1297 | 0 | if !valid { |
1298 | 0 | Action::Jump(BadRawLength) |
1299 | 0 | } else if l.counter == 0 { |
1300 | | // Empty raw block. Sometimes used for synchronization. |
1301 | 0 | Action::Jump(BlockDone) |
1302 | 0 | } else if l.num_bits != 0 { |
1303 | | // There is some data in the bit buffer, so we need to write that first. |
1304 | 0 | Action::Jump(RawReadFirstByte) |
1305 | | } else { |
1306 | | // The bit buffer is empty, so memcpy the rest of the uncompressed data from |
1307 | | // the block. |
1308 | 0 | Action::Jump(RawMemcpy1) |
1309 | | } |
1310 | | } |
1311 | | }), |
1312 | | |
1313 | | // Read the byte from the bit buffer. |
1314 | 0 | RawReadFirstByte => generate_state!(state, 'state_machine, { |
1315 | 0 | read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| { |
1316 | 0 | l.dist = bits as u32; |
1317 | 0 | Action::Jump(RawStoreFirstByte) |
1318 | 0 | }) |
1319 | 0 | }), |
1320 | | |
1321 | | // Write the byte we just read to the output buffer. |
1322 | 0 | RawStoreFirstByte => generate_state!(state, 'state_machine, { |
1323 | 0 | if out_buf.bytes_left() == 0 { |
1324 | 0 | Action::End(TINFLStatus::HasMoreOutput) |
1325 | | } else { |
1326 | 0 | out_buf.write_byte(l.dist as u8); |
1327 | 0 | l.counter -= 1; |
1328 | 0 | if l.counter == 0 || l.num_bits == 0 { |
1329 | 0 | Action::Jump(RawMemcpy1) |
1330 | | } else { |
1331 | | // There is still some data left in the bit buffer that needs to be output. |
1332 | | // TODO: Changed this to jump to `RawReadfirstbyte` rather than |
1333 | | // `RawStoreFirstByte` as that seemed to be the correct path, but this |
1334 | | // needs testing. |
1335 | 0 | Action::Jump(RawReadFirstByte) |
1336 | | } |
1337 | | } |
1338 | | }), |
1339 | | |
1340 | 0 | RawMemcpy1 => generate_state!(state, 'state_machine, { |
1341 | 0 | if l.counter == 0 { |
1342 | 0 | Action::Jump(BlockDone) |
1343 | 0 | } else if out_buf.bytes_left() == 0 { |
1344 | 0 | Action::End(TINFLStatus::HasMoreOutput) |
1345 | | } else { |
1346 | 0 | Action::Jump(RawMemcpy2) |
1347 | | } |
1348 | | }), |
1349 | | |
1350 | 0 | RawMemcpy2 => generate_state!(state, 'state_machine, { |
1351 | 0 | if in_iter.len() > 0 { |
1352 | | // Copy as many raw bytes as possible from the input to the output using memcpy. |
1353 | | // Raw block lengths are limited to 64 * 1024, so casting through usize and u32 |
1354 | | // is not an issue. |
1355 | 0 | let space_left = out_buf.bytes_left(); |
1356 | 0 | let bytes_to_copy = cmp::min(cmp::min( |
1357 | 0 | space_left, |
1358 | 0 | in_iter.len()), |
1359 | 0 | l.counter as usize |
1360 | 0 | ); |
1361 | 0 |
|
1362 | 0 | out_buf.write_slice(&in_iter.as_slice()[..bytes_to_copy]); |
1363 | 0 |
|
1364 | 0 | in_iter.nth(bytes_to_copy - 1); |
1365 | 0 | l.counter -= bytes_to_copy as u32; |
1366 | 0 | Action::Jump(RawMemcpy1) |
1367 | | } else { |
1368 | 0 | end_of_input(flags) |
1369 | | } |
1370 | | }), |
1371 | | |
1372 | | // Read how many huffman codes/symbols are used for each table. |
1373 | 0 | ReadTableSizes => generate_state!(state, 'state_machine, { |
1374 | 0 | if l.counter < 3 { |
1375 | 0 | let num_bits = [5, 5, 4][l.counter as usize]; |
1376 | 0 | read_bits(&mut l, num_bits, &mut in_iter, flags, |l, bits| { |
1377 | 0 | r.table_sizes[l.counter as usize] = |
1378 | 0 | bits as u32 + u32::from(MIN_TABLE_SIZES[l.counter as usize]); |
1379 | 0 | l.counter += 1; |
1380 | 0 | Action::None |
1381 | 0 | }) |
1382 | | } else { |
1383 | 0 | memset(&mut r.tables[HUFFLEN_TABLE].code_size[..], 0); |
1384 | 0 | l.counter = 0; |
1385 | 0 | // Check that the litlen and distance are within spec. |
1386 | 0 | // litlen table should be <=286 acc to the RFC and |
1387 | 0 | // additionally zlib rejects dist table sizes larger than 30. |
1388 | 0 | // NOTE this the final sizes after adding back predefined values, not |
1389 | 0 | // raw value in the data. |
1390 | 0 | // See miniz_oxide issue #130 and https://github.com/madler/zlib/issues/82. |
1391 | 0 | if r.table_sizes[LITLEN_TABLE] <= 286 && r.table_sizes[DIST_TABLE] <= 30 { |
1392 | 0 | Action::Jump(ReadHufflenTableCodeSize) |
1393 | | } |
1394 | | else { |
1395 | 0 | Action::Jump(BadDistOrLiteralTableLength) |
1396 | | } |
1397 | | } |
1398 | | }), |
1399 | | |
1400 | | // Read the 3-bit lengths of the huffman codes describing the huffman code lengths used |
1401 | | // to decode the lengths of the main tables. |
1402 | 0 | ReadHufflenTableCodeSize => generate_state!(state, 'state_machine, { |
1403 | 0 | if l.counter < r.table_sizes[HUFFLEN_TABLE] { |
1404 | 0 | read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| { |
1405 | 0 | // These lengths are not stored in a normal ascending order, but rather one |
1406 | 0 | // specified by the deflate specification intended to put the most used |
1407 | 0 | // values at the front as trailing zero lengths do not have to be stored. |
1408 | 0 | r.tables[HUFFLEN_TABLE] |
1409 | 0 | .code_size[HUFFMAN_LENGTH_ORDER[l.counter as usize] as usize] = |
1410 | 0 | bits as u8; |
1411 | 0 | l.counter += 1; |
1412 | 0 | Action::None |
1413 | 0 | }) |
1414 | | } else { |
1415 | 0 | r.table_sizes[HUFFLEN_TABLE] = 19; |
1416 | 0 | init_tree(r, &mut l).unwrap_or(Action::End(TINFLStatus::Failed)) |
1417 | | } |
1418 | | }), |
1419 | | |
1420 | 0 | ReadLitlenDistTablesCodeSize => generate_state!(state, 'state_machine, { |
1421 | 0 | if l.counter < r.table_sizes[LITLEN_TABLE] + r.table_sizes[DIST_TABLE] { |
1422 | 0 | decode_huffman_code( |
1423 | 0 | r, &mut l, HUFFLEN_TABLE, |
1424 | 0 | flags, &mut in_iter, |r, l, symbol| { |
1425 | 0 | l.dist = symbol as u32; |
1426 | 0 | if l.dist < 16 { |
1427 | 0 | r.len_codes[l.counter as usize] = l.dist as u8; |
1428 | 0 | l.counter += 1; |
1429 | 0 | Action::None |
1430 | 0 | } else if l.dist == 16 && l.counter == 0 { |
1431 | 0 | Action::Jump(BadCodeSizeDistPrevLookup) |
1432 | | } else { |
1433 | 0 | l.num_extra = [2, 3, 7][l.dist as usize - 16]; |
1434 | 0 | Action::Jump(ReadExtraBitsCodeSize) |
1435 | | } |
1436 | 0 | } |
1437 | 0 | ) |
1438 | 0 | } else if l.counter != r.table_sizes[LITLEN_TABLE] + r.table_sizes[DIST_TABLE] { |
1439 | 0 | Action::Jump(BadCodeSizeSum) |
1440 | | } else { |
1441 | 0 | r.tables[LITLEN_TABLE].code_size[..r.table_sizes[LITLEN_TABLE] as usize] |
1442 | 0 | .copy_from_slice(&r.len_codes[..r.table_sizes[LITLEN_TABLE] as usize]); |
1443 | 0 |
|
1444 | 0 | let dist_table_start = r.table_sizes[LITLEN_TABLE] as usize; |
1445 | 0 | let dist_table_end = (r.table_sizes[LITLEN_TABLE] + |
1446 | 0 | r.table_sizes[DIST_TABLE]) as usize; |
1447 | 0 | r.tables[DIST_TABLE].code_size[..r.table_sizes[DIST_TABLE] as usize] |
1448 | 0 | .copy_from_slice(&r.len_codes[dist_table_start..dist_table_end]); |
1449 | 0 |
|
1450 | 0 | r.block_type -= 1; |
1451 | 0 | init_tree(r, &mut l).unwrap_or(Action::End(TINFLStatus::Failed)) |
1452 | | } |
1453 | | }), |
1454 | | |
1455 | 0 | ReadExtraBitsCodeSize => generate_state!(state, 'state_machine, { |
1456 | 0 | let num_extra = l.num_extra; |
1457 | 0 | read_bits(&mut l, num_extra, &mut in_iter, flags, |l, mut extra_bits| { |
1458 | 0 | // Mask to avoid a bounds check. |
1459 | 0 | extra_bits += [3, 3, 11][(l.dist as usize - 16) & 3]; |
1460 | 0 | let val = if l.dist == 16 { |
1461 | 0 | r.len_codes[l.counter as usize - 1] |
1462 | | } else { |
1463 | 0 | 0 |
1464 | | }; |
1465 | | |
1466 | 0 | memset( |
1467 | 0 | &mut r.len_codes[ |
1468 | 0 | l.counter as usize..l.counter as usize + extra_bits as usize |
1469 | 0 | ], |
1470 | 0 | val, |
1471 | 0 | ); |
1472 | 0 | l.counter += extra_bits as u32; |
1473 | 0 | Action::Jump(ReadLitlenDistTablesCodeSize) |
1474 | 0 | }) |
1475 | 0 | }), |
1476 | | |
1477 | 0 | DecodeLitlen => generate_state!(state, 'state_machine, { |
1478 | 0 | if in_iter.len() < 4 || out_buf.bytes_left() < 2 { |
1479 | | // See if we can decode a literal with the data we have left. |
1480 | | // Jumps to next state (WriteSymbol) if successful. |
1481 | 0 | decode_huffman_code( |
1482 | 0 | r, |
1483 | 0 | &mut l, |
1484 | 0 | LITLEN_TABLE, |
1485 | 0 | flags, |
1486 | 0 | &mut in_iter, |
1487 | 0 | |_r, l, symbol| { |
1488 | 0 | l.counter = symbol as u32; |
1489 | 0 | Action::Jump(WriteSymbol) |
1490 | 0 | }, |
1491 | 0 | ) |
1492 | | } else if |
1493 | | // If there is enough space, use the fast inner decompression |
1494 | | // function. |
1495 | 0 | out_buf.bytes_left() >= 259 && |
1496 | 0 | in_iter.len() >= 14 |
1497 | | { |
1498 | 0 | let (status, new_state) = decompress_fast( |
1499 | 0 | r, |
1500 | 0 | &mut in_iter, |
1501 | 0 | &mut out_buf, |
1502 | 0 | flags, |
1503 | 0 | &mut l, |
1504 | 0 | out_buf_size_mask, |
1505 | 0 | ); |
1506 | 0 |
|
1507 | 0 | state = new_state; |
1508 | 0 | if status == TINFLStatus::Done { |
1509 | 0 | Action::Jump(new_state) |
1510 | | } else { |
1511 | 0 | Action::End(status) |
1512 | | } |
1513 | | } else { |
1514 | 0 | fill_bit_buffer(&mut l, &mut in_iter); |
1515 | | |
1516 | 0 | if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) { |
1517 | | |
1518 | 0 | l.counter = symbol as u32; |
1519 | 0 | l.bit_buf >>= code_len; |
1520 | 0 | l.num_bits -= code_len; |
1521 | 0 |
|
1522 | 0 | if (l.counter & 256) != 0 { |
1523 | | // The symbol is not a literal. |
1524 | 0 | Action::Jump(HuffDecodeOuterLoop1) |
1525 | | } else { |
1526 | | // If we have a 32-bit buffer we need to read another two bytes now |
1527 | | // to have enough bits to keep going. |
1528 | 0 | if cfg!(not(target_pointer_width = "64")) { |
1529 | 0 | fill_bit_buffer(&mut l, &mut in_iter); |
1530 | 0 | } |
1531 | | |
1532 | 0 | if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) { |
1533 | | |
1534 | 0 | l.bit_buf >>= code_len; |
1535 | 0 | l.num_bits -= code_len; |
1536 | 0 | // The previous symbol was a literal, so write it directly and check |
1537 | 0 | // the next one. |
1538 | 0 | out_buf.write_byte(l.counter as u8); |
1539 | 0 | if (symbol & 256) != 0 { |
1540 | 0 | l.counter = symbol as u32; |
1541 | 0 | // The symbol is a length value. |
1542 | 0 | Action::Jump(HuffDecodeOuterLoop1) |
1543 | | } else { |
1544 | | // The symbol is a literal, so write it directly and continue. |
1545 | 0 | out_buf.write_byte(symbol as u8); |
1546 | 0 | Action::None |
1547 | | } |
1548 | | } else { |
1549 | 0 | Action::Jump(InvalidCodeLen) |
1550 | | } |
1551 | | } |
1552 | | } else { |
1553 | 0 | Action::Jump(InvalidCodeLen) |
1554 | | } |
1555 | | } |
1556 | | }), |
1557 | | |
1558 | 0 | WriteSymbol => generate_state!(state, 'state_machine, { |
1559 | 0 | if l.counter >= 256 { |
1560 | 0 | Action::Jump(HuffDecodeOuterLoop1) |
1561 | 0 | } else if out_buf.bytes_left() > 0 { |
1562 | 0 | out_buf.write_byte(l.counter as u8); |
1563 | 0 | Action::Jump(DecodeLitlen) |
1564 | | } else { |
1565 | 0 | Action::End(TINFLStatus::HasMoreOutput) |
1566 | | } |
1567 | | }), |
1568 | | |
1569 | 0 | HuffDecodeOuterLoop1 => generate_state!(state, 'state_machine, { |
1570 | 0 | // Mask the top bits since they may contain length info. |
1571 | 0 | l.counter &= 511; |
1572 | 0 |
|
1573 | 0 | if l.counter |
1574 | 0 | == 256 { |
1575 | | // We hit the end of block symbol. |
1576 | 0 | Action::Jump(BlockDone) |
1577 | 0 | } else if l.counter > 285 { |
1578 | | // Invalid code. |
1579 | | // We already verified earlier that the code is > 256. |
1580 | 0 | Action::Jump(InvalidLitlen) |
1581 | | } else { |
1582 | | // # Optimization |
1583 | | // Mask the value to avoid bounds checks |
1584 | | // We could use get_unchecked later if can statically verify that |
1585 | | // this will never go out of bounds. |
1586 | 0 | l.num_extra = |
1587 | 0 | u32::from(LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK]); |
1588 | 0 | l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]); |
1589 | 0 | // Length and distance codes have a number of extra bits depending on |
1590 | 0 | // the base, which together with the base gives us the exact value. |
1591 | 0 | if l.num_extra != 0 { |
1592 | 0 | Action::Jump(ReadExtraBitsLitlen) |
1593 | | } else { |
1594 | 0 | Action::Jump(DecodeDistance) |
1595 | | } |
1596 | | } |
1597 | | }), |
1598 | | |
1599 | 0 | ReadExtraBitsLitlen => generate_state!(state, 'state_machine, { |
1600 | 0 | let num_extra = l.num_extra; |
1601 | 0 | read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| { |
1602 | 0 | l.counter += extra_bits as u32; |
1603 | 0 | Action::Jump(DecodeDistance) |
1604 | 0 | }) |
1605 | 0 | }), |
1606 | | |
1607 | 0 | DecodeDistance => generate_state!(state, 'state_machine, { |
1608 | 0 | // Try to read a huffman code from the input buffer and look up what |
1609 | 0 | // length code the decoded symbol refers to. |
1610 | 0 | decode_huffman_code(r, &mut l, DIST_TABLE, flags, &mut in_iter, |_r, l, symbol| { |
1611 | 0 | if symbol > 29 { |
1612 | | // Invalid distance code. |
1613 | 0 | return Action::Jump(InvalidDist) |
1614 | 0 | } |
1615 | 0 | // # Optimization |
1616 | 0 | // Mask the value to avoid bounds checks |
1617 | 0 | // We could use get_unchecked later if can statically verify that |
1618 | 0 | // this will never go out of bounds. |
1619 | 0 | l.num_extra = u32::from(DIST_EXTRA[symbol as usize & BASE_EXTRA_MASK]); |
1620 | 0 | l.dist = u32::from(DIST_BASE[symbol as usize & BASE_EXTRA_MASK]); |
1621 | 0 | if l.num_extra != 0 { |
1622 | | // ReadExtraBitsDistance
1623 | 0 | Action::Jump(ReadExtraBitsDistance) |
1624 | | } else { |
1625 | 0 | Action::Jump(HuffDecodeOuterLoop2) |
1626 | | } |
1627 | 0 | }) |
1628 | 0 | }), |
1629 | | |
1630 | 0 | ReadExtraBitsDistance => generate_state!(state, 'state_machine, { |
1631 | 0 | let num_extra = l.num_extra; |
1632 | 0 | read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| { |
1633 | 0 | l.dist += extra_bits as u32; |
1634 | 0 | Action::Jump(HuffDecodeOuterLoop2) |
1635 | 0 | }) |
1636 | 0 | }), |
1637 | | |
1638 | 0 | HuffDecodeOuterLoop2 => generate_state!(state, 'state_machine, { |
1639 | 0 | if l.dist as usize > out_buf.position() && |
1640 | 0 | (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0) |
1641 | | { |
1642 | | // We encountered a distance that refers a position before |
1643 | | // the start of the decoded data, so we can't continue. |
1644 | 0 | Action::Jump(DistanceOutOfBounds) |
1645 | | } else { |
1646 | 0 | let out_pos = out_buf.position(); |
1647 | 0 | let source_pos = out_buf.position() |
1648 | 0 | .wrapping_sub(l.dist as usize) & out_buf_size_mask; |
1649 | 0 |
|
1650 | 0 | let out_len = out_buf.get_ref().len(); |
1651 | 0 | let match_end_pos = out_buf.position() + l.counter as usize; |
1652 | 0 |
|
1653 | 0 | if match_end_pos > out_len || |
1654 | | // miniz doesn't do this check here. Not sure how it makes sure |
1655 | | // that this case doesn't happen. |
1656 | 0 | (source_pos >= out_pos && (source_pos - out_pos) < l.counter as usize) |
1657 | | { |
1658 | | // Not enough space for all of the data in the output buffer, |
1659 | | // so copy what we have space for. |
1660 | 0 | if l.counter == 0 { |
1661 | 0 | Action::Jump(DecodeLitlen) |
1662 | | } else { |
1663 | 0 | Action::Jump(WriteLenBytesToEnd) |
1664 | | } |
1665 | | } else { |
1666 | 0 | apply_match( |
1667 | 0 | out_buf.get_mut(), |
1668 | 0 | out_pos, |
1669 | 0 | l.dist as usize, |
1670 | 0 | l.counter as usize, |
1671 | 0 | out_buf_size_mask |
1672 | 0 | ); |
1673 | 0 | out_buf.set_position(out_pos + l.counter as usize); |
1674 | 0 | Action::Jump(DecodeLitlen) |
1675 | | } |
1676 | | } |
1677 | | }), |
1678 | | |
1679 | 0 | WriteLenBytesToEnd => generate_state!(state, 'state_machine, { |
1680 | 0 | if out_buf.bytes_left() > 0 { |
1681 | 0 | let out_pos = out_buf.position(); |
1682 | 0 | let source_pos = out_buf.position() |
1683 | 0 | .wrapping_sub(l.dist as usize) & out_buf_size_mask; |
1684 | 0 |
|
1685 | 0 |
|
1686 | 0 | let len = cmp::min(out_buf.bytes_left(), l.counter as usize); |
1687 | 0 |
|
1688 | 0 | transfer(out_buf.get_mut(), source_pos, out_pos, len, out_buf_size_mask); |
1689 | 0 |
|
1690 | 0 | out_buf.set_position(out_pos + len); |
1691 | 0 | l.counter -= len as u32; |
1692 | 0 | if l.counter == 0 { |
1693 | 0 | Action::Jump(DecodeLitlen) |
1694 | | } else { |
1695 | 0 | Action::None |
1696 | | } |
1697 | | } else { |
1698 | 0 | Action::End(TINFLStatus::HasMoreOutput) |
1699 | | } |
1700 | | }), |
1701 | | |
1702 | 0 | BlockDone => generate_state!(state, 'state_machine, { |
1703 | 0 | // End once we've read the last block. |
1704 | 0 | if r.finish != 0 { |
1705 | 0 | pad_to_bytes(&mut l, &mut in_iter, flags, |_| Action::None); |
1706 | 0 |
|
1707 | 0 | let in_consumed = in_buf.len() - in_iter.len(); |
1708 | 0 | let undo = undo_bytes(&mut l, in_consumed as u32) as usize; |
1709 | 0 | in_iter = in_buf[in_consumed - undo..].iter(); |
1710 | 0 |
|
1711 | 0 | l.bit_buf &= ((1 as BitBuffer) << l.num_bits) - 1; |
1712 | 0 | debug_assert_eq!(l.num_bits, 0); |
1713 | | |
1714 | 0 | if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 { |
1715 | 0 | l.counter = 0; |
1716 | 0 | Action::Jump(ReadAdler32) |
1717 | | } else { |
1718 | 0 | Action::Jump(DoneForever) |
1719 | | } |
1720 | | } else { |
1721 | 0 | Action::Jump(ReadBlockHeader) |
1722 | | } |
1723 | | }), |
1724 | | |
1725 | 0 | ReadAdler32 => generate_state!(state, 'state_machine, { |
1726 | 0 | if l.counter < 4 { |
1727 | 0 | if l.num_bits != 0 { |
1728 | 0 | read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| { |
1729 | 0 | r.z_adler32 <<= 8; |
1730 | 0 | r.z_adler32 |= bits as u32; |
1731 | 0 | l.counter += 1; |
1732 | 0 | Action::None |
1733 | 0 | }) |
1734 | | } else { |
1735 | 0 | read_byte(&mut in_iter, flags, |byte| { |
1736 | 0 | r.z_adler32 <<= 8; |
1737 | 0 | r.z_adler32 |= u32::from(byte); |
1738 | 0 | l.counter += 1; |
1739 | 0 | Action::None |
1740 | 0 | }) |
1741 | | } |
1742 | | } else { |
1743 | 0 | Action::Jump(DoneForever) |
1744 | | } |
1745 | | }), |
1746 | | |
1747 | | // We are done. |
1748 | 0 | DoneForever => break TINFLStatus::Done, |
1749 | | |
1750 | | // Anything else indicates failure. |
1751 | | // BadZlibHeader | BadRawLength | BadDistOrLiteralTableLength | BlockTypeUnexpected | |
1752 | | // DistanceOutOfBounds | |
1753 | | // BadTotalSymbols | BadCodeSizeDistPrevLookup | BadCodeSizeSum | InvalidLitlen | |
1754 | | // InvalidDist | InvalidCodeLen |
1755 | 0 | _ => break TINFLStatus::Failed, |
1756 | | }; |
1757 | | }; |
1758 | | |
1759 | 0 | let in_undo = if status != TINFLStatus::NeedsMoreInput |
1760 | 0 | && status != TINFLStatus::FailedCannotMakeProgress |
1761 | | { |
1762 | 0 | undo_bytes(&mut l, (in_buf.len() - in_iter.len()) as u32) as usize |
1763 | | } else { |
1764 | 0 | 0 |
1765 | | }; |
1766 | | |
1767 | | // Make sure HasMoreOutput overrides NeedsMoreInput if the output buffer is full. |
1768 | | // (Unless the missing input is the adler32 value in which case we don't need to write anything.) |
1769 | | // TODO: May want to see if we can do this in a better way. |
1770 | 0 | if status == TINFLStatus::NeedsMoreInput |
1771 | 0 | && out_buf.bytes_left() == 0 |
1772 | 0 | && state != State::ReadAdler32 |
1773 | | { |
1774 | 0 | status = TINFLStatus::HasMoreOutput |
1775 | 0 | } |
1776 | | |
1777 | 0 | r.state = state; |
1778 | 0 | r.bit_buf = l.bit_buf; |
1779 | 0 | r.num_bits = l.num_bits; |
1780 | 0 | r.dist = l.dist; |
1781 | 0 | r.counter = l.counter; |
1782 | 0 | r.num_extra = l.num_extra; |
1783 | 0 |
|
1784 | 0 | r.bit_buf &= ((1 as BitBuffer) << r.num_bits) - 1; |
1785 | | |
1786 | | // If this is a zlib stream, update the adler32 checksum with the decompressed bytes if
1787 | | // requested.
1788 | 0 | let need_adler = if (flags & TINFL_FLAG_IGNORE_ADLER32) == 0 { |
1789 | 0 | flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32) != 0 |
1790 | | } else { |
1791 | | // If TINFL_FLAG_IGNORE_ADLER32 is enabled, ignore the checksum. |
1792 | 0 | false |
1793 | | }; |
1794 | 0 | if need_adler && status as i32 >= 0 { |
1795 | 0 | let out_buf_pos = out_buf.position(); |
1796 | 0 | r.check_adler32 = update_adler32(r.check_adler32, &out_buf.get_ref()[out_pos..out_buf_pos]); |
1797 | 0 |
|
1798 | 0 | // disabled so that random input from fuzzer would not be rejected early, |
1799 | 0 | // before it has a chance to reach interesting parts of code |
1800 | 0 | if !cfg!(fuzzing) { |
1801 | | // Once we are done, check if the checksum matches with the one provided in the zlib header. |
1802 | 0 | if status == TINFLStatus::Done |
1803 | 0 | && flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 |
1804 | 0 | && r.check_adler32 != r.z_adler32 |
1805 | 0 | { |
1806 | 0 | status = TINFLStatus::Adler32Mismatch; |
1807 | 0 | } |
1808 | 0 | } |
1809 | 0 | } |
1810 | | |
1811 | 0 | ( |
1812 | 0 | status, |
1813 | 0 | in_buf.len() - in_iter.len() - in_undo, |
1814 | 0 | out_buf.position() - out_pos, |
1815 | 0 | ) |
1816 | 0 | } |
1817 | | |
#[cfg(test)]
mod test {
    use super::*;

    //TODO: Fix these.

    /// Wrapper around `decompress` that hands back the unread tail of the
    /// input slice instead of the number of input bytes consumed.
    fn tinfl_decompress_oxide<'i>(
        r: &mut DecompressorOxide,
        input_buffer: &'i [u8],
        output_buffer: &mut [u8],
        flags: u32,
    ) -> (TINFLStatus, &'i [u8], usize) {
        let (status, bytes_read, bytes_written) =
            decompress(r, input_buffer, output_buffer, 0, flags);
        (status, &input_buffer[bytes_read..], bytes_written)
    }

    #[test]
    fn decompress_zlib() {
        // Zlib stream containing "Hello, zlib!".
        let encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER;

        const LEN: usize = 32;
        let mut out = [0; LEN];

        // Without TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF this should fail,
        // since the output buffer is treated as too small.
        let mut d = DecompressorOxide::new();
        let res = tinfl_decompress_oxide(&mut d, &encoded[..], &mut out, flags);
        assert_eq!(res.0, TINFLStatus::Failed);

        // With TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF set this should no longer fail.
        let flags = flags | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
        d = DecompressorOxide::new();
        let res = tinfl_decompress_oxide(&mut d, &encoded[..], &mut out, flags);

        assert_eq!(out[..res.2], b"Hello, zlib!"[..]);
        assert_eq!(res.0, TINFLStatus::Done);
    }

    #[cfg(feature = "with-alloc")]
    #[test]
    fn raw_block() {
        const LEN: usize = 64;

        let text = b"Hello, zlib!";

        // Hand-build a single stored (uncompressed) deflate block:
        // final-block flag + block type 00, then the payload length and its
        // one's complement (both little-endian), followed by the raw bytes.
        let encoded = {
            let len = text.len();
            let notlen = !len;
            let mut data = vec![
                1,
                len as u8,
                (len >> 8) as u8,
                notlen as u8,
                (notlen >> 8) as u8,
            ];
            data.extend_from_slice(&text[..]);
            data
        };

        //let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER |
        let flags = TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;

        let mut d = DecompressorOxide::new();
        let mut out = [0; LEN];

        let res = tinfl_decompress_oxide(&mut d, &encoded[..], &mut out, flags);
        assert_eq!(out[..res.2], text[..]);
        assert_eq!(res.0, TINFLStatus::Done);
    }

    /// Look up a code and mask away the "length code" marker bit from the
    /// returned symbol so it can be compared against plain symbol values.
    fn masked_lookup(table: &HuffmanTable, bit_buf: BitBuffer) -> (i32, u32) {
        let (symbol, code_len) = table.lookup(bit_buf).unwrap();
        (symbol & 511, code_len)
    }

    #[test]
    fn fixed_table_lookup() {
        let mut d = DecompressorOxide::new();
        d.block_type = 1;
        start_static_table(&mut d);
        let mut l = LocalVars {
            bit_buf: d.bit_buf,
            num_bits: d.num_bits,
            dist: d.dist,
            counter: d.counter,
            num_extra: d.num_extra,
        };
        init_tree(&mut d, &mut l).unwrap();
        let llt = &d.tables[LITLEN_TABLE];
        let dt = &d.tables[DIST_TABLE];

        // Spot-check symbols from each length range of the fixed
        // literal/length table (RFC 1951, section 3.2.6).
        assert_eq!(masked_lookup(llt, 0b00001100), (0, 8));
        assert_eq!(masked_lookup(llt, 0b00011110), (72, 8));
        assert_eq!(masked_lookup(llt, 0b01011110), (74, 8));
        assert_eq!(masked_lookup(llt, 0b11111101), (143, 8));
        assert_eq!(masked_lookup(llt, 0b000010011), (144, 9));
        assert_eq!(masked_lookup(llt, 0b111111111), (255, 9));
        assert_eq!(masked_lookup(llt, 0b00000000), (256, 7));
        assert_eq!(masked_lookup(llt, 0b1110100), (279, 7));
        assert_eq!(masked_lookup(llt, 0b00000011), (280, 8));
        assert_eq!(masked_lookup(llt, 0b11100011), (287, 8));

        // All fixed distance codes are 5 bits long.
        assert_eq!(masked_lookup(dt, 0), (0, 5));
        assert_eq!(masked_lookup(dt, 20), (5, 5));
    }

    // Only run this test with alloc enabled as it uses a larger buffer.
    #[cfg(feature = "with-alloc")]
    fn check_result(input: &[u8], expected_status: TINFLStatus, expected_state: State, zlib: bool) {
        let mut decomp = DecompressorOxide::default();
        let mut out = vec![0; 1024 * 32];
        let mut flags = TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | TINFL_FLAG_HAS_MORE_INPUT;
        if zlib {
            flags |= inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER;
        }
        let (status, _bytes_read, _bytes_written) =
            decompress(&mut decomp, input, &mut out, 0, flags);
        assert_eq!(expected_status, status);
        assert_eq!(expected_state, decomp.state);
    }

    #[cfg(feature = "with-alloc")]
    #[test]
    fn bogus_input() {
        use self::check_result as cr;
        const F: TINFLStatus = TINFLStatus::Failed;
        const OK: TINFLStatus = TINFLStatus::Done;

        // Bad CM.
        cr(&[0x77, 0x85], F, State::BadZlibHeader, true);
        // Bad window size (but check is correct).
        cr(&[0x88, 0x98], F, State::BadZlibHeader, true);
        // Bad check bits.
        cr(&[0x78, 0x98], F, State::BadZlibHeader, true);

        // Too many code lengths. (From inflate library issues)
        cr(
            b"M\xff\xffM*\xad\xad\xad\xad\xad\xad\xad\xcd\xcd\xcdM",
            F,
            State::BadDistOrLiteralTableLength,
            false,
        );

        // Bad CLEN (also from inflate library issues)
        cr(
            b"\xdd\xff\xff*M\x94ffffffffff",
            F,
            State::BadDistOrLiteralTableLength,
            false,
        );

        // Port of inflate coverage tests from zlib-ng
        // https://github.com/Dead2/zlib-ng/blob/develop/test/infcover.c
        let c = |data, status, state| cr(data, status, state, false);

        // Invalid uncompressed/raw block length.
        c(&[0, 0, 0, 0, 0], F, State::BadRawLength);
        // Ok empty uncompressed block.
        c(&[3, 0], OK, State::DoneForever);
        // Invalid block type.
        c(&[6], F, State::BlockTypeUnexpected);
        // Ok uncompressed block.
        c(&[1, 1, 0, 0xfe, 0xff, 0], OK, State::DoneForever);
        // Too many litlens, we handle this later than zlib, so this test won't
        // give the same result.
        // c(&[0xfc, 0, 0], F, State::BadTotalSymbols);
        // Invalid set of code lengths - TODO Check if this is the correct error for this.
        c(&[4, 0, 0xfe, 0xff], F, State::BadTotalSymbols);
        // Invalid repeat in list of code lengths.
        // (Try to repeat a non-existent code.)
        c(&[4, 0, 0x24, 0x49, 0], F, State::BadCodeSizeDistPrevLookup);
        // Missing end of block code (should we have a separate error for this?) - fails on further input
        // c(&[4, 0, 0x24, 0xe9, 0xff, 0x6d], F, State::BadTotalSymbols);
        // Invalid set of literals/lengths
        c(
            &[
                4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x71, 0xff, 0xff, 0x93, 0x11, 0,
            ],
            F,
            State::BadTotalSymbols,
        );
        // Invalid set of distances - needs more input
        // c(&[4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x0f, 0xb4, 0xff, 0xff, 0xc3, 0x84], F, State::BadTotalSymbols);
        // Invalid distance code
        c(&[2, 0x7e, 0xff, 0xff], F, State::InvalidDist);

        // Distance refers to position before the start
        c(
            &[0x0c, 0xc0, 0x81, 0, 0, 0, 0, 0, 0x90, 0xff, 0x6b, 0x4, 0],
            F,
            State::DistanceOutOfBounds,
        );

        // Trailer
        // Bad gzip trailer checksum GZip header not handled by miniz_oxide
        //cr(&[0x1f, 0x8b, 0x08 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0x03, 0, 0, 0, 0, 0x01], F, State::BadCRC, false)
        // Bad gzip trailer length
        //cr(&[0x1f, 0x8b, 0x08 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0x01], F, State::BadCRC, false)
    }

    #[test]
    fn empty_output_buffer_non_wrapping() {
        // Zlib stream containing "Hello, zlib!".
        let encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32
            | TINFL_FLAG_PARSE_ZLIB_HEADER
            | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
        let mut decomp = DecompressorOxide::new();
        let mut empty_out: [u8; 0] = [];
        // Check that we handle an empty buffer properly and not panicking.
        // https://github.com/Frommi/miniz_oxide/issues/23
        let res = decompress(&mut decomp, &encoded, &mut empty_out, 0, flags);
        assert_eq!(res, (TINFLStatus::HasMoreOutput, 4, 0));
    }

    #[test]
    fn empty_output_buffer_wrapping() {
        // Raw deflate stream (no zlib wrapper).
        let encoded = [
            0x73, 0x49, 0x4d, 0xcb, 0x49, 0x2c, 0x49, 0x55, 0x00, 0x11, 0x00,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32;
        let mut decomp = DecompressorOxide::new();
        let mut empty_out: [u8; 0] = [];
        // Check that we handle an empty buffer properly and not panicking.
        // https://github.com/Frommi/miniz_oxide/issues/23
        let res = decompress(&mut decomp, &encoded, &mut empty_out, 0, flags);
        assert_eq!(res, (TINFLStatus::HasMoreOutput, 2, 0));
    }
}