/rust/registry/src/index.crates.io-1949cf8c6b5b557f/fdeflate-0.3.7/src/decompress.rs
Line | Count | Source |
1 | | use simd_adler32::Adler32; |
2 | | |
3 | | use crate::{ |
4 | | huffman::{self, build_table}, |
5 | | tables::{ |
6 | | self, CLCL_ORDER, DIST_SYM_TO_DIST_BASE, DIST_SYM_TO_DIST_EXTRA, FIXED_DIST_TABLE, |
7 | | FIXED_LITLEN_TABLE, LEN_SYM_TO_LEN_BASE, LEN_SYM_TO_LEN_EXTRA, LITLEN_TABLE_ENTRIES, |
8 | | }, |
9 | | }; |
10 | | |
/// An error encountered while decompressing a deflate stream.
#[derive(Debug, PartialEq)]
pub enum DecompressionError {
    /// The zlib header is corrupt.
    BadZlibHeader,
    /// All input was consumed, but the end of the stream hasn't been reached.
    InsufficientInput,
    /// A block header specifies an invalid block type.
    InvalidBlockType,
    /// An uncompressed block's NLEN value is invalid.
    InvalidUncompressedBlockLength,
    /// Too many literals were specified.
    InvalidHlit,
    /// Too many distance codes were specified.
    InvalidHdist,
    /// Attempted to repeat a previous code before reading any codes, or past the end of the code
    /// lengths.
    InvalidCodeLengthRepeat,
    /// The stream doesn't specify a valid huffman tree (for the code length codes).
    BadCodeLengthHuffmanTree,
    /// The stream doesn't specify a valid huffman tree (for the literal/length codes).
    BadLiteralLengthHuffmanTree,
    /// The stream doesn't specify a valid huffman tree (for the distance codes).
    BadDistanceHuffmanTree,
    /// The stream contains a literal/length code that was not allowed by the header.
    InvalidLiteralLengthCode,
    /// The stream contains a distance code that was not allowed by the header.
    InvalidDistanceCode,
    /// The stream contains a back-reference as the first symbol.
    InputStartsWithRun,
    /// The stream contains a back-reference that is too far back.
    DistanceTooFarBack,
    /// The deflate stream checksum is incorrect.
    WrongChecksum,
    /// Extra input data.
    ExtraInput,
}
48 | | |
/// In-progress state for parsing a dynamic huffman block header: the HLIT /
/// HDIST / HCLEN counts, the code-length-code table, and the decoded code
/// lengths themselves.
struct BlockHeader {
    /// Number of literal/length codes declared by the header (257..=286).
    hlit: usize,
    /// Number of distance codes declared by the header (1..=30).
    hdist: usize,
    /// Number of code length codes declared by the header (4..=19).
    hclen: usize,
    /// How many of the `hlit + hdist` code lengths have been decoded so far.
    num_lengths_read: usize,

    /// Low 3-bits are code length code length, high 5-bits are code length code.
    table: [u32; 128],
    /// Decoded code lengths. Literal/length lengths come first; once all
    /// lengths are read, the distance lengths are moved up to index 288.
    code_lengths: [u8; 320],
}
59 | | |
/// Flag bit in a decoding-table entry: the entry directly encodes one or more
/// literal bytes.
pub const LITERAL_ENTRY: u32 = 0x8000;
/// Flag bit in a decoding-table entry: the entry is not a plain
/// literal/length (e.g. EOF, an invalid code, or a secondary-table redirect).
pub const EXCEPTIONAL_ENTRY: u32 = 0x4000;
/// Flag bit (combined with `EXCEPTIONAL_ENTRY`): the entry points into the
/// secondary table, used for codes too long for the primary table index.
pub const SECONDARY_TABLE_ENTRY: u32 = 0x2000;
63 | | |
/// The Decompressor state for a compressed block.
#[derive(Eq, PartialEq, Debug)]
struct CompressedBlock {
    /// Primary literal/length decoding table, indexed by the low 12 bits of
    /// the bit buffer.
    litlen_table: Box<[u32; 4096]>,
    /// Overflow table for literal/length codes that don't fit in the primary
    /// table.
    secondary_table: Vec<u16>,

    /// Primary distance decoding table, indexed by 9 bits of the bit buffer.
    dist_table: Box<[u32; 512]>,
    /// Overflow table for distance codes that don't fit in the primary table.
    dist_secondary_table: Vec<u16>,

    /// Huffman code assigned to the end-of-block symbol (symbol 256).
    eof_code: u16,
    /// Bit mask covering `eof_bits` low bits.
    eof_mask: u16,
    /// Length in bits of the end-of-block code.
    eof_bits: u8,
}
77 | | |
/// Which portion of the zlib stream the decompressor expects next.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum State {
    /// Reading the zlib stream header.
    ZlibHeader,
    /// Reading a deflate block header.
    BlockHeader,
    /// Reading the code length code lengths of a dynamic huffman block.
    CodeLengthCodes,
    /// Reading the literal/length and distance code lengths.
    CodeLengths,
    /// Decoding the body of a compressed (fixed or dynamic huffman) block.
    CompressedData,
    /// Copying the body of a stored (uncompressed) block.
    UncompressedData,
    /// Reading the Adler-32 checksum at the end of the stream.
    Checksum,
    /// The stream has been fully decoded.
    Done,
}
89 | | |
/// Decompressor for arbitrary zlib streams.
pub struct Decompressor {
    /// State for decoding a compressed block.
    compression: CompressedBlock,
    // State for decoding a block header.
    header: BlockHeader,
    // Number of bytes left for uncompressed block.
    uncompressed_bytes_left: u16,

    // Bit buffer; bits are consumed from the low end.
    buffer: u64,
    // Number of valid bits currently held in `buffer` (at most 63).
    nbits: u8,

    // Pending run-length repeat (byte, remaining count) that didn't fit in the
    // previous output buffer.
    queued_rle: Option<(u8, usize)>,
    // Pending back-reference (distance, remaining length) that didn't fit in
    // the previous output buffer.
    queued_backref: Option<(usize, usize)>,
    // Whether the block currently being decoded is the final one.
    last_block: bool,
    // Whether the fixed-huffman decoding tables are currently loaded.
    fixed_table: bool,

    // Which portion of the stream is expected next.
    state: State,
    // Running Adler-32 state; verification can be disabled (see
    // `ignore_adler32`).
    checksum: Adler32,
    // If true, skip checking the stream checksum.
    ignore_adler32: bool,
}
111 | | |
112 | | impl Default for Decompressor { |
113 | 0 | fn default() -> Self { |
114 | 0 | Self::new() |
115 | 0 | } |
116 | | } |
117 | | |
118 | | impl Decompressor { |
119 | | /// Create a new decompressor. |
120 | 17.4k | pub fn new() -> Self { |
121 | 17.4k | Self { |
122 | 17.4k | buffer: 0, |
123 | 17.4k | nbits: 0, |
124 | 17.4k | compression: CompressedBlock { |
125 | 17.4k | litlen_table: Box::new([0; 4096]), |
126 | 17.4k | dist_table: Box::new([0; 512]), |
127 | 17.4k | secondary_table: Vec::new(), |
128 | 17.4k | dist_secondary_table: Vec::new(), |
129 | 17.4k | eof_code: 0, |
130 | 17.4k | eof_mask: 0, |
131 | 17.4k | eof_bits: 0, |
132 | 17.4k | }, |
133 | 17.4k | header: BlockHeader { |
134 | 17.4k | hlit: 0, |
135 | 17.4k | hdist: 0, |
136 | 17.4k | hclen: 0, |
137 | 17.4k | table: [0; 128], |
138 | 17.4k | num_lengths_read: 0, |
139 | 17.4k | code_lengths: [0; 320], |
140 | 17.4k | }, |
141 | 17.4k | uncompressed_bytes_left: 0, |
142 | 17.4k | queued_rle: None, |
143 | 17.4k | queued_backref: None, |
144 | 17.4k | checksum: Adler32::new(), |
145 | 17.4k | state: State::ZlibHeader, |
146 | 17.4k | last_block: false, |
147 | 17.4k | ignore_adler32: false, |
148 | 17.4k | fixed_table: false, |
149 | 17.4k | } |
150 | 17.4k | } |
151 | | |
    /// Ignore the checksum at the end of the stream.
    ///
    /// Once set, this cannot be unset for this decompressor instance.
    pub fn ignore_adler32(&mut self) {
        self.ignore_adler32 = true;
    }
156 | | |
    /// Refill the bit buffer from `input`, advancing the slice past the
    /// consumed bytes.
    ///
    /// When at least 8 input bytes are available, `nbits` ends up in 56..=63
    /// (and the buffer effectively holds 64 valid bits); with less input, as
    /// many whole bytes as fit are loaded.
    fn fill_buffer(&mut self, input: &mut &[u8]) {
        if input.len() >= 8 {
            // Fast path: OR in a full 8 bytes. Bits shifted past the top of
            // the u64 are discarded by the shift itself.
            self.buffer |= u64::from_le_bytes(input[..8].try_into().unwrap()) << self.nbits;
            // Only the bytes that actually fit into the 64-bit buffer count
            // as consumed.
            *input = &input[(63 - self.nbits as usize) / 8..];
            // Round `nbits` up into 56..=63, preserving its low three bits.
            self.nbits |= 56;
        } else {
            // Slow path: fewer than 8 bytes remain, so copy them into a
            // zero-padded scratch array before loading.
            let nbytes = input.len().min((63 - self.nbits as usize) / 8);
            let mut input_data = [0; 8];
            input_data[..nbytes].copy_from_slice(&input[..nbytes]);
            // `checked_shl` defends against a shift count >= 64 (which would
            // otherwise panic/overflow); in that case nothing can be added.
            self.buffer |= u64::from_le_bytes(input_data)
                .checked_shl(self.nbits as u32)
                .unwrap_or(0);
            self.nbits += nbytes as u8 * 8;
            *input = &input[nbytes..];
        }
    }
173 | | |
174 | 2.45M | fn peak_bits(&mut self, nbits: u8) -> u64 { |
175 | 2.45M | debug_assert!(nbits <= 56 && nbits <= self.nbits); |
176 | 2.45M | self.buffer & ((1u64 << nbits) - 1) |
177 | 2.45M | } |
    /// Discard the low `nbits` bits from the bit buffer.
    fn consume_bits(&mut self, nbits: u8) {
        debug_assert!(self.nbits >= nbits);
        self.buffer >>= nbits;
        self.nbits -= nbits;
    }
183 | | |
    /// Parse a deflate block header (the 3-bit final-flag/type prefix plus any
    /// type-specific header data) and advance `self.state` accordingly.
    ///
    /// Returns `Ok(())` without changing state when more input is needed; the
    /// caller is expected to retry with additional data.
    fn read_block_header(&mut self, remaining_input: &mut &[u8]) -> Result<(), DecompressionError> {
        self.fill_buffer(remaining_input);
        // Need at least the 3 header bits plus some lookahead before deciding.
        if self.nbits < 10 {
            return Ok(());
        }

        // Bit 0 is the BFINAL flag, bits 1-2 are the block type (RFC 1951).
        let start = self.peak_bits(3);
        self.last_block = start & 1 != 0;
        match start >> 1 {
            // Stored (uncompressed) block: skip to a byte boundary, then read
            // LEN (16 bits) and NLEN (16 bits, the one's complement of LEN).
            0b00 => {
                // Bits needed to reach the next byte boundary after the 3
                // header bits.
                let align_bits = (self.nbits - 3) % 8;
                let header_bits = 3 + 32 + align_bits;
                if self.nbits < header_bits {
                    return Ok(());
                }

                let len = (self.peak_bits(align_bits + 19) >> (align_bits + 3)) as u16;
                let nlen = (self.peak_bits(header_bits) >> (align_bits + 19)) as u16;
                if nlen != !len {
                    return Err(DecompressionError::InvalidUncompressedBlockLength);
                }

                self.state = State::UncompressedData;
                self.uncompressed_bytes_left = len;
                self.consume_bits(header_bits);
                Ok(())
            }
            // Fixed huffman block.
            0b01 => {
                self.consume_bits(3);

                // Check for an entirely empty block, which can happen if there are "partial
                // flushes" in the deflate stream. With fixed huffman codes, the EOF symbol is
                // 7-bits of zeros so we peek ahead and see if the next 7-bits are all zero.
                if self.peak_bits(7) == 0 {
                    self.consume_bits(7);
                    if self.last_block {
                        self.state = State::Checksum;
                        return Ok(());
                    }

                    // At this point we've consumed the entire block and need to read the next block
                    // header. If tail call optimization were guaranteed, we could just recurse
                    // here. But without it, a long sequence of empty fixed-blocks might cause a
                    // stack overflow. Instead, we consume all empty blocks in a loop and then
                    // recurse. This is the only recursive call in this function, and thus is safe.
                    //
                    // 0b010 over 10 bits = BFINAL=0, BTYPE=01, followed by the
                    // 7 zero bits of the fixed-code EOF symbol.
                    while self.nbits >= 10 && self.peak_bits(10) == 0b010 {
                        self.consume_bits(10);
                        self.fill_buffer(remaining_input);
                    }
                    return self.read_block_header(remaining_input);
                }

                // Build decoding tables if the previous block wasn't also a fixed block.
                if !self.fixed_table {
                    self.fixed_table = true;
                    for chunk in self.compression.litlen_table.chunks_exact_mut(512) {
                        chunk.copy_from_slice(&FIXED_LITLEN_TABLE);
                    }
                    for chunk in self.compression.dist_table.chunks_exact_mut(32) {
                        chunk.copy_from_slice(&FIXED_DIST_TABLE);
                    }
                    // Fixed-code EOF symbol: seven zero bits.
                    self.compression.eof_bits = 7;
                    self.compression.eof_code = 0;
                    self.compression.eof_mask = 0x7f;
                }

                self.state = State::CompressedData;
                Ok(())
            }
            // Dynamic huffman block: read HLIT (5 bits), HDIST (5 bits) and
            // HCLEN (4 bits), then move on to the code length codes.
            0b10 => {
                if self.nbits < 17 {
                    return Ok(());
                }

                self.header.hlit = (self.peak_bits(8) >> 3) as usize + 257;
                self.header.hdist = (self.peak_bits(13) >> 8) as usize + 1;
                self.header.hclen = (self.peak_bits(17) >> 13) as usize + 4;
                if self.header.hlit > 286 {
                    return Err(DecompressionError::InvalidHlit);
                }
                if self.header.hdist > 30 {
                    return Err(DecompressionError::InvalidHdist);
                }

                self.consume_bits(17);
                self.state = State::CodeLengthCodes;
                // Dynamic tables will overwrite the fixed ones.
                self.fixed_table = false;
                Ok(())
            }
            0b11 => Err(DecompressionError::InvalidBlockType),
            _ => unreachable!(),
        }
    }
277 | | |
    /// Read the `hclen` 3-bit code length code lengths of a dynamic huffman
    /// block and build the code-length huffman table into `self.header.table`.
    ///
    /// Returns `Ok(())` without changing state when more input is needed.
    fn read_code_length_codes(
        &mut self,
        remaining_input: &mut &[u8],
    ) -> Result<(), DecompressionError> {
        self.fill_buffer(remaining_input);
        // Wait until all 3 * hclen bits are available (buffer + unread input).
        if self.nbits as usize + remaining_input.len() * 8 < 3 * self.header.hclen {
            return Ok(());
        }

        let mut code_length_lengths = [0; 19];
        for i in 0..self.header.hclen {
            // Lengths are stored in the fixed permuted order CLCL_ORDER.
            code_length_lengths[CLCL_ORDER[i]] = self.peak_bits(3) as u8;
            self.consume_bits(3);

            // We need to refill the buffer after reading 3 * 18 = 54 bits since the buffer holds
            // between 56 and 63 bits total.
            if i == 17 {
                self.fill_buffer(remaining_input);
            }
        }

        let mut codes = [0; 19];
        if !build_table(
            &code_length_lengths,
            &[],
            &mut codes,
            &mut self.header.table,
            &mut Vec::new(),
            false,
            false,
        ) {
            return Err(DecompressionError::BadCodeLengthHuffmanTree);
        }

        self.state = State::CodeLengths;
        self.header.num_lengths_read = 0;
        Ok(())
    }
316 | | |
    /// Decode the `hlit + hdist` literal/length and distance code lengths
    /// using the code-length huffman table, then build the decoding tables.
    ///
    /// Returns `Ok(())` without changing state when more input is needed;
    /// progress is tracked in `self.header.num_lengths_read` so the call can
    /// be resumed.
    fn read_code_lengths(&mut self, remaining_input: &mut &[u8]) -> Result<(), DecompressionError> {
        let total_lengths = self.header.hlit + self.header.hdist;
        while self.header.num_lengths_read < total_lengths {
            self.fill_buffer(remaining_input);
            // Code-length codes are at most 7 bits long.
            if self.nbits < 7 {
                return Ok(());
            }

            let code = self.peak_bits(7);
            let entry = self.header.table[code as usize];
            let length = (entry & 0x7) as u8;
            let symbol = (entry >> 16) as u8;

            debug_assert!(length != 0);
            match symbol {
                // Symbols 0..=15 are literal code lengths.
                0..=15 => {
                    self.header.code_lengths[self.header.num_lengths_read] = symbol;
                    self.header.num_lengths_read += 1;
                    self.consume_bits(length);
                }
                // Symbols 16..=18 are repeat codes: 16 repeats the previous
                // length 3-6 times, 17 repeats zero 3-10 times, 18 repeats
                // zero 11-138 times.
                16..=18 => {
                    let (base_repeat, extra_bits) = match symbol {
                        16 => (3, 2),
                        17 => (3, 3),
                        18 => (11, 7),
                        _ => unreachable!(),
                    };

                    if self.nbits < length + extra_bits {
                        return Ok(());
                    }

                    let value = match symbol {
                        16 => {
                            // Symbol 16 copies the previously-emitted length;
                            // if there is no previous length the stream is
                            // invalid.
                            self.header.code_lengths[self
                                .header
                                .num_lengths_read
                                .checked_sub(1)
                                .ok_or(DecompressionError::InvalidCodeLengthRepeat)?]
                        }
                        17 => 0,
                        18 => 0,
                        _ => unreachable!(),
                    };

                    let repeat =
                        (self.peak_bits(length + extra_bits) >> length) as usize + base_repeat;
                    if self.header.num_lengths_read + repeat > total_lengths {
                        return Err(DecompressionError::InvalidCodeLengthRepeat);
                    }

                    for i in 0..repeat {
                        self.header.code_lengths[self.header.num_lengths_read + i] = value;
                    }
                    self.header.num_lengths_read += repeat;
                    self.consume_bits(length + extra_bits);
                }
                _ => unreachable!(),
            }
        }

        // Move the distance code lengths up to index 288 and zero-fill the
        // unused literal/length (hlit..288) and distance (288+hdist..320)
        // slots before table construction.
        self.header
            .code_lengths
            .copy_within(self.header.hlit..total_lengths, 288);
        for i in self.header.hlit..288 {
            self.header.code_lengths[i] = 0;
        }
        for i in 288 + self.header.hdist..320 {
            self.header.code_lengths[i] = 0;
        }

        Self::build_tables(
            self.header.hlit,
            &self.header.code_lengths,
            &mut self.compression,
        )?;
        self.state = State::CompressedData;
        Ok(())
    }
397 | | |
    /// Build the literal/length and distance decoding tables for a dynamic
    /// huffman block from the decoded code lengths.
    ///
    /// `code_lengths` holds literal/length lengths in `..hlit` and distance
    /// lengths in `288..320`. Also records the EOF (symbol 256) code so the
    /// decoder can recognize end-of-block.
    fn build_tables(
        hlit: usize,
        code_lengths: &[u8],
        compression: &mut CompressedBlock,
    ) -> Result<(), DecompressionError> {
        // If there is no code assigned for the EOF symbol then the bitstream is invalid.
        if code_lengths[256] == 0 {
            // TODO: Return a dedicated error in this case.
            return Err(DecompressionError::BadLiteralLengthHuffmanTree);
        }

        let mut codes = [0; 288];
        compression.secondary_table.clear();
        if !huffman::build_table(
            &code_lengths[..hlit],
            &LITLEN_TABLE_ENTRIES,
            &mut codes[..hlit],
            &mut *compression.litlen_table,
            &mut compression.secondary_table,
            false,
            true,
        ) {
            // NOTE(review): this is a failure of the *literal/length* tree,
            // yet it returns `BadCodeLengthHuffmanTree`;
            // `BadLiteralLengthHuffmanTree` looks like the intended variant.
            // Left unchanged because callers may compare error variants —
            // confirm before fixing.
            return Err(DecompressionError::BadCodeLengthHuffmanTree);
        }

        // Remember the EOF code so the decoder can detect end-of-block even
        // with a partially-filled bit buffer.
        compression.eof_code = codes[256];
        compression.eof_mask = (1 << code_lengths[256]) - 1;
        compression.eof_bits = code_lengths[256];

        // Build the distance code table.
        let lengths = &code_lengths[288..320];
        if lengths == [0; 32] {
            // No distance codes at all: zero the table so any back-reference
            // decodes as an invalid distance code.
            compression.dist_table.fill(0);
        } else {
            let mut dist_codes = [0; 32];
            if !huffman::build_table(
                lengths,
                &tables::DISTANCE_TABLE_ENTRIES,
                &mut dist_codes,
                &mut *compression.dist_table,
                &mut compression.dist_secondary_table,
                true,
                false,
            ) {
                return Err(DecompressionError::BadDistanceHuffmanTree);
            }
        }

        Ok(())
    }
448 | | |
449 | 1.26M | fn read_compressed( |
450 | 1.26M | &mut self, |
451 | 1.26M | remaining_input: &mut &[u8], |
452 | 1.26M | output: &mut [u8], |
453 | 1.26M | mut output_index: usize, |
454 | 1.26M | ) -> Result<usize, DecompressionError> { |
455 | | // Fast decoding loop. |
456 | | // |
457 | | // This loop is optimized for speed and is the main decoding loop for the decompressor, |
458 | | // which is used when there are at least 8 bytes of input and output data available. It |
459 | | // assumes that the bitbuffer is full (nbits >= 56) and that litlen_entry has been loaded. |
460 | | // |
461 | | // These assumptions enable a few optimizations: |
462 | | // - Nearly all checks for nbits are avoided. |
463 | | // - Checking the input size is optimized out in the refill function call. |
464 | | // - The litlen_entry for the next loop iteration can be loaded in parallel with refilling |
465 | | // the bit buffer. This is because when the input is non-empty, the bit buffer actually |
466 | | // has 64-bits of valid data (even though nbits will be in 56..=63). |
467 | 1.26M | self.fill_buffer(remaining_input); |
468 | 1.26M | let mut litlen_entry = self.compression.litlen_table[(self.buffer & 0xfff) as usize]; |
469 | 55.1M | while self.state == State::CompressedData |
470 | 55.1M | && output_index + 8 <= output.len() |
471 | 55.1M | && remaining_input.len() >= 8 |
472 | | { |
473 | | // First check whether the next symbol is a literal. This code does up to 2 additional |
474 | | // table lookups to decode more literals. |
475 | | let mut bits; |
476 | 55.1M | let mut litlen_code_bits = litlen_entry as u8; |
477 | 55.1M | if litlen_entry & LITERAL_ENTRY != 0 { |
478 | 7.46M | let litlen_entry2 = self.compression.litlen_table |
479 | 7.46M | [(self.buffer >> litlen_code_bits & 0xfff) as usize]; |
480 | 7.46M | let litlen_code_bits2 = litlen_entry2 as u8; |
481 | 7.46M | let litlen_entry3 = self.compression.litlen_table |
482 | 7.46M | [(self.buffer >> (litlen_code_bits + litlen_code_bits2) & 0xfff) as usize]; |
483 | 7.46M | let litlen_code_bits3 = litlen_entry3 as u8; |
484 | 7.46M | let litlen_entry4 = self.compression.litlen_table[(self.buffer |
485 | 7.46M | >> (litlen_code_bits + litlen_code_bits2 + litlen_code_bits3) |
486 | 7.46M | & 0xfff) |
487 | 7.46M | as usize]; |
488 | | |
489 | 7.46M | let advance_output_bytes = ((litlen_entry & 0xf00) >> 8) as usize; |
490 | 7.46M | output[output_index] = (litlen_entry >> 16) as u8; |
491 | 7.46M | output[output_index + 1] = (litlen_entry >> 24) as u8; |
492 | 7.46M | output_index += advance_output_bytes; |
493 | | |
494 | 7.46M | if litlen_entry2 & LITERAL_ENTRY != 0 { |
495 | 5.95M | let advance_output_bytes2 = ((litlen_entry2 & 0xf00) >> 8) as usize; |
496 | 5.95M | output[output_index] = (litlen_entry2 >> 16) as u8; |
497 | 5.95M | output[output_index + 1] = (litlen_entry2 >> 24) as u8; |
498 | 5.95M | output_index += advance_output_bytes2; |
499 | | |
500 | 5.95M | if litlen_entry3 & LITERAL_ENTRY != 0 { |
501 | 5.42M | let advance_output_bytes3 = ((litlen_entry3 & 0xf00) >> 8) as usize; |
502 | 5.42M | output[output_index] = (litlen_entry3 >> 16) as u8; |
503 | 5.42M | output[output_index + 1] = (litlen_entry3 >> 24) as u8; |
504 | 5.42M | output_index += advance_output_bytes3; |
505 | | |
506 | 5.42M | litlen_entry = litlen_entry4; |
507 | 5.42M | self.consume_bits(litlen_code_bits + litlen_code_bits2 + litlen_code_bits3); |
508 | 5.42M | self.fill_buffer(remaining_input); |
509 | 5.42M | continue; |
510 | 522k | } else { |
511 | 522k | self.consume_bits(litlen_code_bits + litlen_code_bits2); |
512 | 522k | litlen_entry = litlen_entry3; |
513 | 522k | litlen_code_bits = litlen_code_bits3; |
514 | 522k | self.fill_buffer(remaining_input); |
515 | 522k | bits = self.buffer; |
516 | 522k | } |
517 | | } else { |
518 | 1.50M | self.consume_bits(litlen_code_bits); |
519 | 1.50M | bits = self.buffer; |
520 | 1.50M | litlen_entry = litlen_entry2; |
521 | 1.50M | litlen_code_bits = litlen_code_bits2; |
522 | 1.50M | if self.nbits < 48 { |
523 | 54.1k | self.fill_buffer(remaining_input); |
524 | 1.45M | } |
525 | | } |
526 | 47.6M | } else { |
527 | 47.6M | bits = self.buffer; |
528 | 47.6M | } |
529 | | |
530 | | // The next symbol is either a 13+ bit literal, back-reference, or an EOF symbol. |
531 | 49.4M | let (length_base, length_extra_bits, litlen_code_bits) = |
532 | 49.6M | if litlen_entry & EXCEPTIONAL_ENTRY == 0 { |
533 | 49.3M | ( |
534 | 49.3M | litlen_entry >> 16, |
535 | 49.3M | (litlen_entry >> 8) as u8, |
536 | 49.3M | litlen_code_bits, |
537 | 49.3M | ) |
538 | 322k | } else if litlen_entry & SECONDARY_TABLE_ENTRY != 0 { |
539 | 190k | let secondary_table_index = |
540 | 190k | (litlen_entry >> 16) + ((bits >> 12) as u32 & (litlen_entry & 0xff)); |
541 | 190k | let secondary_entry = |
542 | 190k | self.compression.secondary_table[secondary_table_index as usize]; |
543 | 190k | let litlen_symbol = secondary_entry >> 4; |
544 | 190k | let litlen_code_bits = (secondary_entry & 0xf) as u8; |
545 | | |
546 | 190k | match litlen_symbol { |
547 | 190k | 0..=255 => { |
548 | 53.5k | self.consume_bits(litlen_code_bits); |
549 | 53.5k | litlen_entry = |
550 | 53.5k | self.compression.litlen_table[(self.buffer & 0xfff) as usize]; |
551 | 53.5k | self.fill_buffer(remaining_input); |
552 | 53.5k | output[output_index] = litlen_symbol as u8; |
553 | 53.5k | output_index += 1; |
554 | 53.5k | continue; |
555 | | } |
556 | | 256 => { |
557 | 3.31k | self.consume_bits(litlen_code_bits); |
558 | 3.31k | self.state = match self.last_block { |
559 | 11 | true => State::Checksum, |
560 | 3.30k | false => State::BlockHeader, |
561 | | }; |
562 | 3.31k | break; |
563 | | } |
564 | 133k | _ => ( |
565 | 133k | LEN_SYM_TO_LEN_BASE[litlen_symbol as usize - 257] as u32, |
566 | 133k | LEN_SYM_TO_LEN_EXTRA[litlen_symbol as usize - 257], |
567 | 133k | litlen_code_bits, |
568 | 133k | ), |
569 | | } |
570 | 132k | } else if litlen_code_bits == 0 { |
571 | 0 | return Err(DecompressionError::InvalidLiteralLengthCode); |
572 | | } else { |
573 | 132k | self.consume_bits(litlen_code_bits); |
574 | 132k | self.state = match self.last_block { |
575 | 593 | true => State::Checksum, |
576 | 132k | false => State::BlockHeader, |
577 | | }; |
578 | 132k | break; |
579 | | }; |
580 | 49.4M | bits >>= litlen_code_bits; |
581 | | |
582 | 49.4M | let length_extra_mask = (1 << length_extra_bits) - 1; |
583 | 49.4M | let length = length_base as usize + (bits & length_extra_mask) as usize; |
584 | 49.4M | bits >>= length_extra_bits; |
585 | | |
586 | 49.4M | let dist_entry = self.compression.dist_table[(bits & 0x1ff) as usize]; |
587 | 49.4M | let (dist_base, dist_extra_bits, dist_code_bits) = if dist_entry & LITERAL_ENTRY != 0 { |
588 | 49.2M | ( |
589 | 49.2M | (dist_entry >> 16) as u16, |
590 | 49.2M | (dist_entry >> 8) as u8 & 0xf, |
591 | 49.2M | dist_entry as u8, |
592 | 49.2M | ) |
593 | 191k | } else if dist_entry >> 8 == 0 { |
594 | 36 | return Err(DecompressionError::InvalidDistanceCode); |
595 | | } else { |
596 | 191k | let secondary_table_index = |
597 | 191k | (dist_entry >> 16) + ((bits >> 9) as u32 & (dist_entry & 0xff)); |
598 | 191k | let secondary_entry = |
599 | 191k | self.compression.dist_secondary_table[secondary_table_index as usize]; |
600 | 191k | let dist_symbol = (secondary_entry >> 4) as usize; |
601 | 191k | if dist_symbol >= 30 { |
602 | 0 | return Err(DecompressionError::InvalidDistanceCode); |
603 | 191k | } |
604 | | |
605 | 191k | ( |
606 | 191k | DIST_SYM_TO_DIST_BASE[dist_symbol], |
607 | 191k | DIST_SYM_TO_DIST_EXTRA[dist_symbol], |
608 | 191k | (secondary_entry & 0xf) as u8, |
609 | 191k | ) |
610 | | }; |
611 | 49.4M | bits >>= dist_code_bits; |
612 | | |
613 | 49.4M | let dist = dist_base as usize + (bits & ((1 << dist_extra_bits) - 1)) as usize; |
614 | 49.4M | if dist > output_index { |
615 | 166 | return Err(DecompressionError::DistanceTooFarBack); |
616 | 49.4M | } |
617 | | |
618 | 49.4M | self.consume_bits( |
619 | 49.4M | litlen_code_bits + length_extra_bits + dist_code_bits + dist_extra_bits, |
620 | | ); |
621 | 49.4M | self.fill_buffer(remaining_input); |
622 | 49.4M | litlen_entry = self.compression.litlen_table[(self.buffer & 0xfff) as usize]; |
623 | | |
624 | 49.4M | let copy_length = length.min(output.len() - output_index); |
625 | 49.4M | if dist == 1 { |
626 | 25.2M | let last = output[output_index - 1]; |
627 | 25.2M | output[output_index..][..copy_length].fill(last); |
628 | | |
629 | 25.2M | if copy_length < length { |
630 | 543k | self.queued_rle = Some((last, length - copy_length)); |
631 | 543k | output_index = output.len(); |
632 | 543k | break; |
633 | 24.6M | } |
634 | 24.2M | } else if output_index + length + 15 <= output.len() { |
635 | 23.6M | let start = output_index - dist; |
636 | 23.6M | output.copy_within(start..start + 16, output_index); |
637 | | |
638 | 23.6M | if length > 16 || dist < 16 { |
639 | 1.54G | for i in (0..length).step_by(dist.min(16)).skip(1) { |
640 | 1.54G | output.copy_within(start + i..start + i + 16, output_index + i); |
641 | 1.54G | } |
642 | 1.16M | } |
643 | | } else { |
644 | 565k | if dist < copy_length { |
645 | 72.6M | for i in 0..copy_length { |
646 | 72.6M | output[output_index + i] = output[output_index + i - dist]; |
647 | 72.6M | } |
648 | | } else { |
649 | 45.7k | output.copy_within( |
650 | 45.7k | output_index - dist..output_index + copy_length - dist, |
651 | 45.7k | output_index, |
652 | | ) |
653 | | } |
654 | | |
655 | 565k | if copy_length < length { |
656 | 527k | self.queued_backref = Some((dist, length - copy_length)); |
657 | 527k | output_index = output.len(); |
658 | 527k | break; |
659 | 38.1k | } |
660 | | } |
661 | 48.4M | output_index += copy_length; |
662 | | } |
663 | | |
664 | | // Careful decoding loop. |
665 | | // |
666 | | // This loop processes the remaining input when we're too close to the end of the input or |
667 | | // output to use the fast loop. |
668 | 1.39M | while let State::CompressedData = self.state { |
669 | 1.26M | self.fill_buffer(remaining_input); |
670 | 1.26M | if output_index == output.len() { |
671 | 1.07M | break; |
672 | 186k | } |
673 | | |
674 | 186k | let mut bits = self.buffer; |
675 | 186k | let litlen_entry = self.compression.litlen_table[(bits & 0xfff) as usize]; |
676 | 186k | let litlen_code_bits = litlen_entry as u8; |
677 | | |
678 | 186k | if litlen_entry & LITERAL_ENTRY != 0 { |
679 | | // Fast path: the next symbol is <= 12 bits and a literal, the table specifies the |
680 | | // output bytes and we can directly write them to the output buffer. |
681 | 64.8k | let advance_output_bytes = ((litlen_entry & 0xf00) >> 8) as usize; |
682 | | |
683 | 64.8k | if self.nbits < litlen_code_bits { |
684 | 4.63k | break; |
685 | 60.2k | } else if output_index + 1 < output.len() { |
686 | 58.8k | output[output_index] = (litlen_entry >> 16) as u8; |
687 | 58.8k | output[output_index + 1] = (litlen_entry >> 24) as u8; |
688 | 58.8k | output_index += advance_output_bytes; |
689 | 58.8k | self.consume_bits(litlen_code_bits); |
690 | 58.8k | continue; |
691 | 1.32k | } else if output_index + advance_output_bytes == output.len() { |
692 | 754 | debug_assert_eq!(advance_output_bytes, 1); |
693 | 754 | output[output_index] = (litlen_entry >> 16) as u8; |
694 | 754 | output_index += 1; |
695 | 754 | self.consume_bits(litlen_code_bits); |
696 | 754 | break; |
697 | | } else { |
698 | 568 | debug_assert_eq!(advance_output_bytes, 2); |
699 | 568 | output[output_index] = (litlen_entry >> 16) as u8; |
700 | 568 | self.queued_rle = Some(((litlen_entry >> 24) as u8, 1)); |
701 | 568 | output_index += 1; |
702 | 568 | self.consume_bits(litlen_code_bits); |
703 | 568 | break; |
704 | | } |
705 | 121k | } |
706 | | |
707 | 117k | let (length_base, length_extra_bits, litlen_code_bits) = |
708 | 121k | if litlen_entry & EXCEPTIONAL_ENTRY == 0 { |
709 | 116k | ( |
710 | 116k | litlen_entry >> 16, |
711 | 116k | (litlen_entry >> 8) as u8, |
712 | 116k | litlen_code_bits, |
713 | 116k | ) |
714 | 4.66k | } else if litlen_entry & SECONDARY_TABLE_ENTRY != 0 { |
715 | 902 | let secondary_table_index = |
716 | 902 | (litlen_entry >> 16) + ((bits >> 12) as u32 & (litlen_entry & 0xff)); |
717 | 902 | let secondary_entry = |
718 | 902 | self.compression.secondary_table[secondary_table_index as usize]; |
719 | 902 | let litlen_symbol = secondary_entry >> 4; |
720 | 902 | let litlen_code_bits = (secondary_entry & 0xf) as u8; |
721 | | |
722 | 902 | if self.nbits < litlen_code_bits { |
723 | 34 | break; |
724 | 868 | } else if litlen_symbol < 256 { |
725 | 467 | self.consume_bits(litlen_code_bits); |
726 | 467 | output[output_index] = litlen_symbol as u8; |
727 | 467 | output_index += 1; |
728 | 467 | continue; |
729 | 401 | } else if litlen_symbol == 256 { |
730 | 26 | self.consume_bits(litlen_code_bits); |
731 | 26 | self.state = match self.last_block { |
732 | 1 | true => State::Checksum, |
733 | 25 | false => State::BlockHeader, |
734 | | }; |
735 | 26 | break; |
736 | 375 | } |
737 | | |
738 | 375 | ( |
739 | 375 | LEN_SYM_TO_LEN_BASE[litlen_symbol as usize - 257] as u32, |
740 | 375 | LEN_SYM_TO_LEN_EXTRA[litlen_symbol as usize - 257], |
741 | 375 | litlen_code_bits, |
742 | 375 | ) |
743 | 3.75k | } else if litlen_code_bits == 0 { |
744 | 0 | return Err(DecompressionError::InvalidLiteralLengthCode); |
745 | | } else { |
746 | 3.75k | if self.nbits < litlen_code_bits { |
747 | 1.02k | break; |
748 | 2.73k | } |
749 | 2.73k | self.consume_bits(litlen_code_bits); |
750 | 2.73k | self.state = match self.last_block { |
751 | 1.61k | true => State::Checksum, |
752 | 1.12k | false => State::BlockHeader, |
753 | | }; |
754 | 2.73k | break; |
755 | | }; |
756 | 117k | bits >>= litlen_code_bits; |
757 | | |
758 | 117k | let length_extra_mask = (1 << length_extra_bits) - 1; |
759 | 117k | let length = length_base as usize + (bits & length_extra_mask) as usize; |
760 | 117k | bits >>= length_extra_bits; |
761 | | |
762 | 117k | let dist_entry = self.compression.dist_table[(bits & 0x1ff) as usize]; |
763 | 117k | let (dist_base, dist_extra_bits, dist_code_bits) = if dist_entry & LITERAL_ENTRY != 0 { |
764 | 115k | ( |
765 | 115k | (dist_entry >> 16) as u16, |
766 | 115k | (dist_entry >> 8) as u8 & 0xf, |
767 | 115k | dist_entry as u8, |
768 | 115k | ) |
769 | 1.20k | } else if self.nbits > litlen_code_bits + length_extra_bits + 9 { |
770 | 712 | if dist_entry >> 8 == 0 { |
771 | 64 | return Err(DecompressionError::InvalidDistanceCode); |
772 | 648 | } |
773 | | |
774 | 648 | let secondary_table_index = |
775 | 648 | (dist_entry >> 16) + ((bits >> 9) as u32 & (dist_entry & 0xff)); |
776 | 648 | let secondary_entry = |
777 | 648 | self.compression.dist_secondary_table[secondary_table_index as usize]; |
778 | 648 | let dist_symbol = (secondary_entry >> 4) as usize; |
779 | 648 | if dist_symbol >= 30 { |
780 | 0 | return Err(DecompressionError::InvalidDistanceCode); |
781 | 648 | } |
782 | | |
783 | 648 | ( |
784 | 648 | DIST_SYM_TO_DIST_BASE[dist_symbol], |
785 | 648 | DIST_SYM_TO_DIST_EXTRA[dist_symbol], |
786 | 648 | (secondary_entry & 0xf) as u8, |
787 | 648 | ) |
788 | | } else { |
789 | 490 | break; |
790 | | }; |
791 | 116k | bits >>= dist_code_bits; |
792 | | |
793 | 116k | let dist = dist_base as usize + (bits & ((1 << dist_extra_bits) - 1)) as usize; |
794 | 116k | let total_bits = |
795 | 116k | litlen_code_bits + length_extra_bits + dist_code_bits + dist_extra_bits; |
796 | | |
797 | 116k | if self.nbits < total_bits { |
798 | 2.96k | break; |
799 | 113k | } else if dist > output_index { |
800 | 185 | return Err(DecompressionError::DistanceTooFarBack); |
801 | 113k | } |
802 | | |
803 | 113k | self.consume_bits(total_bits); |
804 | | |
805 | 113k | let copy_length = length.min(output.len() - output_index); |
806 | 113k | if dist == 1 { |
807 | 39.5k | let last = output[output_index - 1]; |
808 | 39.5k | output[output_index..][..copy_length].fill(last); |
809 | | |
810 | 39.5k | if copy_length < length { |
811 | 16.9k | self.queued_rle = Some((last, length - copy_length)); |
812 | 16.9k | output_index = output.len(); |
813 | 16.9k | break; |
814 | 22.5k | } |
815 | 73.7k | } else if output_index + length + 15 <= output.len() { |
816 | 53.5k | let start = output_index - dist; |
817 | 53.5k | output.copy_within(start..start + 16, output_index); |
818 | | |
819 | 53.5k | if length > 16 || dist < 16 { |
820 | 3.94M | for i in (0..length).step_by(dist.min(16)).skip(1) { |
821 | 3.94M | output.copy_within(start + i..start + i + 16, output_index + i); |
822 | 3.94M | } |
823 | 1.81k | } |
824 | | } else { |
825 | 20.1k | if dist < copy_length { |
826 | 274k | for i in 0..copy_length { |
827 | 274k | output[output_index + i] = output[output_index + i - dist]; |
828 | 274k | } |
829 | | } else { |
830 | 10.3k | output.copy_within( |
831 | 10.3k | output_index - dist..output_index + copy_length - dist, |
832 | 10.3k | output_index, |
833 | | ) |
834 | | } |
835 | | |
836 | 20.1k | if copy_length < length { |
837 | 18.8k | self.queued_backref = Some((dist, length - copy_length)); |
838 | 18.8k | output_index = output.len(); |
839 | 18.8k | break; |
840 | 1.33k | } |
841 | | } |
842 | 77.4k | output_index += copy_length; |
843 | | } |
844 | | |
845 | 1.26M | if self.state == State::CompressedData |
846 | 1.12M | && self.queued_backref.is_none() |
847 | 576k | && self.queued_rle.is_none() |
848 | 15.2k | && self.nbits >= 15 |
849 | 6.86k | && self.peak_bits(15) as u16 & self.compression.eof_mask == self.compression.eof_code |
850 | | { |
851 | 119 | self.consume_bits(self.compression.eof_bits); |
852 | 119 | self.state = match self.last_block { |
853 | 68 | true => State::Checksum, |
854 | 51 | false => State::BlockHeader, |
855 | | }; |
856 | 1.26M | } |
857 | | |
858 | 1.26M | Ok(output_index) |
859 | 1.26M | } |
860 | | |
    /// Decompresses a chunk of data.
    ///
    /// Returns the number of bytes read from `input` and the number of bytes written to `output`,
    /// or an error if the deflate stream is not valid. `input` is the compressed data. `output` is
    /// the buffer to write the decompressed data to, starting at index `output_position`.
    /// `end_of_input` indicates whether more data may be available in the future.
    ///
    /// The contents of `output` after `output_position` are ignored. However, this function may
    /// write additional data to `output` past what is indicated by the return value.
    ///
    /// When this function returns `Ok`, at least one of the following is true:
    /// - The input is fully consumed.
    /// - The output is full but there are more bytes to output.
    /// - The deflate stream is complete (and `is_done` will return true).
    ///
    /// # Panics
    ///
    /// This function will panic if `output_position` is out of bounds.
    pub fn read(
        &mut self,
        input: &[u8],
        output: &mut [u8],
        output_position: usize,
        end_of_input: bool,
    ) -> Result<(usize, usize), DecompressionError> {
        // Nothing left to do once the stream has been fully decoded.
        if let State::Done = self.state {
            return Ok((0, 0));
        }

        assert!(output_position <= output.len());

        let mut remaining_input = input;
        let mut output_index = output_position;

        // Flush a run-length byte fill carried over from a previous call whose
        // output buffer filled up mid-run. If it still doesn't fit, requeue the
        // remainder and return early.
        if let Some((data, len)) = self.queued_rle.take() {
            let n = len.min(output.len() - output_index);
            output[output_index..][..n].fill(data);
            output_index += n;
            if n < len {
                self.queued_rle = Some((data, len - n));
                return Ok((0, n));
            }
        }
        // Likewise flush a carried-over back-reference copy. The byte-by-byte
        // loop is deliberate: source and destination ranges may overlap when
        // `dist` is smaller than the copy length.
        if let Some((dist, len)) = self.queued_backref.take() {
            let n = len.min(output.len() - output_index);
            for i in 0..n {
                output[output_index + i] = output[output_index + i - dist];
            }
            output_index += n;
            if n < len {
                self.queued_backref = Some((dist, len - n));
                return Ok((0, n));
            }
        }

        // Main decoding state machine. Each iteration handles the current
        // state; the loop exits as soon as an iteration makes no state
        // transition, i.e. no further progress is possible with the available
        // input/output.
        let mut last_state = None;
        while last_state != Some(self.state) {
            last_state = Some(self.state);
            match self.state {
                State::ZlibHeader => {
                    self.fill_buffer(&mut remaining_input);
                    if self.nbits < 16 {
                        break;
                    }

                    let input0 = self.peak_bits(8);
                    let input1 = self.peak_bits(16) >> 8 & 0xff;
                    // Validate the CMF/FLG pair: compression method 8
                    // (deflate), window size <= 32 KiB, no preset dictionary,
                    // and the 16-bit header value divisible by 31.
                    if input0 & 0x0f != 0x08
                        || (input0 & 0xf0) > 0x70
                        || input1 & 0x20 != 0
                        || (input0 << 8 | input1) % 31 != 0
                    {
                        return Err(DecompressionError::BadZlibHeader);
                    }

                    self.consume_bits(16);
                    self.state = State::BlockHeader;
                }
                State::BlockHeader => {
                    self.read_block_header(&mut remaining_input)?;
                }
                State::CodeLengthCodes => {
                    self.read_code_length_codes(&mut remaining_input)?;
                }
                State::CodeLengths => {
                    self.read_code_lengths(&mut remaining_input)?;
                }
                State::CompressedData => {
                    output_index =
                        self.read_compressed(&mut remaining_input, output, output_index)?
                }
                State::UncompressedData => {
                    // Drain any bytes from our buffer.
                    debug_assert_eq!(self.nbits % 8, 0);
                    while self.nbits > 0
                        && self.uncompressed_bytes_left > 0
                        && output_index < output.len()
                    {
                        output[output_index] = self.peak_bits(8) as u8;
                        self.consume_bits(8);
                        output_index += 1;
                        self.uncompressed_bytes_left -= 1;
                    }
                    // Buffer may contain one additional byte. Clear it to avoid confusion.
                    if self.nbits == 0 {
                        self.buffer = 0;
                    }

                    // Copy subsequent bytes directly from the input.
                    let copy_bytes = (self.uncompressed_bytes_left as usize)
                        .min(remaining_input.len())
                        .min(output.len() - output_index);
                    output[output_index..][..copy_bytes]
                        .copy_from_slice(&remaining_input[..copy_bytes]);
                    remaining_input = &remaining_input[copy_bytes..];
                    output_index += copy_bytes;
                    self.uncompressed_bytes_left -= copy_bytes as u16;

                    if self.uncompressed_bytes_left == 0 {
                        self.state = if self.last_block {
                            State::Checksum
                        } else {
                            State::BlockHeader
                        };
                    }
                }
                State::Checksum => {
                    self.fill_buffer(&mut remaining_input);

                    // Discard any partial byte so the 32-bit Adler-32 trailer
                    // is read byte-aligned, then compare it (big-endian on the
                    // wire, hence `swap_bytes`) to the running checksum.
                    let align_bits = self.nbits % 8;
                    if self.nbits >= 32 + align_bits {
                        self.checksum.write(&output[output_position..output_index]);
                        if align_bits != 0 {
                            self.consume_bits(align_bits);
                        }
                        #[cfg(not(fuzzing))]
                        if !self.ignore_adler32
                            && (self.peak_bits(32) as u32).swap_bytes() != self.checksum.finish()
                        {
                            return Err(DecompressionError::WrongChecksum);
                        }
                        self.state = State::Done;
                        self.consume_bits(32);
                        break;
                    }
                }
                State::Done => unreachable!(),
            }
        }

        // Fold newly produced bytes into the running Adler-32 checksum. (The
        // Checksum arm above already wrote this call's bytes before finishing.)
        if !self.ignore_adler32 && self.state != State::Done {
            self.checksum.write(&output[output_position..output_index]);
        }

        if self.state == State::Done || !end_of_input || output_index == output.len() {
            let input_left = remaining_input.len();
            Ok((input.len() - input_left, output_index - output_position))
        } else {
            Err(DecompressionError::InsufficientInput)
        }
    }
1023 | | |
1024 | | /// Returns true if the decompressor has finished decompressing the input. |
1025 | 2.15M | pub fn is_done(&self) -> bool { |
1026 | 2.15M | self.state == State::Done |
1027 | 2.15M | } |
1028 | | } |
1029 | | |
1030 | | /// Decompress the given data. |
1031 | 0 | pub fn decompress_to_vec(input: &[u8]) -> Result<Vec<u8>, DecompressionError> { |
1032 | 0 | match decompress_to_vec_bounded(input, usize::MAX) { |
1033 | 0 | Ok(output) => Ok(output), |
1034 | 0 | Err(BoundedDecompressionError::DecompressionError { inner }) => Err(inner), |
1035 | | Err(BoundedDecompressionError::OutputTooLarge { .. }) => { |
1036 | 0 | unreachable!("Impossible to allocate more than isize::MAX bytes") |
1037 | | } |
1038 | | } |
1039 | 0 | } |
1040 | | |
1041 | | /// An error encountered while decompressing a deflate stream given a bounded maximum output. |
1042 | | pub enum BoundedDecompressionError { |
1043 | | /// The input is not a valid deflate stream. |
1044 | | DecompressionError { |
1045 | | /// The underlying error. |
1046 | | inner: DecompressionError, |
1047 | | }, |
1048 | | |
1049 | | /// The output is too large. |
1050 | | OutputTooLarge { |
1051 | | /// The output decoded so far. |
1052 | | partial_output: Vec<u8>, |
1053 | | }, |
1054 | | } |
1055 | | impl From<DecompressionError> for BoundedDecompressionError { |
1056 | 680 | fn from(inner: DecompressionError) -> Self { |
1057 | 680 | BoundedDecompressionError::DecompressionError { inner } |
1058 | 680 | } |
1059 | | } |
1060 | | |
1061 | | /// Decompress the given data, returning an error if the output is larger than |
1062 | | /// `maxlen` bytes. |
1063 | 996 | pub fn decompress_to_vec_bounded( |
1064 | 996 | input: &[u8], |
1065 | 996 | maxlen: usize, |
1066 | 996 | ) -> Result<Vec<u8>, BoundedDecompressionError> { |
1067 | 996 | let mut decoder = Decompressor::new(); |
1068 | 996 | let mut output = vec![0; 1024.min(maxlen)]; |
1069 | 996 | let mut input_index = 0; |
1070 | 996 | let mut output_index = 0; |
1071 | | loop { |
1072 | 111k | let (consumed, produced) = |
1073 | 111k | decoder.read(&input[input_index..], &mut output, output_index, true)?; |
1074 | 111k | input_index += consumed; |
1075 | 111k | output_index += produced; |
1076 | 111k | if decoder.is_done() || output_index == maxlen { |
1077 | 316 | break; |
1078 | 110k | } |
1079 | 110k | output.resize((output_index + 32 * 1024).min(maxlen), 0); |
1080 | | } |
1081 | 316 | output.resize(output_index, 0); |
1082 | | |
1083 | 316 | if decoder.is_done() { |
1084 | 308 | Ok(output) |
1085 | | } else { |
1086 | 8 | Err(BoundedDecompressionError::OutputTooLarge { |
1087 | 8 | partial_output: output, |
1088 | 8 | }) |
1089 | | } |
1090 | 996 | } |
1091 | | |
#[cfg(test)]
mod tests {
    use crate::tables::{LENGTH_TO_LEN_EXTRA, LENGTH_TO_SYMBOL};

    use super::*;
    use rand::Rng;

    /// Compresses `data` with this crate's compressor and asserts that
    /// decompressing it reproduces the original bytes.
    fn roundtrip(data: &[u8]) {
        let compressed = crate::compress_to_vec(data);
        let decompressed = decompress_to_vec(&compressed).unwrap();
        assert_eq!(&decompressed, data);
    }

    /// Compresses `data` with miniz_oxide and asserts that this crate's
    /// decompressor reproduces the original bytes (cross-implementation check).
    fn roundtrip_miniz_oxide(data: &[u8]) {
        let compressed = miniz_oxide::deflate::compress_to_vec_zlib(data, 3);
        let decompressed = decompress_to_vec(&compressed).unwrap();
        assert_eq!(decompressed.len(), data.len());
        // Compare byte-by-byte first so a failure reports the exact offset.
        for (i, (a, b)) in decompressed.chunks(1).zip(data.chunks(1)).enumerate() {
            assert_eq!(a, b, "chunk {}..{}", i, i + 1);
        }
        assert_eq!(&decompressed, data);
    }

    /// Debug helper: decompresses `data` with both this crate and miniz_oxide
    /// and panics with context at the first byte or length mismatch.
    #[allow(unused)]
    fn compare_decompression(data: &[u8]) {
        // let decompressed0 = flate2::read::ZlibDecoder::new(std::io::Cursor::new(&data))
        //     .bytes()
        //     .collect::<Result<Vec<_>, _>>()
        //     .unwrap();
        let decompressed = decompress_to_vec(data).unwrap();
        let decompressed2 = miniz_oxide::inflate::decompress_to_vec_zlib(data).unwrap();
        for i in 0..decompressed.len().min(decompressed2.len()) {
            if decompressed[i] != decompressed2[i] {
                panic!(
                    "mismatch at index {} {:?} {:?}",
                    i,
                    &decompressed[i.saturating_sub(1)..(i + 16).min(decompressed.len())],
                    &decompressed2[i.saturating_sub(1)..(i + 16).min(decompressed2.len())]
                );
            }
        }
        if decompressed != decompressed2 {
            panic!(
                "length mismatch {} {} {:x?}",
                decompressed.len(),
                decompressed2.len(),
                &decompressed2[decompressed.len()..][..16]
            );
        }
        //assert_eq!(decompressed, decompressed2);
    }

    /// Checks that the length-symbol lookup tables are mutually consistent:
    /// every length covered by a symbol's base + extra bits maps back to that
    /// symbol and extra-bit count.
    #[test]
    fn tables() {
        for (i, &bits) in LEN_SYM_TO_LEN_EXTRA.iter().enumerate() {
            let len_base = LEN_SYM_TO_LEN_BASE[i];
            for j in 0..(1 << bits) {
                // Symbol 284 (i == 27) with all 5 extra bits set would encode
                // length 258, which deflate represents with symbol 285 instead,
                // so that combination is skipped.
                if i == 27 && j == 31 {
                    continue;
                }
                assert_eq!(LENGTH_TO_LEN_EXTRA[len_base + j - 3], bits, "{} {}", i, j);
                assert_eq!(
                    LENGTH_TO_SYMBOL[len_base + j - 3],
                    i as u16 + 257,
                    "{} {}",
                    i,
                    j
                );
            }
        }
    }

    /// Checks that `build_tables` applied to the fixed Huffman code lengths
    /// reproduces the precomputed fixed decode tables.
    #[test]
    fn fixed_tables() {
        let mut compression = CompressedBlock {
            litlen_table: Box::new([0; 4096]),
            dist_table: Box::new([0; 512]),
            secondary_table: Vec::new(),
            dist_secondary_table: Vec::new(),
            eof_code: 0,
            eof_mask: 0,
            eof_bits: 0,
        };
        Decompressor::build_tables(288, &FIXED_CODE_LENGTHS, &mut compression).unwrap();

        assert_eq!(compression.litlen_table[..512], FIXED_LITLEN_TABLE);
        assert_eq!(compression.dist_table[..32], FIXED_DIST_TABLE);
    }

    /// Smoke test: a tiny string survives a compress/decompress round trip.
    #[test]
    fn it_works() {
        roundtrip(b"Hello world!");
    }

    /// Round-trips constant (maximally run-length-compressible) buffers of
    /// several byte values and sizes.
    #[test]
    fn constant() {
        roundtrip_miniz_oxide(&[0; 50]);
        roundtrip_miniz_oxide(&vec![5; 2048]);
        roundtrip_miniz_oxide(&vec![128; 2048]);
        roundtrip_miniz_oxide(&vec![254; 2048]);
    }

    /// Round-trips random low-entropy data (bytes restricted to 0..5, so the
    /// compressor produces many back-references).
    #[test]
    fn random() {
        let mut rng = rand::thread_rng();
        let mut data = vec![0; 50000];
        for _ in 0..10 {
            for byte in &mut data {
                *byte = rng.gen::<u8>() % 5;
            }
            println!("Random data: {:?}", data);
            roundtrip_miniz_oxide(&data);
        }
    }

    /// Verifies that a corrupted Adler-32 trailer is rejected by default but
    /// tolerated after calling `ignore_adler32`.
    #[test]
    fn ignore_adler32() {
        let mut compressed = crate::compress_to_vec(b"Hello world!");
        // Corrupt the final checksum byte.
        let last_byte = compressed.len() - 1;
        compressed[last_byte] = compressed[last_byte].wrapping_add(1);

        match decompress_to_vec(&compressed) {
            Err(DecompressionError::WrongChecksum) => {}
            r => panic!("expected WrongChecksum, got {:?}", r),
        }

        let mut decompressor = Decompressor::new();
        decompressor.ignore_adler32();
        let mut decompressed = vec![0; 1024];
        let decompressed_len = decompressor
            .read(&compressed, &mut decompressed, 0, true)
            .unwrap()
            .1;
        assert_eq!(&decompressed[..decompressed_len], b"Hello world!");
    }

    /// Feeds all but the last byte of the stream, then the final byte, and
    /// verifies the decompressor completes across the split (the checksum
    /// straddles the two calls).
    #[test]
    fn checksum_after_eof() {
        let input = b"Hello world!";
        let compressed = crate::compress_to_vec(input);

        let mut decompressor = Decompressor::new();
        let mut decompressed = vec![0; 1024];
        let (input_consumed, output_written) = decompressor
            .read(
                &compressed[..compressed.len() - 1],
                &mut decompressed,
                0,
                false,
            )
            .unwrap();
        assert_eq!(output_written, input.len());
        assert_eq!(input_consumed, compressed.len() - 1);

        let (input_consumed, output_written) = decompressor
            .read(
                &compressed[input_consumed..],
                &mut decompressed[..output_written],
                output_written,
                true,
            )
            .unwrap();
        assert!(decompressor.is_done());
        assert_eq!(input_consumed, 1);
        assert_eq!(output_written, 0);

        assert_eq!(&decompressed[..input.len()], input);
    }

    /// Verifies that zero-length stored blocks are fully consumed even with an
    /// empty output buffer, regardless of the `end_of_input` flag.
    #[test]
    fn zero_length() {
        let mut compressed = crate::compress_to_vec(b"").to_vec();

        // Splice in zero-length non-compressed blocks.
        for _ in 0..10 {
            println!("compressed len: {}", compressed.len());
            compressed.splice(2..2, [0u8, 0, 0, 0xff, 0xff].into_iter());
        }

        // Ensure that the full input is decompressed, regardless of whether
        // `end_of_input` is set.
        for end_of_input in [true, false] {
            let mut decompressor = Decompressor::new();
            let (input_consumed, output_written) = decompressor
                .read(&compressed, &mut [], 0, end_of_input)
                .unwrap();

            assert!(decompressor.is_done());
            assert_eq!(input_consumed, compressed.len());
            assert_eq!(output_written, 0);
        }
    }

    mod test_utils;
    use tables::FIXED_CODE_LENGTHS;
    use test_utils::{decompress_by_chunks, TestDecompressionError};

    /// Decompresses `input` both all-at-once and one byte at a time and
    /// asserts the two strategies agree, returning the (shared) result.
    fn verify_no_sensitivity_to_input_chunking(
        input: &[u8],
    ) -> Result<Vec<u8>, TestDecompressionError> {
        let r_whole = decompress_by_chunks(input, vec![input.len()], false);
        let r_bytewise = decompress_by_chunks(input, std::iter::repeat(1), false);
        assert_eq!(r_whole, r_bytewise);
        r_whole // Returning an arbitrary result, since this is equal to `r_bytewise`.
    }

    /// This is a regression test found by the `buf_independent` fuzzer from the `png` crate. When
    /// this test case was found, the results were unexpectedly different when 1) decompressing the
    /// whole input (successful result) vs 2) decompressing byte-by-byte
    /// (`Err(InvalidDistanceCode)`).
    #[test]
    fn test_input_chunking_sensitivity_when_handling_distance_codes() {
        let result = verify_no_sensitivity_to_input_chunking(include_bytes!(
            "../tests/input-chunking-sensitivity-example1.zz"
        ))
        .unwrap();
        assert_eq!(result.len(), 281);
        assert_eq!(simd_adler32::adler32(&result.as_slice()), 751299);
    }

    /// This is a regression test found by the `inflate_bytewise3` fuzzer from the `fdeflate`
    /// crate. When this test case was found, the results were unexpectedly different when 1)
    /// decompressing the whole input (`Err(DistanceTooFarBack)`) vs 2) decompressing byte-by-byte
    /// (successful result)`).
    #[test]
    fn test_input_chunking_sensitivity_when_no_end_of_block_symbol_example1() {
        let err = verify_no_sensitivity_to_input_chunking(include_bytes!(
            "../tests/input-chunking-sensitivity-example2.zz"
        ))
        .unwrap_err();
        assert_eq!(
            err,
            TestDecompressionError::ProdError(DecompressionError::BadLiteralLengthHuffmanTree)
        );
    }

    /// This is a regression test found by the `inflate_bytewise3` fuzzer from the `fdeflate`
    /// crate. When this test case was found, the results were unexpectedly different when 1)
    /// decompressing the whole input (`Err(InvalidDistanceCode)`) vs 2) decompressing byte-by-byte
    /// (successful result)`).
    #[test]
    fn test_input_chunking_sensitivity_when_no_end_of_block_symbol_example2() {
        let err = verify_no_sensitivity_to_input_chunking(include_bytes!(
            "../tests/input-chunking-sensitivity-example3.zz"
        ))
        .unwrap_err();
        assert_eq!(
            err,
            TestDecompressionError::ProdError(DecompressionError::BadLiteralLengthHuffmanTree)
        );
    }
}