/* inflate_p.h -- Private inline functions and macros shared by more than one inflate method
 *
 */

#ifndef INFLATE_P_H
#define INFLATE_P_H

#include <stdlib.h>
#include <string.h>  /* memcpy, used by chunkcopy_safe() below */

#include "zendian.h"
#include "zmemory.h"

/* Architecture-specific hooks. */
#ifdef S390_DFLTCC_INFLATE
# include "arch/s390/dfltcc_inflate.h"
/* DFLTCC instructions require window to be page-aligned */
# define PAD_WINDOW PAD_4096
# define WINDOW_PAD_SIZE 4096
# define HINT_ALIGNED_WINDOW HINT_ALIGNED_4096
#else
# define PAD_WINDOW PAD_64
# define WINDOW_PAD_SIZE 64
# define HINT_ALIGNED_WINDOW HINT_ALIGNED_64
/* Adjust the window size for the arch-specific inflate code. */
# define INFLATE_ADJUST_WINDOW_SIZE(n) (n)
/* Invoked at the end of inflateResetKeep(). Useful for initializing arch-specific extension blocks. */
# define INFLATE_RESET_KEEP_HOOK(strm) do {} while (0)
/* Invoked at the beginning of inflatePrime(). Useful for updating arch-specific buffers. */
# define INFLATE_PRIME_HOOK(strm, bits, value) do {} while (0)
/* Invoked at the beginning of each block. Useful for plugging arch-specific inflation code. */
# define INFLATE_TYPEDO_HOOK(strm, flush) do {} while (0)
/* Returns whether zlib-ng should compute a checksum. Set to 0 if arch-specific inflation code already does that. */
# define INFLATE_NEED_CHECKSUM(strm) 1
/* Returns whether zlib-ng should update a window. Set to 0 if arch-specific inflation code already does that. */
# define INFLATE_NEED_UPDATEWINDOW(strm) 1
/* Invoked at the beginning of inflateMark(). Useful for updating arch-specific pointers and offsets. */
# define INFLATE_MARK_HOOK(strm) do {} while (0)
/* Invoked at the beginning of inflateSyncPoint(). Useful for performing arch-specific state checks. */
# define INFLATE_SYNC_POINT_HOOK(strm) do {} while (0)
/* Invoked at the beginning of inflateSetDictionary(). Useful for checking arch-specific window data. */
# define INFLATE_SET_DICTIONARY_HOOK(strm, dict, dict_len) do {} while (0)
/* Invoked at the beginning of inflateGetDictionary(). Useful for adjusting arch-specific window data. */
# define INFLATE_GET_DICTIONARY_HOOK(strm, dict, dict_len) do {} while (0)
#endif

/*
 * Macros shared by inflate() and inflateBack()
 */

/* check function to use adler32() for zlib or crc32() for gzip */
#ifdef GUNZIP
# define UPDATE(check, buf, len) \
    (state->flags ? PREFIX(crc32)(check, buf, len) : FUNCTABLE_CALL(adler32)(check, buf, len))
#else
# define UPDATE(check, buf, len) FUNCTABLE_CALL(adler32)(check, buf, len)
#endif

/* check macros for header crc */
#ifdef GUNZIP
# define CRC2(check, word) \
    do { \
        uint16_t tmp = Z_U16_TO_LE((uint16_t)(word)); \
        check = PREFIX(crc32)(check, (const unsigned char *)&tmp, 2); \
    } while (0)

# define CRC4(check, word) \
    do { \
        uint32_t tmp = Z_U32_TO_LE((uint32_t)(word)); \
        check = PREFIX(crc32)(check, (const unsigned char *)&tmp, 4); \
    } while (0)
#endif
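
/* Why the byte swap (illustrative): gzip stores multi-byte header fields
 * little-endian on the wire (RFC 1952), and the header CRC must cover the
 * bytes as they appear in the stream. For example, a 16-bit field holding
 * 0x1234 must be hashed as the byte sequence 0x34, 0x12. Z_U16_TO_LE() and
 * Z_U32_TO_LE() are no-ops on little-endian hosts and byte swaps on
 * big-endian ones, so CRC2/CRC4 produce the same result everywhere. */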

/* Compiler optimization for the bit-accumulator count on x86: variable shift
   counts live in the 8-bit CL register, so an 8-bit count type avoids
   widening and masking of the count. */
#ifdef ARCH_X86
typedef uint8_t bits_t;
#else
typedef unsigned bits_t;
#endif

/* Load registers with state in inflate() for speed */
#define LOAD() \
    do { \
        put = strm->next_out; \
        left = strm->avail_out; \
        next = strm->next_in; \
        have = strm->avail_in; \
        hold = state->hold; \
        bits = (bits_t)state->bits; \
    } while (0)

/* Restore state from registers in inflate() */
#define RESTORE() \
    do { \
        strm->next_out = put; \
        strm->avail_out = left; \
        strm->next_in = (z_const unsigned char *)next; \
        strm->avail_in = have; \
        state->hold = hold; \
        state->bits = bits; \
    } while (0)

/* Refill to have at least 56 bits in the bit accumulator */
#define REFILL() do { \
    hold |= load_64_bits(in, bits); \
    in += (63 ^ bits) >> 3; \
    bits |= 56; \
} while (0)
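
/* Worked example (illustrative): with bits = 5, load_64_bits() ORs the next
 * eight input bytes into hold above the 5 live bits (the top 5 bits of the
 * last loaded byte fall off the 64-bit accumulator). (63 ^ 5) >> 3 == 58 >> 3
 * == 7, so the input pointer advances by the 7 whole bytes that fit, and
 * bits |= 56 yields 61 == 5 + 7*8. For any bits in 0..63 the identity
 * bits + 8*((63 ^ bits) >> 3) == bits | 56 holds, leaving 56..63 valid bits. */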

/* Clear the input bit accumulator */
#define INITBITS() \
    do { \
        hold = 0; \
        bits = 0; \
    } while (0)

/* Ensure that there are at least n bits in the bit accumulator. If there is
   not enough available input to do that, then return from inflate()/inflateBack(). */
#define NEEDBITS(n) \
    do { \
        unsigned u = (unsigned)(n); \
        while (bits < (bits_t)u) \
            PULLBYTE(); \
    } while (0)

/* Return the low n bits of the bit accumulator (n < 16) */
#define BITS(n) \
    (hold & ((1U << (unsigned)(n)) - 1))

/* Remove n bits from the bit accumulator */
#define DROPBITS(n) \
    do { \
        unsigned u = (unsigned)(n); \
        hold >>= u; \
        bits -= (bits_t)u; \
    } while (0)
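
/* Typical peek/consume pattern (illustrative): a Huffman code is decoded by
 * peeking at the accumulator with BITS() to index a table, then consuming
 * the entry's length with DROPBITS() once it is known:
 *
 *     here = state->lencode[BITS(state->lenbits)];
 *     DROPBITS(here.bits);
 */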

/* Remove zero to seven bits as needed to go to a byte boundary */
#define BYTEBITS() \
    do { \
        hold >>= bits & 7; \
        bits -= bits & 7; \
    } while (0)

/* Set mode=BAD and prepare error message */
#define SET_BAD(errmsg) \
    do { \
        state->mode = BAD; \
        strm->msg = (char *)errmsg; \
    } while (0)

/* Huffman code table entry format for length/distance codes (op & 16 set):
 * bits = code_bits + extra_bits (combined for single-shift decode)
 * op   = 16 | code_bits
 * val  = base value
 *
 * For literals (op == 0): bits = code_bits, val = literal byte
 */

/* Extract code size from a Huffman table entry */
#define CODE_BITS(here) \
    ((unsigned)((here.op & 16) ? (here.op & 15) : here.bits))

/* Extract extra bits count from a length/distance code entry */
#define CODE_EXTRA(here) \
    ((unsigned)((here.op & 16) ? (here.bits - (here.op & 15)) : 0))

/* Extract extra bits value from saved bit accumulator */
#define EXTRA_BITS(old, here, op) \
    ((old & (((uint64_t)1 << here.bits) - 1)) >> (op & MAX_BITS))

/* Build combined op field: preserves extra if not len/dist, else combines with code_bits */
#define COMBINE_OP(extra, code_bits) \
    ((unsigned char)((extra) & 16 ? (code_bits) | 16 : (extra)))

/* Build combined bits field: code_bits + extra_bits from extra's low nibble */
#define COMBINE_BITS(code_bits, extra) \
    ((unsigned char)((code_bits) + ((extra) & 15)))
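
/* Worked example (illustrative, values from the fixed length table): the
 * length symbol with base value 19 takes 2 extra bits and has a 7-bit code
 * in the fixed Huffman tree. The table builder stores it as
 *     op   = COMBINE_OP(16 | 2, 7)   == 16 | 7 == 23
 *     bits = COMBINE_BITS(7, 16 | 2) == 7 + 2  == 9
 *     val  = 19
 * During decode, CODE_BITS(here) == 7 and CODE_EXTRA(here) == 2, while
 * EXTRA_BITS(old, here, here.op) masks old to 9 bits and shifts off the
 * 7 code bits, so length == 19 + extra and a single DROPBITS(here.bits)
 * consumes the code and extra bits together. */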

/* Trace macros for debugging */
#define TRACE_LITERAL(val) \
    Tracevv((stderr, val >= 0x20 && val < 0x7f ? \
            "inflate: literal '%c'\n" : \
            "inflate: literal 0x%02x\n", val))

#define TRACE_LENGTH(len) \
    Tracevv((stderr, "inflate: length %u\n", len))

#define TRACE_DISTANCE(dist) \
    Tracevv((stderr, "inflate: distance %u\n", dist))

#define TRACE_END_OF_BLOCK() \
    Tracevv((stderr, "inflate: end of block\n"))

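/* Rationale (informal, not normative): inflate_fast() refills the accumulator
 * with unaligned 8-byte reads and may refill more than once per iteration, so
 * it needs an input safety margin before falling back to the careful path;
 * likewise the output margin must cover the maximum match length of 258 plus
 * slack for chunked stores that can write past the match end. */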
#define INFLATE_FAST_MIN_HAVE 15
#define INFLATE_FAST_MIN_LEFT 260

/* Load 64 bits from IN and place the bytes at offset BITS in the result. */
static inline uint64_t load_64_bits(const unsigned char *in, unsigned bits) {
    uint64_t chunk = zng_memread_8(in);
    return Z_U64_FROM_LE(chunk) << bits;
}
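
/* Example (illustrative): if in points at bytes 01 02 03 04 05 06 07 08 and
 * bits == 4, the little-endian read gives chunk == 0x0807060504030201 and the
 * shifted result 0x8070605040302010 places the first input byte at bit 4,
 * immediately above the live bits already held by the caller. */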

/* Behave like chunkcopy, but avoid writing beyond the end of legal output. */
static inline uint8_t* chunkcopy_safe(uint8_t *out, uint8_t *from, uint64_t len, uint8_t *safe) {
    uint64_t safelen = safe - out;
    len = MIN(len, safelen);
    int32_t olap_src = from >= out && from < out + len;
    int32_t olap_dst = out >= from && out < from + len;
    uint64_t tocopy;

    /* For all cases without overlap, memcpy is ideal */
    if (!(olap_src || olap_dst)) {
        memcpy(out, from, (size_t)len);
        return out + len;
    }

    /* Complete overlap: Source == destination */
    if (out == from) {
        return out + len;
    }

    /* We are emulating a self-modifying copy loop here. To do this in a way that doesn't produce undefined behavior,
     * we have to get a bit clever. First, if the overlap is such that src falls between dst and dst+len, we can do the
     * initial bulk memcpy of the nonoverlapping region. Then, we can leverage the size of this to determine the safest
     * atomic memcpy size we can pick such that we have non-overlapping regions. This effectively becomes a safe
     * lookbehind or lookahead distance. */
    uint64_t non_olap_size = llabs(from - out); // llabs rather than labs: long is only 32 bits on Windows (LLP64)

    /* So this doesn't give us a worst-case scenario of function calls in a loop,
     * we want to instead break this down into copy blocks of fixed lengths
     *
     * TODO: The memcpy calls aren't inlined on architectures with strict memory alignment
     */
    while (len) {
        tocopy = MIN(non_olap_size, len);
        len -= tocopy;

        while (tocopy >= 16) {
            memcpy(out, from, 16);
            out += 16;
            from += 16;
            tocopy -= 16;
        }

        if (tocopy >= 8) {
            memcpy(out, from, 8);
            out += 8;
            from += 8;
            tocopy -= 8;
        }

        if (tocopy >= 4) {
            memcpy(out, from, 4);
            out += 4;
            from += 4;
            tocopy -= 4;
        }

        while (tocopy--) {
            *out++ = *from++;
        }
    }

    return out;
}
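
/* Example (illustrative): copying a match with distance 8, e.g.
 * chunkcopy_safe(out, out - 8, 32, safe), has overlapping regions, so the
 * copy must behave like a forward byte-at-a-time loop: non_olap_size == 8,
 * each pass moves one 8-byte block, and later passes read bytes written by
 * earlier ones, replicating the 8-byte pattern across the 32 output bytes
 * with far fewer stores than a byte loop. */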

#endif