/proc/self/cwd/external/snappy/snappy.cc
Line | Count | Source |
1 | | // Copyright 2005 Google Inc. All Rights Reserved. |
2 | | // |
3 | | // Redistribution and use in source and binary forms, with or without |
4 | | // modification, are permitted provided that the following conditions are |
5 | | // met: |
6 | | // |
7 | | // * Redistributions of source code must retain the above copyright |
8 | | // notice, this list of conditions and the following disclaimer. |
9 | | // * Redistributions in binary form must reproduce the above |
10 | | // copyright notice, this list of conditions and the following disclaimer |
11 | | // in the documentation and/or other materials provided with the |
12 | | // distribution. |
13 | | // * Neither the name of Google Inc. nor the names of its |
14 | | // contributors may be used to endorse or promote products derived from |
15 | | // this software without specific prior written permission. |
16 | | // |
17 | | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
18 | | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
19 | | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
20 | | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
21 | | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
22 | | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
23 | | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
24 | | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
25 | | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 | | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
27 | | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | | |
29 | | #include "snappy-internal.h" |
30 | | #include "snappy-sinksource.h" |
31 | | #include "snappy.h" |
32 | | #if !defined(SNAPPY_HAVE_BMI2) |
33 | | // __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2 |
34 | | // specifically, but it does define __AVX2__ when AVX2 support is available. |
35 | | // Fortunately, AVX2 was introduced in Haswell, just like BMI2. |
36 | | // |
37 | | // BMI2 is not defined as a subset of AVX2 (unlike SSSE3 and AVX above). So, |
38 | | // GCC and Clang can build code with AVX2 enabled but BMI2 disabled, in which |
39 | | // case issuing BMI2 instructions results in a compiler error. |
40 | | #if defined(__BMI2__) || (defined(_MSC_VER) && defined(__AVX2__)) |
41 | | #define SNAPPY_HAVE_BMI2 1 |
42 | | #else |
43 | | #define SNAPPY_HAVE_BMI2 0 |
44 | | #endif |
45 | | #endif // !defined(SNAPPY_HAVE_BMI2) |
46 | | |
47 | | #if !defined(SNAPPY_HAVE_X86_CRC32) |
48 | | #if defined(__SSE4_2__) |
49 | | #define SNAPPY_HAVE_X86_CRC32 1 |
50 | | #else |
51 | | #define SNAPPY_HAVE_X86_CRC32 0 |
52 | | #endif |
53 | | #endif // !defined(SNAPPY_HAVE_X86_CRC32) |
54 | | |
55 | | #if !defined(SNAPPY_HAVE_NEON_CRC32) |
56 | | #if SNAPPY_HAVE_NEON && defined(__ARM_FEATURE_CRC32) |
57 | | #define SNAPPY_HAVE_NEON_CRC32 1 |
58 | | #else |
59 | | #define SNAPPY_HAVE_NEON_CRC32 0 |
60 | | #endif |
61 | | #endif // !defined(SNAPPY_HAVE_NEON_CRC32) |
62 | | |
63 | | #if SNAPPY_HAVE_BMI2 || SNAPPY_HAVE_X86_CRC32 |
64 | | // Please do not replace with <x86intrin.h> or with headers that assume more |
65 | | // advanced SSE versions without checking with all the OWNERS. |
66 | | #include <immintrin.h> |
67 | | #elif SNAPPY_HAVE_NEON_CRC32 |
68 | | #include <arm_acle.h> |
69 | | #endif |
70 | | |
71 | | #include <algorithm> |
72 | | #include <array> |
73 | | #include <cstddef> |
74 | | #include <cstdint> |
75 | | #include <cstdio> |
76 | | #include <cstring> |
77 | | #include <memory> |
78 | | #include <string> |
79 | | #include <utility> |
80 | | #include <vector> |
81 | | |
82 | | namespace snappy { |
83 | | |
84 | | namespace { |
85 | | |
86 | | // The number of slop bytes writers use for unconditional copies. |
87 | | constexpr int kSlopBytes = 64; |
88 | | |
89 | | using internal::char_table; |
90 | | using internal::COPY_1_BYTE_OFFSET; |
91 | | using internal::COPY_2_BYTE_OFFSET; |
92 | | using internal::COPY_4_BYTE_OFFSET; |
93 | | using internal::kMaximumTagLength; |
94 | | using internal::LITERAL; |
95 | | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
96 | | using internal::V128; |
97 | | using internal::V128_Load; |
98 | | using internal::V128_LoadU; |
99 | | using internal::V128_Shuffle; |
100 | | using internal::V128_StoreU; |
101 | | using internal::V128_DupChar; |
102 | | #endif |
103 | | |
104 | | // We translate the information encoded in a tag through a lookup table to a |
105 | | // format that requires fewer instructions to decode. Effectively we store |
106 | | // the length minus the tag part of the offset. The least significant byte |
107 | | // thus stores the length, while the total length - offset is given by |
108 | | // entry - ExtractOffset(type). The nice thing is that the subtraction |
109 | | // immediately sets the flags for the necessary check that offset >= length. |
110 | | // This folds the cmp with sub. We engineer the long literals and copy-4 to |
111 | | // always fail this check, so their presence doesn't affect the fast path. |
112 | | // To prevent literals from triggering the guard against offset < length (offset |
113 | | // does not apply to literals), the table gives them a spurious offset of |
114 | | // 256. |
115 | 0 | inline constexpr int16_t MakeEntry(int16_t len, int16_t offset) { |
116 | 0 | return len - (offset << 8); |
117 | 0 | } |
118 | | |
119 | 0 | inline constexpr int16_t LengthMinusOffset(int data, int type) { |
120 | 0 | return type == 3 ? 0xFF // copy-4 (or type == 3) |
121 | 0 | : type == 2 ? MakeEntry(data + 1, 0) // copy-2 |
122 | 0 | : type == 1 ? MakeEntry((data & 7) + 4, data >> 3) // copy-1 |
123 | 0 | : data < 60 ? MakeEntry(data + 1, 1) // note spurious offset. |
124 | 0 | : 0xFF; // long literal |
125 | 0 | } |
126 | | |
127 | 0 | inline constexpr int16_t LengthMinusOffset(uint8_t tag) { |
128 | 0 | return LengthMinusOffset(tag >> 2, tag & 3); |
129 | 0 | } |
130 | | |
131 | | template <size_t... Ints> |
132 | | struct index_sequence {}; |
133 | | |
134 | | template <std::size_t N, size_t... Is> |
135 | | struct make_index_sequence : make_index_sequence<N - 1, N - 1, Is...> {}; |
136 | | |
137 | | template <size_t... Is> |
138 | | struct make_index_sequence<0, Is...> : index_sequence<Is...> {}; |
139 | | |
140 | | template <size_t... seq> |
141 | 0 | constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) { |
142 | 0 | return std::array<int16_t, 256>{LengthMinusOffset(seq)...}; |
143 | 0 | } |
144 | | |
145 | | alignas(64) const std::array<int16_t, 256> kLengthMinusOffset = |
146 | | MakeTable(make_index_sequence<256>{}); |
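A couple of spot checks of this encoding, sketched from MakeEntry() above (the tag values 0x41 and 0x00 are arbitrary examples, not taken from any particular stream):

// Copy-1 tag 0x41: type = 1, data = 0x10, so len = (0x10 & 7) + 4 = 4 and the
// high offset bits are 0x10 >> 3 = 2; the table entry is 4 - (2 << 8) = -508.
// Short literal tag 0x00: data = 0 < 60, so the entry is MakeEntry(1, 1),
// i.e. 1 - 256 = -255; the spurious offset of 256 guarantees that literals
// fail the offset >= length fast-path check.
static_assert(LengthMinusOffset(0x41) == 4 - (2 << 8), "copy-1 spot check");
static_assert(LengthMinusOffset(0x00) == 1 - (1 << 8), "literal spot check");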
147 | | |
148 | | // Given a table of uint16_t whose size is mask / 2 + 1, return a pointer to the |
149 | | // relevant entry, if any, for the given bytes. Any hash function will do, |
150 | | // but a good hash function reduces the number of collisions and thus yields |
151 | | // better compression for compressible input. |
152 | | // |
153 | | // REQUIRES: mask is 2 * (table_size - 1), and table_size is a power of two. |
154 | 0 | inline uint16_t* TableEntry(uint16_t* table, uint32_t bytes, uint32_t mask) { |
155 | | // Our choice is quicker-and-dirtier than the typical hash function; |
156 | | // empirically, that seems beneficial. The upper bits of kMagic * bytes are a |
157 | | // higher-quality hash than the lower bits, so when using kMagic * bytes we |
158 | | // also shift right to get a higher-quality end result. There's no similar |
159 | | // issue with a CRC because all of the output bits of a CRC are equally good |
160 | | // "hashes." So, a CPU instruction for CRC, if available, tends to be a good |
161 | | // choice. |
162 | | #if SNAPPY_HAVE_NEON_CRC32 |
163 | | // We use mask as the second arg to the CRC function, as it's about to |
164 | | // be used anyway; it'd be equally correct to use 0 or some constant. |
165 | | // Mathematically, _mm_crc32_u32 (or similar) is a function of the |
166 | | // xor of its arguments. |
167 | | const uint32_t hash = __crc32cw(bytes, mask); |
168 | | #elif SNAPPY_HAVE_X86_CRC32 |
169 | | const uint32_t hash = _mm_crc32_u32(bytes, mask); |
170 | | #else |
171 | 0 | constexpr uint32_t kMagic = 0x1e35a7bd; |
172 | 0 | const uint32_t hash = (kMagic * bytes) >> (31 - kMaxHashTableBits); |
173 | 0 | #endif |
174 | 0 | return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) + |
175 | 0 | (hash & mask)); |
176 | 0 | } |
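A sketched consistency check of the masking trick above, using a hypothetical table size (kMaxHashTableBits and the real size limits live in snappy-internal.h):

// Because table_size is a power of two, mask = 2 * (table_size - 1) has its
// low bit clear, so (hash & mask) is always an even byte offset and the
// reinterpret_cast arithmetic lands on a valid, aligned uint16_t slot.
constexpr uint32_t kExampleTableSize = 1 << 14;  // hypothetical
constexpr uint32_t kExampleMask = 2 * (kExampleTableSize - 1);
static_assert((kExampleMask & 1) == 0, "byte offsets stay 2-byte aligned");
static_assert(kExampleMask / 2 == kExampleTableSize - 1,
              "the largest offset selects the last uint16_t entry");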
177 | | |
178 | | inline uint16_t* TableEntry4ByteMatch(uint16_t* table, uint32_t bytes, |
179 | 0 | uint32_t mask) { |
180 | 0 | constexpr uint32_t kMagic = 2654435761U; |
181 | 0 | const uint32_t hash = (kMagic * bytes) >> (32 - kMaxHashTableBits); |
182 | 0 | return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) + |
183 | 0 | (hash & mask)); |
184 | 0 | } |
185 | | |
186 | | inline uint16_t* TableEntry8ByteMatch(uint16_t* table, uint64_t bytes, |
187 | 0 | uint32_t mask) { |
188 | 0 | constexpr uint64_t kMagic = 58295818150454627ULL; |
189 | 0 | const uint32_t hash = (kMagic * bytes) >> (64 - kMaxHashTableBits); |
190 | 0 | return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) + |
191 | 0 | (hash & mask)); |
192 | 0 | } |
193 | | |
194 | | } // namespace |
195 | | |
196 | 0 | size_t MaxCompressedLength(size_t source_bytes) { |
197 | | // Compressed data can be defined as: |
198 | | // compressed := item* literal* |
199 | | // item := literal* copy |
200 | | // |
201 | | // The trailing literal sequence has a space blowup of at most 62/60 |
202 | | // since a literal of length 60 needs one tag byte + one extra byte |
203 | | // for length information. |
204 | | // |
205 | | // Item blowup is trickier to measure. Suppose the "copy" op copies |
206 | | // 4 bytes of data. Because of a special check in the encoding code, |
207 | | // we produce a 4-byte copy only if the offset is < 65536. Therefore |
208 | | // the copy op takes 3 bytes to encode, and this type of item leads |
209 | | // to at most the 62/60 blowup for representing literals. |
210 | | // |
211 | | // Suppose the "copy" op copies 5 bytes of data. If the offset is big |
212 | | // enough, it will take 5 bytes to encode the copy op. Therefore the |
213 | | // worst case here is a one-byte literal followed by a five-byte copy. |
214 | | // I.e., 6 bytes of input turn into 7 bytes of "compressed" data. |
215 | | // |
216 | | // This last factor dominates the blowup, so the final estimate is: |
217 | 0 | return 32 + source_bytes + source_bytes / 6; |
218 | 0 | } |
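As a quick arithmetic check of the bound above (plain numbers, not library code):

// MaxCompressedLength(1'000'000) = 32 + 1'000'000 + 1'000'000 / 6
//                                = 32 + 1'000'000 + 166'666
//                                = 1'166'698 bytes, i.e. roughly a 16.7%
//                                  worst-case blowup plus a small constant.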
219 | | |
220 | | namespace { |
221 | | |
222 | 0 | void UnalignedCopy64(const void* src, void* dst) { |
223 | 0 | char tmp[8]; |
224 | 0 | std::memcpy(tmp, src, 8); |
225 | 0 | std::memcpy(dst, tmp, 8); |
226 | 0 | } |
227 | | |
228 | 0 | void UnalignedCopy128(const void* src, void* dst) { |
229 | | // std::memcpy() gets vectorized when the appropriate compiler options are |
230 | | // used. For example, x86 compilers targeting SSE2+ will optimize to an SSE2 |
231 | | // load and store. |
232 | 0 | char tmp[16]; |
233 | 0 | std::memcpy(tmp, src, 16); |
234 | 0 | std::memcpy(dst, tmp, 16); |
235 | 0 | } |
236 | | |
237 | | template <bool use_16bytes_chunk> |
238 | 0 | inline void ConditionalUnalignedCopy128(const char* src, char* dst) { |
239 | 0 | if (use_16bytes_chunk) { |
240 | 0 | UnalignedCopy128(src, dst); |
241 | 0 | } else { |
242 | 0 | UnalignedCopy64(src, dst); |
243 | 0 | UnalignedCopy64(src + 8, dst + 8); |
244 | 0 | } |
245 | 0 | } |
246 | | |
247 | | // Copy [src, src+(op_limit-op)) to [op, op_limit) a byte at a time. Used |
248 | | // for handling COPY operations where the input and output regions may overlap. |
249 | | // For example, suppose: |
250 | | // src == "ab" |
251 | | // op == src + 2 |
252 | | // op_limit == op + 20 |
253 | | // After IncrementalCopySlow(src, op, op_limit), the result will have eleven |
254 | | // copies of "ab" |
255 | | // ababababababababababab |
256 | | // Note that this does not match the semantics of either std::memcpy() or |
257 | | // std::memmove(). |
258 | | inline char* IncrementalCopySlow(const char* src, char* op, |
259 | 0 | char* const op_limit) { |
260 | | // TODO: Remove pragma when LLVM is aware this |
261 | | // function is only called in cold regions and when cold regions don't get |
262 | | // vectorized or unrolled. |
263 | 0 | #ifdef __clang__ |
264 | 0 | #pragma clang loop unroll(disable) |
265 | 0 | #endif |
266 | 0 | while (op < op_limit) { |
267 | 0 | *op++ = *src++; |
268 | 0 | } |
269 | 0 | return op_limit; |
270 | 0 | } |
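A minimal usage sketch of the overlapping-copy semantics described in the comment above (the buffer and sizes are illustrative only):

char buf[64] = "ab";                     // a 2-byte pattern at the front
char* op = buf + 2;                      // destination overlaps the source
IncrementalCopySlow(buf, op, op + 20);   // buf now begins with 11 copies of "ab"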
271 | | |
272 | | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
273 | | |
274 | | // Computes the bytes for shuffle control mask (please read comments on |
275 | | // 'pattern_generation_masks' as well) for the given index_offset and |
276 | | // pattern_size. For example, when the 'offset' is 6, it will generate a |
277 | | // repeating pattern of size 6. So, the first 16 byte indexes will correspond to |
278 | | // the pattern-bytes {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3} and the |
279 | | // next 16 byte indexes will correspond to the pattern-bytes {4, 5, 0, 1, 2, 3, |
280 | | // 4, 5, 0, 1, 2, 3, 4, 5, 0, 1}. These byte index sequences are generated by |
281 | | // calling MakePatternMaskBytes(0, 6, index_sequence<16>()) and |
282 | | // MakePatternMaskBytes(16, 6, index_sequence<16>()) respectively. |
283 | | template <size_t... indexes> |
284 | | inline constexpr std::array<char, sizeof...(indexes)> MakePatternMaskBytes( |
285 | | int index_offset, int pattern_size, index_sequence<indexes...>) { |
286 | | return {static_cast<char>((index_offset + indexes) % pattern_size)...}; |
287 | | } |
288 | | |
289 | | // Computes the shuffle control mask bytes array for given pattern-sizes and |
290 | | // returns an array. |
291 | | template <size_t... pattern_sizes_minus_one> |
292 | | inline constexpr std::array<std::array<char, sizeof(V128)>, |
293 | | sizeof...(pattern_sizes_minus_one)> |
294 | | MakePatternMaskBytesTable(int index_offset, |
295 | | index_sequence<pattern_sizes_minus_one...>) { |
296 | | return { |
297 | | MakePatternMaskBytes(index_offset, pattern_sizes_minus_one + 1, |
298 | | make_index_sequence</*indexes=*/sizeof(V128)>())...}; |
299 | | } |
300 | | |
301 | | // This is an array of shuffle control masks that can be used as the source |
302 | | // operand for PSHUFB to permute the contents of the destination XMM register |
303 | | // into a repeating byte pattern. |
304 | | alignas(16) constexpr std::array<std::array<char, sizeof(V128)>, |
305 | | 16> pattern_generation_masks = |
306 | | MakePatternMaskBytesTable( |
307 | | /*index_offset=*/0, |
308 | | /*pattern_sizes_minus_one=*/make_index_sequence<16>()); |
309 | | |
310 | | // Similar to 'pattern_generation_masks', this table is used to "rotate" the |
311 | | // pattern so that we can copy the *next 16 bytes* consistent with the pattern. |
312 | | // Basically, pattern_reshuffle_masks is a continuation of |
313 | | // pattern_generation_masks. It follows that, pattern_reshuffle_masks is same as |
314 | | // pattern_generation_masks for offsets 1, 2, 4, 8 and 16. |
315 | | alignas(16) constexpr std::array<std::array<char, sizeof(V128)>, |
316 | | 16> pattern_reshuffle_masks = |
317 | | MakePatternMaskBytesTable( |
318 | | /*index_offset=*/16, |
319 | | /*pattern_sizes_minus_one=*/make_index_sequence<16>()); |
320 | | |
321 | | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
322 | | static inline V128 LoadPattern(const char* src, const size_t pattern_size) { |
323 | | V128 generation_mask = V128_Load(reinterpret_cast<const V128*>( |
324 | | pattern_generation_masks[pattern_size - 1].data())); |
325 | | // Uninitialized bytes are masked out by the shuffle mask. |
326 | | // TODO: remove annotation and macro defs once MSan is fixed. |
327 | | SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size); |
328 | | return V128_Shuffle(V128_LoadU(reinterpret_cast<const V128*>(src)), |
329 | | generation_mask); |
330 | | } |
331 | | |
332 | | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
333 | | static inline std::pair<V128 /* pattern */, V128 /* reshuffle_mask */> |
334 | | LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) { |
335 | | V128 pattern = LoadPattern(src, pattern_size); |
336 | | |
337 | | // This mask will generate the next 16 bytes in-place. Doing so enables us to |
338 | | // write data by at most 4 V128_StoreU. |
339 | | // |
340 | | // For example, suppose pattern is: abcdefabcdefabcd |
341 | | // Shuffling with this mask will generate: efabcdefabcdefab |
342 | | // Shuffling again will generate: cdefabcdefabcdef |
343 | | V128 reshuffle_mask = V128_Load(reinterpret_cast<const V128*>( |
344 | | pattern_reshuffle_masks[pattern_size - 1].data())); |
345 | | return {pattern, reshuffle_mask}; |
346 | | } |
347 | | |
348 | | #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
349 | | |
350 | | // Fallback for when we need to copy while extending the pattern, for example |
351 | | // copying 10 bytes from 3 positions back abc -> abcabcabcabca. |
352 | | // |
353 | | // REQUIRES: [dst - offset, dst + 64) is a valid address range. |
354 | | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
355 | 0 | static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) { |
356 | | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
357 | | if (SNAPPY_PREDICT_TRUE(offset <= 16)) { |
358 | | switch (offset) { |
359 | | case 0: |
360 | | return false; |
361 | | case 1: { |
362 | | // TODO: Ideally we should memset, move back once the |
363 | | // codegen issues are fixed. |
364 | | V128 pattern = V128_DupChar(dst[-1]); |
365 | | for (int i = 0; i < 4; i++) { |
366 | | V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern); |
367 | | } |
368 | | return true; |
369 | | } |
370 | | case 2: |
371 | | case 4: |
372 | | case 8: |
373 | | case 16: { |
374 | | V128 pattern = LoadPattern(dst - offset, offset); |
375 | | for (int i = 0; i < 4; i++) { |
376 | | V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern); |
377 | | } |
378 | | return true; |
379 | | } |
380 | | default: { |
381 | | auto pattern_and_reshuffle_mask = |
382 | | LoadPatternAndReshuffleMask(dst - offset, offset); |
383 | | V128 pattern = pattern_and_reshuffle_mask.first; |
384 | | V128 reshuffle_mask = pattern_and_reshuffle_mask.second; |
385 | | for (int i = 0; i < 4; i++) { |
386 | | V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern); |
387 | | pattern = V128_Shuffle(pattern, reshuffle_mask); |
388 | | } |
389 | | return true; |
390 | | } |
391 | | } |
392 | | } |
393 | | #else |
394 | 0 | if (SNAPPY_PREDICT_TRUE(offset < 16)) { |
395 | 0 | if (SNAPPY_PREDICT_FALSE(offset == 0)) return false; |
396 | | // Extend the pattern to the first 16 bytes. |
397 | | // The simpler formulation of `dst[i - offset]` induces undefined behavior. |
398 | 0 | for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i]; |
399 | | // Find a multiple of pattern >= 16. |
400 | 0 | static std::array<uint8_t, 16> pattern_sizes = []() { |
401 | 0 | std::array<uint8_t, 16> res; |
402 | 0 | for (int i = 1; i < 16; i++) res[i] = (16 / i + 1) * i; |
403 | 0 | return res; |
404 | 0 | }(); |
405 | 0 | offset = pattern_sizes[offset]; |
406 | 0 | for (int i = 1; i < 4; i++) { |
407 | 0 | std::memcpy(dst + i * 16, dst + i * 16 - offset, 16); |
408 | 0 | } |
409 | 0 | return true; |
410 | 0 | } |
411 | 0 | #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
412 | | |
413 | | // Very rare. |
414 | 0 | for (int i = 0; i < 4; i++) { |
415 | 0 | std::memcpy(dst + i * 16, dst + i * 16 - offset, 16); |
416 | 0 | } |
417 | 0 | return true; |
418 | 0 | } |
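For reference, a few values of the pattern_sizes table built in the non-vector fallback above; each entry is a multiple of the offset that is at least 16, so the later 16-byte block copies repeat the pattern without overlapping reads and writes:

// pattern_sizes[3]  = (16 / 3  + 1) * 3  = 18
// pattern_sizes[5]  = (16 / 5  + 1) * 5  = 20
// pattern_sizes[7]  = (16 / 7  + 1) * 7  = 21
// pattern_sizes[15] = (16 / 15 + 1) * 15 = 30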
419 | | |
420 | | // Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than |
421 | | // IncrementalCopySlow. buf_limit is the address past the end of the writable |
422 | | // region of the buffer. |
423 | | inline char* IncrementalCopy(const char* src, char* op, char* const op_limit, |
424 | 0 | char* const buf_limit) { |
425 | | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
426 | | constexpr int big_pattern_size_lower_bound = 16; |
427 | | #else |
428 | 0 | constexpr int big_pattern_size_lower_bound = 8; |
429 | 0 | #endif |
430 | | |
431 | | // Terminology: |
432 | | // |
433 | | // slop = buf_limit - op |
434 | | // pat = op - src |
435 | | // len = op_limit - op |
436 | 0 | assert(src < op); |
437 | 0 | assert(op < op_limit); |
438 | 0 | assert(op_limit <= buf_limit); |
439 | | // NOTE: The copy tags use 3 or 6 bits to store the copy length, so len <= 64. |
440 | 0 | assert(op_limit - op <= 64); |
441 | | // NOTE: In practice the compressor always emits len >= 4, so it is ok to |
442 | | // assume that to optimize this function, but this is not guaranteed by the |
443 | | // compression format, so we have to also handle len < 4 in case the input |
444 | | // does not satisfy these conditions. |
445 | | |
446 | 0 | size_t pattern_size = op - src; |
447 | | // The cases are split into different branches to allow the branch predictor, |
448 | | // FDO, and static prediction hints to work better. For each input we list the |
449 | | // ratio of invocations that match each condition. |
450 | | // |
451 | | // input slop < 16 pat < 8 len > 16 |
452 | | // ------------------------------------------ |
453 | | // html|html4|cp 0% 1.01% 27.73% |
454 | | // urls 0% 0.88% 14.79% |
455 | | // jpg 0% 64.29% 7.14% |
456 | | // pdf 0% 2.56% 58.06% |
457 | | // txt[1-4] 0% 0.23% 0.97% |
458 | | // pb 0% 0.96% 13.88% |
459 | | // bin 0.01% 22.27% 41.17% |
460 | | // |
461 | | // It is very rare that we don't have enough slop for doing block copies. It |
462 | | // is also rare that we need to expand a pattern. Small patterns are common |
463 | | // for incompressible formats and for those we are plenty fast already. |
464 | | // Lengths are normally not greater than 16 but they vary depending on the |
465 | | // input. In general if we always predict len <= 16 it would be an ok |
466 | | // prediction. |
467 | | // |
468 | | // In order to be fast we want a pattern >= 16 bytes (or 8 bytes in non-SSE) |
469 | | // and an unrolled loop copying 1x 16 bytes (or 2x 8 bytes in non-SSE) at a |
470 | | // time. |
471 | | |
472 | | // Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE) |
473 | | // bytes. |
474 | 0 | if (pattern_size < big_pattern_size_lower_bound) { |
475 | | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
476 | | // Load the first eight bytes into an 128-bit XMM register, then use PSHUFB |
477 | | // to permute the register's contents in-place into a repeating sequence of |
478 | | // the first "pattern_size" bytes. |
479 | | // For example, suppose: |
480 | | // src == "abc" |
481 | | // op == src + 3 |
482 | | // After V128_Shuffle(), "pattern" will have five copies of "abc" |
483 | | // followed by one byte of slop: abcabcabcabcabca. |
484 | | // |
485 | | // The non-SSE fallback implementation suffers from store-forwarding stalls |
486 | | // because its loads and stores partly overlap. By expanding the pattern |
487 | | // in-place, we avoid the penalty. |
488 | | |
489 | | // Typically, the op_limit is the gating factor so try to simplify the loop |
490 | | // based on that. |
491 | | if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) { |
492 | | auto pattern_and_reshuffle_mask = |
493 | | LoadPatternAndReshuffleMask(src, pattern_size); |
494 | | V128 pattern = pattern_and_reshuffle_mask.first; |
495 | | V128 reshuffle_mask = pattern_and_reshuffle_mask.second; |
496 | | |
497 | | // There is at least one, and at most four 16-byte blocks. Writing four |
498 | | // conditionals instead of a loop allows FDO to layout the code with |
499 | | // respect to the actual probabilities of each length. |
500 | | // TODO: Replace with loop with trip count hint. |
501 | | V128_StoreU(reinterpret_cast<V128*>(op), pattern); |
502 | | |
503 | | if (op + 16 < op_limit) { |
504 | | pattern = V128_Shuffle(pattern, reshuffle_mask); |
505 | | V128_StoreU(reinterpret_cast<V128*>(op + 16), pattern); |
506 | | } |
507 | | if (op + 32 < op_limit) { |
508 | | pattern = V128_Shuffle(pattern, reshuffle_mask); |
509 | | V128_StoreU(reinterpret_cast<V128*>(op + 32), pattern); |
510 | | } |
511 | | if (op + 48 < op_limit) { |
512 | | pattern = V128_Shuffle(pattern, reshuffle_mask); |
513 | | V128_StoreU(reinterpret_cast<V128*>(op + 48), pattern); |
514 | | } |
515 | | return op_limit; |
516 | | } |
517 | | char* const op_end = buf_limit - 15; |
518 | | if (SNAPPY_PREDICT_TRUE(op < op_end)) { |
519 | | auto pattern_and_reshuffle_mask = |
520 | | LoadPatternAndReshuffleMask(src, pattern_size); |
521 | | V128 pattern = pattern_and_reshuffle_mask.first; |
522 | | V128 reshuffle_mask = pattern_and_reshuffle_mask.second; |
523 | | |
524 | | // This code path is relatively cold however so we save code size |
525 | | // by avoiding unrolling and vectorizing. |
526 | | // |
527 | | // TODO: Remove pragma when cold regions don't get |
528 | | // vectorized or unrolled. |
529 | | #ifdef __clang__ |
530 | | #pragma clang loop unroll(disable) |
531 | | #endif |
532 | | do { |
533 | | V128_StoreU(reinterpret_cast<V128*>(op), pattern); |
534 | | pattern = V128_Shuffle(pattern, reshuffle_mask); |
535 | | op += 16; |
536 | | } while (SNAPPY_PREDICT_TRUE(op < op_end)); |
537 | | } |
538 | | return IncrementalCopySlow(op - pattern_size, op, op_limit); |
539 | | #else // !SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
540 | | // If plenty of buffer space remains, expand the pattern to at least 8 |
541 | | // bytes. The way the following loop is written, we need 8 bytes of buffer |
542 | | // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10 |
543 | | // bytes if pattern_size is 2. Precisely encoding that is probably not |
544 | | // worthwhile; instead, invoke the slow path if we cannot write 11 bytes |
545 | | // (because 11 are required in the worst case). |
546 | 0 | if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) { |
547 | 0 | while (pattern_size < 8) { |
548 | 0 | UnalignedCopy64(src, op); |
549 | 0 | op += pattern_size; |
550 | 0 | pattern_size *= 2; |
551 | 0 | } |
552 | 0 | if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit; |
553 | 0 | } else { |
554 | 0 | return IncrementalCopySlow(src, op, op_limit); |
555 | 0 | } |
556 | 0 | #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE |
557 | 0 | } |
558 | 0 | assert(pattern_size >= big_pattern_size_lower_bound); |
559 | 0 | constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16; |
560 | | |
561 | | // Copy 1x 16 bytes (or 2x 8 bytes in non-SSE) at a time. Because op - src can |
562 | | // be < 16 in non-SSE, a single UnalignedCopy128 might overwrite data in op. |
563 | | // UnalignedCopy64 is safe because expanding the pattern to at least 8 bytes |
564 | | // guarantees that op - src >= 8. |
565 | | // |
566 | | // Typically, the op_limit is the gating factor so try to simplify the loop |
567 | | // based on that. |
568 | 0 | if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) { |
569 | | // There is at least one, and at most four 16-byte blocks. Writing four |
570 | | // conditionals instead of a loop allows FDO to layout the code with respect |
571 | | // to the actual probabilities of each length. |
572 | | // TODO: Replace with loop with trip count hint. |
573 | 0 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op); |
574 | 0 | if (op + 16 < op_limit) { |
575 | 0 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 16, op + 16); |
576 | 0 | } |
577 | 0 | if (op + 32 < op_limit) { |
578 | 0 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 32, op + 32); |
579 | 0 | } |
580 | 0 | if (op + 48 < op_limit) { |
581 | 0 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 48, op + 48); |
582 | 0 | } |
583 | 0 | return op_limit; |
584 | 0 | } |
585 | | |
586 | | // Fall back to doing as much as we can with the available slop in the |
587 | | // buffer. This code path is relatively cold however so we save code size by |
588 | | // avoiding unrolling and vectorizing. |
589 | | // |
590 | | // TODO: Remove pragma when cold regions don't get vectorized |
591 | | // or unrolled. |
592 | 0 | #ifdef __clang__ |
593 | 0 | #pragma clang loop unroll(disable) |
594 | 0 | #endif |
595 | 0 | for (char* op_end = buf_limit - 16; op < op_end; op += 16, src += 16) { |
596 | 0 | ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op); |
597 | 0 | } |
598 | 0 | if (op >= op_limit) return op_limit; |
599 | | |
600 | | // We only take this branch if we didn't have enough slop and we can do a |
601 | | // single 8 byte copy. |
602 | 0 | if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) { |
603 | 0 | UnalignedCopy64(src, op); |
604 | 0 | src += 8; |
605 | 0 | op += 8; |
606 | 0 | } |
607 | 0 | return IncrementalCopySlow(src, op, op_limit); |
608 | 0 | } |
609 | | |
610 | | } // namespace |
611 | | |
612 | | template <bool allow_fast_path> |
613 | 0 | static inline char* EmitLiteral(char* op, const char* literal, int len) { |
614 | | // The vast majority of copies are below 16 bytes, for which a |
615 | | // call to std::memcpy() is overkill. This fast path can sometimes |
616 | | // copy up to 15 bytes too much, but that is okay in the |
617 | | // main loop, since we have a bit to go on for both sides: |
618 | | // |
619 | | // - The input will always have kInputMarginBytes = 15 extra |
620 | | // available bytes, as long as we're in the main loop, and |
621 | | // if not, allow_fast_path = false. |
622 | | // - The output will always have 32 spare bytes (see |
623 | | // MaxCompressedLength). |
624 | 0 | assert(len > 0); // Zero-length literals are disallowed |
625 | 0 | int n = len - 1; |
626 | 0 | if (allow_fast_path && len <= 16) { |
627 | | // Fits in tag byte |
628 | 0 | *op++ = LITERAL | (n << 2); |
629 | |
630 | 0 | UnalignedCopy128(literal, op); |
631 | 0 | return op + len; |
632 | 0 | } |
633 | | |
634 | 0 | if (n < 60) { |
635 | | // Fits in tag byte |
636 | 0 | *op++ = LITERAL | (n << 2); |
637 | 0 | } else { |
638 | 0 | int count = (Bits::Log2Floor(n) >> 3) + 1; |
639 | 0 | assert(count >= 1); |
640 | 0 | assert(count <= 4); |
641 | 0 | *op++ = LITERAL | ((59 + count) << 2); |
642 | | // Encode in upcoming bytes. |
643 | | // Write 4 bytes, though we may care about only 1 of them. The output buffer |
644 | | // is guaranteed to have at least 3 more spaces left as 'len >= 61' holds |
645 | | // here and there is a std::memcpy() of size 'len' below. |
646 | 0 | LittleEndian::Store32(op, n); |
647 | 0 | op += count; |
648 | 0 | } |
649 | | // When allow_fast_path is true, we can overwrite up to 16 bytes. |
650 | 0 | if (allow_fast_path) { |
651 | 0 | char* destination = op; |
652 | 0 | const char* source = literal; |
653 | 0 | const char* end = destination + len; |
654 | 0 | do { |
655 | 0 | std::memcpy(destination, source, 16); |
656 | 0 | destination += 16; |
657 | 0 | source += 16; |
658 | 0 | } while (destination < end); |
659 | 0 | } else { |
660 | 0 | std::memcpy(op, literal, len); |
661 | 0 | } |
662 | 0 | return op + len; |
663 | 0 | } Unexecuted instantiation: snappy.cc:char* snappy::EmitLiteral<true>(char*, char const*, int) Unexecuted instantiation: snappy.cc:char* snappy::EmitLiteral<false>(char*, char const*, int) |
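A worked example of the long-literal tag emitted above (a sketch; LITERAL is 0 in the Snappy tag encoding):

// For len == 100: n = 99, Bits::Log2Floor(99) == 6, so count = (6 >> 3) + 1 = 1.
// The tag byte is LITERAL | ((59 + 1) << 2) = 0xF0, followed by a single
// length byte holding n == 99, then the 100 literal bytes themselves.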
664 | | |
665 | | template <bool len_less_than_12> |
666 | 0 | static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) { |
667 | 0 | assert(len <= 64); |
668 | 0 | assert(len >= 4); |
669 | 0 | assert(offset < 65536); |
670 | 0 | assert(len_less_than_12 == (len < 12)); |
671 | | |
672 | 0 | if (len_less_than_12) { |
673 | 0 | uint32_t u = (len << 2) + (offset << 8); |
674 | 0 | uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0); |
675 | 0 | uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2); |
676 | | // It turns out that offset < 2048 is a difficult-to-predict branch. |
677 | | // `perf record` shows this is the highest percentage of branch misses in |
678 | | // benchmarks. This code is branch-free; the data dependency |
679 | | // chain that bottlenecks the throughput is so long that a few extra |
680 | | // instructions are completely free (IPC << 6 because of data deps). |
681 | 0 | u += offset < 2048 ? copy1 : copy2; |
682 | 0 | LittleEndian::Store32(op, u); |
683 | 0 | op += offset < 2048 ? 2 : 3; |
684 | 0 | } else { |
685 | | // Write 4 bytes, though we only care about 3 of them. The output buffer |
686 | | // is required to have some slack, so the extra byte won't overrun it. |
687 | 0 | uint32_t u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8); |
688 | 0 | LittleEndian::Store32(op, u); |
689 | 0 | op += 3; |
690 | 0 | } |
691 | 0 | return op; |
692 | 0 | } Unexecuted instantiation: snappy.cc:char* snappy::EmitCopyAtMost64<true>(char*, unsigned long, unsigned long) Unexecuted instantiation: snappy.cc:char* snappy::EmitCopyAtMost64<false>(char*, unsigned long, unsigned long) |
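A sketched decode of the branch-free short-copy path above, assuming the Snappy tag values COPY_1_BYTE_OFFSET == 1 and COPY_2_BYTE_OFFSET == 2:

// For len == 7, offset == 100 (< 2048):
//   u     = (7 << 2) + (100 << 8)              = 25628
//   copy1 = 1 - (4 << 2) + ((100 >> 3) & 0xe0) = -15
//   u + copy1 = 25613 = 0x640D, stored little-endian; op advances by 2, so the
//   bytes emitted are 0x0D (copy-1 tag, length 4 + 3 = 7, offset high bits 0)
//   and 0x64 (offset low byte == 100).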
693 | | |
694 | | template <bool len_less_than_12> |
695 | 0 | static inline char* EmitCopy(char* op, size_t offset, size_t len) { |
696 | 0 | assert(len_less_than_12 == (len < 12)); |
697 | 0 | if (len_less_than_12) { |
698 | 0 | return EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len); |
699 | 0 | } else { |
700 | | // A special case for len <= 64 might help, but so far measurements suggest |
701 | | // it's in the noise. |
702 | | |
703 | | // Emit 64 byte copies but make sure to keep at least four bytes reserved. |
704 | 0 | while (SNAPPY_PREDICT_FALSE(len >= 68)) { |
705 | 0 | op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 64); |
706 | 0 | len -= 64; |
707 | 0 | } |
708 | | |
709 | | // One or two copies will now finish the job. |
710 | 0 | if (len > 64) { |
711 | 0 | op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 60); |
712 | 0 | len -= 60; |
713 | 0 | } |
714 | | |
715 | | // Emit remainder. |
716 | 0 | if (len < 12) { |
717 | 0 | op = EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len); |
718 | 0 | } else { |
719 | 0 | op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len); |
720 | 0 | } |
721 | 0 | return op; |
722 | 0 | } |
723 | 0 | } Unexecuted instantiation: snappy.cc:char* snappy::EmitCopy<true>(char*, unsigned long, unsigned long) Unexecuted instantiation: snappy.cc:char* snappy::EmitCopy<false>(char*, unsigned long, unsigned long) |
724 | | |
725 | 0 | bool GetUncompressedLength(const char* start, size_t n, size_t* result) { |
726 | 0 | uint32_t v = 0; |
727 | 0 | const char* limit = start + n; |
728 | 0 | if (Varint::Parse32WithLimit(start, limit, &v) != NULL) { |
729 | 0 | *result = v; |
730 | 0 | return true; |
731 | 0 | } else { |
732 | 0 | return false; |
733 | 0 | } |
734 | 0 | } |
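For illustration, the uncompressed-length varint parsed here follows the usual little-endian base-128 scheme; a sketched example:

// A stream starting with the bytes 0xE4 0x07 encodes
//   (0xE4 & 0x7F) | (0x07 << 7) = 100 + 896 = 996,
// so GetUncompressedLength() would report 996 bytes.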
735 | | |
736 | | namespace { |
737 | 0 | uint32_t CalculateTableSize(uint32_t input_size) { |
738 | 0 | static_assert( |
739 | 0 | kMaxHashTableSize >= kMinHashTableSize, |
740 | 0 | "kMaxHashTableSize should be greater or equal to kMinHashTableSize."); |
741 | 0 | if (input_size > kMaxHashTableSize) { |
742 | 0 | return kMaxHashTableSize; |
743 | 0 | } |
744 | 0 | if (input_size < kMinHashTableSize) { |
745 | 0 | return kMinHashTableSize; |
746 | 0 | } |
747 | | // This is equivalent to Log2Ceiling(input_size), assuming input_size > 1. |
748 | | // 2 << Log2Floor(x - 1) is equivalent to 1 << (1 + Log2Floor(x - 1)). |
749 | 0 | return 2u << Bits::Log2Floor(input_size - 1); |
750 | 0 | } |
751 | | } // namespace |
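A couple of sketched values for the table sizing above (kMinHashTableSize and kMaxHashTableSize are defined in snappy-internal.h and not restated here):

// input_size == 5000: Bits::Log2Floor(4999) == 12, so the table gets
//   2 << 12 = 8192 entries -- the smallest power of two >= 5000.
// Inputs above kMaxHashTableSize are clamped to kMaxHashTableSize, and tiny
//   inputs are rounded up to kMinHashTableSize.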
752 | | |
753 | | namespace internal { |
754 | 0 | WorkingMemory::WorkingMemory(size_t input_size) { |
755 | 0 | const size_t max_fragment_size = std::min(input_size, kBlockSize); |
756 | 0 | const size_t table_size = CalculateTableSize(max_fragment_size); |
757 | 0 | size_ = table_size * sizeof(*table_) + max_fragment_size + |
758 | 0 | MaxCompressedLength(max_fragment_size); |
759 | 0 | mem_ = std::allocator<char>().allocate(size_); |
760 | 0 | table_ = reinterpret_cast<uint16_t*>(mem_); |
761 | 0 | input_ = mem_ + table_size * sizeof(*table_); |
762 | 0 | output_ = input_ + max_fragment_size; |
763 | 0 | } |
764 | | |
765 | 0 | WorkingMemory::~WorkingMemory() { |
766 | 0 | std::allocator<char>().deallocate(mem_, size_); |
767 | 0 | } |
768 | | |
769 | | uint16_t* WorkingMemory::GetHashTable(size_t fragment_size, |
770 | 0 | int* table_size) const { |
771 | 0 | const size_t htsize = CalculateTableSize(fragment_size); |
772 | 0 | memset(table_, 0, htsize * sizeof(*table_)); |
773 | 0 | *table_size = htsize; |
774 | 0 | return table_; |
775 | 0 | } |
776 | | } // end namespace internal |
777 | | |
778 | | // Flat array compression that does not emit the "uncompressed length" |
779 | | // prefix. Compresses "input" string to the "*op" buffer. |
780 | | // |
781 | | // REQUIRES: "input" is at most "kBlockSize" bytes long. |
782 | | // REQUIRES: "op" points to an array of memory that is at least |
783 | | // "MaxCompressedLength(input.size())" in size. |
784 | | // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. |
785 | | // REQUIRES: "table_size" is a power of two |
786 | | // |
787 | | // Returns an "end" pointer into "op" buffer. |
788 | | // "end - op" is the compressed size of "input". |
789 | | namespace internal { |
790 | | char* CompressFragment(const char* input, size_t input_size, char* op, |
791 | 0 | uint16_t* table, const int table_size) { |
792 | | // "ip" is the input pointer, and "op" is the output pointer. |
793 | 0 | const char* ip = input; |
794 | 0 | assert(input_size <= kBlockSize); |
795 | 0 | assert((table_size & (table_size - 1)) == 0); // table must be power of two |
796 | 0 | const uint32_t mask = 2 * (table_size - 1); |
797 | 0 | const char* ip_end = input + input_size; |
798 | 0 | const char* base_ip = ip; |
799 | |
|
800 | 0 | const size_t kInputMarginBytes = 15; |
801 | 0 | if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) { |
802 | 0 | const char* ip_limit = input + input_size - kInputMarginBytes; |
803 | |
804 | 0 | for (uint32_t preload = LittleEndian::Load32(ip + 1);;) { |
805 | | // Bytes in [next_emit, ip) will be emitted as literal bytes. Or |
806 | | // [next_emit, ip_end) after the main loop. |
807 | 0 | const char* next_emit = ip++; |
808 | 0 | uint64_t data = LittleEndian::Load64(ip); |
809 | | // The body of this loop calls EmitLiteral once and then EmitCopy one or |
810 | | // more times. (The exception is that when we're close to exhausting |
811 | | // the input we goto emit_remainder.) |
812 | | // |
813 | | // In the first iteration of this loop we're just starting, so |
814 | | // there's nothing to copy, so calling EmitLiteral once is |
815 | | // necessary. And we only start a new iteration when the |
816 | | // current iteration has determined that a call to EmitLiteral will |
817 | | // precede the next call to EmitCopy (if any). |
818 | | // |
819 | | // Step 1: Scan forward in the input looking for a 4-byte-long match. |
820 | | // If we get close to exhausting the input then goto emit_remainder. |
821 | | // |
822 | | // Heuristic match skipping: If 32 bytes are scanned with no matches |
823 | | // found, start looking only at every other byte. If 32 more bytes are |
824 | | // scanned (or skipped), look at every third byte, etc. When a match is |
825 | | // found, immediately go back to looking at every byte. This is a small |
826 | | // loss (~5% performance, ~0.1% density) for compressible data due to more |
827 | | // bookkeeping, but for non-compressible data (such as JPEG) it's a huge |
828 | | // win since the compressor quickly "realizes" the data is incompressible |
829 | | // and doesn't bother looking for matches everywhere. |
830 | | // |
831 | | // The "skip" variable keeps track of how many bytes there are since the |
832 | | // last match; dividing it by 32 (i.e. right-shifting by five) gives the |
833 | | // number of bytes to move ahead for each iteration. |
834 | 0 | uint32_t skip = 32; |
835 | |
836 | 0 | const char* candidate; |
837 | 0 | if (ip_limit - ip >= 16) { |
838 | 0 | auto delta = ip - base_ip; |
839 | 0 | for (int j = 0; j < 4; ++j) { |
840 | 0 | for (int k = 0; k < 4; ++k) { |
841 | 0 | int i = 4 * j + k; |
842 | | // These for-loops are meant to be unrolled. So we can freely |
843 | | // special case the first iteration to use the value already |
844 | | // loaded in preload. |
845 | 0 | uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data); |
846 | 0 | assert(dword == LittleEndian::Load32(ip + i)); |
847 | 0 | uint16_t* table_entry = TableEntry(table, dword, mask); |
848 | 0 | candidate = base_ip + *table_entry; |
849 | 0 | assert(candidate >= base_ip); |
850 | 0 | assert(candidate < ip + i); |
851 | 0 | *table_entry = delta + i; |
852 | 0 | if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) { |
853 | 0 | *op = LITERAL | (i << 2); |
854 | 0 | UnalignedCopy128(next_emit, op + 1); |
855 | 0 | ip += i; |
856 | 0 | op = op + i + 2; |
857 | 0 | goto emit_match; |
858 | 0 | } |
859 | 0 | data >>= 8; |
860 | 0 | } |
861 | 0 | data = LittleEndian::Load64(ip + 4 * j + 4); |
862 | 0 | } |
863 | 0 | ip += 16; |
864 | 0 | skip += 16; |
865 | 0 | } |
866 | 0 | while (true) { |
867 | 0 | assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip)); |
868 | 0 | uint16_t* table_entry = TableEntry(table, data, mask); |
869 | 0 | uint32_t bytes_between_hash_lookups = skip >> 5; |
870 | 0 | skip += bytes_between_hash_lookups; |
871 | 0 | const char* next_ip = ip + bytes_between_hash_lookups; |
872 | 0 | if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) { |
873 | 0 | ip = next_emit; |
874 | 0 | goto emit_remainder; |
875 | 0 | } |
876 | 0 | candidate = base_ip + *table_entry; |
877 | 0 | assert(candidate >= base_ip); |
878 | 0 | assert(candidate < ip); |
879 | | |
880 | 0 | *table_entry = ip - base_ip; |
881 | 0 | if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) == |
882 | 0 | LittleEndian::Load32(candidate))) { |
883 | 0 | break; |
884 | 0 | } |
885 | 0 | data = LittleEndian::Load32(next_ip); |
886 | 0 | ip = next_ip; |
887 | 0 | } |
888 | | |
889 | | // Step 2: A 4-byte match has been found. We'll later see if more |
890 | | // than 4 bytes match. But, prior to the match, input |
891 | | // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes." |
892 | 0 | assert(next_emit + 16 <= ip_end); |
893 | 0 | op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit); |
894 | | |
895 | | // Step 3: Call EmitCopy, and then see if another EmitCopy could |
896 | | // be our next move. Repeat until we find no match for the |
897 | | // input immediately after what was consumed by the last EmitCopy call. |
898 | | // |
899 | | // If we exit this loop normally then we need to call EmitLiteral next, |
900 | | // though we don't yet know how big the literal will be. We handle that |
901 | | // by proceeding to the next iteration of the main loop. We also can exit |
902 | | // this loop via goto if we get close to exhausting the input. |
903 | 0 | emit_match: |
904 | 0 | do { |
905 | | // We have a 4-byte match at ip, and no need to emit any |
906 | | // "literal bytes" prior to ip. |
907 | 0 | const char* base = ip; |
908 | 0 | std::pair<size_t, bool> p = |
909 | 0 | FindMatchLength(candidate + 4, ip + 4, ip_end, &data); |
910 | 0 | size_t matched = 4 + p.first; |
911 | 0 | ip += matched; |
912 | 0 | size_t offset = base - candidate; |
913 | 0 | assert(0 == memcmp(base, candidate, matched)); |
914 | 0 | if (p.second) { |
915 | 0 | op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched); |
916 | 0 | } else { |
917 | 0 | op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched); |
918 | 0 | } |
919 | 0 | if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) { |
920 | 0 | goto emit_remainder; |
921 | 0 | } |
922 | | // Expect 5 bytes to match |
923 | 0 | assert((data & 0xFFFFFFFFFF) == |
924 | 0 | (LittleEndian::Load64(ip) & 0xFFFFFFFFFF)); |
925 | | // We are now looking for a 4-byte match again. We read |
926 | | // table[Hash(ip, mask)] for that. To improve compression, |
927 | | // we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)]. |
928 | 0 | *TableEntry(table, LittleEndian::Load32(ip - 1), mask) = |
929 | 0 | ip - base_ip - 1; |
930 | 0 | uint16_t* table_entry = TableEntry(table, data, mask); |
931 | 0 | candidate = base_ip + *table_entry; |
932 | 0 | *table_entry = ip - base_ip; |
933 | | // Measurements on the benchmarks have shown the following probabilities |
934 | | // for the loop to exit (i.e. avg. number of iterations is reciprocal). |
935 | | // BM_Flat/6 txt1 p = 0.3-0.4 |
936 | | // BM_Flat/7 txt2 p = 0.35 |
937 | | // BM_Flat/8 txt3 p = 0.3-0.4 |
938 | | // BM_Flat/9 txt3 p = 0.34-0.4 |
939 | | // BM_Flat/10 pb p = 0.4 |
940 | | // BM_Flat/11 gaviota p = 0.1 |
941 | | // BM_Flat/12 cp p = 0.5 |
942 | | // BM_Flat/13 c p = 0.3 |
943 | 0 | } while (static_cast<uint32_t>(data) == LittleEndian::Load32(candidate)); |
944 | | // Because the least significant 5 bytes matched, we can utilize data |
945 | | // for the next iteration. |
946 | 0 | preload = data >> 8; |
947 | 0 | } |
948 | 0 | } |
949 | | |
950 | 0 | emit_remainder: |
951 | | // Emit the remaining bytes as a literal |
952 | 0 | if (ip < ip_end) { |
953 | 0 | op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip); |
954 | 0 | } |
955 | |
956 | 0 | return op; |
957 | 0 | } |
958 | | |
959 | | char* CompressFragmentDoubleHash(const char* input, size_t input_size, char* op, |
960 | | uint16_t* table, const int table_size, |
961 | 0 | uint16_t* table2, const int table_size2) { |
962 | 0 | (void)table_size2; |
963 | 0 | assert(table_size == table_size2); |
964 | | // "ip" is the input pointer, and "op" is the output pointer. |
965 | 0 | const char* ip = input; |
966 | 0 | assert(input_size <= kBlockSize); |
967 | 0 | assert((table_size & (table_size - 1)) == 0); // table must be power of two |
968 | 0 | const uint32_t mask = 2 * (table_size - 1); |
969 | 0 | const char* ip_end = input + input_size; |
970 | 0 | const char* base_ip = ip; |
971 | |
972 | 0 | const size_t kInputMarginBytes = 15; |
973 | 0 | if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) { |
974 | 0 | const char* ip_limit = input + input_size - kInputMarginBytes; |
975 | |
976 | 0 | for (;;) { |
977 | 0 | const char* next_emit = ip++; |
978 | 0 | uint64_t data = LittleEndian::Load64(ip); |
979 | 0 | uint32_t skip = 512; |
980 | |
981 | 0 | const char* candidate; |
982 | 0 | uint32_t candidate_length; |
983 | 0 | while (true) { |
984 | 0 | assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip)); |
985 | 0 | uint16_t* table_entry2 = TableEntry8ByteMatch(table2, data, mask); |
986 | 0 | uint32_t bytes_between_hash_lookups = skip >> 9; |
987 | 0 | skip++; |
988 | 0 | const char* next_ip = ip + bytes_between_hash_lookups; |
989 | 0 | if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) { |
990 | 0 | ip = next_emit; |
991 | 0 | goto emit_remainder; |
992 | 0 | } |
993 | 0 | candidate = base_ip + *table_entry2; |
994 | 0 | assert(candidate >= base_ip); |
995 | 0 | assert(candidate < ip); |
996 | | |
997 | 0 | *table_entry2 = ip - base_ip; |
998 | 0 | if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) == |
999 | 0 | LittleEndian::Load32(candidate))) { |
1000 | 0 | candidate_length = |
1001 | 0 | FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4; |
1002 | 0 | break; |
1003 | 0 | } |
1004 | | |
1005 | 0 | uint16_t* table_entry = TableEntry4ByteMatch(table, data, mask); |
1006 | 0 | candidate = base_ip + *table_entry; |
1007 | 0 | assert(candidate >= base_ip); |
1008 | 0 | assert(candidate < ip); |
1009 | | |
1010 | 0 | *table_entry = ip - base_ip; |
1011 | 0 | if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) == |
1012 | 0 | LittleEndian::Load32(candidate))) { |
1013 | 0 | candidate_length = |
1014 | 0 | FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4; |
1015 | 0 | table_entry2 = |
1016 | 0 | TableEntry8ByteMatch(table2, LittleEndian::Load64(ip + 1), mask); |
1017 | 0 | auto candidate2 = base_ip + *table_entry2; |
1018 | 0 | size_t candidate_length2 = |
1019 | 0 | FindMatchLengthPlain(candidate2, ip + 1, ip_end); |
1020 | 0 | if (candidate_length2 > candidate_length) { |
1021 | 0 | *table_entry2 = ip - base_ip; |
1022 | 0 | candidate = candidate2; |
1023 | 0 | candidate_length = candidate_length2; |
1024 | 0 | ++ip; |
1025 | 0 | } |
1026 | 0 | break; |
1027 | 0 | } |
1028 | 0 | data = LittleEndian::Load64(next_ip); |
1029 | 0 | ip = next_ip; |
1030 | 0 | } |
1031 | | // Backtrack to the point it matches fully. |
1032 | 0 | while (ip > next_emit && candidate > base_ip && |
1033 | 0 | *(ip - 1) == *(candidate - 1)) { |
1034 | 0 | --ip; |
1035 | 0 | --candidate; |
1036 | 0 | ++candidate_length; |
1037 | 0 | } |
1038 | 0 | *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip + 1), mask) = |
1039 | 0 | ip - base_ip + 1; |
1040 | 0 | *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip + 2), mask) = |
1041 | 0 | ip - base_ip + 2; |
1042 | 0 | *TableEntry4ByteMatch(table, LittleEndian::Load32(ip + 1), mask) = |
1043 | 0 | ip - base_ip + 1; |
1044 | | // Step 2: A 4-byte or 8-byte match has been found. |
1045 | | // We'll later see if more than 4 bytes match. But, prior to the match, |
1046 | | // input bytes [next_emit, ip) are unmatched. Emit them as |
1047 | | // "literal bytes." |
1048 | 0 | assert(next_emit + 16 <= ip_end); |
1049 | 0 | if (ip - next_emit > 0) { |
1050 | 0 | op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, |
1051 | 0 | ip - next_emit); |
1052 | 0 | } |
1053 | | // Step 3: Call EmitCopy, and then see if another EmitCopy could |
1054 | | // be our next move. Repeat until we find no match for the |
1055 | | // input immediately after what was consumed by the last EmitCopy call. |
1056 | | // |
1057 | | // If we exit this loop normally then we need to call EmitLiteral next, |
1058 | | // though we don't yet know how big the literal will be. We handle that |
1059 | | // by proceeding to the next iteration of the main loop. We also can exit |
1060 | | // this loop via goto if we get close to exhausting the input. |
1061 | 0 | do { |
1062 | | // We have a 4-byte match at ip, and no need to emit any |
1063 | | // "literal bytes" prior to ip. |
1064 | 0 | const char* base = ip; |
1065 | 0 | ip += candidate_length; |
1066 | 0 | size_t offset = base - candidate; |
1067 | 0 | if (candidate_length < 12) { |
1068 | 0 | op = |
1069 | 0 | EmitCopy</*len_less_than_12=*/true>(op, offset, candidate_length); |
1070 | 0 | } else { |
1071 | 0 | op = EmitCopy</*len_less_than_12=*/false>(op, offset, |
1072 | 0 | candidate_length); |
1073 | 0 | } |
1074 | 0 | if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) { |
1075 | 0 | goto emit_remainder; |
1076 | 0 | } |
1077 | | // We are now looking for a 4-byte match again. We read |
1078 | | // table[Hash(ip, mask)] for that. To improve compression, |
1079 | | // we also update several previous table entries. |
1080 | 0 | if (ip - base_ip > 7) { |
1081 | 0 | *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 7), mask) = |
1082 | 0 | ip - base_ip - 7; |
1083 | 0 | *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 4), mask) = |
1084 | 0 | ip - base_ip - 4; |
1085 | 0 | } |
1086 | 0 | *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 3), mask) = |
1087 | 0 | ip - base_ip - 3; |
1088 | 0 | *TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 2), mask) = |
1089 | 0 | ip - base_ip - 2; |
1090 | 0 | *TableEntry4ByteMatch(table, LittleEndian::Load32(ip - 2), mask) = |
1091 | 0 | ip - base_ip - 2; |
1092 | 0 | *TableEntry4ByteMatch(table, LittleEndian::Load32(ip - 1), mask) = |
1093 | 0 | ip - base_ip - 1; |
1094 | |
1095 | 0 | uint16_t* table_entry = |
1096 | 0 | TableEntry8ByteMatch(table2, LittleEndian::Load64(ip), mask); |
1097 | 0 | candidate = base_ip + *table_entry; |
1098 | 0 | *table_entry = ip - base_ip; |
1099 | 0 | if (LittleEndian::Load32(ip) == LittleEndian::Load32(candidate)) { |
1100 | 0 | candidate_length = |
1101 | 0 | FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4; |
1102 | 0 | continue; |
1103 | 0 | } |
1104 | 0 | table_entry = |
1105 | 0 | TableEntry4ByteMatch(table, LittleEndian::Load32(ip), mask); |
1106 | 0 | candidate = base_ip + *table_entry; |
1107 | 0 | *table_entry = ip - base_ip; |
1108 | 0 | if (LittleEndian::Load32(ip) == LittleEndian::Load32(candidate)) { |
1109 | 0 | candidate_length = |
1110 | 0 | FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4; |
1111 | 0 | continue; |
1112 | 0 | } |
1113 | 0 | break; |
1114 | 0 | } while (true); |
1115 | 0 | } |
1116 | 0 | } |
1117 | | |
1118 | 0 | emit_remainder: |
1119 | | // Emit the remaining bytes as a literal |
1120 | 0 | if (ip < ip_end) { |
1121 | 0 | op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip); |
1122 | 0 | } |
1123 | |
1124 | 0 | return op; |
1125 | 0 | } |
1126 | | } // end namespace internal |
1127 | | |
1128 | | static inline void Report(int token, const char *algorithm, size_t |
1129 | 0 | compressed_size, size_t uncompressed_size) { |
1130 | | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
1131 | 0 | (void)token; |
1132 | 0 | (void)algorithm; |
1133 | 0 | (void)compressed_size; |
1134 | 0 | (void)uncompressed_size; |
1135 | 0 | } |
1136 | | |
1137 | | // Signature of output types needed by decompression code. |
1138 | | // The decompression code is templatized on a type that obeys this |
1139 | | // signature so that we do not pay virtual function call overhead in |
1140 | | // the middle of a tight decompression loop. |
1141 | | // |
1142 | | // class DecompressionWriter { |
1143 | | // public: |
1144 | | // // Called before decompression |
1145 | | // void SetExpectedLength(size_t length); |
1146 | | // |
1147 | | // // For performance a writer may choose to donate the cursor variable to the |
1148 | | // // decompression function. The decompression will inject it in all its |
1149 | | // // function calls to the writer. Keeping the important output cursor as a |
1150 | | // function-local stack variable allows the compiler to keep it in a |
1151 | | // // register, which greatly aids performance by avoiding loads and stores of |
1152 | | // // this variable in the fast path loop iterations. |
1153 | | // T GetOutputPtr() const; |
1154 | | // |
1155 | | // // At end of decompression the loop donates the ownership of the cursor |
1156 | | // // variable back to the writer by calling this function. |
1157 | | // void SetOutputPtr(T op); |
1158 | | // |
1159 | | // // Called after decompression |
1160 | | // bool CheckLength() const; |
1161 | | // |
1162 | | // // Called repeatedly during decompression |
1163 | | // Each function gets a pointer to the op (output pointer) that the writer |
1164 | | // // can use and update. Note it's important that these functions get fully |
1165 | | // // inlined so that no actual address of the local variable needs to be |
1166 | | // // taken. |
1167 | | // bool Append(const char* ip, size_t length, T* op); |
1168 | | // bool AppendFromSelf(uint32_t offset, size_t length, T* op); |
1169 | | // |
1170 | | // // The rules for how TryFastAppend differs from Append are somewhat |
1171 | | // // convoluted: |
1172 | | // // |
1173 | | // // - TryFastAppend is allowed to decline (return false) at any |
1174 | | // // time, for any reason -- just "return false" would be |
1175 | | // // a perfectly legal implementation of TryFastAppend. |
1176 | | // // The intention is for TryFastAppend to allow a fast path |
1177 | | // // in the common case of a small append. |
1178 | | // // - TryFastAppend is allowed to read up to <available> bytes |
1179 | | // // from the input buffer, whereas Append is allowed to read |
1180 | | // // <length>. However, if it returns true, it must leave |
1181 | | // // at least five (kMaximumTagLength) bytes in the input buffer |
1182 | | // // afterwards, so that there is always enough space to read the |
1183 | | // // next tag without checking for a refill. |
1184 | | // // - TryFastAppend must always decline (return false) |
1185 | | // // if <length> is 61 or more, as in this case the literal length is not |
1186 | | // // decoded fully. In practice, this should not be a big problem, |
1187 | | // // as it is unlikely that one would implement a fast path accepting |
1188 | | // // this much data. |
1189 | | // // |
1190 | | // bool TryFastAppend(const char* ip, size_t available, size_t length, T* op); |
1191 | | // }; |
1192 | | |
1193 | 0 | static inline uint32_t ExtractLowBytes(const uint32_t& v, int n) { |
1194 | 0 | assert(n >= 0); |
1195 | 0 | assert(n <= 4); |
1196 | | #if SNAPPY_HAVE_BMI2 |
1197 | | return _bzhi_u32(v, 8 * n); |
1198 | | #else |
1199 | | // This needs to be wider than uint32_t otherwise `mask << 32` will be |
1200 | | // undefined. |
1201 | 0 | uint64_t mask = 0xffffffff; |
1202 | 0 | return v & ~(mask << (8 * n)); |
1203 | 0 | #endif |
1204 | 0 | } |
1205 | | |
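For reference, a standalone sketch (not part of snappy, and with a hypothetical name) of the portable branch of ExtractLowBytes, showing the intended behavior of keeping only the low n bytes of v; the BMI2 branch computes the same result with _bzhi_u32.

#include <cassert>
#include <cstdint>

// Portable sketch of ExtractLowBytes: keep only the low n bytes of v (0 <= n <= 4).
static uint32_t ExtractLowBytesPortable(uint32_t v, int n) {
  // The mask must be wider than 32 bits so that "mask << 32" (n == 4) is defined.
  uint64_t mask = 0xffffffff;
  return v & ~(mask << (8 * n));
}

int main() {
  assert(ExtractLowBytesPortable(0x11223344u, 0) == 0x00000000u);
  assert(ExtractLowBytesPortable(0x11223344u, 1) == 0x00000044u);
  assert(ExtractLowBytesPortable(0x11223344u, 2) == 0x00003344u);
  assert(ExtractLowBytesPortable(0x11223344u, 4) == 0x11223344u);
  return 0;
}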
1206 | 0 | static inline bool LeftShiftOverflows(uint8_t value, uint32_t shift) { |
1207 | 0 | assert(shift < 32); |
1208 | 0 | static const uint8_t masks[] = { |
1209 | 0 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |
1210 | 0 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |
1211 | 0 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // |
1212 | 0 | 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe}; |
1213 | 0 | return (value & masks[shift]) != 0; |
1214 | 0 | } |
1215 | | |
1216 | 0 | inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) { |
1217 | | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
1218 | 0 | (void)dst; |
1219 | 0 | return offset != 0; |
1220 | 0 | } |
1221 | | |
1222 | | // Copies between size bytes and 64 bytes from src to dest. size cannot exceed |
1223 | | // 64. More than size bytes, but never exceeding 64, might be copied if doing |
1224 | | // so gives better performance. [src, src + size) must not overlap with |
1225 | | // [dst, dst + size), but [src, src + 64) may overlap with [dst, dst + 64). |
1226 | 0 | void MemCopy64(char* dst, const void* src, size_t size) { |
1227 | | // Always copy this many bytes. If that's below size then copy the full 64. |
1228 | 0 | constexpr int kShortMemCopy = 32; |
1229 | |
1230 | 0 | assert(size <= 64); |
1231 | 0 | assert(std::less_equal<const void*>()(static_cast<const char*>(src) + size, |
1232 | 0 | dst) || |
1233 | 0 | std::less_equal<const void*>()(dst + size, src)); |
1234 | | |
1235 | | // We know that src and dst are at least size bytes apart. However, because we |
1236 | | // might copy more than size bytes, the copy might still overlap beyond size bytes,
1237 | | // E.g. if src and dst appear consecutively in memory (src + size >= dst). |
1238 | | // TODO: Investigate wider copies on other platforms. |
1239 | | #if defined(__x86_64__) && defined(__AVX__) |
1240 | | assert(kShortMemCopy <= 32); |
1241 | | __m256i data = _mm256_lddqu_si256(static_cast<const __m256i *>(src)); |
1242 | | _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst), data); |
1243 | | // Profiling shows that nearly all copies are short. |
1244 | | if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) { |
1245 | | data = _mm256_lddqu_si256(static_cast<const __m256i *>(src) + 1); |
1246 | | _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst) + 1, data); |
1247 | | } |
1248 | | #else |
1249 | 0 | std::memmove(dst, src, kShortMemCopy); |
1250 | | // Profiling shows that nearly all copies are short. |
1251 | 0 | if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) { |
1252 | 0 | std::memmove(dst + kShortMemCopy, |
1253 | 0 | static_cast<const uint8_t*>(src) + kShortMemCopy, |
1254 | 0 | 64 - kShortMemCopy); |
1255 | 0 | } |
1256 | 0 | #endif |
1257 | 0 | } |
1258 | | |
1259 | 0 | void MemCopy64(ptrdiff_t dst, const void* src, size_t size) { |
1260 | | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
1261 | 0 | (void)dst; |
1262 | 0 | (void)src; |
1263 | 0 | (void)size; |
1264 | 0 | } |
1265 | | |
1266 | | void ClearDeferred(const void** deferred_src, size_t* deferred_length, |
1267 | 0 | uint8_t* safe_source) { |
1268 | 0 | *deferred_src = safe_source; |
1269 | 0 | *deferred_length = 0; |
1270 | 0 | } |
1271 | | |
1272 | | void DeferMemCopy(const void** deferred_src, size_t* deferred_length, |
1273 | 0 | const void* src, size_t length) { |
1274 | 0 | *deferred_src = src; |
1275 | 0 | *deferred_length = length; |
1276 | 0 | } |
1277 | | |
1278 | | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
1279 | 0 | inline size_t AdvanceToNextTagARMOptimized(const uint8_t** ip_p, size_t* tag) { |
1280 | 0 | const uint8_t*& ip = *ip_p; |
1281 | 0 | // This section is crucial for the throughput of the decompression loop. |
1282 | 0 | // The latency of an iteration is fundamentally constrained by the |
1283 | 0 | // following data chain on ip. |
1284 | 0 | // ip -> c = Load(ip) -> delta1 = (c & 3) -> ip += delta1 or delta2 |
1285 | 0 | // delta2 = ((c >> 2) + 1) ip++ |
1286 | 0 | // This is different from X86 optimizations because ARM has conditional add |
1287 | 0 | // instruction (csinc) and it removes several register moves. |
1288 | 0 | const size_t tag_type = *tag & 3; |
1289 | 0 | const bool is_literal = (tag_type == 0); |
1290 | 0 | if (is_literal) { |
1291 | 0 | size_t next_literal_tag = (*tag >> 2) + 1; |
1292 | 0 | *tag = ip[next_literal_tag]; |
1293 | 0 | ip += next_literal_tag + 1; |
1294 | 0 | } else { |
1295 | 0 | *tag = ip[tag_type]; |
1296 | 0 | ip += tag_type + 1; |
1297 | 0 | } |
1298 | 0 | return tag_type; |
1299 | 0 | } |
1300 | | |
1301 | | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
1302 | 0 | inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) { |
1303 | 0 | const uint8_t*& ip = *ip_p; |
1304 | | // This section is crucial for the throughput of the decompression loop. |
1305 | | // The latency of an iteration is fundamentally constrained by the |
1306 | | // following data chain on ip. |
1307 | | // ip -> c = Load(ip) -> ip1 = ip + 1 + (c & 3) -> ip = ip1 or ip2 |
1308 | | // ip2 = ip + 2 + (c >> 2) |
1309 | | // This amounts to 8 cycles. |
1310 | | // 5 (load) + 1 (c & 3) + 1 (lea ip1, [ip + (c & 3) + 1]) + 1 (cmov) |
1311 | 0 | size_t literal_len = *tag >> 2; |
1312 | 0 | size_t tag_type = *tag; |
1313 | 0 | bool is_literal; |
1314 | 0 | #if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__) |
1315 | | // TODO clang misses the fact that the (c & 3) already correctly |
1316 | | // sets the zero flag. |
1317 | 0 | asm("and $3, %k[tag_type]\n\t" |
1318 | 0 | : [tag_type] "+r"(tag_type), "=@ccz"(is_literal) |
1319 | 0 | :: "cc"); |
1320 | | #else |
1321 | | tag_type &= 3; |
1322 | | is_literal = (tag_type == 0); |
1323 | | #endif |
1324 | | // TODO |
1325 | | // This code is subtle. Loading the values first and then cmov has less
1326 | | // latency than cmov on ip and then load. However, clang would move the loads
1327 | | // in an optimization phase; volatile prevents this transformation.
1328 | | // Note that we have enough slop bytes (64) that the loads are always valid. |
1329 | 0 | size_t tag_literal = |
1330 | 0 | static_cast<const volatile uint8_t*>(ip)[1 + literal_len]; |
1331 | 0 | size_t tag_copy = static_cast<const volatile uint8_t*>(ip)[tag_type]; |
1332 | 0 | *tag = is_literal ? tag_literal : tag_copy; |
1333 | 0 | const uint8_t* ip_copy = ip + 1 + tag_type; |
1334 | 0 | const uint8_t* ip_literal = ip + 2 + literal_len; |
1335 | 0 | ip = is_literal ? ip_literal : ip_copy; |
1336 | 0 | #if defined(__GNUC__) && defined(__x86_64__) |
1337 | | // TODO Clang is "optimizing" the zero-extension (a totally free
1338 | | // operation); this means that after the cmov of tag, it emits another
1339 | | // movzb tag, byte(tag). It really matters as it's on the core dependency
1340 | | // chain. This dummy asm persuades clang to do the zero-extension at the
1341 | | // load (where it is automatic), removing the expensive movzb.
1342 | 0 | asm("" ::"r"(tag_copy)); |
1343 | 0 | #endif |
1344 | 0 | return tag_type; |
1345 | 0 | } |
1346 | | |
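Both AdvanceToNextTag variants above compute the same step: classify the current tag, skip its inline payload (literal bytes or offset bytes), and preload the next tag byte. A plain reference version (a sketch, not in the snappy source, relying on the same slop-byte guarantee and on the caller handling exceptional tags) could be written as:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Reference (unoptimized) version of what AdvanceToNextTagARMOptimized and
// AdvanceToNextTagX86Optimized both compute. On entry *ip_p points just past
// the current tag byte and *tag holds that tag byte; on exit *ip_p points just
// past the next tag byte and *tag holds the next tag byte.
inline size_t AdvanceToNextTagReference(const uint8_t** ip_p, size_t* tag) {
  const uint8_t*& ip = *ip_p;
  const size_t tag_type = *tag & 3;  // 0 = literal, 1/2 = copy-1/copy-2
  size_t skip;                       // bytes between this tag and the next
  if (tag_type == 0) {
    skip = (*tag >> 2) + 1;          // inline literal payload length
  } else {
    skip = tag_type;                 // 1- or 2-byte offset follows the tag
  }
  *tag = ip[skip];                   // preload the next tag byte
  ip += skip + 1;                    // step past payload/offset and the next tag
  return tag_type;
}

int main() {
  // Literal tag (length 3), "abc", a copy-1 tag, its offset byte, next tag, slop.
  const uint8_t buf[16] = {0x08, 'a', 'b', 'c', 0x05, 0x01, 0x00};
  const uint8_t* ip = buf + 1;  // just past the first tag
  size_t tag = buf[0];

  assert(AdvanceToNextTagReference(&ip, &tag) == 0);  // literal, skips "abc"
  assert(tag == 0x05 && ip == buf + 5);
  assert(AdvanceToNextTagReference(&ip, &tag) == 1);  // copy-1, skips 1 offset byte
  assert(tag == 0x00 && ip == buf + 7);
  return 0;
}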
1347 | | // Extracts the offset for copy-1 and copy-2; returns 0 for literals or copy-4.
1348 | 0 | inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) { |
1349 | | // For x86 non-static storage works better. For ARM static storage is better. |
1350 | | // TODO: Once the array is recognized as a register, improve the |
1351 | | // readability for x86. |
1352 | 0 | #if defined(__x86_64__) |
1353 | 0 | constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull; |
1354 | 0 | uint16_t result; |
1355 | 0 | memcpy(&result, |
1356 | 0 | reinterpret_cast<const char*>(&kExtractMasksCombined) + 2 * tag_type, |
1357 | 0 | sizeof(result)); |
1358 | 0 | return val & result; |
1359 | | #elif defined(__aarch64__) |
1360 | | constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull; |
1361 | | return val & static_cast<uint32_t>( |
1362 | | (kExtractMasksCombined >> (tag_type * 16)) & 0xFFFF); |
1363 | | #else |
1364 | | static constexpr uint32_t kExtractMasks[4] = {0, 0xFF, 0xFFFF, 0}; |
1365 | | return val & kExtractMasks[tag_type]; |
1366 | | #endif |
1367 | 0 | }; |
1368 | | |
1369 | | // Core decompression loop, when there is enough data available. |
1370 | | // Decompresses the input buffer [ip, ip_limit) into the output buffer |
1371 | | // [op, op_limit_min_slop). Returns when we are too close to the end of the
1372 | | // input buffer, when we exceed op_limit_min_slop, or when an exceptional
1373 | | // tag is encountered (a literal of length > 60, or a copy-4).
1374 | | // Returns {ip, op} at the points it stopped decoding. |
1375 | | // TODO This function probably does not need to be inlined, as it |
1376 | | // should decode large chunks at a time. This allows runtime dispatch to |
1377 | | // implementations based on CPU capability (BMI2 / perhaps 32 / 64 byte memcpy). |
1378 | | template <typename T> |
1379 | | std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless( |
1380 | | const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base, |
1381 | 0 | ptrdiff_t op_limit_min_slop) { |
1382 | | // If deferred_src is invalid, point it here.
1383 | 0 | uint8_t safe_source[64]; |
1384 | 0 | const void* deferred_src; |
1385 | 0 | size_t deferred_length; |
1386 | 0 | ClearDeferred(&deferred_src, &deferred_length, safe_source); |
1387 | | |
1388 | | // We unroll the inner loop twice so we need twice the spare room. |
1389 | 0 | op_limit_min_slop -= kSlopBytes; |
1390 | 0 | if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) { |
1391 | 0 | const uint8_t* const ip_limit_min_slop = ip_limit - 2 * kSlopBytes - 1; |
1392 | 0 | ip++; |
1393 | | // ip points just past the tag, and we touch at most kSlopBytes
1394 | | // in an iteration.
1395 | 0 | size_t tag = ip[-1]; |
1396 | | #if defined(__clang__) && defined(__aarch64__) |
1397 | | // Workaround for https://bugs.llvm.org/show_bug.cgi?id=51317 |
1398 | | // when loading 1 byte, clang for aarch64 doesn't realize that it(ldrb) |
1399 | | // comes with free zero-extension, so clang generates another |
1400 | | // 'and xn, xm, 0xff' before it use that as the offset. This 'and' is |
1401 | | // redundant and can be removed by adding this dummy asm, which gives |
1402 | | // clang a hint that we're doing the zero-extension at the load. |
1403 | | asm("" ::"r"(tag)); |
1404 | | #endif |
1405 | 0 | do { |
1406 | | // Throughput is limited by instruction count; unrolling the inner loop
1407 | | // twice reduces the number of instructions checking limits and also
1408 | | // leads to fewer mov's.
1409 | |
1410 | 0 | SNAPPY_PREFETCH(ip + 128); |
1411 | 0 | for (int i = 0; i < 2; i++) { |
1412 | 0 | const uint8_t* old_ip = ip; |
1413 | 0 | assert(tag == ip[-1]); |
1414 | | // For literals tag_type = 0, hence we will always obtain 0 from |
1415 | | // ExtractLowBytes. For literals offset will thus be kLiteralOffset. |
1416 | 0 | ptrdiff_t len_minus_offset = kLengthMinusOffset[tag]; |
1417 | 0 | uint32_t next; |
1418 | | #if defined(__aarch64__) |
1419 | | size_t tag_type = AdvanceToNextTagARMOptimized(&ip, &tag); |
1420 | | // We never need more than 16 bits. Doing a Load16 allows the compiler |
1421 | | // to elide the masking operation in ExtractOffset. |
1422 | | next = LittleEndian::Load16(old_ip); |
1423 | | #else |
1424 | 0 | size_t tag_type = AdvanceToNextTagX86Optimized(&ip, &tag); |
1425 | 0 | next = LittleEndian::Load32(old_ip); |
1426 | 0 | #endif |
1427 | 0 | size_t len = len_minus_offset & 0xFF; |
1428 | 0 | ptrdiff_t extracted = ExtractOffset(next, tag_type); |
1429 | 0 | ptrdiff_t len_min_offset = len_minus_offset - extracted; |
1430 | 0 | if (SNAPPY_PREDICT_FALSE(len_minus_offset > extracted)) { |
1431 | 0 | if (SNAPPY_PREDICT_FALSE(len & 0x80)) { |
1432 | | // Exceptional case (long literal or copy 4). |
1433 | | // Actually doing the copy here negatively impacts the main
1434 | | // loop, because the compiler incorrectly allocates a register for
1435 | | // this fallback. Hence we just break.
1436 | 0 | break_loop: |
1437 | 0 | ip = old_ip; |
1438 | 0 | goto exit; |
1439 | 0 | } |
1440 | | // Only copy-1 or copy-2 tags can get here. |
1441 | 0 | assert(tag_type == 1 || tag_type == 2); |
1442 | 0 | std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len; |
1443 | | // Guard against copies before the buffer start. |
1444 | | // Execute any deferred MemCopy since we write to dst here. |
1445 | 0 | MemCopy64(op_base + op, deferred_src, deferred_length); |
1446 | 0 | op += deferred_length; |
1447 | 0 | ClearDeferred(&deferred_src, &deferred_length, safe_source); |
1448 | 0 | if (SNAPPY_PREDICT_FALSE(delta < 0 || |
1449 | 0 | !Copy64BytesWithPatternExtension( |
1450 | 0 | op_base + op, len - len_min_offset))) { |
1451 | 0 | goto break_loop; |
1452 | 0 | } |
1453 | | // We aren't deferring this copy so add length right away. |
1454 | 0 | op += len; |
1455 | 0 | continue; |
1456 | 0 | } |
1457 | 0 | std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len; |
1458 | 0 | if (SNAPPY_PREDICT_FALSE(delta < 0)) { |
1459 | | // Due to the spurious offset that literals carry, this will trigger
1460 | | // at the start of a block when op is still smaller than 256.
1461 | 0 | if (tag_type != 0) goto break_loop; |
1462 | 0 | MemCopy64(op_base + op, deferred_src, deferred_length); |
1463 | 0 | op += deferred_length; |
1464 | 0 | DeferMemCopy(&deferred_src, &deferred_length, old_ip, len); |
1465 | 0 | continue; |
1466 | 0 | } |
1467 | | |
1468 | | // For copies we need to copy from op_base + delta, for literals |
1469 | | // we need to copy from ip instead of from the stream. |
1470 | 0 | const void* from = |
1471 | 0 | tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip; |
1472 | 0 | MemCopy64(op_base + op, deferred_src, deferred_length); |
1473 | 0 | op += deferred_length; |
1474 | 0 | DeferMemCopy(&deferred_src, &deferred_length, from, len); |
1475 | 0 | } |
1476 | 0 | } while (ip < ip_limit_min_slop && |
1477 | 0 | static_cast<ptrdiff_t>(op + deferred_length) < op_limit_min_slop); |
1478 | 0 | exit: |
1479 | 0 | ip--; |
1480 | 0 | assert(ip <= ip_limit); |
1481 | 0 | } |
1482 | | // If we deferred a copy then we can perform it now. If we are up to date then
1483 | | // we might not have enough slop bytes and could run past the end.
1484 | 0 | if (deferred_length) { |
1485 | 0 | MemCopy64(op_base + op, deferred_src, deferred_length); |
1486 | 0 | op += deferred_length; |
1487 | 0 | ClearDeferred(&deferred_src, &deferred_length, safe_source); |
1488 | 0 | } |
1489 | 0 | return {ip, op}; |
1490 | 0 | }
Unexecuted instantiation: std::__1::pair<unsigned char const*, long> snappy::DecompressBranchless<char*>(unsigned char const*, unsigned char const*, long, char*, long)
Unexecuted instantiation: std::__1::pair<unsigned char const*, long> snappy::DecompressBranchless<unsigned long>(unsigned char const*, unsigned char const*, long, unsigned long, long)
1491 | | |
1492 | | // Helper class for decompression |
1493 | | class SnappyDecompressor { |
1494 | | private: |
1495 | | Source* reader_; // Underlying source of bytes to decompress |
1496 | | const char* ip_; // Points to next buffered byte |
1497 | | const char* ip_limit_; // Points just past buffered bytes |
1498 | | // If ip < ip_limit_min_maxtaglen_ it's safe to read kMaximumTagLength from
1499 | | // the buffer.
1500 | | const char* ip_limit_min_maxtaglen_; |
1501 | | uint32_t peeked_; // Bytes peeked from reader (need to skip) |
1502 | | bool eof_; // Hit end of input without an error? |
1503 | | char scratch_[kMaximumTagLength]; // See RefillTag(). |
1504 | | |
1505 | | // Ensure that all of the tag metadata for the next tag is available |
1506 | | // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even |
1507 | | // if (ip_limit_ - ip_ < 5). |
1508 | | // |
1509 | | // Returns true on success, false on error or end of input. |
1510 | | bool RefillTag(); |
1511 | | |
1512 | 0 | void ResetLimit(const char* ip) { |
1513 | 0 | ip_limit_min_maxtaglen_ = |
1514 | 0 | ip_limit_ - std::min<ptrdiff_t>(ip_limit_ - ip, kMaximumTagLength - 1); |
1515 | 0 | } |
1516 | | |
1517 | | public: |
1518 | | explicit SnappyDecompressor(Source* reader) |
1519 | 0 | : reader_(reader), ip_(NULL), ip_limit_(NULL), peeked_(0), eof_(false) {} |
1520 | | |
1521 | 0 | ~SnappyDecompressor() { |
1522 | | // Advance past any bytes we peeked at from the reader |
1523 | 0 | reader_->Skip(peeked_); |
1524 | 0 | } |
1525 | | |
1526 | | // Returns true iff we have hit the end of the input without an error. |
1527 | 0 | bool eof() const { return eof_; } |
1528 | | |
1529 | | // Read the uncompressed length stored at the start of the compressed data. |
1530 | | // On success, stores the length in *result and returns true. |
1531 | | // On failure, returns false. |
1532 | 0 | bool ReadUncompressedLength(uint32_t* result) { |
1533 | 0 | assert(ip_ == NULL); // Must not have read anything yet |
1534 | | // Length is encoded in 1..5 bytes |
1535 | 0 | *result = 0; |
1536 | 0 | uint32_t shift = 0; |
1537 | 0 | while (true) { |
1538 | 0 | if (shift >= 32) return false; |
1539 | 0 | size_t n; |
1540 | 0 | const char* ip = reader_->Peek(&n); |
1541 | 0 | if (n == 0) return false; |
1542 | 0 | const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip)); |
1543 | 0 | reader_->Skip(1); |
1544 | 0 | uint32_t val = c & 0x7f; |
1545 | 0 | if (LeftShiftOverflows(static_cast<uint8_t>(val), shift)) return false; |
1546 | 0 | *result |= val << shift; |
1547 | 0 | if (c < 128) { |
1548 | 0 | break; |
1549 | 0 | } |
1550 | 0 | shift += 7; |
1551 | 0 | } |
1552 | 0 | return true; |
1553 | 0 | } |
1554 | | |
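ReadUncompressedLength() above decodes the stream preamble, a little-endian base-128 varint of 1..5 bytes (7 payload bits per byte, high bit set on every byte except the last). A hypothetical encoder for the same preamble, as an illustrative sketch only:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Encode a 32-bit length as the 1..5 byte varint preamble that
// ReadUncompressedLength() decodes. Returns the number of bytes written.
static size_t EncodeUncompressedLength(uint32_t value, char* dst) {
  size_t n = 0;
  while (value >= 0x80) {
    dst[n++] = static_cast<char>((value & 0x7f) | 0x80);  // more bytes follow
    value >>= 7;
  }
  dst[n++] = static_cast<char>(value);  // final byte, high bit clear
  return n;
}

int main() {
  char buf[5];
  // 300 = 0b10_0101100 encodes as 0xAC 0x02.
  size_t n = EncodeUncompressedLength(300, buf);
  assert(n == 2);
  assert(static_cast<unsigned char>(buf[0]) == 0xAC);
  assert(static_cast<unsigned char>(buf[1]) == 0x02);
  return 0;
}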
1555 | | // Process the next item found in the input. |
1556 | | // Returns true if successful, false on error or end of input. |
1557 | | template <class Writer> |
1558 | | #if defined(__GNUC__) && defined(__x86_64__) |
1559 | | __attribute__((aligned(32))) |
1560 | | #endif |
1561 | | void |
1562 | 0 | DecompressAllTags(Writer* writer) { |
1563 | 0 | const char* ip = ip_; |
1564 | 0 | ResetLimit(ip); |
1565 | 0 | auto op = writer->GetOutputPtr(); |
1566 | | // We could have put this refill fragment only at the beginning of the loop. |
1567 | | // However, duplicating it at the end of each branch gives the compiler more |
1568 | | // scope to optimize the <ip_limit_ - ip> expression based on the local |
1569 | | // context, which overall increases speed. |
1570 | 0 | #define MAYBE_REFILL() \ |
1571 | 0 | if (SNAPPY_PREDICT_FALSE(ip >= ip_limit_min_maxtaglen_)) { \ |
1572 | 0 | ip_ = ip; \ |
1573 | 0 | if (SNAPPY_PREDICT_FALSE(!RefillTag())) goto exit; \ |
1574 | 0 | ip = ip_; \ |
1575 | 0 | ResetLimit(ip); \ |
1576 | 0 | } \ |
1577 | 0 | preload = static_cast<uint8_t>(*ip) |
1578 | | |
1579 | | // At the start of the for loop below the least significant byte of preload |
1580 | | // contains the tag. |
1581 | 0 | uint32_t preload; |
1582 | 0 | MAYBE_REFILL(); |
1583 | 0 | for (;;) { |
1584 | 0 | { |
1585 | 0 | ptrdiff_t op_limit_min_slop; |
1586 | 0 | auto op_base = writer->GetBase(&op_limit_min_slop); |
1587 | 0 | if (op_base) { |
1588 | 0 | auto res = |
1589 | 0 | DecompressBranchless(reinterpret_cast<const uint8_t*>(ip), |
1590 | 0 | reinterpret_cast<const uint8_t*>(ip_limit_), |
1591 | 0 | op - op_base, op_base, op_limit_min_slop); |
1592 | 0 | ip = reinterpret_cast<const char*>(res.first); |
1593 | 0 | op = op_base + res.second; |
1594 | 0 | MAYBE_REFILL(); |
1595 | 0 | } |
1596 | 0 | } |
1597 | 0 | const uint8_t c = static_cast<uint8_t>(preload); |
1598 | 0 | ip++; |
1599 | | |
1600 | | // Ratio of iterations that have LITERAL vs non-LITERAL for different |
1601 | | // inputs. |
1602 | | // |
1603 | | // input LITERAL NON_LITERAL |
1604 | | // ----------------------------------- |
1605 | | // html|html4|cp 23% 77% |
1606 | | // urls 36% 64% |
1607 | | // jpg 47% 53% |
1608 | | // pdf 19% 81% |
1609 | | // txt[1-4] 25% 75% |
1610 | | // pb 24% 76% |
1611 | | // bin 24% 76% |
1612 | 0 | if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) { |
1613 | 0 | size_t literal_length = (c >> 2) + 1u; |
1614 | 0 | if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length, &op)) { |
1615 | 0 | assert(literal_length < 61); |
1616 | 0 | ip += literal_length; |
1617 | | // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend() |
1618 | | // will not return true unless there's already at least five spare |
1619 | | // bytes in addition to the literal. |
1620 | 0 | preload = static_cast<uint8_t>(*ip); |
1621 | 0 | continue; |
1622 | 0 | } |
1623 | 0 | if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) { |
1624 | | // Long literal. |
1625 | 0 | const size_t literal_length_length = literal_length - 60; |
1626 | 0 | literal_length = |
1627 | 0 | ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) + |
1628 | 0 | 1; |
1629 | 0 | ip += literal_length_length; |
1630 | 0 | } |
1631 | |
1632 | 0 | size_t avail = ip_limit_ - ip; |
1633 | 0 | while (avail < literal_length) { |
1634 | 0 | if (!writer->Append(ip, avail, &op)) goto exit; |
1635 | 0 | literal_length -= avail; |
1636 | 0 | reader_->Skip(peeked_); |
1637 | 0 | size_t n; |
1638 | 0 | ip = reader_->Peek(&n); |
1639 | 0 | avail = n; |
1640 | 0 | peeked_ = avail; |
1641 | 0 | if (avail == 0) goto exit; |
1642 | 0 | ip_limit_ = ip + avail; |
1643 | 0 | ResetLimit(ip); |
1644 | 0 | } |
1645 | 0 | if (!writer->Append(ip, literal_length, &op)) goto exit; |
1646 | 0 | ip += literal_length; |
1647 | 0 | MAYBE_REFILL(); |
1648 | 0 | } else { |
1649 | 0 | if (SNAPPY_PREDICT_FALSE((c & 3) == COPY_4_BYTE_OFFSET)) { |
1650 | 0 | const size_t copy_offset = LittleEndian::Load32(ip); |
1651 | 0 | const size_t length = (c >> 2) + 1; |
1652 | 0 | ip += 4; |
1653 | |
1654 | 0 | if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit; |
1655 | 0 | } else { |
1656 | 0 | const ptrdiff_t entry = kLengthMinusOffset[c]; |
1657 | 0 | preload = LittleEndian::Load32(ip); |
1658 | 0 | const uint32_t trailer = ExtractLowBytes(preload, c & 3); |
1659 | 0 | const uint32_t length = entry & 0xff; |
1660 | 0 | assert(length > 0); |
1661 | | |
1662 | | // copy_offset/256 is encoded in bits 8..10. By just fetching |
1663 | | // those bits, we get copy_offset (since the bit-field starts at |
1664 | | // bit 8). |
1665 | 0 | const uint32_t copy_offset = trailer - entry + length; |
1666 | 0 | if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit; |
1667 | | |
1668 | 0 | ip += (c & 3); |
1669 | | // By using the result of the previous load we reduce the critical |
1670 | | // dependency chain of ip to 4 cycles. |
1671 | 0 | preload >>= (c & 3) * 8; |
1672 | 0 | if (ip < ip_limit_min_maxtaglen_) continue; |
1673 | 0 | } |
1674 | 0 | MAYBE_REFILL(); |
1675 | 0 | } |
1676 | 0 | } |
1677 | 0 | #undef MAYBE_REFILL |
1678 | 0 | exit: |
1679 | 0 | writer->SetOutputPtr(op); |
1680 | 0 | }
Unexecuted instantiation: void snappy::SnappyDecompressor::DecompressAllTags<snappy::SnappyIOVecWriter>(snappy::SnappyIOVecWriter*)
Unexecuted instantiation: void snappy::SnappyDecompressor::DecompressAllTags<snappy::SnappyDecompressionValidator>(snappy::SnappyDecompressionValidator*)
Unexecuted instantiation: void snappy::SnappyDecompressor::DecompressAllTags<snappy::SnappyArrayWriter>(snappy::SnappyArrayWriter*)
Unexecuted instantiation: void snappy::SnappyDecompressor::DecompressAllTags<snappy::SnappyScatteredWriter<snappy::SnappySinkAllocator> >(snappy::SnappyScatteredWriter<snappy::SnappySinkAllocator>*)
1681 | | }; |
1682 | | |
1683 | 0 | constexpr uint32_t CalculateNeeded(uint8_t tag) { |
1684 | 0 | return ((tag & 3) == 0 && tag >= (60 * 4)) |
1685 | 0 | ? (tag >> 2) - 58 |
1686 | 0 | : (0x05030201 >> ((tag * 8) & 31)) & 0xFF; |
1687 | 0 | } |
1688 | | |
1689 | | #if __cplusplus >= 201402L |
1690 | 0 | constexpr bool VerifyCalculateNeeded() { |
1691 | 0 | for (int i = 0; i < 1; i++) { |
1692 | 0 | if (CalculateNeeded(i) != (char_table[i] >> 11) + 1) return false; |
1693 | 0 | } |
1694 | 0 | return true; |
1695 | 0 | } |
1696 | | |
1697 | | // Make sure CalculateNeeded is correct by verifying it against the established |
1698 | | // table encoding the number of added bytes needed. |
1699 | | static_assert(VerifyCalculateNeeded(), ""); |
1700 | | #endif // c++14 |
1701 | | |
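The value computed by CalculateNeeded() is the number of consecutive bytes a tag needs, counting the tag byte itself plus its immediate extra bytes. A standalone sketch (the function name here is hypothetical) spelling out a few representative tags:

#include <cassert>
#include <cstdint>

// Same expression as CalculateNeeded() above, reproduced standalone so the
// expected values for a few representative tags can be spelled out.
constexpr uint32_t CalculateNeededSketch(uint8_t tag) {
  return ((tag & 3) == 0 && tag >= (60 * 4))
             ? (tag >> 2) - 58
             : (0x05030201 >> ((tag * 8) & 31)) & 0xFF;
}

int main() {
  assert(CalculateNeededSketch(0x00) == 1);     // short literal: tag byte only
  assert(CalculateNeededSketch(60 * 4) == 2);   // literal, 1 extra length byte
  assert(CalculateNeededSketch(63 * 4) == 5);   // literal, 4 extra length bytes
  assert(CalculateNeededSketch(0x01) == 2);     // copy-1: tag + 1 offset byte
  assert(CalculateNeededSketch(0x02) == 3);     // copy-2: tag + 2 offset bytes
  assert(CalculateNeededSketch(0x03) == 5);     // copy-4: tag + 4 offset bytes
  return 0;
}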
1702 | 0 | bool SnappyDecompressor::RefillTag() { |
1703 | 0 | const char* ip = ip_; |
1704 | 0 | if (ip == ip_limit_) { |
1705 | | // Fetch a new fragment from the reader |
1706 | 0 | reader_->Skip(peeked_); // All peeked bytes are used up |
1707 | 0 | size_t n; |
1708 | 0 | ip = reader_->Peek(&n); |
1709 | 0 | peeked_ = n; |
1710 | 0 | eof_ = (n == 0); |
1711 | 0 | if (eof_) return false; |
1712 | 0 | ip_limit_ = ip + n; |
1713 | 0 | } |
1714 | | |
1715 | | // Read the tag character |
1716 | 0 | assert(ip < ip_limit_); |
1717 | 0 | const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip)); |
1718 | | // At this point make sure that the data for the next tag is consecutive. |
1719 | | // For copy 1 this means the next 2 bytes (tag and 1 byte offset) |
1720 | | // For copy 2 the next 3 bytes (tag and 2 byte offset) |
1721 | | // For copy 4 the next 5 bytes (tag and 4 byte offset) |
1722 | | // For all small literals we only need 1 byte, but for literals 60...63 the
1723 | | // length is encoded in 1...4 extra bytes.
1724 | 0 | const uint32_t needed = CalculateNeeded(c); |
1725 | 0 | assert(needed <= sizeof(scratch_)); |
1726 | | |
1727 | | // Read more bytes from reader if needed |
1728 | 0 | uint32_t nbuf = ip_limit_ - ip; |
1729 | 0 | if (nbuf < needed) { |
1730 | | // Stitch together bytes from ip and reader to form the word |
1731 | | // contents. We store the needed bytes in "scratch_". They |
1732 | | // will be consumed immediately by the caller since we do not |
1733 | | // read more than we need. |
1734 | 0 | std::memmove(scratch_, ip, nbuf); |
1735 | 0 | reader_->Skip(peeked_); // All peeked bytes are used up |
1736 | 0 | peeked_ = 0; |
1737 | 0 | while (nbuf < needed) { |
1738 | 0 | size_t length; |
1739 | 0 | const char* src = reader_->Peek(&length); |
1740 | 0 | if (length == 0) return false; |
1741 | 0 | uint32_t to_add = std::min<uint32_t>(needed - nbuf, length); |
1742 | 0 | std::memcpy(scratch_ + nbuf, src, to_add); |
1743 | 0 | nbuf += to_add; |
1744 | 0 | reader_->Skip(to_add); |
1745 | 0 | } |
1746 | 0 | assert(nbuf == needed); |
1747 | 0 | ip_ = scratch_; |
1748 | 0 | ip_limit_ = scratch_ + needed; |
1749 | 0 | } else if (nbuf < kMaximumTagLength) { |
1750 | | // Have enough bytes, but move into scratch_ so that we do not |
1751 | | // read past end of input |
1752 | 0 | std::memmove(scratch_, ip, nbuf); |
1753 | 0 | reader_->Skip(peeked_); // All peeked bytes are used up |
1754 | 0 | peeked_ = 0; |
1755 | 0 | ip_ = scratch_; |
1756 | 0 | ip_limit_ = scratch_ + nbuf; |
1757 | 0 | } else { |
1758 | | // Pass pointer to buffer returned by reader_. |
1759 | 0 | ip_ = ip; |
1760 | 0 | } |
1761 | 0 | return true; |
1762 | 0 | } |
1763 | | |
1764 | | template <typename Writer> |
1765 | 0 | static bool InternalUncompress(Source* r, Writer* writer) { |
1766 | | // Read the uncompressed length from the front of the compressed input |
1767 | 0 | SnappyDecompressor decompressor(r); |
1768 | 0 | uint32_t uncompressed_len = 0; |
1769 | 0 | if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false; |
1770 | | |
1771 | 0 | return InternalUncompressAllTags(&decompressor, writer, r->Available(), |
1772 | 0 | uncompressed_len); |
1773 | 0 | }
Unexecuted instantiation: snappy.cc:bool snappy::InternalUncompress<snappy::SnappyIOVecWriter>(snappy::Source*, snappy::SnappyIOVecWriter*)
Unexecuted instantiation: snappy.cc:bool snappy::InternalUncompress<snappy::SnappyArrayWriter>(snappy::Source*, snappy::SnappyArrayWriter*)
Unexecuted instantiation: snappy.cc:bool snappy::InternalUncompress<snappy::SnappyDecompressionValidator>(snappy::Source*, snappy::SnappyDecompressionValidator*)
Unexecuted instantiation: snappy.cc:bool snappy::InternalUncompress<snappy::SnappyScatteredWriter<snappy::SnappySinkAllocator> >(snappy::Source*, snappy::SnappyScatteredWriter<snappy::SnappySinkAllocator>*)
1774 | | |
1775 | | template <typename Writer> |
1776 | | static bool InternalUncompressAllTags(SnappyDecompressor* decompressor, |
1777 | | Writer* writer, uint32_t compressed_len, |
1778 | 0 | uint32_t uncompressed_len) { |
1779 | 0 | int token = 0; |
1780 | 0 | Report(token, "snappy_uncompress", compressed_len, uncompressed_len); |
1781 | |
1782 | 0 | writer->SetExpectedLength(uncompressed_len); |
1783 | | |
1784 | | // Process the entire input |
1785 | 0 | decompressor->DecompressAllTags(writer); |
1786 | 0 | writer->Flush(); |
1787 | 0 | return (decompressor->eof() && writer->CheckLength()); |
1788 | 0 | }
Unexecuted instantiation: snappy.cc:bool snappy::InternalUncompressAllTags<snappy::SnappyIOVecWriter>(snappy::SnappyDecompressor*, snappy::SnappyIOVecWriter*, unsigned int, unsigned int)
Unexecuted instantiation: snappy.cc:bool snappy::InternalUncompressAllTags<snappy::SnappyDecompressionValidator>(snappy::SnappyDecompressor*, snappy::SnappyDecompressionValidator*, unsigned int, unsigned int)
Unexecuted instantiation: snappy.cc:bool snappy::InternalUncompressAllTags<snappy::SnappyArrayWriter>(snappy::SnappyDecompressor*, snappy::SnappyArrayWriter*, unsigned int, unsigned int)
Unexecuted instantiation: snappy.cc:bool snappy::InternalUncompressAllTags<snappy::SnappyScatteredWriter<snappy::SnappySinkAllocator> >(snappy::SnappyDecompressor*, snappy::SnappyScatteredWriter<snappy::SnappySinkAllocator>*, unsigned int, unsigned int)
1789 | | |
1790 | 0 | bool GetUncompressedLength(Source* source, uint32_t* result) { |
1791 | 0 | SnappyDecompressor decompressor(source); |
1792 | 0 | return decompressor.ReadUncompressedLength(result); |
1793 | 0 | } |
1794 | | |
1795 | 0 | size_t Compress(Source* reader, Sink* writer, CompressionOptions options) { |
1796 | 0 | assert(options.level == 1 || options.level == 2); |
1797 | 0 | int token = 0; |
1798 | 0 | size_t written = 0; |
1799 | 0 | size_t N = reader->Available(); |
1800 | 0 | const size_t uncompressed_size = N; |
1801 | 0 | char ulength[Varint::kMax32]; |
1802 | 0 | char* p = Varint::Encode32(ulength, N); |
1803 | 0 | writer->Append(ulength, p - ulength); |
1804 | 0 | written += (p - ulength); |
1805 | |
1806 | 0 | internal::WorkingMemory wmem(N); |
1807 | |
1808 | 0 | while (N > 0) { |
1809 | | // Get next block to compress (without copying if possible) |
1810 | 0 | size_t fragment_size; |
1811 | 0 | const char* fragment = reader->Peek(&fragment_size); |
1812 | 0 | assert(fragment_size != 0); // premature end of input |
1813 | 0 | const size_t num_to_read = std::min(N, kBlockSize); |
1814 | 0 | size_t bytes_read = fragment_size; |
1815 | |
1816 | 0 | size_t pending_advance = 0; |
1817 | 0 | if (bytes_read >= num_to_read) { |
1818 | | // Buffer returned by reader is large enough |
1819 | 0 | pending_advance = num_to_read; |
1820 | 0 | fragment_size = num_to_read; |
1821 | 0 | } else { |
1822 | 0 | char* scratch = wmem.GetScratchInput(); |
1823 | 0 | std::memcpy(scratch, fragment, bytes_read); |
1824 | 0 | reader->Skip(bytes_read); |
1825 | |
1826 | 0 | while (bytes_read < num_to_read) { |
1827 | 0 | fragment = reader->Peek(&fragment_size); |
1828 | 0 | size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read); |
1829 | 0 | std::memcpy(scratch + bytes_read, fragment, n); |
1830 | 0 | bytes_read += n; |
1831 | 0 | reader->Skip(n); |
1832 | 0 | } |
1833 | 0 | assert(bytes_read == num_to_read); |
1834 | 0 | fragment = scratch; |
1835 | 0 | fragment_size = num_to_read; |
1836 | 0 | } |
1837 | 0 | assert(fragment_size == num_to_read); |
1838 | | |
1839 | | // Get encoding table for compression |
1840 | 0 | int table_size; |
1841 | 0 | uint16_t* table = wmem.GetHashTable(num_to_read, &table_size); |
1842 | | |
1843 | | // Compress input_fragment and append to dest |
1844 | 0 | int max_output = MaxCompressedLength(num_to_read); |
1845 | | |
1846 | | // Since we encode kBlockSize regions followed by a region |
1847 | | // which is <= kBlockSize in length, a previously allocated |
1848 | | // scratch_output[] region is big enough for this iteration. |
1849 | | // Need a scratch buffer for the output, in case the byte sink doesn't |
1850 | | // have room for us directly. |
1851 | 0 | char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput()); |
1852 | 0 | char* end = nullptr; |
1853 | 0 | if (options.level == 1) { |
1854 | 0 | end = internal::CompressFragment(fragment, fragment_size, dest, table, |
1855 | 0 | table_size); |
1856 | 0 | } else if (options.level == 2) { |
1857 | 0 | end = internal::CompressFragmentDoubleHash( |
1858 | 0 | fragment, fragment_size, dest, table, table_size >> 1, |
1859 | 0 | table + (table_size >> 1), table_size >> 1); |
1860 | 0 | } |
1861 | 0 | writer->Append(dest, end - dest); |
1862 | 0 | written += (end - dest); |
1863 | |
1864 | 0 | N -= num_to_read; |
1865 | 0 | reader->Skip(pending_advance); |
1866 | 0 | } |
1867 | |
1868 | 0 | Report(token, "snappy_compress", written, uncompressed_size); |
1869 | 0 | return written; |
1870 | 0 | } |
1871 | | |
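The block-oriented Compress() above is what the convenience wrappers in snappy.h build on. A typical round trip through the public flat-buffer/string API, shown only as a usage sketch (not part of this file):

#include <cassert>
#include <string>
#include "snappy.h"

int main() {
  const std::string original(1000, 'a');  // highly compressible input

  // Compress into a std::string; the output is at most
  // snappy::MaxCompressedLength(original.size()) bytes.
  std::string compressed;
  snappy::Compress(original.data(), original.size(), &compressed);

  // Optional cheap validation without producing any output.
  assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));

  // Decompress; Uncompress() reads the varint preamble to size the output.
  std::string roundtrip;
  bool ok = snappy::Uncompress(compressed.data(), compressed.size(), &roundtrip);
  assert(ok && roundtrip == original);
  return 0;
}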
1872 | | // ----------------------------------------------------------------------- |
1873 | | // IOVec interfaces |
1874 | | // ----------------------------------------------------------------------- |
1875 | | |
1876 | | // A `Source` implementation that yields the contents of an `iovec` array. Note |
1877 | | // that `total_size` is the total number of bytes to be read from the elements |
1878 | | // of `iov` (_not_ the total number of elements in `iov`). |
1879 | | class SnappyIOVecReader : public Source { |
1880 | | public: |
1881 | | SnappyIOVecReader(const struct iovec* iov, size_t total_size) |
1882 | | : curr_iov_(iov), |
1883 | | curr_pos_(total_size > 0 ? reinterpret_cast<const char*>(iov->iov_base) |
1884 | | : nullptr), |
1885 | | curr_size_remaining_(total_size > 0 ? iov->iov_len : 0), |
1886 | 0 | total_size_remaining_(total_size) { |
1887 | | // Skip empty leading `iovec`s. |
1888 | 0 | if (total_size > 0 && curr_size_remaining_ == 0) Advance(); |
1889 | 0 | } |
1890 | | |
1891 | | ~SnappyIOVecReader() override = default; |
1892 | | |
1893 | 0 | size_t Available() const override { return total_size_remaining_; } |
1894 | | |
1895 | 0 | const char* Peek(size_t* len) override { |
1896 | 0 | *len = curr_size_remaining_; |
1897 | 0 | return curr_pos_; |
1898 | 0 | } |
1899 | | |
1900 | 0 | void Skip(size_t n) override { |
1901 | 0 | while (n >= curr_size_remaining_ && n > 0) { |
1902 | 0 | n -= curr_size_remaining_; |
1903 | 0 | Advance(); |
1904 | 0 | } |
1905 | 0 | curr_size_remaining_ -= n; |
1906 | 0 | total_size_remaining_ -= n; |
1907 | 0 | curr_pos_ += n; |
1908 | 0 | } |
1909 | | |
1910 | | private: |
1911 | | // Advances to the next nonempty `iovec` and updates related variables. |
1912 | 0 | void Advance() { |
1913 | 0 | do { |
1914 | 0 | assert(total_size_remaining_ >= curr_size_remaining_); |
1915 | 0 | total_size_remaining_ -= curr_size_remaining_; |
1916 | 0 | if (total_size_remaining_ == 0) { |
1917 | 0 | curr_pos_ = nullptr; |
1918 | 0 | curr_size_remaining_ = 0; |
1919 | 0 | return; |
1920 | 0 | } |
1921 | 0 | ++curr_iov_; |
1922 | 0 | curr_pos_ = reinterpret_cast<const char*>(curr_iov_->iov_base); |
1923 | 0 | curr_size_remaining_ = curr_iov_->iov_len; |
1924 | 0 | } while (curr_size_remaining_ == 0); |
1925 | 0 | } |
1926 | | |
1927 | | // The `iovec` currently being read. |
1928 | | const struct iovec* curr_iov_; |
1929 | | // The location in `curr_iov_` currently being read. |
1930 | | const char* curr_pos_; |
1931 | | // The amount of unread data in `curr_iov_`. |
1932 | | size_t curr_size_remaining_; |
1933 | | // The amount of unread data in the entire input array. |
1934 | | size_t total_size_remaining_; |
1935 | | }; |
1936 | | |
1937 | | // A type that writes to an iovec. |
1938 | | // Note that this is not a "ByteSink", but a type that matches the |
1939 | | // Writer template argument to SnappyDecompressor::DecompressAllTags(). |
1940 | | class SnappyIOVecWriter { |
1941 | | private: |
1942 | | // output_iov_end_ is set to iov + count and used to determine when |
1943 | | // the end of the iovs is reached. |
1944 | | const struct iovec* output_iov_end_; |
1945 | | |
1946 | | #if !defined(NDEBUG) |
1947 | | const struct iovec* output_iov_; |
1948 | | #endif // !defined(NDEBUG) |
1949 | | |
1950 | | // Current iov that is being written into. |
1951 | | const struct iovec* curr_iov_; |
1952 | | |
1953 | | // Pointer to current iov's write location. |
1954 | | char* curr_iov_output_; |
1955 | | |
1956 | | // Remaining bytes to write into curr_iov_output. |
1957 | | size_t curr_iov_remaining_; |
1958 | | |
1959 | | // Total bytes decompressed into output_iov_ so far. |
1960 | | size_t total_written_; |
1961 | | |
1962 | | // Maximum number of bytes that will be decompressed into output_iov_. |
1963 | | size_t output_limit_; |
1964 | | |
1965 | 0 | static inline char* GetIOVecPointer(const struct iovec* iov, size_t offset) { |
1966 | 0 | return reinterpret_cast<char*>(iov->iov_base) + offset; |
1967 | 0 | } |
1968 | | |
1969 | | public: |
1970 | | // Does not take ownership of iov. iov must be valid during the |
1971 | | // entire lifetime of the SnappyIOVecWriter. |
1972 | | inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count) |
1973 | | : output_iov_end_(iov + iov_count), |
1974 | | #if !defined(NDEBUG) |
1975 | | output_iov_(iov), |
1976 | | #endif // !defined(NDEBUG) |
1977 | | curr_iov_(iov), |
1978 | | curr_iov_output_(iov_count ? reinterpret_cast<char*>(iov->iov_base) |
1979 | | : nullptr), |
1980 | | curr_iov_remaining_(iov_count ? iov->iov_len : 0), |
1981 | | total_written_(0), |
1982 | 0 | output_limit_(-1) { |
1983 | 0 | } |
1984 | | |
1985 | 0 | inline void SetExpectedLength(size_t len) { output_limit_ = len; } |
1986 | | |
1987 | 0 | inline bool CheckLength() const { return total_written_ == output_limit_; } |
1988 | | |
1989 | 0 | inline bool Append(const char* ip, size_t len, char**) { |
1990 | 0 | if (total_written_ + len > output_limit_) { |
1991 | 0 | return false; |
1992 | 0 | } |
1993 | | |
1994 | 0 | return AppendNoCheck(ip, len); |
1995 | 0 | } |
1996 | | |
1997 | 0 | char* GetOutputPtr() { return nullptr; } |
1998 | 0 | char* GetBase(ptrdiff_t*) { return nullptr; } |
1999 | 0 | void SetOutputPtr(char* op) { |
2000 | | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
2001 | 0 | (void)op; |
2002 | 0 | } |
2003 | | |
2004 | 0 | inline bool AppendNoCheck(const char* ip, size_t len) { |
2005 | 0 | while (len > 0) { |
2006 | 0 | if (curr_iov_remaining_ == 0) { |
2007 | | // This iovec is full. Go to the next one. |
2008 | 0 | if (curr_iov_ + 1 >= output_iov_end_) { |
2009 | 0 | return false; |
2010 | 0 | } |
2011 | 0 | ++curr_iov_; |
2012 | 0 | curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base); |
2013 | 0 | curr_iov_remaining_ = curr_iov_->iov_len; |
2014 | 0 | } |
2015 | | |
2016 | 0 | const size_t to_write = std::min(len, curr_iov_remaining_); |
2017 | 0 | std::memcpy(curr_iov_output_, ip, to_write); |
2018 | 0 | curr_iov_output_ += to_write; |
2019 | 0 | curr_iov_remaining_ -= to_write; |
2020 | 0 | total_written_ += to_write; |
2021 | 0 | ip += to_write; |
2022 | 0 | len -= to_write; |
2023 | 0 | } |
2024 | | |
2025 | 0 | return true; |
2026 | 0 | } |
2027 | | |
2028 | | inline bool TryFastAppend(const char* ip, size_t available, size_t len, |
2029 | 0 | char**) { |
2030 | 0 | const size_t space_left = output_limit_ - total_written_; |
2031 | 0 | if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 && |
2032 | 0 | curr_iov_remaining_ >= 16) { |
2033 | | // Fast path, used for the majority (about 95%) of invocations. |
2034 | 0 | UnalignedCopy128(ip, curr_iov_output_); |
2035 | 0 | curr_iov_output_ += len; |
2036 | 0 | curr_iov_remaining_ -= len; |
2037 | 0 | total_written_ += len; |
2038 | 0 | return true; |
2039 | 0 | } |
2040 | | |
2041 | 0 | return false; |
2042 | 0 | } |
2043 | | |
2044 | 0 | inline bool AppendFromSelf(size_t offset, size_t len, char**) { |
2045 | | // See SnappyArrayWriter::AppendFromSelf for an explanation of |
2046 | | // the "offset - 1u" trick. |
2047 | 0 | if (offset - 1u >= total_written_) { |
2048 | 0 | return false; |
2049 | 0 | } |
2050 | 0 | const size_t space_left = output_limit_ - total_written_; |
2051 | 0 | if (len > space_left) { |
2052 | 0 | return false; |
2053 | 0 | } |
2054 | | |
2055 | | // Locate the iovec from which we need to start the copy. |
2056 | 0 | const iovec* from_iov = curr_iov_; |
2057 | 0 | size_t from_iov_offset = curr_iov_->iov_len - curr_iov_remaining_; |
2058 | 0 | while (offset > 0) { |
2059 | 0 | if (from_iov_offset >= offset) { |
2060 | 0 | from_iov_offset -= offset; |
2061 | 0 | break; |
2062 | 0 | } |
2063 | | |
2064 | 0 | offset -= from_iov_offset; |
2065 | 0 | --from_iov; |
2066 | 0 | #if !defined(NDEBUG) |
2067 | 0 | assert(from_iov >= output_iov_); |
2068 | 0 | #endif // !defined(NDEBUG) |
2069 | 0 | from_iov_offset = from_iov->iov_len; |
2070 | 0 | } |
2071 | | |
2072 | | // Copy <len> bytes starting from the iovec pointed to by from_iov to
2073 | | // the current iovec.
2074 | 0 | while (len > 0) { |
2075 | 0 | assert(from_iov <= curr_iov_); |
2076 | 0 | if (from_iov != curr_iov_) { |
2077 | 0 | const size_t to_copy = |
2078 | 0 | std::min(from_iov->iov_len - from_iov_offset, len); |
2079 | 0 | AppendNoCheck(GetIOVecPointer(from_iov, from_iov_offset), to_copy); |
2080 | 0 | len -= to_copy; |
2081 | 0 | if (len > 0) { |
2082 | 0 | ++from_iov; |
2083 | 0 | from_iov_offset = 0; |
2084 | 0 | } |
2085 | 0 | } else { |
2086 | 0 | size_t to_copy = curr_iov_remaining_; |
2087 | 0 | if (to_copy == 0) { |
2088 | | // This iovec is full. Go to the next one. |
2089 | 0 | if (curr_iov_ + 1 >= output_iov_end_) { |
2090 | 0 | return false; |
2091 | 0 | } |
2092 | 0 | ++curr_iov_; |
2093 | 0 | curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base); |
2094 | 0 | curr_iov_remaining_ = curr_iov_->iov_len; |
2095 | 0 | continue; |
2096 | 0 | } |
2097 | 0 | if (to_copy > len) { |
2098 | 0 | to_copy = len; |
2099 | 0 | } |
2100 | 0 | assert(to_copy > 0); |
2101 | | |
2102 | 0 | IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset), |
2103 | 0 | curr_iov_output_, curr_iov_output_ + to_copy, |
2104 | 0 | curr_iov_output_ + curr_iov_remaining_); |
2105 | 0 | curr_iov_output_ += to_copy; |
2106 | 0 | curr_iov_remaining_ -= to_copy; |
2107 | 0 | from_iov_offset += to_copy; |
2108 | 0 | total_written_ += to_copy; |
2109 | 0 | len -= to_copy; |
2110 | 0 | } |
2111 | 0 | } |
2112 | | |
2113 | 0 | return true; |
2114 | 0 | } |
2115 | | |
2116 | 0 | inline void Flush() {} |
2117 | | }; |
2118 | | |
2119 | | bool RawUncompressToIOVec(const char* compressed, size_t compressed_length, |
2120 | 0 | const struct iovec* iov, size_t iov_cnt) { |
2121 | 0 | ByteArraySource reader(compressed, compressed_length); |
2122 | 0 | return RawUncompressToIOVec(&reader, iov, iov_cnt); |
2123 | 0 | } |
2124 | | |
2125 | | bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov, |
2126 | 0 | size_t iov_cnt) { |
2127 | 0 | SnappyIOVecWriter output(iov, iov_cnt); |
2128 | 0 | return InternalUncompress(compressed, &output); |
2129 | 0 | } |
2130 | | |
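A usage sketch (not part of this file) for the iovec entry points above: the output of a normal compression is scattered across two caller-owned buffers sized from the stored uncompressed length.

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>
#include <sys/uio.h>  // struct iovec
#include "snappy.h"

int main() {
  const std::string original(100, 'x');
  std::string compressed;
  snappy::Compress(original.data(), original.size(), &compressed);

  // Split the destination across two buffers whose total size equals the
  // uncompressed length stored in the preamble.
  size_t ulen = 0;
  if (!snappy::GetUncompressedLength(compressed.data(), compressed.size(), &ulen)) return 1;
  std::vector<char> first(ulen / 2), second(ulen - ulen / 2);
  struct iovec iov[2];
  iov[0].iov_base = first.data();
  iov[0].iov_len = first.size();
  iov[1].iov_base = second.data();
  iov[1].iov_len = second.size();

  if (!snappy::RawUncompressToIOVec(compressed.data(), compressed.size(), iov, 2)) return 1;
  assert(std::string(first.begin(), first.end()) +
             std::string(second.begin(), second.end()) ==
         original);
  return 0;
}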
2131 | | // ----------------------------------------------------------------------- |
2132 | | // Flat array interfaces |
2133 | | // ----------------------------------------------------------------------- |
2134 | | |
2135 | | // A type that writes to a flat array. |
2136 | | // Note that this is not a "ByteSink", but a type that matches the |
2137 | | // Writer template argument to SnappyDecompressor::DecompressAllTags(). |
2138 | | class SnappyArrayWriter { |
2139 | | private: |
2140 | | char* base_; |
2141 | | char* op_; |
2142 | | char* op_limit_; |
2143 | | // If op < op_limit_min_slop_ then it's safe to unconditionally write |
2144 | | // kSlopBytes starting at op. |
2145 | | char* op_limit_min_slop_; |
2146 | | |
2147 | | public: |
2148 | | inline explicit SnappyArrayWriter(char* dst) |
2149 | | : base_(dst), |
2150 | | op_(dst), |
2151 | | op_limit_(dst), |
2152 | 0 | op_limit_min_slop_(dst) {} // Safe default see invariant. |
2153 | | |
2154 | 0 | inline void SetExpectedLength(size_t len) { |
2155 | 0 | op_limit_ = op_ + len; |
2156 | | // Prevent pointer from being past the buffer. |
2157 | 0 | op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, len); |
2158 | 0 | } |
2159 | | |
2160 | 0 | inline bool CheckLength() const { return op_ == op_limit_; } |
2161 | | |
2162 | 0 | char* GetOutputPtr() { return op_; } |
2163 | 0 | char* GetBase(ptrdiff_t* op_limit_min_slop) { |
2164 | 0 | *op_limit_min_slop = op_limit_min_slop_ - base_; |
2165 | 0 | return base_; |
2166 | 0 | } |
2167 | 0 | void SetOutputPtr(char* op) { op_ = op; } |
2168 | | |
2169 | 0 | inline bool Append(const char* ip, size_t len, char** op_p) { |
2170 | 0 | char* op = *op_p; |
2171 | 0 | const size_t space_left = op_limit_ - op; |
2172 | 0 | if (space_left < len) return false; |
2173 | 0 | std::memcpy(op, ip, len); |
2174 | 0 | *op_p = op + len; |
2175 | 0 | return true; |
2176 | 0 | } |
2177 | | |
2178 | | inline bool TryFastAppend(const char* ip, size_t available, size_t len, |
2179 | 0 | char** op_p) { |
2180 | 0 | char* op = *op_p; |
2181 | 0 | const size_t space_left = op_limit_ - op; |
2182 | 0 | if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) { |
2183 | | // Fast path, used for the majority (about 95%) of invocations. |
2184 | 0 | UnalignedCopy128(ip, op); |
2185 | 0 | *op_p = op + len; |
2186 | 0 | return true; |
2187 | 0 | } else { |
2188 | 0 | return false; |
2189 | 0 | } |
2190 | 0 | } |
2191 | | |
2192 | | SNAPPY_ATTRIBUTE_ALWAYS_INLINE |
2193 | 0 | inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) { |
2194 | 0 | assert(len > 0); |
2195 | 0 | char* const op = *op_p; |
2196 | 0 | assert(op >= base_); |
2197 | 0 | char* const op_end = op + len; |
2198 | | |
2199 | | // Check if we try to append from before the start of the buffer. |
2200 | 0 | if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - base_) < offset)) |
2201 | 0 | return false; |
2202 | | |
2203 | 0 | if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) || |
2204 | 0 | op >= op_limit_min_slop_ || offset < len)) { |
2205 | 0 | if (op_end > op_limit_ || offset == 0) return false; |
2206 | 0 | *op_p = IncrementalCopy(op - offset, op, op_end, op_limit_); |
2207 | 0 | return true; |
2208 | 0 | } |
2209 | 0 | std::memmove(op, op - offset, kSlopBytes); |
2210 | 0 | *op_p = op_end; |
2211 | 0 | return true; |
2212 | 0 | } |
2213 | 0 | inline size_t Produced() const { |
2214 | 0 | assert(op_ >= base_); |
2215 | 0 | return op_ - base_; |
2216 | 0 | } |
2217 | 0 | inline void Flush() {} |
2218 | | }; |
2219 | | |
2220 | | bool RawUncompress(const char* compressed, size_t compressed_length, |
2221 | 0 | char* uncompressed) { |
2222 | 0 | ByteArraySource reader(compressed, compressed_length); |
2223 | 0 | return RawUncompress(&reader, uncompressed); |
2224 | 0 | } |
2225 | | |
2226 | 0 | bool RawUncompress(Source* compressed, char* uncompressed) { |
2227 | 0 | SnappyArrayWriter output(uncompressed); |
2228 | 0 | return InternalUncompress(compressed, &output); |
2229 | 0 | } |
2230 | | |
2231 | | bool Uncompress(const char* compressed, size_t compressed_length, |
2232 | 0 | std::string* uncompressed) { |
2233 | 0 | size_t ulength; |
2234 | 0 | if (!GetUncompressedLength(compressed, compressed_length, &ulength)) { |
2235 | 0 | return false; |
2236 | 0 | } |
2237 | | // On 32-bit builds: max_size() < kuint32max. Check for that instead |
2238 | | // of crashing (e.g., consider externally specified compressed data). |
2239 | 0 | if (ulength > uncompressed->max_size()) { |
2240 | 0 | return false; |
2241 | 0 | } |
2242 | 0 | STLStringResizeUninitialized(uncompressed, ulength); |
2243 | 0 | return RawUncompress(compressed, compressed_length, |
2244 | 0 | string_as_array(uncompressed)); |
2245 | 0 | } |
2246 | | |
2247 | | // A Writer that drops everything on the floor and just does validation |
2248 | | class SnappyDecompressionValidator { |
2249 | | private: |
2250 | | size_t expected_; |
2251 | | size_t produced_; |
2252 | | |
2253 | | public: |
2254 | 0 | inline SnappyDecompressionValidator() : expected_(0), produced_(0) {} |
2255 | 0 | inline void SetExpectedLength(size_t len) { expected_ = len; } |
2256 | 0 | size_t GetOutputPtr() { return produced_; } |
2257 | 0 | size_t GetBase(ptrdiff_t* op_limit_min_slop) { |
2258 | 0 | *op_limit_min_slop = std::numeric_limits<ptrdiff_t>::max() - kSlopBytes + 1; |
2259 | 0 | return 1; |
2260 | 0 | } |
2261 | 0 | void SetOutputPtr(size_t op) { produced_ = op; } |
2262 | 0 | inline bool CheckLength() const { return expected_ == produced_; } |
2263 | 0 | inline bool Append(const char* ip, size_t len, size_t* produced) { |
2264 | | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
2265 | 0 | (void)ip; |
2266 | |
2267 | 0 | *produced += len; |
2268 | 0 | return *produced <= expected_; |
2269 | 0 | } |
2270 | | inline bool TryFastAppend(const char* ip, size_t available, size_t length, |
2271 | 0 | size_t* produced) { |
2272 | | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
2273 | 0 | (void)ip; |
2274 | 0 | (void)available; |
2275 | 0 | (void)length; |
2276 | 0 | (void)produced; |
2277 | |
2278 | 0 | return false; |
2279 | 0 | } |
2280 | 0 | inline bool AppendFromSelf(size_t offset, size_t len, size_t* produced) { |
2281 | | // See SnappyArrayWriter::AppendFromSelf for an explanation of |
2282 | | // the "offset - 1u" trick. |
2283 | 0 | if (*produced <= offset - 1u) return false; |
2284 | 0 | *produced += len; |
2285 | 0 | return *produced <= expected_; |
2286 | 0 | } |
2287 | 0 | inline void Flush() {} |
2288 | | }; |
2289 | | |
2290 | 0 | bool IsValidCompressedBuffer(const char* compressed, size_t compressed_length) { |
2291 | 0 | ByteArraySource reader(compressed, compressed_length); |
2292 | 0 | SnappyDecompressionValidator writer; |
2293 | 0 | return InternalUncompress(&reader, &writer); |
2294 | 0 | } |
2295 | | |
2296 | 0 | bool IsValidCompressed(Source* compressed) { |
2297 | 0 | SnappyDecompressionValidator writer; |
2298 | 0 | return InternalUncompress(compressed, &writer); |
2299 | 0 | } |
2300 | | |
2301 | | void RawCompress(const char* input, size_t input_length, char* compressed, |
2302 | 0 | size_t* compressed_length, CompressionOptions options) { |
2303 | 0 | ByteArraySource reader(input, input_length); |
2304 | 0 | UncheckedByteArraySink writer(compressed); |
2305 | 0 | Compress(&reader, &writer, options); |
2306 | | |
2307 | | // Compute how many bytes were added |
2308 | 0 | *compressed_length = (writer.CurrentDestination() - compressed); |
2309 | 0 | } |
2310 | | |
2311 | | void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length, |
2312 | | char* compressed, size_t* compressed_length, |
2313 | 0 | CompressionOptions options) { |
2314 | 0 | SnappyIOVecReader reader(iov, uncompressed_length); |
2315 | 0 | UncheckedByteArraySink writer(compressed); |
2316 | 0 | Compress(&reader, &writer, options); |
2317 | | |
2318 | | // Compute how many bytes were added. |
2319 | 0 | *compressed_length = writer.CurrentDestination() - compressed; |
2320 | 0 | } |
2321 | | |
2322 | | size_t Compress(const char* input, size_t input_length, std::string* compressed, |
2323 | 0 | CompressionOptions options) { |
2324 | | // Pre-grow the buffer to the max length of the compressed output |
2325 | 0 | STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length)); |
2326 | |
2327 | 0 | size_t compressed_length; |
2328 | 0 | RawCompress(input, input_length, string_as_array(compressed), |
2329 | 0 | &compressed_length, options); |
2330 | 0 | compressed->erase(compressed_length); |
2331 | 0 | return compressed_length; |
2332 | 0 | } |
2333 | | |
2334 | | size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt, |
2335 | 0 | std::string* compressed, CompressionOptions options) { |
2336 | | // Compute the number of bytes to be compressed. |
2337 | 0 | size_t uncompressed_length = 0; |
2338 | 0 | for (size_t i = 0; i < iov_cnt; ++i) { |
2339 | 0 | uncompressed_length += iov[i].iov_len; |
2340 | 0 | } |
2341 | | |
2342 | | // Pre-grow the buffer to the max length of the compressed output. |
2343 | 0 | STLStringResizeUninitialized(compressed, MaxCompressedLength( |
2344 | 0 | uncompressed_length)); |
2345 | |
2346 | 0 | size_t compressed_length; |
2347 | 0 | RawCompressFromIOVec(iov, uncompressed_length, string_as_array(compressed), |
2348 | 0 | &compressed_length, options); |
2349 | 0 | compressed->erase(compressed_length); |
2350 | 0 | return compressed_length; |
2351 | 0 | } |
2352 | | |
2353 | | // ----------------------------------------------------------------------- |
2354 | | // Sink interface |
2355 | | // ----------------------------------------------------------------------- |
2356 | | |
2357 | | // A type that decompresses into a Sink. The template parameter |
2358 | | // Allocator must export one method "char* Allocate(int size);", which |
2359 | | // allocates a buffer of "size" and appends that to the destination. |
2360 | | template <typename Allocator> |
2361 | | class SnappyScatteredWriter { |
2362 | | Allocator allocator_; |
2363 | | |
2364 | | // We need random access into the data generated so far. Therefore |
2365 | | // we keep track of all of the generated data as an array of blocks. |
2366 | | // All of the blocks except the last have length kBlockSize. |
2367 | | std::vector<char*> blocks_; |
2368 | | size_t expected_; |
2369 | | |
2370 | | // Total size of all fully generated blocks so far |
2371 | | size_t full_size_; |
2372 | | |
2373 | | // Pointer into current output block |
2374 | | char* op_base_; // Base of output block |
2375 | | char* op_ptr_; // Pointer to next unfilled byte in block |
2376 | | char* op_limit_; // Pointer just past block |
2377 | | // If op < op_limit_min_slop_ then it's safe to unconditionally write |
2378 | | // kSlopBytes starting at op. |
2379 | | char* op_limit_min_slop_; |
2380 | | |
2381 | 0 | inline size_t Size() const { return full_size_ + (op_ptr_ - op_base_); } |
2382 | | |
2383 | | bool SlowAppend(const char* ip, size_t len); |
2384 | | bool SlowAppendFromSelf(size_t offset, size_t len); |
2385 | | |
2386 | | public: |
2387 | | inline explicit SnappyScatteredWriter(const Allocator& allocator) |
2388 | | : allocator_(allocator), |
2389 | | full_size_(0), |
2390 | | op_base_(NULL), |
2391 | | op_ptr_(NULL), |
2392 | | op_limit_(NULL), |
2393 | 0 | op_limit_min_slop_(NULL) {} |
2394 | 0 | char* GetOutputPtr() { return op_ptr_; } |
2395 | 0 | char* GetBase(ptrdiff_t* op_limit_min_slop) { |
2396 | 0 | *op_limit_min_slop = op_limit_min_slop_ - op_base_; |
2397 | 0 | return op_base_; |
2398 | 0 | } |
2399 | 0 | void SetOutputPtr(char* op) { op_ptr_ = op; } |
2400 | | |
2401 | 0 | inline void SetExpectedLength(size_t len) { |
2402 | 0 | assert(blocks_.empty()); |
2403 | 0 | expected_ = len; |
2404 | 0 | } |
2405 | | |
2406 | 0 | inline bool CheckLength() const { return Size() == expected_; } |
2407 | | |
2408 | | // Return the number of bytes actually uncompressed so far |
2409 | 0 | inline size_t Produced() const { return Size(); } |
2410 | | |
2411 | 0 | inline bool Append(const char* ip, size_t len, char** op_p) { |
2412 | 0 | char* op = *op_p; |
2413 | 0 | size_t avail = op_limit_ - op; |
2414 | 0 | if (len <= avail) { |
2415 | | // Fast path |
2416 | 0 | std::memcpy(op, ip, len); |
2417 | 0 | *op_p = op + len; |
2418 | 0 | return true; |
2419 | 0 | } else { |
2420 | 0 | op_ptr_ = op; |
2421 | 0 | bool res = SlowAppend(ip, len); |
2422 | 0 | *op_p = op_ptr_; |
2423 | 0 | return res; |
2424 | 0 | } |
2425 | 0 | } |
2426 | | |
2427 | | inline bool TryFastAppend(const char* ip, size_t available, size_t length, |
2428 | 0 | char** op_p) { |
2429 | 0 | char* op = *op_p; |
2430 | 0 | const int space_left = op_limit_ - op; |
2431 | 0 | if (length <= 16 && available >= 16 + kMaximumTagLength && |
2432 | 0 | space_left >= 16) { |
2433 | | // Fast path, used for the majority (about 95%) of invocations. |
2434 | 0 | UnalignedCopy128(ip, op); |
2435 | 0 | *op_p = op + length; |
2436 | 0 | return true; |
2437 | 0 | } else { |
2438 | 0 | return false; |
2439 | 0 | } |
2440 | 0 | } |
2441 | | |
2442 | 0 | inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) { |
2443 | 0 | char* op = *op_p; |
2444 | 0 | assert(op >= op_base_); |
2445 | | // Check if we try to append from before the start of the buffer. |
2446 | 0 | if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) || |
2447 | 0 | static_cast<size_t>(op - op_base_) < offset || |
2448 | 0 | op >= op_limit_min_slop_ || offset < len)) { |
2449 | 0 | if (offset == 0) return false; |
2450 | 0 | if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - op_base_) < offset || |
2451 | 0 | op + len > op_limit_)) { |
2452 | 0 | op_ptr_ = op; |
2453 | 0 | bool res = SlowAppendFromSelf(offset, len); |
2454 | 0 | *op_p = op_ptr_; |
2455 | 0 | return res; |
2456 | 0 | } |
2457 | 0 | *op_p = IncrementalCopy(op - offset, op, op + len, op_limit_); |
2458 | 0 | return true; |
2459 | 0 | } |
2460 | | // Fast path |
2461 | 0 | char* const op_end = op + len; |
2462 | 0 | std::memmove(op, op - offset, kSlopBytes); |
2463 | 0 | *op_p = op_end; |
2464 | 0 | return true; |
2465 | 0 | } |
2466 | | |
2467 | | // Called at the end of decompression. We ask the allocator to
2468 | | // write all blocks to the sink.
2469 | 0 | inline void Flush() { allocator_.Flush(Produced()); } |
2470 | | }; |
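
The field comment above ("If op < op_limit_min_slop_ then it's safe to unconditionally write kSlopBytes starting at op") is a one-inequality argument. The sketch below is illustrative only and not part of snappy (the helper name CheckSlopInvariant is made up); note the real code additionally clamps the subtraction with std::min so that op_limit_min_slop_ never falls below op_base_ for blocks smaller than kSlopBytes.

    #include <cassert>
    #include <cstddef>

    // Illustrative only: op < (op_limit - (kSlopBytes - 1)) implies that
    // kSlopBytes bytes written starting at op stay within op_limit.
    void CheckSlopInvariant(char* op, char* op_limit, std::size_t kSlopBytes) {
      char* op_limit_min_slop = op_limit - (kSlopBytes - 1);
      if (op < op_limit_min_slop) {
        // op + kSlopBytes - 1 < op_limit  ==>  op + kSlopBytes <= op_limit.
        assert(op + kSlopBytes <= op_limit);
      }
    }
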
2471 | | |
2472 | | template <typename Allocator> |
2473 | 0 | bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) { |
2474 | 0 | size_t avail = op_limit_ - op_ptr_; |
2475 | 0 | while (len > avail) { |
2476 | | // Completely fill this block |
2477 | 0 | std::memcpy(op_ptr_, ip, avail); |
2478 | 0 | op_ptr_ += avail; |
2479 | 0 | assert(op_limit_ - op_ptr_ == 0); |
2480 | 0 | full_size_ += (op_ptr_ - op_base_); |
2481 | 0 | len -= avail; |
2482 | 0 | ip += avail; |
2483 | | |
2484 | | // Bounds check |
2485 | 0 | if (full_size_ + len > expected_) return false; |
2486 | | |
2487 | | // Make new block |
2488 | 0 | size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_); |
2489 | 0 | op_base_ = allocator_.Allocate(bsize); |
2490 | 0 | op_ptr_ = op_base_; |
2491 | 0 | op_limit_ = op_base_ + bsize; |
2492 | 0 | op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, bsize); |
2493 | |
2494 | 0 | blocks_.push_back(op_base_); |
2495 | 0 | avail = bsize; |
2496 | 0 | } |
2497 | | |
2498 | 0 | std::memcpy(op_ptr_, ip, len); |
2499 | 0 | op_ptr_ += len; |
2500 | 0 | return true; |
2501 | 0 | } |
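
SlowAppend() above is the only place in this writer where the Allocator's Allocate() method is called; together with Flush() (invoked from SnappyScatteredWriter::Flush()) and copy construction, that is the whole contract on the template parameter. As a purely illustrative sketch (the name StringBufferAllocator and its members are hypothetical, not part of snappy), a conforming allocator that gathers the blocks into a std::string could look like this:

    #include <algorithm>
    #include <string>
    #include <vector>

    // Hypothetical allocator satisfying the contract assumed by
    // SnappyScatteredWriter: Allocate() hands out writable blocks and
    // Flush(size) commits the first "size" produced bytes. Copies are
    // shallow, mirroring SnappySinkAllocator below.
    class StringBufferAllocator {
     public:
      explicit StringBufferAllocator(std::string* out) : out_(out) {}

      char* Allocate(int size) {
        blocks_.push_back(Block{new char[size], static_cast<size_t>(size)});
        return blocks_.back().data;
      }

      void Flush(size_t size) {
        size_t written = 0;
        for (const Block& block : blocks_) {
          size_t n = std::min<size_t>(block.size, size - written);
          out_->append(block.data, n);
          written += n;
          delete[] block.data;
        }
        blocks_.clear();
      }

     private:
      struct Block {
        char* data;
        size_t size;
      };
      std::string* out_;
      std::vector<Block> blocks_;
    };

Within this translation unit such a type would be instantiated as SnappyScatteredWriter<StringBufferAllocator>, exactly as SnappySinkAllocator is further down.
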
2502 | | |
2503 | | template <typename Allocator> |
2504 | | bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset, |
2505 | 0 | size_t len) { |
2506 | | // Overflow check |
2507 | | // See SnappyArrayWriter::AppendFromSelf for an explanation of |
2508 | | // the "offset - 1u" trick. |
2509 | 0 | const size_t cur = Size(); |
2510 | 0 | if (offset - 1u >= cur) return false; |
2511 | 0 | if (expected_ - cur < len) return false; |
2512 | | |
2513 | | // Currently we shouldn't ever hit this path because Compress() chops the |
2514 | | // input into blocks and does not create cross-block copies. However, it is |
2515 | | // nice if we do not rely on that, since we can get better compression if we |
2516 | | // allow cross-block copies and thus might want to change the compressor in |
2517 | | // the future. |
2518 | | // TODO: Replace this with a properly optimized path. This is not
2519 | | // triggered right now, but it is so slow that it would regress
2520 | | // performance unacceptably if it were.
2521 | 0 | size_t src = cur - offset; |
2522 | 0 | char* op = op_ptr_; |
2523 | 0 | while (len-- > 0) { |
2524 | 0 | char c = blocks_[src >> kBlockLog][src & (kBlockSize - 1)]; |
2525 | 0 | if (!Append(&c, 1, &op)) { |
2526 | 0 | op_ptr_ = op; |
2527 | 0 | return false; |
2528 | 0 | } |
2529 | 0 | src++; |
2530 | 0 | } |
2531 | 0 | op_ptr_ = op; |
2532 | 0 | return true; |
2533 | 0 | } |
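
The byte-by-byte fallback above indexes the output with blocks_[src >> kBlockLog][src & (kBlockSize - 1)], relying on every block except the last having exactly kBlockSize bytes. Below is a small, purely illustrative sketch of that mapping, assuming the kBlockLog = 16 / kBlockSize = 64 KiB values from snappy.h (the helper name LocateByte is made up):

    #include <cstddef>
    #include <utility>

    // Assumed constants matching snappy.h: kBlockSize == 1 << kBlockLog.
    constexpr int kBlockLog = 16;
    constexpr std::size_t kBlockSize = std::size_t{1} << kBlockLog;

    // Map a global uncompressed offset to (block index, offset within block),
    // mirroring blocks_[src >> kBlockLog][src & (kBlockSize - 1)] above.
    std::pair<std::size_t, std::size_t> LocateByte(std::size_t src) {
      return {src >> kBlockLog, src & (kBlockSize - 1)};
    }

    // Example: LocateByte(70000) == {1, 4464}, i.e. byte 4464 of the second block.
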
2534 | | |
2535 | | class SnappySinkAllocator { |
2536 | | public: |
2537 | 0 | explicit SnappySinkAllocator(Sink* dest) : dest_(dest) {} |
2538 | | |
2539 | 0 | char* Allocate(int size) { |
2540 | 0 | Datablock block(new char[size], size); |
2541 | 0 | blocks_.push_back(block); |
2542 | 0 | return block.data; |
2543 | 0 | } |
2544 | | |
2545 | | // We flush only at the end, because the writer needs
2546 | | // random access to the blocks and once we hand a
2547 | | // block over to the sink, we can't access it anymore.
2548 | | // We also never write more than has actually been
2549 | | // produced into the blocks.
2550 | 0 | void Flush(size_t size) { |
2551 | 0 | size_t size_written = 0; |
2552 | 0 | for (Datablock& block : blocks_) { |
2553 | 0 | size_t block_size = std::min<size_t>(block.size, size - size_written); |
2554 | 0 | dest_->AppendAndTakeOwnership(block.data, block_size, |
2555 | 0 | &SnappySinkAllocator::Deleter, NULL); |
2556 | 0 | size_written += block_size; |
2557 | 0 | } |
2558 | 0 | blocks_.clear(); |
2559 | 0 | } |
2560 | | |
2561 | | private: |
2562 | | struct Datablock { |
2563 | | char* data; |
2564 | | size_t size; |
2565 | 0 | Datablock(char* p, size_t s) : data(p), size(s) {} |
2566 | | }; |
2567 | | |
2568 | 0 | static void Deleter(void* arg, const char* bytes, size_t size) { |
2569 | | // TODO: Switch to [[maybe_unused]] when we can assume C++17. |
2570 | 0 | (void)arg; |
2571 | 0 | (void)size; |
2572 | |
2573 | 0 | delete[] bytes; |
2574 | 0 | } |
2575 | | |
2576 | | Sink* dest_; |
2577 | | std::vector<Datablock> blocks_; |
2578 | | |
2579 | | // Note: copying this object is allowed |
2580 | | }; |
2581 | | |
2582 | 0 | size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) { |
2583 | 0 | SnappySinkAllocator allocator(uncompressed); |
2584 | 0 | SnappyScatteredWriter<SnappySinkAllocator> writer(allocator); |
2585 | 0 | InternalUncompress(compressed, &writer); |
2586 | 0 | return writer.Produced(); |
2587 | 0 | } |
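
For orientation, here is a hedged usage sketch of this entry point, using the ByteArraySource and UncheckedByteArraySink helpers declared in snappy-sinksource.h; the wrapper name DecompressToBuffer is hypothetical and only for illustration:

    #include <cstddef>
    #include <memory>

    #include "snappy.h"
    #include "snappy-sinksource.h"

    // Decompress comp[0, comp_len) through the Source/Sink interface.
    // Returns the number of bytes produced (possibly fewer than the header
    // promised if the input is truncated), or 0 on a malformed length prefix.
    size_t DecompressToBuffer(const char* comp, size_t comp_len,
                              std::unique_ptr<char[]>* out) {
      size_t uncompressed_len = 0;
      if (!snappy::GetUncompressedLength(comp, comp_len, &uncompressed_len)) {
        return 0;
      }
      out->reset(new char[uncompressed_len]);

      snappy::ByteArraySource source(comp, comp_len);
      snappy::UncheckedByteArraySink sink(out->get());
      return snappy::UncompressAsMuchAsPossible(&source, &sink);
    }
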
2588 | | |
2589 | 0 | bool Uncompress(Source* compressed, Sink* uncompressed) { |
2590 | | // Read the uncompressed length from the front of the compressed input |
2591 | 0 | SnappyDecompressor decompressor(compressed); |
2592 | 0 | uint32_t uncompressed_len = 0; |
2593 | 0 | if (!decompressor.ReadUncompressedLength(&uncompressed_len)) { |
2594 | 0 | return false; |
2595 | 0 | } |
2596 | | |
2597 | 0 | char c; |
2598 | 0 | size_t allocated_size; |
2599 | 0 | char* buf = uncompressed->GetAppendBufferVariable(1, uncompressed_len, &c, 1, |
2600 | 0 | &allocated_size); |
2601 | |
2602 | 0 | const size_t compressed_len = compressed->Available(); |
2603 | | // If we can get a flat buffer, then use it; otherwise do block-by-block
2604 | | // uncompression.
2605 | 0 | if (allocated_size >= uncompressed_len) { |
2606 | 0 | SnappyArrayWriter writer(buf); |
2607 | 0 | bool result = InternalUncompressAllTags(&decompressor, &writer, |
2608 | 0 | compressed_len, uncompressed_len); |
2609 | 0 | uncompressed->Append(buf, writer.Produced()); |
2610 | 0 | return result; |
2611 | 0 | } else { |
2612 | 0 | SnappySinkAllocator allocator(uncompressed); |
2613 | 0 | SnappyScatteredWriter<SnappySinkAllocator> writer(allocator); |
2614 | 0 | return InternalUncompressAllTags(&decompressor, &writer, compressed_len, |
2615 | 0 | uncompressed_len); |
2616 | 0 | } |
2617 | 0 | } |
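
The branch above is driven entirely by what GetAppendBufferVariable() returns: a Sink that can expose one flat buffer covering the whole uncompressed length gets the fast SnappyArrayWriter path, while anything else falls back to the scattered writer. As a purely illustrative sketch (the class StringAppendSink is hypothetical and not part of snappy, and it assumes the flat path is always taken because it allocates the full desired size), a string-backed Sink could look like this:

    #include <algorithm>
    #include <cstddef>
    #include <string>

    #include "snappy-sinksource.h"

    // Hypothetical Sink backed by a std::string. Because GetAppendBufferVariable()
    // returns a buffer of the full desired size, Uncompress(Source*, Sink*) above
    // decompresses directly into the string via SnappyArrayWriter.
    class StringAppendSink : public snappy::Sink {
     public:
      explicit StringAppendSink(std::string* dest) : dest_(dest) {}

      char* GetAppendBufferVariable(size_t min_size, size_t desired_size_hint,
                                    char* /*scratch*/, size_t /*scratch_size*/,
                                    size_t* allocated_size) override {
        size_t size = std::max(min_size, desired_size_hint);
        base_ = dest_->size();
        dest_->resize(base_ + size);   // reserve room directly in the string
        *allocated_size = size;
        return &(*dest_)[base_];
      }

      void Append(const char* bytes, size_t n) override {
        if (bytes == dest_->data() + base_) {
          dest_->resize(base_ + n);    // data already in place; trim to fit
        } else {
          dest_->append(bytes, n);     // data came from elsewhere; copy it
        }
      }

     private:
      std::string* dest_;
      size_t base_ = 0;
    };
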
2618 | | |
2619 | | } // namespace snappy |