Coverage Report

Created: 2025-11-03 06:25

/src/snappy/snappy-internal.h
Line | Count | Source
   1 |       | // Copyright 2008 Google Inc. All Rights Reserved.
   2 |       | //
   3 |       | // Redistribution and use in source and binary forms, with or without
   4 |       | // modification, are permitted provided that the following conditions are
   5 |       | // met:
   6 |       | //
   7 |       | //     * Redistributions of source code must retain the above copyright
   8 |       | // notice, this list of conditions and the following disclaimer.
   9 |       | //     * Redistributions in binary form must reproduce the above
  10 |       | // copyright notice, this list of conditions and the following disclaimer
  11 |       | // in the documentation and/or other materials provided with the
  12 |       | // distribution.
  13 |       | //     * Neither the name of Google Inc. nor the names of its
  14 |       | // contributors may be used to endorse or promote products derived from
  15 |       | // this software without specific prior written permission.
  16 |       | //
  17 |       | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  18 |       | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  19 |       | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  20 |       | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  21 |       | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  22 |       | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  23 |       | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  24 |       | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  25 |       | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  26 |       | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  27 |       | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  28 |       | //
  29 |       | // Internals shared between the Snappy implementation and its unittest.
  30 |       |
  31 |       | #ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
  32 |       | #define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
  33 |       |
  34 |       | #include <utility>
  35 |       |
  36 |       | #include "snappy-stubs-internal.h"
  37 |       |
  38 |       | #if SNAPPY_HAVE_SSSE3
  39 |       | // Please do not replace with <x86intrin.h> or with headers that assume more
  40 |       | // advanced SSE versions without checking with all the OWNERS.
  41 |       | #include <emmintrin.h>
  42 |       | #include <tmmintrin.h>
  43 |       | #endif
  44 |       |
  45 |       | #if SNAPPY_HAVE_NEON
  46 |       | #include <arm_neon.h>
  47 |       | #endif
  48 |       |
  49 |       | #if SNAPPY_RVV_1 || SNAPPY_RVV_0_7
  50 |       | #define SNAPPY_HAVE_RVV 1
  51 |       | #include <riscv_vector.h>
  52 |       | #else
  53 |       | #define SNAPPY_HAVE_RVV 0
  54 |       | #endif
  55 |       |
  56 |       | #ifdef SNAPPY_RVV_1
  57 |       | #define VSETVL_E8M2 __riscv_vsetvl_e8m2
  58 |       | #define VLE8_V_U8M2 __riscv_vle8_v_u8m2
  59 |       | #define VSE8_V_U8M2 __riscv_vse8_v_u8m2
  60 |       | #elif SNAPPY_RVV_0_7
  61 |       | #define VSETVL_E8M2 vsetvl_e8m2
  62 |       | #define VLE8_V_U8M2 vle8_v_u8m2
  63 |       | #define VSE8_V_U8M2 vse8_v_u8m2
  64 |       | #endif
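
The VSETVL_E8M2 / VLE8_V_U8M2 / VSE8_V_U8M2 aliases above let the same call sites build against either the v1.0 ("__riscv_"-prefixed) or the legacy v0.7 RVV intrinsic names. A minimal sketch of how such aliases can drive a strip-mined byte copy, assuming the v1.0 intrinsics; RvvCopyBytes is a hypothetical helper, not something defined in this header:

// Hypothetical illustration only (v1.0 RVV intrinsics assumed). Each pass
// asks the hardware how many bytes it will handle (vl), loads that many as
// e8/LMUL=2 elements, stores them, and advances the pointers.
static inline void RvvCopyBytes(uint8_t* dst, const uint8_t* src, size_t n) {
  while (n > 0) {
    size_t vl = VSETVL_E8M2(n);           // vl <= n, chosen by the hardware
    vuint8m2_t v = VLE8_V_U8M2(src, vl);  // load vl bytes
    VSE8_V_U8M2(dst, v, vl);              // store vl bytes
    src += vl;
    dst += vl;
    n -= vl;
  }
}
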
  65 |       |
  66 |       | #if SNAPPY_HAVE_SSSE3 || SNAPPY_HAVE_NEON
  67 |       | #define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 1
  68 |       | #else
  69 |       | #define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 0
  70 |       | #endif
  71 |       |
  72 |       | namespace snappy {
  73 |       | namespace internal {
  74 |       |
  75 |       | #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
  76 |       | #if SNAPPY_HAVE_SSSE3
  77 |       | using V128 = __m128i;
  78 |       | #elif SNAPPY_HAVE_NEON
  79 |       | using V128 = uint8x16_t;
  80 |       | #endif
  81 |       |
  82 |       | // Load 128 bits of integer data. `src` must be 16-byte aligned.
  83 |       | inline V128 V128_Load(const V128* src);
  84 |       |
  85 |       | // Load 128 bits of integer data. `src` does not need to be aligned.
  86 |       | inline V128 V128_LoadU(const V128* src);
  87 |       |
  88 |       | // Store 128 bits of integer data. `dst` does not need to be aligned.
  89 |       | inline void V128_StoreU(V128* dst, V128 val);
  90 |       |
  91 |       | // Shuffle packed 8-bit integers using a shuffle mask.
  92 |       | // Each packed integer in the shuffle mask must be in [0,16).
  93 |       | inline V128 V128_Shuffle(V128 input, V128 shuffle_mask);
  94 |       |
  95 |       | // Constructs a V128 with 16 copies of the char |c|.
  96 |       | inline V128 V128_DupChar(char c);
  97 |       |
  98 |       | #if SNAPPY_HAVE_SSSE3
  99 |       | inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); }
 100 |       |
 101 |       | inline V128 V128_LoadU(const V128* src) { return _mm_loadu_si128(src); }
 102 |       |
 103 |       | inline void V128_StoreU(V128* dst, V128 val) { _mm_storeu_si128(dst, val); }
 104 |       |
 105 |       | inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
 106 |       |   return _mm_shuffle_epi8(input, shuffle_mask);
 107 |       | }
 108 |       |
 109 |       | inline V128 V128_DupChar(char c) { return _mm_set1_epi8(c); }
 110 |       |
 111 |       | #elif SNAPPY_HAVE_NEON
 112 |       | inline V128 V128_Load(const V128* src) {
 113 |       |   return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
 114 |       | }
 115 |       |
 116 |       | inline V128 V128_LoadU(const V128* src) {
 117 |       |   return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
 118 |       | }
 119 |       |
 120 |       | inline void V128_StoreU(V128* dst, V128 val) {
 121 |       |   vst1q_u8(reinterpret_cast<uint8_t*>(dst), val);
 122 |       | }
 123 |       |
 124 |       | inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
 125 |       |   assert(vminvq_u8(shuffle_mask) >= 0 && vmaxvq_u8(shuffle_mask) <= 15);
 126 |       |   return vqtbl1q_u8(input, shuffle_mask);
 127 |       | }
 128 |       |
 129 |       | inline V128 V128_DupChar(char c) { return vdupq_n_u8(c); }
 130 |       |
 131 |       |
 132 |       | #endif
 133 |       | #endif  // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
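
As a quick illustration of the wrapper API above, a hedged sketch that byte-reverses 16 bytes on the SSSE3 path; Reverse16 is a hypothetical helper, and every mask byte respects the documented [0,16) constraint of V128_Shuffle:

// Hypothetical example, not part of the header: reverse 16 bytes using the
// V128 wrappers (SSSE3 build assumed, so V128 is __m128i).
inline void Reverse16(const char* src, char* dst) {
  static const uint8_t kReverseMask[16] = {15, 14, 13, 12, 11, 10, 9, 8,
                                           7,  6,  5,  4,  3,  2,  1, 0};
  V128 mask = V128_LoadU(reinterpret_cast<const V128*>(kReverseMask));
  V128 bytes = V128_LoadU(reinterpret_cast<const V128*>(src));
  V128_StoreU(reinterpret_cast<V128*>(dst), V128_Shuffle(bytes, mask));
}
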
 134 |       |
 135 |       | // Working memory performs a single allocation to hold all scratch space
 136 |       | // required for compression.
 137 |       | class WorkingMemory {
 138 |       |  public:
 139 |       |   explicit WorkingMemory(size_t input_size);
 140 |       |   ~WorkingMemory();
 141 |       |
 142 |       |   // Allocates and clears a hash table using memory in "*this",
 143 |       |   // stores the number of buckets in "*table_size" and returns a pointer to
 144 |       |   // the base of the hash table.
 145 |       |   uint16_t* GetHashTable(size_t fragment_size, int* table_size) const;
 146 |     0 |   char* GetScratchInput() const { return input_; }
 147 |     0 |   char* GetScratchOutput() const { return output_; }
 148 |       |
 149 |       |  private:
 150 |       |   char* mem_;        // the allocated memory, never nullptr
 151 |       |   size_t size_;      // the size of the allocated memory, never 0
 152 |       |   uint16_t* table_;  // the pointer to the hashtable
 153 |       |   char* input_;      // the pointer to the input scratch buffer
 154 |       |   char* output_;     // the pointer to the output scratch buffer
 155 |       |
 156 |       |   // No copying
 157 |       |   WorkingMemory(const WorkingMemory&);
 158 |       |   void operator=(const WorkingMemory&);
 159 |       | };
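
A minimal sketch of the scratch-memory contract documented above (illustrative only; SketchWorkingMemory is not part of the header): GetHashTable() hands back a cleared table carved out of the single allocation and reports its bucket count through *table_size.

// Illustrative only.
inline void SketchWorkingMemory(size_t input_size) {
  WorkingMemory wmem(input_size);
  int table_size = 0;
  uint16_t* table = wmem.GetHashTable(input_size, &table_size);
  // At this point table[0..table_size-1] is zeroed, which is exactly the
  // precondition CompressFragment() states below.
  (void)table;
}
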
 160 |       |
 161 |       | // Flat array compression that does not emit the "uncompressed length"
 162 |       | // prefix. Compresses "input" string to the "*op" buffer.
 163 |       | //
 164 |       | // REQUIRES: "input_length <= kBlockSize"
 165 |       | // REQUIRES: "op" points to an array of memory that is at least
 166 |       | // "MaxCompressedLength(input_length)" in size.
 167 |       | // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
 168 |       | // REQUIRES: "table_size" is a power of two
 169 |       | //
 170 |       | // Returns an "end" pointer into "op" buffer.
 171 |       | // "end - op" is the compressed size of "input".
 172 |       | char* CompressFragment(const char* input,
 173 |       |                        size_t input_length,
 174 |       |                        char* op,
 175 |       |                        uint16_t* table,
 176 |       |                        const int table_size);
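
A hedged sketch of a call that satisfies the REQUIRES clauses above; the wrapper name is hypothetical, and MaxCompressedLength() is the sizing helper from the public snappy.h:

// Hypothetical wrapper, for illustration: `output` must provide at least
// MaxCompressedLength(input_length) bytes and input_length must not exceed
// kBlockSize. The hash table comes back zeroed from WorkingMemory, and the
// returned pointer marks the end of the compressed bytes.
inline size_t SketchCompressFragment(const char* input, size_t input_length,
                                     char* output) {
  WorkingMemory wmem(input_length);
  int table_size = 0;
  uint16_t* table = wmem.GetHashTable(input_length, &table_size);
  char* end = CompressFragment(input, input_length, output, table, table_size);
  return static_cast<size_t>(end - output);  // "end - op" is the compressed size
}
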
 177 |       |
 178 |       | // Find the largest n such that
 179 |       | //
 180 |       | //   s1[0,n-1] == s2[0,n-1]
 181 |       | //   and n <= (s2_limit - s2).
 182 |       | //
 183 |       | // Return make_pair(n, n < 8).
 184 |       | // Does not read *s2_limit or beyond.
 185 |       | // Does not read *(s1 + (s2_limit - s2)) or beyond.
 186 |       | // Requires that s2_limit >= s2.
 187 |       | //
 188 |       | // In addition populate *data with the next 5 bytes from the end of the match.
 189 |       | // This is only done if 8 bytes are available (s2_limit - s2 >= 8). The point is
 190 |       | // that on some architectures this can be done faster in this routine than by a
 191 |       | // subsequent load from s2 + n.
 192 |       | //
 193 |       | // Separate implementation for 64-bit, little-endian CPUs.
 194 |       | // Little-endian RISC-V also selects this faster routine.
 195 |       | #if !SNAPPY_IS_BIG_ENDIAN && \
 196 |       |     (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || \
 197 |       |      defined(ARCH_ARM) || defined(__riscv))
 198 |       | static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
 199 |       |                                                       const char* s2,
 200 |       |                                                       const char* s2_limit,
 201 |     0 |                                                       uint64_t* data) {
 202 |     0 |   assert(s2_limit >= s2);
 203 |     0 |   size_t matched = 0;
 204 |       |
 205 |       |   // This block isn't necessary for correctness; we could just start looping
 206 |       |   // immediately.  As an optimization though, it is useful.  It creates some not
 207 |       |   // uncommon code paths that determine, without extra effort, whether the match
 208 |       |   // length is less than 8.  In short, we are hoping to avoid a conditional
 209 |       |   // branch, and perhaps get better code layout from the C++ compiler.
 210 |     0 |   if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
 211 |     0 |     uint64_t a1 = UNALIGNED_LOAD64(s1);
 212 |     0 |     uint64_t a2 = UNALIGNED_LOAD64(s2);
 213 |     0 |     if (SNAPPY_PREDICT_TRUE(a1 != a2)) {
 214 |       |       // This code is critical for performance. The reason is that it determines
 215 |       |       // how much to advance `ip` (s2). This obviously depends on both the loads
 216 |       |       // from the `candidate` (s1) and `ip`. Furthermore the next `candidate`
 217 |       |       // depends on the advanced `ip` calculated here through a load, hash and
 218 |       |       // new candidate hash lookup (a lot of cycles). This makes s1 (i.e.
 219 |       |       // `candidate`) the variable that limits throughput. This is the reason we
 220 |       |       // go through hoops to have this function update `data` for the next iter.
 221 |       |       // The straightforward code would use *data, given by
 222 |       |       //
 223 |       |       // *data = UNALIGNED_LOAD64(s2 + matched_bytes) (latency of 5 cycles),
 224 |       |       //
 225 |       |       // as input for the hash table lookup to find the next candidate. However
 226 |       |       // this forces the load onto the data dependency chain of s1, because
 227 |       |       // matched_bytes directly depends on s1. However matched_bytes is 0..7, so
 228 |       |       // we can also calculate *data by
 229 |       |       //
 230 |       |       // *data = AlignRight(UNALIGNED_LOAD64(s2), UNALIGNED_LOAD64(s2 + 8),
 231 |       |       //                    matched_bytes);
 232 |       |       //
 233 |       |       // The loads do not depend on s1 anymore and are thus off the bottleneck.
 234 |       |       // The straightforward implementation on x86_64 would be to use
 235 |       |       //
 236 |       |       // shrd rax, rdx, cl  (cl being matched_bytes * 8)
 237 |       |       //
 238 |       |       // but unfortunately shrd with a variable shift has a 4 cycle latency, so this
 239 |       |       // only wins 1 cycle. The BMI2 shrx instruction is a 1 cycle variable
 240 |       |       // shift instruction but can only shift 64 bits. If we focus on just
 241 |       |       // obtaining the least significant 4 bytes, we can obtain this by
 242 |       |       //
 243 |       |       // *data = ConditionalMove(matched_bytes < 4, UNALIGNED_LOAD64(s2),
 244 |       |       //     UNALIGNED_LOAD64(s2 + 4) >> ((matched_bytes & 3) * 8));
 245 |       |       //
 246 |       |       // Written like the above this is not a big win: the conditional move would be
 247 |       |       // a cmp followed by a cmov (2 cycles) followed by a shift (1 cycle).
 248 |       |       // However matched_bytes < 4 is equal to
 249 |       |       // static_cast<uint32_t>(xorval) != 0. Written that way, the conditional
 250 |       |       // move (2 cycles) can execute in parallel with FindLSBSetNonZero64
 251 |       |       // (tzcnt), which takes 3 cycles.
 252 |     0 |       uint64_t xorval = a1 ^ a2;
 253 |     0 |       int shift = Bits::FindLSBSetNonZero64(xorval);
 254 |     0 |       size_t matched_bytes = shift >> 3;
 255 |     0 |       uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
 256 |       | #ifndef __x86_64__
 257 |       |       a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
 258 |       | #else
 259 |       |       // Ideally this would just be
 260 |       |       //
 261 |       |       // a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
 262 |       |       //
 263 |       |       // However clang correctly infers that the above statement participates in
 264 |       |       // a critical data dependency chain and thus, unfortunately, refuses to
 265 |       |       // use a conditional move (it's tuned to cut data dependencies). In this
 266 |       |       // case there is a longer parallel chain anyway AND this will be fairly
 267 |       |       // unpredictable.
 268 |     0 |       asm("testl %k2, %k2\n\t"
 269 |     0 |           "cmovzq %1, %0\n\t"
 270 |     0 |           : "+r"(a2)
 271 |     0 |           : "r"(a3), "r"(xorval)
 272 |     0 |           : "cc");
 273 |     0 | #endif
 274 |     0 |       *data = a2 >> (shift & (3 * 8));
 275 |     0 |       return std::pair<size_t, bool>(matched_bytes, true);
 276 |     0 |     } else {
 277 |     0 |       matched = 8;
 278 |     0 |       s2 += 8;
 279 |     0 |     }
 280 |     0 |   }
 281 |     0 |   SNAPPY_PREFETCH(s1 + 64);
 282 |     0 |   SNAPPY_PREFETCH(s2 + 64);
 283 |       |
 284 |       |   // Find out how long the match is. We loop over the data 64 bits at a
 285 |       |   // time until we find a 64-bit block that doesn't match; then we find
 286 |       |   // the first non-matching bit and use that to calculate the total
 287 |       |   // length of the match.
 288 |     0 |   while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
 289 |     0 |     uint64_t a1 = UNALIGNED_LOAD64(s1 + matched);
 290 |     0 |     uint64_t a2 = UNALIGNED_LOAD64(s2);
 291 |     0 |     if (a1 == a2) {
 292 |     0 |       s2 += 8;
 293 |     0 |       matched += 8;
 294 |     0 |     } else {
 295 |     0 |       uint64_t xorval = a1 ^ a2;
 296 |     0 |       int shift = Bits::FindLSBSetNonZero64(xorval);
 297 |     0 |       size_t matched_bytes = shift >> 3;
 298 |     0 |       uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
 299 |       | #ifndef __x86_64__
 300 |       |       a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
 301 |       | #else
 302 |     0 |       asm("testl %k2, %k2\n\t"
 303 |     0 |           "cmovzq %1, %0\n\t"
 304 |     0 |           : "+r"(a2)
 305 |     0 |           : "r"(a3), "r"(xorval)
 306 |     0 |           : "cc");
 307 |     0 | #endif
 308 |     0 |       *data = a2 >> (shift & (3 * 8));
 309 |     0 |       matched += matched_bytes;
 310 |     0 |       assert(matched >= 8);
 311 |     0 |       return std::pair<size_t, bool>(matched, false);
 312 |     0 |     }
 313 |     0 |   }
 314 |     0 |   while (SNAPPY_PREDICT_TRUE(s2 < s2_limit)) {
 315 |     0 |     if (s1[matched] == *s2) {
 316 |     0 |       ++s2;
 317 |     0 |       ++matched;
 318 |     0 |     } else {
 319 |     0 |       if (s2 <= s2_limit - 8) {
 320 |     0 |         *data = UNALIGNED_LOAD64(s2);
 321 |     0 |       }
 322 |     0 |       return std::pair<size_t, bool>(matched, matched < 8);
 323 |     0 |     }
 324 |     0 |   }
 325 |     0 |   return std::pair<size_t, bool>(matched, matched < 8);
 326 |     0 | }
 327 |       | #else
 328 |       | static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
 329 |       |                                                       const char* s2,
 330 |       |                                                       const char* s2_limit,
 331 |       |                                                       uint64_t* data) {
 332 |       |   // Implementation based on the x86-64 version, above.
 333 |       |   assert(s2_limit >= s2);
 334 |       |   int matched = 0;
 335 |       |
 336 |       |   while (s2 <= s2_limit - 4 &&
 337 |       |          UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
 338 |       |     s2 += 4;
 339 |       |     matched += 4;
 340 |       |   }
 341 |       |   if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
 342 |       |     uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
 343 |       |     int matching_bits = Bits::FindLSBSetNonZero(x);
 344 |       |     matched += matching_bits >> 3;
 345 |       |     s2 += matching_bits >> 3;
 346 |       |   } else {
 347 |       |     while ((s2 < s2_limit) && (s1[matched] == *s2)) {
 348 |       |       ++s2;
 349 |       |       ++matched;
 350 |       |     }
 351 |       |   }
 352 |       |   if (s2 <= s2_limit - 8) *data = LittleEndian::Load64(s2);
 353 |       |   return std::pair<size_t, bool>(matched, matched < 8);
 354 |       | }
 355 |       | #endif
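
To make the contract concrete, a small hedged example of the documented behaviour (buffer contents invented for illustration; assumes the declarations above are in scope and the fast path's s2_limit - s2 >= 16 precondition for populating *data holds):

// Illustrative only.
inline void SketchFindMatchLength() {
  const char s1[] = "0123456789ABCDEFXXXXXXXX";
  const char s2[] = "0123456789abcdefxxxxxxxx";  // diverges after 10 bytes
  uint64_t data = 0;
  std::pair<size_t, bool> r =
      FindMatchLength(s1, s2, s2 + sizeof(s2) - 1, &data);
  assert(r.first == 10);  // n, the longest common prefix
  assert(!r.second);      // second is (n < 8), false here since n >= 8
  // The low bytes of `data` hold the input starting at s2 + n, i.e. the
  // "next 5 bytes from the end of the match".
  assert(static_cast<char>(data & 0xff) == 'a');
}
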
 356 |       |
 357 |       | static inline size_t FindMatchLengthPlain(const char* s1, const char* s2,
 358 |     0 |                                           const char* s2_limit) {
 359 |       |   // Implementation based on the x86-64 version, above.
 360 |     0 |   assert(s2_limit >= s2);
 361 |     0 |   int matched = 0;
 362 |       |
 363 |     0 |   while (s2 <= s2_limit - 8 &&
 364 |     0 |          UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
 365 |     0 |     s2 += 8;
 366 |     0 |     matched += 8;
 367 |     0 |   }
 368 |     0 |   if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 8) {
 369 |     0 |     uint64_t x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
 370 |     0 |     int matching_bits = Bits::FindLSBSetNonZero64(x);
 371 |     0 |     matched += matching_bits >> 3;
 372 |     0 |     s2 += matching_bits >> 3;
 373 |     0 |   } else {
 374 |     0 |     while ((s2 < s2_limit) && (s1[matched] == *s2)) {
 375 |     0 |       ++s2;
 376 |     0 |       ++matched;
 377 |     0 |     }
 378 |     0 |   }
 379 |     0 |   return matched;
 380 |     0 | }
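
The `matching_bits >> 3` step above turns the bit index of the first mismatch into a count of whole matching bytes, because on a little-endian load the low-order bytes are the earlier bytes in memory. A tiny worked example with invented values:

// If bytes 0..2 of the two 8-byte windows match and byte 3 differs, the XOR
// is zero in bits 0..23 and nonzero somewhere in bits 24..31.
// uint64_t x = 0x0000000004000000ull;               // lowest set bit is bit 26
// int matching_bits = Bits::FindLSBSetNonZero64(x); // == 26
// size_t whole_bytes = matching_bits >> 3;          // == 3 matched bytes
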
 381 |       |
 382 |       | // Lookup tables for decompression code.  Give --snappy_dump_decompression_table
 383 |       | // to the unit test to recompute char_table.
 384 |       |
 385 |       | enum {
 386 |       |   LITERAL = 0,
 387 |       |   COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
 388 |       |   COPY_2_BYTE_OFFSET = 2,
 389 |       |   COPY_4_BYTE_OFFSET = 3
 390 |       | };
 391 |       | static const int kMaximumTagLength = 5;  // COPY_4_BYTE_OFFSET plus the actual offset.
 392 |       |
 393 |       | // Data stored per entry in lookup table:
 394 |       | //      Range   Bits-used       Description
 395 |       | //      ------------------------------------
 396 |       | //      1..64   0..7            Literal/copy length encoded in opcode byte
 397 |       | //      0..7    8..10           Copy offset encoded in opcode byte / 256
 398 |       | //      0..4    11..13          Extra bytes after opcode
 399 |       | //
 400 |       | // We use eight bits for the length even though 7 would have sufficed,
 401 |       | // for efficiency reasons:
 402 |       | //      (1) Extracting a byte is faster than a bit-field
 403 |       | //      (2) It properly aligns the copy offset so we do not need a <<8
 404 |       | static constexpr uint16_t char_table[256] = {
 405 |       |     // clang-format off
 406 |       |   0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
 407 |       |   0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
 408 |       |   0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
 409 |       |   0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
 410 |       |   0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
 411 |       |   0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
 412 |       |   0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
 413 |       |   0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
 414 |       |   0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
 415 |       |   0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
 416 |       |   0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
 417 |       |   0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
 418 |       |   0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
 419 |       |   0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
 420 |       |   0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
 421 |       |   0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
 422 |       |   0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
 423 |       |   0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
 424 |       |   0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
 425 |       |   0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
 426 |       |   0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
 427 |       |   0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
 428 |       |   0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
 429 |       |   0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
 430 |       |   0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
 431 |       |   0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
 432 |       |   0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
 433 |       |   0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
 434 |       |   0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
 435 |       |   0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
 436 |       |   0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
 437 |       |   0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040,
 438 |       |     // clang-format on
 439 |       | };
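
Unpacking an entry according to the bit layout documented above (bits 0..7 = length, bits 8..10 = offset-from-opcode, bits 11..13 = extra bytes after the opcode); DecodeEntry is a hypothetical helper added purely to illustrate the packing:

// Hypothetical helper, not part of the header.
struct TagInfo {
  uint8_t length;       // 1..64: literal/copy length encoded in the opcode byte
  uint8_t offset_high;  // 0..7: copy offset / 256 encoded in the opcode byte
  uint8_t extra_bytes;  // 0..4: bytes that follow the opcode
};

inline TagInfo DecodeEntry(uint16_t entry) {
  return {static_cast<uint8_t>(entry & 0xff),
          static_cast<uint8_t>((entry >> 8) & 0x7),
          static_cast<uint8_t>((entry >> 11) & 0x7)};
}

// Example: char_table[0x01] == 0x0804 decodes to length 4, offset_high 0,
// extra_bytes 1 -- a COPY_1_BYTE_OFFSET tag followed by one offset byte.
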
 440 |       |
 441 |       | }  // end namespace internal
 442 |       | }  // end namespace snappy
 443 |       |
 444 |       | #endif  // THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_