Coverage Report

Created: 2025-07-11 06:37

/src/abseil-cpp/absl/hash/internal/hash.h
Line
Count
Source
1
// Copyright 2018 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// -----------------------------------------------------------------------------
16
// File: hash.h
17
// -----------------------------------------------------------------------------
18
//
19
#ifndef ABSL_HASH_INTERNAL_HASH_H_
20
#define ABSL_HASH_INTERNAL_HASH_H_
21
22
#ifdef __APPLE__
23
#include <Availability.h>
24
#include <TargetConditionals.h>
25
#endif
26
27
// We include config.h here to make sure that ABSL_INTERNAL_CPLUSPLUS_LANG is
28
// defined.
29
#include "absl/base/config.h"
30
31
// GCC15 warns that <ciso646> is deprecated in C++17 and suggests using
32
// <version> instead, even though <version> is not available in C++17 mode prior
33
// to GCC9.
34
#if defined(__has_include)
35
#if __has_include(<version>)
36
#define ABSL_INTERNAL_VERSION_HEADER_AVAILABLE 1
37
#endif
38
#endif
39
40
// For feature testing and determining which headers can be included.
41
#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L || \
42
    defined(ABSL_INTERNAL_VERSION_HEADER_AVAILABLE)
43
#include <version>
44
#else
45
#include <ciso646>
46
#endif
47
48
#undef ABSL_INTERNAL_VERSION_HEADER_AVAILABLE
49
50
#include <algorithm>
51
#include <array>
52
#include <bitset>
53
#include <cassert>
54
#include <cmath>
55
#include <cstddef>
56
#include <cstdint>
57
#include <cstring>
58
#include <deque>
59
#include <forward_list>
60
#include <functional>
61
#include <iterator>
62
#include <limits>
63
#include <list>
64
#include <map>
65
#include <memory>
66
#include <set>
67
#include <string>
68
#include <string_view>
69
#include <tuple>
70
#include <type_traits>
71
#include <unordered_map>
72
#include <unordered_set>
73
#include <utility>
74
#include <vector>
75
76
#include "absl/base/attributes.h"
77
#include "absl/base/internal/unaligned_access.h"
78
#include "absl/base/optimization.h"
79
#include "absl/base/port.h"
80
#include "absl/container/fixed_array.h"
81
#include "absl/hash/internal/city.h"
82
#include "absl/hash/internal/weakly_mixed_integer.h"
83
#include "absl/meta/type_traits.h"
84
#include "absl/numeric/bits.h"
85
#include "absl/numeric/int128.h"
86
#include "absl/strings/string_view.h"
87
#include "absl/types/optional.h"
88
#include "absl/types/variant.h"
89
#include "absl/utility/utility.h"
90
91
#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L
92
#include <filesystem>  // NOLINT
93
#endif
94
95
namespace absl {
96
ABSL_NAMESPACE_BEGIN
97
98
class HashState;
99
100
namespace hash_internal {
101
102
// Internal detail: Large buffers are hashed in smaller chunks.  This function
103
// returns the size of these chunks.
104
8.00M
constexpr size_t PiecewiseChunkSize() { return 1024; }
105
106
// PiecewiseCombiner is an internal-only helper class for hashing a piecewise
107
// buffer of `char` or `unsigned char` as though it were contiguous.  This class
108
// provides two methods:
109
//
110
//   H add_buffer(state, data, size)
111
//   H finalize(state)
112
//
113
// `add_buffer` can be called zero or more times, followed by a single call to
114
// `finalize`.  This will produce the same hash expansion as concatenating each
115
// buffer piece into a single contiguous buffer, and passing this to
116
// `H::combine_contiguous`.
117
//
118
//  Example usage:
119
//    PiecewiseCombiner combiner;
120
//    for (const auto& piece : pieces) {
121
//      state = combiner.add_buffer(std::move(state), piece.data, piece.size);
122
//    }
123
//    return combiner.finalize(std::move(state));
124
class PiecewiseCombiner {
125
 public:
126
  PiecewiseCombiner() = default;
127
  PiecewiseCombiner(const PiecewiseCombiner&) = delete;
128
  PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete;
129
130
  // Appends the given range of bytes to the sequence to be hashed, which may
131
  // modify the provided hash state.
132
  template <typename H>
133
  H add_buffer(H state, const unsigned char* data, size_t size);
134
  template <typename H>
135
0
  H add_buffer(H state, const char* data, size_t size) {
136
0
    return add_buffer(std::move(state),
137
0
                      reinterpret_cast<const unsigned char*>(data), size);
138
0
  }
139
140
  // Finishes combining the hash sequence, which may modify the provided
141
  // hash state.
142
  //
143
  // Once finalize() is called, add_buffer() may no longer be called. The
144
  // resulting hash state will be the same as if the pieces passed to
145
  // add_buffer() were concatenated into a single flat buffer, and then provided
146
  // to H::combine_contiguous().
147
  template <typename H>
148
  H finalize(H state);
149
150
 private:
151
  unsigned char buf_[PiecewiseChunkSize()];
152
  size_t position_ = 0;
153
  bool added_something_ = false;
154
};
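
A minimal caller-side sketch of the guarantee documented above: hashing pieces
through PiecewiseCombiner expands identically to hashing the concatenated bytes
with H::combine_contiguous. Illustration only, not part of hash.h; the
`ChunkedBuffer` type is hypothetical, and only `add_buffer` and `finalize` come
from the class above.

    struct ChunkedBuffer {
      std::vector<std::string> chunks;
    };

    template <typename H>
    H AbslHashValue(H state, const ChunkedBuffer& buf) {
      PiecewiseCombiner combiner;
      for (const std::string& piece : buf.chunks) {
        // Feed each piece; the hash expansion matches one contiguous buffer.
        state = combiner.add_buffer(std::move(state), piece.data(),
                                    piece.size());
      }
      return combiner.finalize(std::move(state));
    }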
155
156
// Trait class which returns true if T is hashable by the absl::Hash framework.
157
// Used for the AbslHashValue implementations for composite types below.
158
template <typename T>
159
struct is_hashable;
160
161
// HashStateBase is an internal implementation detail that contains common
162
// implementation details for all of the "hash state objects" generated
163
// by Abseil.  This is not a public API; users should not create classes that
164
// inherit from this.
165
//
166
// A hash state object is the template argument `H` passed to `AbslHashValue`.
167
// It represents an intermediate state in the computation of an unspecified hash
168
// algorithm. `HashStateBase` provides a CRTP style base class for hash state
169
// implementations. Developers adding type support for `absl::Hash` should not
170
// rely on any parts of the state object other than the following member
171
// functions:
172
//
173
//   * HashStateBase::combine()
174
//   * HashStateBase::combine_contiguous()
175
//   * HashStateBase::combine_unordered()
176
//
177
// A derived hash state class of type `H` must provide a public member function
178
// with a signature similar to the following:
179
//
180
//    `static H combine_contiguous(H state, const unsigned char*, size_t)`.
181
//
182
// It must also provide a private template method named RunCombineUnordered.
183
//
184
// A "consumer" is a 1-arg functor returning void.  Its argument is a reference
185
// to an inner hash state object, and it may be called multiple times.  When
186
// called, the functor consumes the entropy from the provided state object,
187
// and resets that object to its empty state.
188
//
189
// A "combiner" is a stateless 2-arg functor returning void.  Its arguments are
190
// an inner hash state object and an ElementStateConsumer functor.  A combiner
191
// uses the provided inner hash state object to hash each element of the
192
// container, passing the inner hash state object to the consumer after hashing
193
// each element.
194
//
195
// Given these definitions, a derived hash state class of type H
196
// must provide a private template method with a signature similar to the
197
// following:
198
//
199
//    `template <typename CombinerT>`
200
//    `static H RunCombineUnordered(H outer_state, CombinerT combiner)`
201
//
202
// This function is responsible for constructing the inner state object and
203
// providing a consumer to the combiner.  It uses side effects of the consumer
204
// and combiner to mix the state of each element in an order-independent manner,
205
// and uses this to return an updated value of `outer_state`.
206
//
207
// This inside-out approach generates efficient object code in the normal case,
208
// but allows us to use stack storage to implement the absl::HashState type
209
// erasure mechanism (avoiding heap allocations while hashing).
210
//
211
// `HashStateBase` will provide a complete implementation for a hash state
212
// object in terms of these two methods.
213
//
214
// Example:
215
//
216
//   // Use CRTP to define your derived class.
217
//   struct MyHashState : HashStateBase<MyHashState> {
218
//       static H combine_contiguous(H state, const unsigned char*, size_t);
219
//       using MyHashState::HashStateBase::combine;
220
//       using MyHashState::HashStateBase::combine_contiguous;
221
//       using MyHashState::HashStateBase::combine_unordered;
222
//     private:
223
//       template <typename CombinerT>
224
//       static H RunCombineUnordered(H state, CombinerT combiner);
225
//   };
226
template <typename H>
227
class HashStateBase {
228
 public:
229
  // Combines an arbitrary number of values into a hash state, returning the
230
  // updated state.
231
  //
232
  // Each of the value types `T` must be separately hashable by the Abseil
233
  // hashing framework.
234
  //
235
  // NOTE:
236
  //
237
  //   state = H::combine(std::move(state), value1, value2, value3);
238
  //
239
  // is guaranteed to produce the same hash expansion as:
240
  //
241
  //   state = H::combine(std::move(state), value1);
242
  //   state = H::combine(std::move(state), value2);
243
  //   state = H::combine(std::move(state), value3);
244
  template <typename T, typename... Ts>
245
  static H combine(H state, const T& value, const Ts&... values);
246
4.99M
  static H combine(H state) { return state; }
247
248
  // Combines a contiguous array of `size` elements into a hash state, returning
249
  // the updated state.
250
  //
251
  // NOTE:
252
  //
253
  //   state = H::combine_contiguous(std::move(state), data, size);
254
  //
255
  // is NOT guaranteed to produce the same hash expansion as a for-loop (it may
256
  // perform internal optimizations).  If you need this guarantee, use the
257
  // for-loop instead.
258
  template <typename T>
259
  static H combine_contiguous(H state, const T* data, size_t size);
260
261
  template <typename I>
262
  static H combine_unordered(H state, I begin, I end);
263
264
  using AbslInternalPiecewiseCombiner = PiecewiseCombiner;
265
266
  template <typename T>
267
  using is_hashable = absl::hash_internal::is_hashable<T>;
268
269
 private:
270
  // Common implementation of the iteration step of a "combiner", as described
271
  // above.
272
  template <typename I>
273
  struct CombineUnorderedCallback {
274
    I begin;
275
    I end;
276
277
    template <typename InnerH, typename ElementStateConsumer>
278
    void operator()(InnerH inner_state, ElementStateConsumer cb) {
279
      for (; begin != end; ++begin) {
280
        inner_state = H::combine(std::move(inner_state), *begin);
281
        cb(inner_state);
282
      }
283
    }
284
  };
285
};
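
A minimal sketch of one way the RunCombineUnordered contract described above
can be met (illustration only, not the library's actual implementation;
`MyHashState` and its `value()` accessor are hypothetical). The consumer folds
each element's entropy into a commutative accumulator and resets the inner
state, so the result is independent of element order.

    template <typename CombinerT>
    static MyHashState RunCombineUnordered(MyHashState outer_state,
                                           CombinerT combiner) {
      uint64_t unordered = 0;
      combiner(MyHashState{}, [&](MyHashState& inner_state) {
        unordered += inner_state.value();  // commutative: order-independent
        inner_state = MyHashState{};       // reset to the empty state
      });
      return MyHashState::combine(std::move(outer_state), unordered);
    }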
286
287
// `is_uniquely_represented<T>` is a trait class that indicates whether `T`
288
// is uniquely represented.
289
//
290
// A type is "uniquely represented" if two equal values of that type are
291
// guaranteed to have the same bytes in their underlying storage. In other
292
// words, if `a == b`, then `memcmp(&a, &b, sizeof(T))` is guaranteed to be
293
// zero. This property cannot be detected automatically, so this trait is false
294
// by default, but can be specialized by types that wish to assert that they are
295
// uniquely represented. This makes them eligible for certain optimizations.
296
//
297
// If you have any doubt whatsoever, do not specialize this template.
298
// The default is completely safe, and merely disables some optimizations
299
// that will not matter for most types. Specializing this template,
300
// on the other hand, can be very hazardous.
301
//
302
// To be uniquely represented, a type must not have multiple ways of
303
// representing the same value; for example, float and double are not
304
// uniquely represented, because they have distinct representations for
305
// +0 and -0. Furthermore, the type's byte representation must consist
306
// solely of user-controlled data, with no padding bits and no compiler-
307
// controlled data such as vptrs or sanitizer metadata. This is usually
308
// very difficult to guarantee, because in most cases the compiler can
309
// insert data and padding bits at its own discretion.
310
//
311
// If you specialize this template for a type `T`, you must do so in the file
312
// that defines that type (or in this file). If you define that specialization
313
// anywhere else, `is_uniquely_represented<T>` could have different meanings
314
// in different places.
315
//
316
// The Enable parameter is meaningless; it is provided as a convenience,
317
// to support certain SFINAE techniques when defining specializations.
318
template <typename T, typename Enable = void>
319
struct is_uniquely_represented : std::false_type {};
320
321
// unsigned char is a synonym for "byte", so it is guaranteed to be
322
// uniquely represented.
323
template <>
324
struct is_uniquely_represented<unsigned char> : std::true_type {};
325
326
// is_uniquely_represented for non-standard integral types
327
//
328
// Integral types other than bool should be uniquely represented on any
329
// platform that this will plausibly be ported to.
330
template <typename Integral>
331
struct is_uniquely_represented<
332
    Integral, typename std::enable_if<std::is_integral<Integral>::value>::type>
333
    : std::true_type {};
334
335
template <>
336
struct is_uniquely_represented<bool> : std::false_type {};
337
338
#ifdef ABSL_HAVE_INTRINSIC_INT128
339
// Specialize the trait for GNU extension types.
340
template <>
341
struct is_uniquely_represented<__int128> : std::true_type {};
342
template <>
343
struct is_uniquely_represented<unsigned __int128> : std::true_type {};
344
#endif  // ABSL_HAVE_INTRINSIC_INT128
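
A sketch of what a user specialization looks like for a type whose author has
verified the conditions above (the type is hypothetical and shown for
illustration only; per the note above, the specialization belongs in namespace
absl::hash_internal, in the file that defines the type or in this file):

    struct RgbaPixel {
      unsigned char r, g, b, a;  // 4 bytes, no padding, no vptr
      friend bool operator==(RgbaPixel x, RgbaPixel y) {
        return x.r == y.r && x.g == y.g && x.b == y.b && x.a == y.a;
      }
    };

    template <>
    struct is_uniquely_represented<RgbaPixel> : std::true_type {};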
345
346
template <typename T>
347
struct FitsIn64Bits : std::integral_constant<bool, sizeof(T) <= 8> {};
348
349
struct CombineRaw {
350
  template <typename H>
351
2.28M
  H operator()(H state, uint64_t value) const {
352
2.28M
    return H::combine_raw(std::move(state), value);
353
2.28M
  }
354
};
355
356
// For use in `raw_hash_set` to pass a seed to the hash function.
357
struct HashWithSeed {
358
  template <typename Hasher, typename T>
359
0
  size_t hash(const Hasher& hasher, const T& value, size_t seed) const {
360
0
    // NOLINTNEXTLINE(clang-diagnostic-sign-conversion)
361
0
    return hasher.hash_with_seed(value, seed);
362
0
  }
363
};
364
365
// Convenience function that combines `hash_state` with the byte representation
366
// of `value`.
367
template <typename H, typename T,
368
          absl::enable_if_t<FitsIn64Bits<T>::value, int> = 0>
369
819k
H hash_bytes(H hash_state, const T& value) {
370
819k
  const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
371
819k
  uint64_t v;
372
  if constexpr (sizeof(T) == 1) {
373
    v = *start;
374
  } else if constexpr (sizeof(T) == 2) {
375
    v = absl::base_internal::UnalignedLoad16(start);
376
598k
  } else if constexpr (sizeof(T) == 4) {
377
598k
    v = absl::base_internal::UnalignedLoad32(start);
378
598k
  } else {
379
220k
    static_assert(sizeof(T) == 8);
380
220k
    v = absl::base_internal::UnalignedLoad64(start);
381
220k
  }
382
819k
  return CombineRaw()(std::move(hash_state), v);
383
819k
}
384
template <typename H, typename T,
385
          absl::enable_if_t<!FitsIn64Bits<T>::value, int> = 0>
386
H hash_bytes(H hash_state, const T& value) {
387
  const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
388
  return H::combine_contiguous(std::move(hash_state), start, sizeof(value));
389
}
390
391
template <typename H>
392
H hash_weakly_mixed_integer(H hash_state, WeaklyMixedInteger value) {
393
  return H::combine_weakly_mixed_integer(std::move(hash_state), value);
394
}
395
396
// -----------------------------------------------------------------------------
397
// AbslHashValue for Basic Types
398
// -----------------------------------------------------------------------------
399
400
// Note: Default `AbslHashValue` implementations live in `hash_internal`. This
401
// allows us to block lexical scope lookup when doing an unqualified call to
402
// `AbslHashValue` below. User-defined implementations of `AbslHashValue` can
403
// only be found via ADL.
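
The ADL pattern that note refers to, as a sketch (hypothetical user type, not
part of this file): the type defines AbslHashValue in its own namespace,
typically as a hidden friend, and absl::Hash finds it through
argument-dependent lookup.

    namespace myproject {
    struct Point {
      int x = 0;
      int y = 0;

      template <typename H>
      friend H AbslHashValue(H state, const Point& p) {
        return H::combine(std::move(state), p.x, p.y);
      }
    };
    }  // namespace myproject

    // size_t h = absl::Hash<myproject::Point>{}(myproject::Point{1, 2});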
404
405
// AbslHashValue() for hashing bool values
406
//
407
// We use SFINAE to ensure that this overload only accepts bool, not types that
408
// are convertible to bool.
409
template <typename H, typename B>
410
typename std::enable_if<std::is_same<B, bool>::value, H>::type AbslHashValue(
411
    H hash_state, B value) {
412
  // We use ~size_t{} instead of 1 so that all bits are different between
413
  // true and false, rather than only the lowest bit.
414
  return H::combine(std::move(hash_state),
415
                    static_cast<size_t>(value ? ~size_t{} : 0));
416
}
417
418
// AbslHashValue() for hashing enum values
419
template <typename H, typename Enum>
420
typename std::enable_if<std::is_enum<Enum>::value, H>::type AbslHashValue(
421
    H hash_state, Enum e) {
422
  // In practice, we could almost certainly just invoke hash_bytes directly,
423
  // but it's possible that a sanitizer might one day want to
424
  // store data in the unused bits of an enum. To avoid that risk, we
425
  // convert to the underlying type before hashing. Hopefully this will get
426
  // optimized away; if not, we can reopen discussion with c-toolchain-team.
427
  return H::combine(std::move(hash_state),
428
                    static_cast<typename std::underlying_type<Enum>::type>(e));
429
}
430
// AbslHashValue() for hashing floating-point values
431
template <typename H, typename Float>
432
typename std::enable_if<std::is_same<Float, float>::value ||
433
                            std::is_same<Float, double>::value,
434
                        H>::type
435
AbslHashValue(H hash_state, Float value) {
436
  return hash_internal::hash_bytes(std::move(hash_state),
437
                                   value == 0 ? 0 : value);
438
}
439
440
// Long double has the property that it might have extra unused bytes in it.
441
// For example, on x86 sizeof(long double)==16 but it only really uses 80 bits
442
// of it. This means we can't use hash_bytes on a long double and have to
443
// convert it to something else first.
444
template <typename H, typename LongDouble>
445
typename std::enable_if<std::is_same<LongDouble, long double>::value, H>::type
446
AbslHashValue(H hash_state, LongDouble value) {
447
  const int category = std::fpclassify(value);
448
  switch (category) {
449
    case FP_INFINITE:
450
      // Add the sign bit to differentiate between +Inf and -Inf
451
      hash_state = H::combine(std::move(hash_state), std::signbit(value));
452
      break;
453
454
    case FP_NAN:
455
    case FP_ZERO:
456
    default:
457
      // Category is enough for these.
458
      break;
459
460
    case FP_NORMAL:
461
    case FP_SUBNORMAL:
462
      // We can't convert `value` directly to double because this would have
463
      // undefined behavior if the value is out of range.
464
      // std::frexp gives us a value in the range (-1, -.5] or [.5, 1) that is
465
      // guaranteed to be in range for `double`. The truncation is
466
      // implementation defined, but that works as long as it is deterministic.
467
      int exp;
468
      auto mantissa = static_cast<double>(std::frexp(value, &exp));
469
      hash_state = H::combine(std::move(hash_state), mantissa, exp);
470
  }
471
472
  return H::combine(std::move(hash_state), category);
473
}
474
475
// Without this overload, an array decays to a pointer and we hash that, which
476
// is not likely to be what the caller intended.
477
template <typename H, typename T, size_t N>
478
H AbslHashValue(H hash_state, T (&)[N]) {
479
  static_assert(
480
      sizeof(T) == -1,
481
      "Hashing C arrays is not allowed. For string literals, wrap the literal "
482
      "in absl::string_view(). To hash the array contents, use "
483
      "absl::MakeSpan() or make the array an std::array. To hash the array "
484
      "address, use &array[0].");
485
  return hash_state;
486
}
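
A caller-side sketch of what the error message above steers users toward
(illustration only; assumes absl/hash/hash.h and absl/types/span.h are
included):

    int nums[3] = {1, 2, 3};
    // Hash the array contents rather than its address:
    size_t contents = absl::Hash<absl::Span<const int>>{}(absl::MakeSpan(nums));
    // Hash a string literal as text:
    size_t text = absl::Hash<absl::string_view>{}(absl::string_view("foo"));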
487
488
// AbslHashValue() for hashing pointers
489
template <typename H, typename T>
490
std::enable_if_t<std::is_pointer<T>::value, H> AbslHashValue(H hash_state,
491
                                                             T ptr) {
492
  auto v = reinterpret_cast<uintptr_t>(ptr);
493
  // Due to alignment, pointers tend to have low bits as zero, and the next few
494
  // bits follow a pattern since they are also multiples of some base value.
495
  // The PointerAlignment test verifies that our mixing is good enough to handle
496
  // these cases.
497
  return H::combine(std::move(hash_state), v);
498
}
499
500
// AbslHashValue() for hashing nullptr_t
501
template <typename H>
502
H AbslHashValue(H hash_state, std::nullptr_t) {
503
  return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
504
}
505
506
// AbslHashValue() for hashing pointers-to-member
507
template <typename H, typename T, typename C>
508
H AbslHashValue(H hash_state, T C::*ptr) {
509
  auto salient_ptm_size = [](std::size_t n) -> std::size_t {
510
#if defined(_MSC_VER)
511
    // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,
512
    // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain
513
    // padding (namely when they have 1 or 3 ints). The value below is a lower
514
    // bound on the number of salient, non-padding bytes that we use for
515
    // hashing.
516
    if constexpr (alignof(T C::*) == alignof(int)) {
517
      // No padding when all subobjects have the same size as the total
518
      // alignment. This happens in 32-bit mode.
519
      return n;
520
    } else {
521
      // Padding for 1 int (size 16) or 3 ints (size 24).
522
      // With 2 ints, the size is 16 with no padding, which we pessimize.
523
      return n == 24 ? 20 : n == 16 ? 12 : n;
524
    }
525
#else
526
  // On other platforms, we assume that pointers-to-members do not have
527
  // padding.
528
#ifdef __cpp_lib_has_unique_object_representations
529
    static_assert(std::has_unique_object_representations<T C::*>::value);
530
#endif  // __cpp_lib_has_unique_object_representations
531
    return n;
532
#endif
533
  };
534
  return H::combine_contiguous(std::move(hash_state),
535
                               reinterpret_cast<unsigned char*>(&ptr),
536
                               salient_ptm_size(sizeof ptr));
537
}
538
539
// -----------------------------------------------------------------------------
540
// AbslHashValue for Composite Types
541
// -----------------------------------------------------------------------------
542
543
// AbslHashValue() for hashing pairs
544
template <typename H, typename T1, typename T2>
545
typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
546
                        H>::type
547
AbslHashValue(H hash_state, const std::pair<T1, T2>& p) {
548
  return H::combine(std::move(hash_state), p.first, p.second);
549
}
550
551
// Helper function for hashing a tuple. The third argument should
552
// be an index_sequence running from 0 to tuple_size<Tuple> - 1.
553
template <typename H, typename Tuple, size_t... Is>
554
0
H hash_tuple(H hash_state, const Tuple& t, absl::index_sequence<Is...>) {
555
0
  return H::combine(std::move(hash_state), std::get<Is>(t)...);
556
0
}
557
558
// AbslHashValue for hashing tuples
559
template <typename H, typename... Ts>
560
#if defined(_MSC_VER)
561
// This SFINAE gets MSVC confused under some conditions. Let's just disable it
562
// for now.
563
H
564
#else   // _MSC_VER
565
typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value, H>::type
566
#endif  // _MSC_VER
567
0
AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
568
0
  return hash_internal::hash_tuple(std::move(hash_state), t,
569
0
                                   absl::make_index_sequence<sizeof...(Ts)>());
570
0
}
571
572
// -----------------------------------------------------------------------------
573
// AbslHashValue for Pointers
574
// -----------------------------------------------------------------------------
575
576
// AbslHashValue for hashing unique_ptr
577
template <typename H, typename T, typename D>
578
H AbslHashValue(H hash_state, const std::unique_ptr<T, D>& ptr) {
579
  return H::combine(std::move(hash_state), ptr.get());
580
}
581
582
// AbslHashValue for hashing shared_ptr
583
template <typename H, typename T>
584
H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) {
585
  return H::combine(std::move(hash_state), ptr.get());
586
}
587
588
// -----------------------------------------------------------------------------
589
// AbslHashValue for String-Like Types
590
// -----------------------------------------------------------------------------
591
592
// AbslHashValue for hashing strings
593
//
594
// All the string-like types supported here provide the same hash expansion for
595
// the same character sequence. These types are:
596
//
597
//  - `absl::Cord`
598
//  - `std::string` (and std::basic_string<T, std::char_traits<T>, A> for
599
//      any allocator A and any T in {char, wchar_t, char16_t, char32_t})
600
//  - `absl::string_view`, `std::string_view`, `std::wstring_view`,
601
//    `std::u16string_view`, and `std::u32string_view`.
602
//
603
// For simplicity, we currently support only strings built on `char`, `wchar_t`,
604
// `char16_t`, or `char32_t`. This support may be broadened, if necessary, but
605
// with some caution - this overload would misbehave in cases where the traits'
606
// `eq()` member isn't equivalent to `==` on the underlying character type.
607
template <typename H>
608
511k
H AbslHashValue(H hash_state, absl::string_view str) {
609
511k
  return H::combine_contiguous(std::move(hash_state), str.data(), str.size());
610
511k
}
611
612
// Support std::wstring, std::u16string and std::u32string.
613
template <typename Char, typename Alloc, typename H,
614
          typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
615
                                       std::is_same<Char, char16_t>::value ||
616
                                       std::is_same<Char, char32_t>::value>>
617
H AbslHashValue(
618
    H hash_state,
619
    const std::basic_string<Char, std::char_traits<Char>, Alloc>& str) {
620
  return H::combine_contiguous(std::move(hash_state), str.data(), str.size());
621
}
622
623
// Support std::wstring_view, std::u16string_view and std::u32string_view.
624
template <typename Char, typename H,
625
          typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
626
                                       std::is_same<Char, char16_t>::value ||
627
                                       std::is_same<Char, char32_t>::value>>
628
H AbslHashValue(H hash_state, std::basic_string_view<Char> str) {
629
  return H::combine_contiguous(std::move(hash_state), str.data(), str.size());
630
}
631
632
#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \
633
    (!defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ||        \
634
     __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000) &&       \
635
    (!defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ||         \
636
     __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101500)
637
638
#define ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE 1
639
640
// Support std::filesystem::path. The SFINAE is required because some string
641
// types are implicitly convertible to std::filesystem::path.
642
template <typename Path, typename H,
643
          typename = absl::enable_if_t<
644
              std::is_same_v<Path, std::filesystem::path>>>
645
H AbslHashValue(H hash_state, const Path& path) {
646
  // This is implemented by deferring to the standard library to compute the
647
  // hash.  The standard library requires that if two paths satisfy `p1 == p2`, then
648
  // `hash_value(p1) == hash_value(p2)`. `AbslHashValue` has the same
649
  // requirement. Since `operator==` does platform specific matching, deferring
650
  // to the standard library is the simplest approach.
651
  return H::combine(std::move(hash_state), std::filesystem::hash_value(path));
652
}
653
654
#endif  // ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE
655
656
// -----------------------------------------------------------------------------
657
// AbslHashValue for Sequence Containers
658
// -----------------------------------------------------------------------------
659
660
// AbslHashValue for hashing std::array
661
template <typename H, typename T, size_t N>
662
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
663
    H hash_state, const std::array<T, N>& array) {
664
  return H::combine_contiguous(std::move(hash_state), array.data(),
665
                               array.size());
666
}
667
668
// AbslHashValue for hashing std::deque
669
template <typename H, typename T, typename Allocator>
670
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
671
    H hash_state, const std::deque<T, Allocator>& deque) {
672
  // TODO(gromer): investigate a more efficient implementation taking
673
  // advantage of the chunk structure.
674
  for (const auto& t : deque) {
675
    hash_state = H::combine(std::move(hash_state), t);
676
  }
677
  return H::combine(std::move(hash_state), WeaklyMixedInteger{deque.size()});
678
}
679
680
// AbslHashValue for hashing std::forward_list
681
template <typename H, typename T, typename Allocator>
682
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
683
    H hash_state, const std::forward_list<T, Allocator>& list) {
684
  size_t size = 0;
685
  for (const T& t : list) {
686
    hash_state = H::combine(std::move(hash_state), t);
687
    ++size;
688
  }
689
  return H::combine(std::move(hash_state), WeaklyMixedInteger{size});
690
}
691
692
// AbslHashValue for hashing std::list
693
template <typename H, typename T, typename Allocator>
694
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
695
    H hash_state, const std::list<T, Allocator>& list) {
696
  for (const auto& t : list) {
697
    hash_state = H::combine(std::move(hash_state), t);
698
  }
699
  return H::combine(std::move(hash_state), WeaklyMixedInteger{list.size()});
700
}
701
702
// AbslHashValue for hashing std::vector
703
//
704
// Do not use this for vector<bool> on platforms that have a working
705
// implementation of std::hash. It does not have a .data(), and a fallback for
706
// std::hash<> is most likely faster.
707
template <typename H, typename T, typename Allocator>
708
typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,
709
                        H>::type
710
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
711
  return H::combine_contiguous(std::move(hash_state), vector.data(),
712
                               vector.size());
713
}
714
715
// AbslHashValue special cases for hashing std::vector<bool>
716
717
#if defined(ABSL_IS_BIG_ENDIAN) && \
718
    (defined(__GLIBCXX__) || defined(__GLIBCPP__))
719
720
// std::hash in libstdc++ does not work correctly with vector<bool> on Big
721
// Endian platforms, so we need to implement a custom AbslHashValue for
722
// it. More details on the bug:
723
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
724
template <typename H, typename T, typename Allocator>
725
typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
726
                        H>::type
727
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
728
  typename H::AbslInternalPiecewiseCombiner combiner;
729
  for (const auto& i : vector) {
730
    unsigned char c = static_cast<unsigned char>(i);
731
    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
732
  }
733
  return H::combine(combiner.finalize(std::move(hash_state)),
734
                    WeaklyMixedInteger{vector.size()});
735
}
736
#else
737
// When not working around the libstdc++ bug above, we still have to contend
738
// with the fact that std::hash<vector<bool>> is often poor quality, hashing
739
// directly on the internal words and on no other state.  On these platforms,
740
// vector<bool>{1, 1} and vector<bool>{1, 1, 0} hash to the same value.
741
//
742
// Mixing in the size (as we do in our other vector<> implementations) on top
743
// of the library-provided hash implementation avoids this QOI issue.
744
template <typename H, typename T, typename Allocator>
745
typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
746
                        H>::type
747
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
748
  return H::combine(std::move(hash_state),
749
                    std::hash<std::vector<T, Allocator>>{}(vector),
750
                    WeaklyMixedInteger{vector.size()});
751
}
752
#endif
753
754
// -----------------------------------------------------------------------------
755
// AbslHashValue for Ordered Associative Containers
756
// -----------------------------------------------------------------------------
757
758
// AbslHashValue for hashing std::map
759
template <typename H, typename Key, typename T, typename Compare,
760
          typename Allocator>
761
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
762
                        H>::type
763
AbslHashValue(H hash_state, const std::map<Key, T, Compare, Allocator>& map) {
764
  for (const auto& t : map) {
765
    hash_state = H::combine(std::move(hash_state), t);
766
  }
767
  return H::combine(std::move(hash_state), WeaklyMixedInteger{map.size()});
768
}
769
770
// AbslHashValue for hashing std::multimap
771
template <typename H, typename Key, typename T, typename Compare,
772
          typename Allocator>
773
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
774
                        H>::type
775
AbslHashValue(H hash_state,
776
              const std::multimap<Key, T, Compare, Allocator>& map) {
777
  for (const auto& t : map) {
778
    hash_state = H::combine(std::move(hash_state), t);
779
  }
780
  return H::combine(std::move(hash_state), WeaklyMixedInteger{map.size()});
781
}
782
783
// AbslHashValue for hashing std::set
784
template <typename H, typename Key, typename Compare, typename Allocator>
785
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
786
    H hash_state, const std::set<Key, Compare, Allocator>& set) {
787
  for (const auto& t : set) {
788
    hash_state = H::combine(std::move(hash_state), t);
789
  }
790
  return H::combine(std::move(hash_state), WeaklyMixedInteger{set.size()});
791
}
792
793
// AbslHashValue for hashing std::multiset
794
template <typename H, typename Key, typename Compare, typename Allocator>
795
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
796
    H hash_state, const std::multiset<Key, Compare, Allocator>& set) {
797
  for (const auto& t : set) {
798
    hash_state = H::combine(std::move(hash_state), t);
799
  }
800
  return H::combine(std::move(hash_state), WeaklyMixedInteger{set.size()});
801
}
802
803
// -----------------------------------------------------------------------------
804
// AbslHashValue for Unordered Associative Containers
805
// -----------------------------------------------------------------------------
806
807
// AbslHashValue for hashing std::unordered_set
808
template <typename H, typename Key, typename Hash, typename KeyEqual,
809
          typename Alloc>
810
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
811
    H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) {
812
  return H::combine(
813
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
814
      WeaklyMixedInteger{s.size()});
815
}
816
817
// AbslHashValue for hashing std::unordered_multiset
818
template <typename H, typename Key, typename Hash, typename KeyEqual,
819
          typename Alloc>
820
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
821
    H hash_state,
822
    const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) {
823
  return H::combine(
824
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
825
      WeaklyMixedInteger{s.size()});
826
}
827
828
// AbslHashValue for hashing std::unordered_map
829
template <typename H, typename Key, typename T, typename Hash,
830
          typename KeyEqual, typename Alloc>
831
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
832
                        H>::type
833
AbslHashValue(H hash_state,
834
              const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) {
835
  return H::combine(
836
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
837
      WeaklyMixedInteger{s.size()});
838
}
839
840
// AbslHashValue for hashing std::unordered_multimap
841
template <typename H, typename Key, typename T, typename Hash,
842
          typename KeyEqual, typename Alloc>
843
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
844
                        H>::type
845
AbslHashValue(H hash_state,
846
              const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) {
847
  return H::combine(
848
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
849
      WeaklyMixedInteger{s.size()});
850
}
851
852
// -----------------------------------------------------------------------------
853
// AbslHashValue for Wrapper Types
854
// -----------------------------------------------------------------------------
855
856
// AbslHashValue for hashing std::reference_wrapper
857
template <typename H, typename T>
858
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
859
    H hash_state, std::reference_wrapper<T> opt) {
860
  return H::combine(std::move(hash_state), opt.get());
861
}
862
863
// AbslHashValue for hashing absl::optional
864
template <typename H, typename T>
865
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
866
    H hash_state, const absl::optional<T>& opt) {
867
  if (opt) hash_state = H::combine(std::move(hash_state), *opt);
868
  return H::combine(std::move(hash_state), opt.has_value());
869
}
870
871
template <typename H>
872
struct VariantVisitor {
873
  H&& hash_state;
874
  template <typename T>
875
  H operator()(const T& t) const {
876
    return H::combine(std::move(hash_state), t);
877
  }
878
};
879
880
// AbslHashValue for hashing absl::variant
881
template <typename H, typename... T>
882
typename std::enable_if<conjunction<is_hashable<T>...>::value, H>::type
883
AbslHashValue(H hash_state, const absl::variant<T...>& v) {
884
  if (!v.valueless_by_exception()) {
885
    hash_state = absl::visit(VariantVisitor<H>{std::move(hash_state)}, v);
886
  }
887
  return H::combine(std::move(hash_state), v.index());
888
}
889
890
// -----------------------------------------------------------------------------
891
// AbslHashValue for Other Types
892
// -----------------------------------------------------------------------------
893
894
// AbslHashValue for hashing std::bitset is not defined on Little Endian
895
// platforms, for the same reason as for vector<bool> (see std::vector above):
896
// It does not expose the raw bytes, and a fallback to std::hash<> is most
897
// likely faster.
898
899
#if defined(ABSL_IS_BIG_ENDIAN) && \
900
    (defined(__GLIBCXX__) || defined(__GLIBCPP__))
901
// AbslHashValue for hashing std::bitset
902
//
903
// std::hash in libstdc++ does not work correctly with std::bitset on Big Endian
904
// platforms, so we need to implement a custom AbslHashValue for it. More
905
// details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
906
template <typename H, size_t N>
907
H AbslHashValue(H hash_state, const std::bitset<N>& set) {
908
  typename H::AbslInternalPiecewiseCombiner combiner;
909
  for (size_t i = 0; i < N; i++) {
910
    unsigned char c = static_cast<unsigned char>(set[i]);
911
    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
912
  }
913
  return H::combine(combiner.finalize(std::move(hash_state)), N);
914
}
915
#endif
916
917
// -----------------------------------------------------------------------------
918
919
// Mixes all values in the range [data, data+size) into the hash state.
920
// This overload accepts only uniquely-represented types, and hashes them by
921
// hashing the entire range of bytes.
922
template <typename H, typename T>
923
typename std::enable_if<is_uniquely_represented<T>::value, H>::type
924
511k
hash_range_or_bytes(H hash_state, const T* data, size_t size) {
925
511k
  const auto* bytes = reinterpret_cast<const unsigned char*>(data);
926
511k
  return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size);
927
511k
}
928
929
template <typename H, typename T>
930
typename std::enable_if<!is_uniquely_represented<T>::value, H>::type
931
hash_range_or_bytes(H hash_state, const T* data, size_t size) {
932
  for (const auto end = data + size; data < end; ++data) {
933
    hash_state = H::combine(std::move(hash_state), *data);
934
  }
935
  return H::combine(std::move(hash_state),
936
                    hash_internal::WeaklyMixedInteger{size});
937
}
938
939
inline constexpr uint64_t kMul = uint64_t{0x79d5f9e0de1e8cf5};
940
941
// Random data taken from the hexadecimal digits of Pi's fractional component.
942
// https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number
943
ABSL_CACHELINE_ALIGNED inline constexpr uint64_t kStaticRandomData[] = {
944
    0x243f'6a88'85a3'08d3, 0x1319'8a2e'0370'7344, 0xa409'3822'299f'31d0,
945
    0x082e'fa98'ec4e'6c89, 0x4528'21e6'38d0'1377,
946
};
947
948
// Extremely weak mixture of length that is mixed into the state before
949
// combining the data. It is used only for small strings. This also ensures that
950
// we have high entropy in all bits of the state.
951
inline uint64_t PrecombineLengthMix(uint64_t state, size_t len) {
952
  ABSL_ASSUME(len + sizeof(uint64_t) <= sizeof(kStaticRandomData));
953
  uint64_t data = absl::base_internal::UnalignedLoad64(
954
      reinterpret_cast<const unsigned char*>(&kStaticRandomData[0]) + len);
955
  return state ^ data;
956
}
957
958
202M
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t Mix(uint64_t lhs, uint64_t rhs) {
959
  // Though the 128-bit product needs multiple instructions on non-x86-64
960
  // platforms, it is still a good balance between speed and hash quality.
961
202M
  absl::uint128 m = lhs;
962
202M
  m *= rhs;
963
202M
  return Uint128High64(m) ^ Uint128Low64(m);
964
202M
}
965
966
// Reads 8 bytes from p.
967
718k
inline uint64_t Read8(const unsigned char* p) {
968
// Suppress spurious array-bounds warnings on GCC.
969
#if defined(__GNUC__) && !defined(__clang__)
970
#pragma GCC diagnostic push
971
#pragma GCC diagnostic ignored "-Warray-bounds"
972
#endif
973
718k
  return absl::base_internal::UnalignedLoad64(p);
974
#if defined(__GNUC__) && !defined(__clang__)
975
#pragma GCC diagnostic pop
976
#endif
977
718k
}
978
979
// Reads 9 to 16 bytes from p.
980
// The first 8 bytes are in .first, and the rest of the bytes are in .second
981
// along with duplicated bytes from .first if len<16.
982
inline std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
983
161k
                                               size_t len) {
984
161k
  return {Read8(p), Read8(p + len - 8)};
985
161k
}
986
987
// Reads 4 to 8 bytes from p.
988
// Bytes are permuted and some input bytes may be duplicated in output.
989
399k
inline uint64_t Read4To8(const unsigned char* p, size_t len) {
990
  // If `len < 8`, we duplicate bytes. We always put low memory at the end.
991
  // E.g., on little endian platforms:
992
  // `ABCD` will be read as `ABCDABCD`.
993
  // `ABCDE` will be read as `BCDEABCD`.
994
  // `ABCDEF` will be read as `CDEFABCD`.
995
  // `ABCDEFG` will be read as `DEFGABCD`.
996
  // `ABCDEFGH` will be read as `EFGHABCD`.
997
  // We also do not care about endianness. On big-endian platforms, bytes will
998
  // be permuted differently. We always shift low memory by 32, because that
999
  // can be pipelined earlier. Reading high memory requires computing
1000
  // `p + len - 4`.
1001
399k
  uint64_t most_significant =
1002
399k
      static_cast<uint64_t>(absl::base_internal::UnalignedLoad32(p)) << 32;
1003
399k
  uint64_t least_significant =
1004
399k
      absl::base_internal::UnalignedLoad32(p + len - 4);
1005
399k
  return most_significant | least_significant;
1006
399k
}
1007
1008
// Reads 1 to 3 bytes from p. Some input bytes may be duplicated in output.
1009
117k
inline uint32_t Read1To3(const unsigned char* p, size_t len) {
1010
  // The trick used by this implementation is to avoid branches.
1011
  // We always read three bytes by duplicating.
1012
  // E.g.,
1013
  // `A` is read as `AAA`.
1014
  // `AB` is read as `ABB`.
1015
  // `ABC` is read as `ABC`.
1016
  // We always shift `p[0]` so that it can be pipelined better.
1017
  // Other bytes require extra computation to find indices.
1018
117k
  uint32_t mem0 = (static_cast<uint32_t>(p[0]) << 16) | p[len - 1];
1019
117k
  uint32_t mem1 = static_cast<uint32_t>(p[len / 2]) << 8;
1020
117k
  return mem0 | mem1;
1021
117k
}
1022
1023
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineRawImpl(uint64_t state,
1024
77.2M
                                                            uint64_t value) {
1025
77.2M
  return Mix(state ^ value, kMul);
1026
77.2M
}
1027
1028
// Slow dispatch path for calls to CombineContiguousImpl with a size argument
1029
// larger than inlined size. Has the same effect as calling
1030
// CombineContiguousImpl() repeatedly with the chunk stride size.
1031
uint64_t CombineLargeContiguousImplOn32BitLengthGt8(const unsigned char* first,
1032
                                                    size_t len, uint64_t state);
1033
uint64_t CombineLargeContiguousImplOn64BitLengthGt32(const unsigned char* first,
1034
                                                     size_t len,
1035
                                                     uint64_t state);
1036
1037
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineSmallContiguousImpl(
1038
    uint64_t state, const unsigned char* first, size_t len) {
1039
  ABSL_ASSUME(len <= 8);
1040
  uint64_t v;
1041
  if (len >= 4) {
1042
    v = Read4To8(first, len);
1043
  } else if (len > 0) {
1044
    v = Read1To3(first, len);
1045
  } else {
1046
    // Empty string must modify the state.
1047
    v = 0x57;
1048
  }
1049
  return CombineRawImpl(state, v);
1050
}
1051
1052
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineContiguousImpl9to16(
1053
    uint64_t state, const unsigned char* first, size_t len) {
1054
  ABSL_ASSUME(len >= 9);
1055
  ABSL_ASSUME(len <= 16);
1056
  // Note: any time one half of the mix function becomes zero it will fail to
1057
  // incorporate any bits from the other half. However, there is exactly 1 in
1058
  // 2^64 values for each side that achieve this, and only when the size is
1059
  // exactly 16 -- for smaller sizes there is an overlapping byte that makes
1060
  // this impossible unless the seed is *also* incredibly unlucky.
1061
  auto p = Read9To16(first, len);
1062
  return Mix(state ^ p.first, kMul ^ p.second);
1063
}
1064
1065
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineContiguousImpl17to32(
1066
    uint64_t state, const unsigned char* first, size_t len) {
1067
  ABSL_ASSUME(len >= 17);
1068
  ABSL_ASSUME(len <= 32);
1069
  // Do two mixes of overlapping 16-byte ranges in parallel to minimize
1070
  // latency.
1071
  const uint64_t m0 =
1072
      Mix(Read8(first) ^ kStaticRandomData[1], Read8(first + 8) ^ state);
1073
1074
  const unsigned char* tail_16b_ptr = first + (len - 16);
1075
  const uint64_t m1 = Mix(Read8(tail_16b_ptr) ^ kStaticRandomData[3],
1076
                          Read8(tail_16b_ptr + 8) ^ state);
1077
  return m0 ^ m1;
1078
}
1079
1080
// Implementation of the base case for combine_contiguous where we actually
1081
// mix the bytes into the state.
1082
// Dispatch to different implementations of combine_contiguous depending
1083
// on the value of `sizeof(size_t)`.
1084
inline uint64_t CombineContiguousImpl(
1085
    uint64_t state, const unsigned char* first, size_t len,
1086
0
    std::integral_constant<int, 4> /* sizeof_size_t */) {
1087
0
  // For large values we use CityHash, for small ones we use custom low latency
1088
0
  // hash.
1089
0
  if (len <= 8) {
1090
0
    return CombineSmallContiguousImpl(PrecombineLengthMix(state, len), first,
1091
0
                                      len);
1092
0
  }
1093
0
  return CombineLargeContiguousImplOn32BitLengthGt8(first, len, state);
1094
0
}
1095
1096
inline uint64_t CombineContiguousImpl(
1097
    uint64_t state, const unsigned char* first, size_t len,
1098
1.54M
    std::integral_constant<int, 8> /* sizeof_size_t */) {
1099
  // For large values we use LowLevelHash or CityHash depending on the platform,
1100
  // for small ones we use custom low latency hash.
1101
1.54M
  if (len <= 8) {
1102
590k
    return CombineSmallContiguousImpl(PrecombineLengthMix(state, len), first,
1103
590k
                                      len);
1104
590k
  }
1105
952k
  if (len <= 16) {
1106
161k
    return CombineContiguousImpl9to16(PrecombineLengthMix(state, len), first,
1107
161k
                                      len);
1108
161k
  }
1109
791k
  if (len <= 32) {
1110
99.1k
    return CombineContiguousImpl17to32(PrecombineLengthMix(state, len), first,
1111
99.1k
                                       len);
1112
99.1k
  }
1113
  // We must not mix length into the state here because calling
1114
  // CombineContiguousImpl twice with PiecewiseChunkSize() must be equivalent
1115
  // to calling CombineLargeContiguousImpl once with 2 * PiecewiseChunkSize().
1116
692k
  return CombineLargeContiguousImplOn64BitLengthGt32(first, len, state);
1117
791k
}
1118
1119
#if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \
1120
    ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
1121
#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 1
1122
#else
1123
#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 0
1124
#endif
1125
1126
// Type trait to select the appropriate hash implementation to use.
1127
// HashSelect::Apply<T> will give the proper hash implementation, to be invoked
1128
// as:
1129
//   HashSelect::Apply<T>::Invoke(state, value)
1130
// Also, HashSelect::Apply<T>::value is a boolean equal to `true` if there is a
1131
// valid `Invoke` function. Types that are not hashable will have a ::value of
1132
// `false`.
1133
struct HashSelect {
1134
 private:
1135
  struct WeaklyMixedIntegerProbe {
1136
    template <typename H>
1137
    static H Invoke(H state, WeaklyMixedInteger value) {
1138
      return hash_internal::hash_weakly_mixed_integer(std::move(state), value);
1139
    }
1140
  };
1141
1142
  struct State : HashStateBase<State> {
1143
    static State combine_contiguous(State hash_state, const unsigned char*,
1144
                                    size_t);
1145
    using State::HashStateBase::combine_contiguous;
1146
    static State combine_raw(State state, uint64_t value);
1147
    static State combine_weakly_mixed_integer(State hash_state,
1148
                                              WeaklyMixedInteger value);
1149
  };
1150
1151
  struct UniquelyRepresentedProbe {
1152
    template <typename H, typename T>
1153
    static auto Invoke(H state, const T& value)
1154
819k
        -> absl::enable_if_t<is_uniquely_represented<T>::value, H> {
1155
819k
      return hash_internal::hash_bytes(std::move(state), value);
1156
819k
    }
1157
  };
1158
1159
  struct HashValueProbe {
1160
    template <typename H, typename T>
1161
    static auto Invoke(H state, const T& value) -> absl::enable_if_t<
1162
        std::is_same<H,
1163
                     decltype(AbslHashValue(std::move(state), value))>::value,
1164
0
        H> {
1165
0
      return AbslHashValue(std::move(state), value);
1166
0
    }
1167
  };
1168
1169
  struct LegacyHashProbe {
1170
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
1171
    template <typename H, typename T>
1172
    static auto Invoke(H state, const T& value) -> absl::enable_if_t<
1173
        std::is_convertible<
1174
            decltype(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>()(value)),
1175
            size_t>::value,
1176
        H> {
1177
      return hash_internal::hash_bytes(
1178
          std::move(state),
1179
          ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>{}(value));
1180
    }
1181
#endif  // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
1182
  };
1183
1184
  struct StdHashProbe {
1185
    template <typename H, typename T>
1186
    static auto Invoke(H state, const T& value)
1187
        -> absl::enable_if_t<type_traits_internal::IsHashable<T>::value, H> {
1188
      return hash_internal::hash_bytes(std::move(state), std::hash<T>{}(value));
1189
    }
1190
  };
1191
1192
  template <typename Hash, typename T>
1193
  struct Probe : Hash {
1194
   private:
1195
    template <typename H, typename = decltype(H::Invoke(
1196
                              std::declval<State>(), std::declval<const T&>()))>
1197
    static std::true_type Test(int);
1198
    template <typename U>
1199
    static std::false_type Test(char);
1200
1201
   public:
1202
    static constexpr bool value = decltype(Test<Hash>(0))::value;
1203
  };
1204
1205
 public:
1206
  // Probe each implementation in order.
1207
  // disjunction provides short-circuiting with respect to instantiation.
1208
  template <typename T>
1209
  using Apply = absl::disjunction<         //
1210
      Probe<WeaklyMixedIntegerProbe, T>,   //
1211
      Probe<UniquelyRepresentedProbe, T>,  //
1212
      Probe<HashValueProbe, T>,            //
1213
      Probe<LegacyHashProbe, T>,           //
1214
      Probe<StdHashProbe, T>,              //
1215
      std::false_type>;
1216
};
1217
1218
template <typename T>
1219
struct is_hashable
1220
    : std::integral_constant<bool, HashSelect::template Apply<T>::value> {};
1221
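As a reading aid, a minimal sketch of how HashSelect::Apply and is_hashable above are typically exercised; DispatchHash is a hypothetical helper mirroring what HashStateBase::combine does further below, not part of this header.

template <typename H, typename T>
H DispatchHash(H state, const T& value) {
  // Reject types for which no probe has a usable Invoke overload.
  static_assert(is_hashable<T>::value, "T is not hashable by any probe");
  // Apply<T> derives from the first probe whose Invoke(state, value) is
  // well-formed, so this picks the same implementation combine() would.
  return HashSelect::Apply<T>::Invoke(std::move(state), value);
}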
1222
class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
1223
  template <typename T>
1224
  using IntegralFastPath =
1225
      conjunction<std::is_integral<T>, is_uniquely_represented<T>,
1226
                  FitsIn64Bits<T>>;
1227
1228
 public:
1229
  // Move only
1230
  MixingHashState(MixingHashState&&) = default;
1231
  MixingHashState& operator=(MixingHashState&&) = default;
1232
1233
  // Fundamental base case for hash recursion: mixes the given range of bytes
1234
  // into the hash state.
1235
  static MixingHashState combine_contiguous(MixingHashState hash_state,
1236
                                            const unsigned char* first,
1237
1.37M
                                            size_t size) {
1238
1.37M
    return MixingHashState(
1239
1.37M
        CombineContiguousImpl(hash_state.state_, first, size,
1240
1.37M
                              std::integral_constant<int, sizeof(size_t)>{}));
1241
1.37M
  }
1242
  using MixingHashState::HashStateBase::combine_contiguous;
1243
1244
  template <typename T>
1245
0
  static size_t hash(const T& value) {
1246
0
    return hash_with_seed(value, Seed());
1247
0
  }
Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> >(std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> const&)
Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<std::__1::tuple<unsigned long const&> >(std::__1::tuple<unsigned long const&> const&)
Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<std::__1::basic_string_view<char, std::__1::char_traits<char> > >(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&)
Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<absl::Cord>(absl::Cord const&)
1248
1249
  // For performance reasons in non-opt mode, we specialize this for
1250
  // integral types.
1251
  // Otherwise we would be instantiating and calling dozens of functions for
1252
  // something that is just one multiplication and a couple of XORs.
1253
  // The result should be the same as running the whole algorithm, but faster.
1254
  template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
1255
  static size_t hash_with_seed(T value, size_t seed) {
1256
    return static_cast<size_t>(
1257
        CombineRawImpl(seed, static_cast<std::make_unsigned_t<T>>(value)));
1258
  }
1259
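For orientation, a sketch of what the integral fast path above collapses to, compared with the general overload just below; the concrete value 42 is illustrative only.

// Integral fast path: a single raw mix of the value into the seed.
//   MixingHashState::hash_with_seed(42, seed)
//     == static_cast<size_t>(CombineRawImpl(seed, 42u))
// General overload (below): constructs MixingHashState{seed}, runs the full
// HashSelect dispatch via combine(), and reads back state_.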
1260
  template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
1261
0
  static size_t hash_with_seed(const T& value, size_t seed) {
1262
0
    return static_cast<size_t>(combine(MixingHashState{seed}, value).state_);
1263
0
  }
Unexecuted instantiation: _ZN4absl13hash_internal15MixingHashState14hash_with_seedINSt3__15tupleIJRKNS3_17basic_string_viewIcNS3_11char_traitsIcEEEERKiEEETnNS3_9enable_ifIXntsr16IntegralFastPathIT_EE5valueEiE4typeELi0EEEmRKSF_m
Unexecuted instantiation: _ZN4absl13hash_internal15MixingHashState14hash_with_seedINSt3__15tupleIJRKmEEETnNS3_9enable_ifIXntsr16IntegralFastPathIT_EE5valueEiE4typeELi0EEEmRKS9_m
Unexecuted instantiation: _ZN4absl13hash_internal15MixingHashState14hash_with_seedINSt3__117basic_string_viewIcNS3_11char_traitsIcEEEETnNS3_9enable_ifIXntsr16IntegralFastPathIT_EE5valueEiE4typeELi0EEEmRKS9_m
Unexecuted instantiation: _ZN4absl13hash_internal15MixingHashState14hash_with_seedINS_4CordETnNSt3__19enable_ifIXntsr16IntegralFastPathIT_EE5valueEiE4typeELi0EEEmRKS6_m
1264
1265
 private:
1266
  friend class MixingHashState::HashStateBase;
1267
  template <typename H>
1268
  friend H absl::hash_internal::hash_weakly_mixed_integer(H,
1269
                                                          WeaklyMixedInteger);
1270
  // Allow the HashState type-erasure implementation to invoke
1271
  // RunCombineUnordered() directly.
1272
  friend class absl::HashState;
1273
  friend struct CombineRaw;
1274
1275
  // For use in Seed().
1276
  static const void* const kSeed;
1277
1278
  // Invoked only once for a given argument; that plus the fact that this is
1279
  // move-only ensures that there is only one non-moved-from object.
1280
0
  MixingHashState() : state_(Seed()) {}
1281
1282
  // Workaround for MSVC bug.
1283
  // We make the type copyable to fix the calling convention, even though we
1284
  // never actually copy it. Keep it private to not affect the public API of the
1285
  // type.
1286
  MixingHashState(const MixingHashState&) = default;
1287
1288
5.25M
  explicit MixingHashState(uint64_t state) : state_(state) {}
1289
1290
  // Combines a raw value from e.g. integrals/floats/pointers. This allows
1291
  // us to be consistent with IntegralFastPath when combining raw types, but
1292
  // optimize Read1To3 and Read4To8 differently for the string case.
1293
  static MixingHashState combine_raw(MixingHashState hash_state,
1294
2.28M
                                     uint64_t value) {
1295
2.28M
    return MixingHashState(CombineRawImpl(hash_state.state_, value));
1296
2.28M
  }
1297
1298
  static MixingHashState combine_weakly_mixed_integer(
1299
0
      MixingHashState hash_state, WeaklyMixedInteger value) {
1300
0
    // Some transformation for the value is needed to make an empty
1301
0
    // string/container change the mixing hash state.
1302
0
    // We use a constant smaller than 8 bits so that the compiler emits
1303
0
    // `add` with an immediate operand with 1 byte value.
1304
0
    return MixingHashState{hash_state.state_ + (0x57 + value.value)};
1305
0
  }
1306
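A small illustration of why the 0x57 offset above matters; the concrete sizes are illustrative only.

  //   state' = state + (0x57 + value.value)
  //   value.value == 0 (empty string/container)  ->  state' = state + 0x57
  //   value.value == 3                           ->  state' = state + 0x5A
  // Without the 0x57 offset, a zero-sized container would add 0 and leave the
  // state unchanged, defeating the purpose described above.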
1307
  template <typename CombinerT>
1308
  static MixingHashState RunCombineUnordered(MixingHashState state,
1309
                                             CombinerT combiner) {
1310
    uint64_t unordered_state = 0;
1311
    combiner(MixingHashState{}, [&](MixingHashState& inner_state) {
1312
      // Add the hash state of the element to the running total, but mix the
1313
      // carry bit back into the low bit.  This is intended to avoid losing
1314
      // entropy to overflow, especially when unordered_multisets contain
1315
      // multiple copies of the same value.
1316
      auto element_state = inner_state.state_;
1317
      unordered_state += element_state;
1318
      if (unordered_state < element_state) {
1319
        ++unordered_state;
1320
      }
1321
      inner_state = MixingHashState{};
1322
    });
1323
    return MixingHashState::combine(std::move(state), unordered_state);
1324
  }
1325
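The accumulation in the callback above is an end-around-carry sum. A minimal standalone sketch of just that step, with hypothetical names; it is not part of this header.

// Adds `element_state` into `total`, folding any overflow carry back into the
// low bit so that repeated identical elements (as in an unordered_multiset)
// do not silently cancel entropy.
inline uint64_t AddWithEndAroundCarry(uint64_t total, uint64_t element_state) {
  total += element_state;
  if (total < element_state) ++total;  // unsigned overflow occurred; wrap carry
  return total;
}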
1326
  // A non-deterministic seed.
1327
  //
1328
  // The current purpose of this seed is to generate non-deterministic results
1329
  // and prevent having users depend on the particular hash values.
1330
  // It is not meant as a security feature right now, but it leaves the door
1331
  // open to upgrade it to a true per-process random seed. A true random seed
1332
  // costs more and we don't need to pay for that right now.
1333
  //
1334
  // On platforms with ASLR, we take advantage of it to make a per-process
1335
  // random value.
1336
  // See https://en.wikipedia.org/wiki/Address_space_layout_randomization
1337
  //
1338
  // On other platforms this is still going to be non-deterministic but most
1339
  // probably per-build and not per-process.
1340
1.59M
  ABSL_ATTRIBUTE_ALWAYS_INLINE static size_t Seed() {
1341
1.59M
#if (!defined(__clang__) || __clang_major__ > 11) && \
1342
1.59M
    (!defined(__apple_build_version__) ||            \
1343
1.59M
     __apple_build_version__ >= 19558921)  // Xcode 12
1344
1.59M
    return static_cast<size_t>(reinterpret_cast<uintptr_t>(&kSeed));
1345
#else
1346
    // Workaround the absence of
1347
    // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.
1348
    return static_cast<size_t>(reinterpret_cast<uintptr_t>(kSeed));
1349
#endif
1350
1.59M
  }
1351
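The mechanism described above, deriving a per-process value from the address of a global under ASLR, can be shown in isolation. An illustrative comment sketch with a hypothetical anchor object; it omits the compiler-version workaround used in Seed().

  //   static const int kAnchor = 0;  // any object with static storage works
  //   size_t seed =
  //       static_cast<size_t>(reinterpret_cast<uintptr_t>(&kAnchor));
  // Under ASLR, the object's load address (and hence `seed`) differs from
  // process to process.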
1352
  uint64_t state_;
1353
};
1354
1355
struct AggregateBarrier {};
1356
1357
// Add a private base class to make sure this type is not an aggregate.
1358
// Aggregates can be aggregate initialized even if the default constructor is
1359
// deleted.
1360
struct PoisonedHash : private AggregateBarrier {
1361
  PoisonedHash() = delete;
1362
  PoisonedHash(const PoisonedHash&) = delete;
1363
  PoisonedHash& operator=(const PoisonedHash&) = delete;
1364
};
1365
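The comment above relies on a C++17 rule: a deleted default constructor is user-declared but not user-provided, so without a base class the type would remain an aggregate and could still be brace-initialized. A small sketch with hypothetical type names.

struct NoBarrier {
  NoBarrier() = delete;
};
// In C++17, NoBarrier is still an aggregate, so `NoBarrier{}` compiles even
// though its default constructor is deleted.
struct WithBarrier : private AggregateBarrier {
  WithBarrier() = delete;
};
// The private base disqualifies WithBarrier from being an aggregate, so
// `WithBarrier{}` fails to compile, which is the effect PoisonedHash relies on.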
1366
template <typename T>
1367
struct HashImpl {
1368
0
  size_t operator()(const T& value) const {
1369
0
    return MixingHashState::hash(value);
1370
0
  }
Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> >::operator()(std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> const&) const
Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::tuple<unsigned long const&> >::operator()(std::__1::tuple<unsigned long const&> const&) const
Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::basic_string_view<char, std::__1::char_traits<char> > >::operator()(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&) const
Unexecuted instantiation: absl::hash_internal::HashImpl<absl::Cord>::operator()(absl::Cord const&) const
1371
1372
 private:
1373
  friend struct HashWithSeed;
1374
1375
0
  size_t hash_with_seed(const T& value, size_t seed) const {
1376
0
    return MixingHashState::hash_with_seed(value, seed);
1377
0
  }
Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::basic_string_view<char, std::__1::char_traits<char> > >::hash_with_seed(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, unsigned long) const
Unexecuted instantiation: absl::hash_internal::HashImpl<absl::Cord>::hash_with_seed(absl::Cord const&, unsigned long) const
1378
};
1379
1380
template <typename T>
1381
struct Hash
1382
    : absl::conditional_t<is_hashable<T>::value, HashImpl<T>, PoisonedHash> {};
1383
1384
template <typename H>
1385
template <typename T, typename... Ts>
1386
819k
H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
1387
819k
  return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(
1388
819k
                        std::move(state), value),
1389
819k
                    values...);
1390
819k
}
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&>>(absl::hash_internal::MixingHashState, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> const&)
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::basic_string_view<char, std::__1::char_traits<char> >, int>(absl::hash_internal::MixingHashState, std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&)
absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<int>(absl::hash_internal::MixingHashState, int const&)
Line
Count
Source
1386
598k
H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
1387
598k
  return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(
1388
598k
                        std::move(state), value),
1389
598k
                    values...);
1390
598k
}
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::tuple<unsigned long const&>>(absl::hash_internal::MixingHashState, std::__1::tuple<unsigned long const&> const&)
absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<unsigned long>(absl::hash_internal::MixingHashState, unsigned long const&)
Line
Count
Source
1386
220k
H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
1387
220k
  return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(
1388
220k
                        std::move(state), value),
1389
220k
                    values...);
1390
220k
}
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::basic_string_view<char, std::__1::char_traits<char> >>(absl::hash_internal::MixingHashState, std::__1::basic_string_view<char, std::__1::char_traits<char> > const&)
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<absl::Cord>(absl::hash_internal::MixingHashState, absl::Cord const&)
1391
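The variadic combine above peels off one argument per call. A sketch of how a three-argument invocation unfolds, assuming the usual single-argument combine(H) base case declared earlier in this header, which returns the state unchanged; A, B, C stand for the probe types selected for each argument.

// combine(state, a, b, c)
//   == combine(HashSelect::Apply<A>::Invoke(state, a), b, c)
//   == combine(Apply<B>::Invoke(Apply<A>::Invoke(state, a), b), c)
//   == Apply<C>::Invoke(Apply<B>::Invoke(Apply<A>::Invoke(state, a), b), c)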
1392
template <typename H>
1393
template <typename T>
1394
511k
H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
1395
511k
  return hash_internal::hash_range_or_bytes(std::move(state), data, size);
1396
511k
}
1397
1398
template <typename H>
1399
template <typename I>
1400
H HashStateBase<H>::combine_unordered(H state, I begin, I end) {
1401
  return H::RunCombineUnordered(std::move(state),
1402
                                CombineUnorderedCallback<I>{begin, end});
1403
}
1404
1405
template <typename H>
1406
H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,
1407
0
                                size_t size) {
1408
0
  if (position_ + size < PiecewiseChunkSize()) {
1409
0
    // This partial chunk does not fill our existing buffer
1410
0
    memcpy(buf_ + position_, data, size);
1411
0
    position_ += size;
1412
0
    return state;
1413
0
  }
1414
0
  added_something_ = true;
1415
0
  // If the buffer is partially filled we need to complete the buffer
1416
0
  // and hash it.
1417
0
  if (position_ != 0) {
1418
0
    const size_t bytes_needed = PiecewiseChunkSize() - position_;
1419
0
    memcpy(buf_ + position_, data, bytes_needed);
1420
0
    state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize());
1421
0
    data += bytes_needed;
1422
0
    size -= bytes_needed;
1423
0
  }
1424
0
1425
0
  // Hash whatever chunks we can without copying
1426
0
  while (size >= PiecewiseChunkSize()) {
1427
0
    state = H::combine_contiguous(std::move(state), data, PiecewiseChunkSize());
1428
0
    data += PiecewiseChunkSize();
1429
0
    size -= PiecewiseChunkSize();
1430
0
  }
1431
0
  // Fill the buffer with the remainder
1432
0
  memcpy(buf_, data, size);
1433
0
  position_ = size;
1434
0
  return state;
1435
0
}
1436
1437
template <typename H>
1438
0
H PiecewiseCombiner::finalize(H state) {
1439
0
  // Do not call combine_contiguous with an empty remainder, since doing so
1441
0
  // would still modify the state.
1441
0
  if (added_something_ && position_ == 0) {
1442
0
    return state;
1443
0
  }
1444
0
  // We still call combine_contiguous for the entirely empty buffer.
1445
0
  return H::combine_contiguous(std::move(state), buf_, position_);
1446
0
}
1447
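A usage sketch for the two member templates above, assuming PiecewiseCombiner is default-constructible as declared earlier in this header; HashTwoChunks is a hypothetical helper and H is any hash state type that provides combine_contiguous.

// Hashes two non-contiguous chunks as if they formed one contiguous buffer.
template <typename H>
H HashTwoChunks(H state, const unsigned char* chunk1, size_t len1,
                const unsigned char* chunk2, size_t len2) {
  PiecewiseCombiner combiner;
  state = combiner.add_buffer(std::move(state), chunk1, len1);
  state = combiner.add_buffer(std::move(state), chunk2, len2);
  return combiner.finalize(std::move(state));
}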
1448
}  // namespace hash_internal
1449
ABSL_NAMESPACE_END
1450
}  // namespace absl
1451
1452
#endif  // ABSL_HASH_INTERNAL_HASH_H_