Coverage Report

Created: 2025-07-09 06:39

/src/abseil-cpp/absl/hash/internal/hash.h
Line | Count | Source
1
// Copyright 2018 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// -----------------------------------------------------------------------------
16
// File: hash.h
17
// -----------------------------------------------------------------------------
18
//
19
#ifndef ABSL_HASH_INTERNAL_HASH_H_
20
#define ABSL_HASH_INTERNAL_HASH_H_
21
22
#ifdef __APPLE__
23
#include <Availability.h>
24
#include <TargetConditionals.h>
25
#endif
26
27
// We include config.h here to make sure that ABSL_INTERNAL_CPLUSPLUS_LANG is
28
// defined.
29
#include "absl/base/config.h"
30
31
// GCC15 warns that <ciso646> is deprecated in C++17 and suggests using
32
// <version> instead, even though <version> is not available in C++17 mode prior
33
// to GCC9.
34
#if defined(__has_include)
35
#if __has_include(<version>)
36
#define ABSL_INTERNAL_VERSION_HEADER_AVAILABLE 1
37
#endif
38
#endif
39
40
// For feature testing and determining which headers can be included.
41
#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L || \
42
    defined(ABSL_INTERNAL_VERSION_HEADER_AVAILABLE)
43
#include <version>
44
#else
45
#include <ciso646>
46
#endif
47
48
#undef ABSL_INTERNAL_VERSION_HEADER_AVAILABLE
49
50
#include <algorithm>
51
#include <array>
52
#include <bitset>
53
#include <cassert>
54
#include <cmath>
55
#include <cstddef>
56
#include <cstdint>
57
#include <cstring>
58
#include <deque>
59
#include <forward_list>
60
#include <functional>
61
#include <iterator>
62
#include <limits>
63
#include <list>
64
#include <map>
65
#include <memory>
66
#include <set>
67
#include <string>
68
#include <string_view>
69
#include <tuple>
70
#include <type_traits>
71
#include <unordered_map>
72
#include <unordered_set>
73
#include <utility>
74
#include <vector>
75
76
#include "absl/base/attributes.h"
77
#include "absl/base/internal/unaligned_access.h"
78
#include "absl/base/optimization.h"
79
#include "absl/base/port.h"
80
#include "absl/container/fixed_array.h"
81
#include "absl/hash/internal/city.h"
82
#include "absl/hash/internal/weakly_mixed_integer.h"
83
#include "absl/meta/type_traits.h"
84
#include "absl/numeric/bits.h"
85
#include "absl/numeric/int128.h"
86
#include "absl/strings/string_view.h"
87
#include "absl/types/optional.h"
88
#include "absl/types/variant.h"
89
#include "absl/utility/utility.h"
90
91
#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L
92
#include <filesystem>  // NOLINT
93
#endif
94
95
namespace absl {
96
ABSL_NAMESPACE_BEGIN
97
98
class HashState;
99
100
namespace hash_internal {
101
102
// Internal detail: Large buffers are hashed in smaller chunks.  This function
103
// returns the size of these chunks.
104
7.19M
constexpr size_t PiecewiseChunkSize() { return 1024; }
105
106
// PiecewiseCombiner is an internal-only helper class for hashing a piecewise
107
// buffer of `char` or `unsigned char` as though it were contiguous.  This class
108
// provides two methods:
109
//
110
//   H add_buffer(state, data, size)
111
//   H finalize(state)
112
//
113
// `add_buffer` can be called zero or more times, followed by a single call to
114
// `finalize`.  This will produce the same hash expansion as concatenating each
115
// buffer piece into a single contiguous buffer, and passing this to
116
// `H::combine_contiguous`.
117
//
118
//  Example usage:
119
//    PiecewiseCombiner combiner;
120
//    for (const auto& piece : pieces) {
121
//      state = combiner.add_buffer(std::move(state), piece.data, piece.size);
122
//    }
123
//    return combiner.finalize(std::move(state));
124
class PiecewiseCombiner {
125
 public:
126
  PiecewiseCombiner() = default;
127
  PiecewiseCombiner(const PiecewiseCombiner&) = delete;
128
  PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete;
129
130
  // Appends the given range of bytes to the sequence to be hashed, which may
131
  // modify the provided hash state.
132
  template <typename H>
133
  H add_buffer(H state, const unsigned char* data, size_t size);
134
  template <typename H>
135
0
  H add_buffer(H state, const char* data, size_t size) {
136
0
    return add_buffer(std::move(state),
137
0
                      reinterpret_cast<const unsigned char*>(data), size);
138
0
  }
139
140
  // Finishes combining the hash sequence, which may modify the provided
141
  // hash state.
142
  //
143
  // Once finalize() is called, add_buffer() may no longer be called. The
144
  // resulting hash state will be the same as if the pieces passed to
145
  // add_buffer() were concatenated into a single flat buffer, and then provided
146
  // to H::combine_contiguous().
147
  template <typename H>
148
  H finalize(H state);
149
150
 private:
151
  unsigned char buf_[PiecewiseChunkSize()];
152
  size_t position_ = 0;
153
  bool added_something_ = false;
154
};
155
156
// Trait class which returns true if T is hashable by the absl::Hash framework.
157
// Used for the AbslHashValue implementations for composite types below.
158
template <typename T>
159
struct is_hashable;
160
161
// HashStateBase is an internal implementation detail that contains common
162
// implementation details for all of the "hash state objects" generated
163
// by Abseil.  This is not a public API; users should not create classes that
164
// inherit from this.
165
//
166
// A hash state object is the template argument `H` passed to `AbslHashValue`.
167
// It represents an intermediate state in the computation of an unspecified hash
168
// algorithm. `HashStateBase` provides a CRTP style base class for hash state
169
// implementations. Developers adding type support for `absl::Hash` should not
170
// rely on any parts of the state object other than the following member
171
// functions:
172
//
173
//   * HashStateBase::combine()
174
//   * HashStateBase::combine_contiguous()
175
//   * HashStateBase::combine_unordered()
176
//
177
// A derived hash state class of type `H` must provide a public member function
178
// with a signature similar to the following:
179
//
180
//    `static H combine_contiguous(H state, const unsigned char*, size_t)`.
181
//
182
// It must also provide a private template method named RunCombineUnordered.
183
//
184
// A "consumer" is a 1-arg functor returning void.  Its argument is a reference
185
// to an inner hash state object, and it may be called multiple times.  When
186
// called, the functor consumes the entropy from the provided state object,
187
// and resets that object to its empty state.
188
//
189
// A "combiner" is a stateless 2-arg functor returning void.  Its arguments are
190
// an inner hash state object and an ElementStateConsumer functor.  A combiner
191
// uses the provided inner hash state object to hash each element of the
192
// container, passing the inner hash state object to the consumer after hashing
193
// each element.
194
//
195
// Given these definitions, a derived hash state class of type H
196
// must provide a private template method with a signature similar to the
197
// following:
198
//
199
//    `template <typename CombinerT>`
200
//    `static H RunCombineUnordered(H outer_state, CombinerT combiner)`
201
//
202
// This function is responsible for constructing the inner state object and
203
// providing a consumer to the combiner.  It uses side effects of the consumer
204
// and combiner to mix the state of each element in an order-independent manner,
205
// and uses this to return an updated value of `outer_state`.
206
//
207
// This inside-out approach generates efficient object code in the normal case,
208
// but allows us to use stack storage to implement the absl::HashState type
209
// erasure mechanism (avoiding heap allocations while hashing).
210
//
211
// `HashStateBase` will provide a complete implementation for a hash state
212
// object in terms of these two methods.
213
//
214
// Example:
215
//
216
//   // Use CRTP to define your derived class.
217
//   struct MyHashState : HashStateBase<MyHashState> {
218
//       static H combine_contiguous(H state, const unsigned char*, size_t);
219
//       using MyHashState::HashStateBase::combine;
220
//       using MyHashState::HashStateBase::combine_contiguous;
221
//       using MyHashState::HashStateBase::combine_unordered;
222
//     private:
223
//       template <typename CombinerT>
224
//       static H RunCombineUnordered(H state, CombinerT combiner);
225
//   };
226
template <typename H>
227
class HashStateBase {
228
 public:
229
  // Combines an arbitrary number of values into a hash state, returning the
230
  // updated state.
231
  //
232
  // Each of the value types `T` must be separately hashable by the Abseil
233
  // hashing framework.
234
  //
235
  // NOTE:
236
  //
237
  //   state = H::combine(std::move(state), value1, value2, value3);
238
  //
239
  // is guaranteed to produce the same hash expansion as:
240
  //
241
  //   state = H::combine(std::move(state), value1);
242
  //   state = H::combine(std::move(state), value2);
243
  //   state = H::combine(std::move(state), value3);
244
  template <typename T, typename... Ts>
245
  static H combine(H state, const T& value, const Ts&... values);
246
3.18M
  static H combine(H state) { return state; }
247
248
  // Combines a contiguous array of `size` elements into a hash state, returning
249
  // the updated state.
250
  //
251
  // NOTE:
252
  //
253
  //   state = H::combine_contiguous(std::move(state), data, size);
254
  //
255
  // is NOT guaranteed to produce the same hash expansion as a for-loop (it may
256
  // perform internal optimizations).  If you need this guarantee, use the
257
  // for-loop instead.
258
  template <typename T>
259
  static H combine_contiguous(H state, const T* data, size_t size);
260
261
  template <typename I>
262
  static H combine_unordered(H state, I begin, I end);
263
264
  using AbslInternalPiecewiseCombiner = PiecewiseCombiner;
265
266
  template <typename T>
267
  using is_hashable = absl::hash_internal::is_hashable<T>;
268
269
 private:
270
  // Common implementation of the iteration step of a "combiner", as described
271
  // above.
272
  template <typename I>
273
  struct CombineUnorderedCallback {
274
    I begin;
275
    I end;
276
277
    template <typename InnerH, typename ElementStateConsumer>
278
    void operator()(InnerH inner_state, ElementStateConsumer cb) {
279
      for (; begin != end; ++begin) {
280
        inner_state = H::combine(std::move(inner_state), *begin);
281
        cb(inner_state);
282
      }
283
    }
284
  };
285
};
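An aside on how user code typically plugs into this framework: a type opts in by providing a friend `AbslHashValue` overload that forwards its salient members to `H::combine`. A minimal sketch under that pattern (the `MyPoint` type is hypothetical, not part of this header):

  #include <utility>
  #include "absl/hash/hash.h"

  struct MyPoint {
    int x;
    int y;

    // Forward both members to the framework; equal points hash equally.
    template <typename H>
    friend H AbslHashValue(H h, const MyPoint& p) {
      return H::combine(std::move(h), p.x, p.y);
    }
  };

  // Usage: size_t digest = absl::Hash<MyPoint>{}(MyPoint{1, 2});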
286
287
// `is_uniquely_represented<T>` is a trait class that indicates whether `T`
288
// is uniquely represented.
289
//
290
// A type is "uniquely represented" if two equal values of that type are
291
// guaranteed to have the same bytes in their underlying storage. In other
292
// words, if `a == b`, then `memcmp(&a, &b, sizeof(T))` is guaranteed to be
293
// zero. This property cannot be detected automatically, so this trait is false
294
// by default, but can be specialized by types that wish to assert that they are
295
// uniquely represented. This makes them eligible for certain optimizations.
296
//
297
// If you have any doubt whatsoever, do not specialize this template.
298
// The default is completely safe, and merely disables some optimizations
299
// that will not matter for most types. Specializing this template,
300
// on the other hand, can be very hazardous.
301
//
302
// To be uniquely represented, a type must not have multiple ways of
303
// representing the same value; for example, float and double are not
304
// uniquely represented, because they have distinct representations for
305
// +0 and -0. Furthermore, the type's byte representation must consist
306
// solely of user-controlled data, with no padding bits and no compiler-
307
// controlled data such as vptrs or sanitizer metadata. This is usually
308
// very difficult to guarantee, because in most cases the compiler can
309
// insert data and padding bits at its own discretion.
310
//
311
// If you specialize this template for a type `T`, you must do so in the file
312
// that defines that type (or in this file). If you define that specialization
313
// anywhere else, `is_uniquely_represented<T>` could have different meanings
314
// in different places.
315
//
316
// The Enable parameter is meaningless; it is provided as a convenience,
317
// to support certain SFINAE techniques when defining specializations.
318
template <typename T, typename Enable = void>
319
struct is_uniquely_represented : std::false_type {};
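For intuition on why the floating-point types must stay out of this trait: +0.0f and -0.0f compare equal yet differ in their stored bytes, so a byte-wise hash would break the requirement that `a == b` implies equal hashes. A standalone illustration (not part of this header):

  #include <cstring>

  bool EqualFloatsDifferentBytes() {
    float pos = 0.0f;
    float neg = -0.0f;
    const bool equal = (pos == neg);                              // true
    const bool same_bytes =
        std::memcmp(&pos, &neg, sizeof(float)) == 0;              // false: sign bit differs
    return equal && !same_bytes;                                  // true
  }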
320
321
// unsigned char is a synonym for "byte", so it is guaranteed to be
322
// uniquely represented.
323
template <>
324
struct is_uniquely_represented<unsigned char> : std::true_type {};
325
326
// is_uniquely_represented for non-standard integral types
327
//
328
// Integral types other than bool should be uniquely represented on any
329
// platform that this will plausibly be ported to.
330
template <typename Integral>
331
struct is_uniquely_represented<
332
    Integral, typename std::enable_if<std::is_integral<Integral>::value>::type>
333
    : std::true_type {};
334
335
template <>
336
struct is_uniquely_represented<bool> : std::false_type {};
337
338
#ifdef ABSL_HAVE_INTRINSIC_INT128
339
// Specialize the trait for GNU extension types.
340
template <>
341
struct is_uniquely_represented<__int128> : std::true_type {};
342
template <>
343
struct is_uniquely_represented<unsigned __int128> : std::true_type {};
344
#endif  // ABSL_HAVE_INTRINSIC_INT128
345
346
template <typename T>
347
struct FitsIn64Bits : std::integral_constant<bool, sizeof(T) <= 8> {};
348
349
struct CombineRaw {
350
  template <typename H>
351
1.59M
  H operator()(H state, uint64_t value) const {
352
1.59M
    return H::combine_raw(std::move(state), value);
353
1.59M
  }
354
};
355
356
// For use in `raw_hash_set` to pass a seed to the hash function.
357
struct HashWithSeed {
358
  template <typename Hasher, typename T>
359
0
  size_t hash(const Hasher& hasher, const T& value, size_t seed) const {
360
0
    // NOLINTNEXTLINE(clang-diagnostic-sign-conversion)
361
0
    return hasher.hash_with_seed(value, seed);
362
0
  }
Unexecuted instantiation: unsigned long absl::hash_internal::HashWithSeed::hash<absl::hash_internal::Hash<std::__1::basic_string_view<char, std::__1::char_traits<char> > >, std::__1::basic_string_view<char, std::__1::char_traits<char> > >(absl::hash_internal::Hash<std::__1::basic_string_view<char, std::__1::char_traits<char> > > const&, std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, unsigned long) const
Unexecuted instantiation: unsigned long absl::hash_internal::HashWithSeed::hash<absl::hash_internal::Hash<absl::Cord>, absl::Cord>(absl::hash_internal::Hash<absl::Cord> const&, absl::Cord const&, unsigned long) const
363
};
364
365
// Convenience function that combines `hash_state` with the byte representation
366
// of `value`.
367
template <typename H, typename T,
368
          absl::enable_if_t<FitsIn64Bits<T>::value, int> = 0>
369
640k
H hash_bytes(H hash_state, const T& value) {
370
640k
  const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
371
640k
  uint64_t v;
372
  if constexpr (sizeof(T) == 1) {
373
    v = *start;
374
  } else if constexpr (sizeof(T) == 2) {
375
    v = absl::base_internal::UnalignedLoad16(start);
376
376k
  } else if constexpr (sizeof(T) == 4) {
377
376k
    v = absl::base_internal::UnalignedLoad32(start);
378
376k
  } else {
379
263k
    static_assert(sizeof(T) == 8);
380
263k
    v = absl::base_internal::UnalignedLoad64(start);
381
263k
  }
382
640k
  return CombineRaw()(std::move(hash_state), v);
383
640k
}
_ZN4absl13hash_internal10hash_bytesINS0_15MixingHashStateEiTnNSt3__19enable_ifIXsr12FitsIn64BitsIT0_EE5valueEiE4typeELi0EEET_S8_RKS5_
Line
Count
Source
369
376k
H hash_bytes(H hash_state, const T& value) {
370
376k
  const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
371
376k
  uint64_t v;
372
  if constexpr (sizeof(T) == 1) {
373
    v = *start;
374
  } else if constexpr (sizeof(T) == 2) {
375
    v = absl::base_internal::UnalignedLoad16(start);
376
376k
  } else if constexpr (sizeof(T) == 4) {
377
376k
    v = absl::base_internal::UnalignedLoad32(start);
378
  } else {
379
    static_assert(sizeof(T) == 8);
380
    v = absl::base_internal::UnalignedLoad64(start);
381
  }
382
376k
  return CombineRaw()(std::move(hash_state), v);
383
376k
}
_ZN4absl13hash_internal10hash_bytesINS0_15MixingHashStateEmTnNSt3__19enable_ifIXsr12FitsIn64BitsIT0_EE5valueEiE4typeELi0EEET_S8_RKS5_
Line
Count
Source
369
263k
H hash_bytes(H hash_state, const T& value) {
370
263k
  const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
371
263k
  uint64_t v;
372
  if constexpr (sizeof(T) == 1) {
373
    v = *start;
374
  } else if constexpr (sizeof(T) == 2) {
375
    v = absl::base_internal::UnalignedLoad16(start);
376
  } else if constexpr (sizeof(T) == 4) {
377
    v = absl::base_internal::UnalignedLoad32(start);
378
263k
  } else {
379
263k
    static_assert(sizeof(T) == 8);
380
263k
    v = absl::base_internal::UnalignedLoad64(start);
381
263k
  }
382
263k
  return CombineRaw()(std::move(hash_state), v);
383
263k
}
384
template <typename H, typename T,
385
          absl::enable_if_t<!FitsIn64Bits<T>::value, int> = 0>
386
H hash_bytes(H hash_state, const T& value) {
387
  const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
388
  return H::combine_contiguous(std::move(hash_state), start, sizeof(value));
389
}
390
391
template <typename H>
392
H hash_weakly_mixed_integer(H hash_state, WeaklyMixedInteger value) {
393
  return H::combine_weakly_mixed_integer(std::move(hash_state), value);
394
}
395
396
// -----------------------------------------------------------------------------
397
// AbslHashValue for Basic Types
398
// -----------------------------------------------------------------------------
399
400
// Note: Default `AbslHashValue` implementations live in `hash_internal`. This
401
// allows us to block lexical scope lookup when doing an unqualified call to
402
// `AbslHashValue` below. User-defined implementations of `AbslHashValue` can
403
// only be found via ADL.
404
405
// AbslHashValue() for hashing bool values
406
//
407
// We use SFINAE to ensure that this overload only accepts bool, not types that
408
// are convertible to bool.
409
template <typename H, typename B>
410
typename std::enable_if<std::is_same<B, bool>::value, H>::type AbslHashValue(
411
    H hash_state, B value) {
412
  // We use ~size_t{} instead of 1 so that true and false differ in all bits,
413
  // not in just one.
414
  return H::combine(std::move(hash_state),
415
                    static_cast<size_t>(value ? ~size_t{} : 0));
416
}
417
418
// AbslHashValue() for hashing enum values
419
template <typename H, typename Enum>
420
typename std::enable_if<std::is_enum<Enum>::value, H>::type AbslHashValue(
421
    H hash_state, Enum e) {
422
  // In practice, we could almost certainly just invoke hash_bytes directly,
423
  // but it's possible that a sanitizer might one day want to
424
  // store data in the unused bits of an enum. To avoid that risk, we
425
  // convert to the underlying type before hashing. Hopefully this will get
426
  // optimized away; if not, we can reopen discussion with c-toolchain-team.
427
  return H::combine(std::move(hash_state),
428
                    static_cast<typename std::underlying_type<Enum>::type>(e));
429
}
430
// AbslHashValue() for hashing floating-point values
431
template <typename H, typename Float>
432
typename std::enable_if<std::is_same<Float, float>::value ||
433
                            std::is_same<Float, double>::value,
434
                        H>::type
435
AbslHashValue(H hash_state, Float value) {
436
  return hash_internal::hash_bytes(std::move(hash_state),
437
                                   value == 0 ? 0 : value);
438
}
439
440
// Long double has the property that it might have extra unused bytes in it.
441
// For example, on x86 sizeof(long double)==16 but it only really uses 80 bits
442
// of it. This means we can't use hash_bytes on a long double and have to
443
// convert it to something else first.
444
template <typename H, typename LongDouble>
445
typename std::enable_if<std::is_same<LongDouble, long double>::value, H>::type
446
AbslHashValue(H hash_state, LongDouble value) {
447
  const int category = std::fpclassify(value);
448
  switch (category) {
449
    case FP_INFINITE:
450
      // Add the sign bit to differentiate between +Inf and -Inf
451
      hash_state = H::combine(std::move(hash_state), std::signbit(value));
452
      break;
453
454
    case FP_NAN:
455
    case FP_ZERO:
456
    default:
457
      // Category is enough for these.
458
      break;
459
460
    case FP_NORMAL:
461
    case FP_SUBNORMAL:
462
      // We can't convert `value` directly to double because this would have
463
      // undefined behavior if the value is out of range.
464
      // std::frexp gives us a value in the range (-1, -.5] or [.5, 1) that is
465
      // guaranteed to be in range for `double`. The truncation is
466
      // implementation defined, but that works as long as it is deterministic.
467
      int exp;
468
      auto mantissa = static_cast<double>(std::frexp(value, &exp));
469
      hash_state = H::combine(std::move(hash_state), mantissa, exp);
470
  }
471
472
  return H::combine(std::move(hash_state), category);
473
}
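As a concrete check of the `std::frexp` decomposition used above (a standalone illustration): 6.0 factors as 0.75 * 2^3, so the mantissa lands in [.5, 1) and is always representable as a `double`.

  #include <cmath>

  void FrexpDecompositionExample() {
    int exp = 0;
    long double value = 6.0L;
    double mantissa = static_cast<double>(std::frexp(value, &exp));
    // mantissa == 0.75, exp == 3; combining (mantissa, exp, category) captures
    // the value up to the deterministic truncation to double noted above.
    (void)mantissa;
  }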
474
475
// Without this overload, an array decays to a pointer and we hash that, which
476
// is not likely to be what the caller intended.
477
template <typename H, typename T, size_t N>
478
H AbslHashValue(H hash_state, T (&)[N]) {
479
  static_assert(
480
      sizeof(T) == -1,
481
      "Hashing C arrays is not allowed. For string literals, wrap the literal "
482
      "in absl::string_view(). To hash the array contents, use "
483
      "absl::MakeSpan() or make the array an std::array. To hash the array "
484
      "address, use &array[0].");
485
  return hash_state;
486
}
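Following the error message above, a hedged sketch of the two spellings inside some hash implementation (the `HashALiteral` helper is purely illustrative):

  #include <utility>
  #include "absl/strings/string_view.h"

  template <typename H>
  H HashALiteral(H state) {
    // H::combine(std::move(state), "text");  // rejected: the C array hits the static_assert above
    return H::combine(std::move(state), absl::string_view("text"));  // hashes the characters
  }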
487
488
// AbslHashValue() for hashing pointers
489
template <typename H, typename T>
490
std::enable_if_t<std::is_pointer<T>::value, H> AbslHashValue(H hash_state,
491
                                                             T ptr) {
492
  auto v = reinterpret_cast<uintptr_t>(ptr);
493
  // Due to alignment, pointers tend to have low bits as zero, and the next few
494
  // bits follow a pattern since they are also multiples of some base value.
495
  // Mix pointers twice to ensure we have good entropy in low bits.
496
  return H::combine(std::move(hash_state), v, v);
497
}
498
499
// AbslHashValue() for hashing nullptr_t
500
template <typename H>
501
H AbslHashValue(H hash_state, std::nullptr_t) {
502
  return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
503
}
504
505
// AbslHashValue() for hashing pointers-to-member
506
template <typename H, typename T, typename C>
507
H AbslHashValue(H hash_state, T C::*ptr) {
508
  auto salient_ptm_size = [](std::size_t n) -> std::size_t {
509
#if defined(_MSC_VER)
510
    // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,
511
    // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain
512
    // padding (namely when they have 1 or 3 ints). The value below is a lower
513
    // bound on the number of salient, non-padding bytes that we use for
514
    // hashing.
515
    if constexpr (alignof(T C::*) == alignof(int)) {
516
      // No padding when all subobjects have the same size as the total
517
      // alignment. This happens in 32-bit mode.
518
      return n;
519
    } else {
520
      // Padding for 1 int (size 16) or 3 ints (size 24).
521
      // With 2 ints, the size is 16 with no padding, which we pessimize.
522
      return n == 24 ? 20 : n == 16 ? 12 : n;
523
    }
524
#else
525
  // On other platforms, we assume that pointers-to-members do not have
526
  // padding.
527
#ifdef __cpp_lib_has_unique_object_representations
528
    static_assert(std::has_unique_object_representations<T C::*>::value);
529
#endif  // __cpp_lib_has_unique_object_representations
530
    return n;
531
#endif
532
  };
533
  return H::combine_contiguous(std::move(hash_state),
534
                               reinterpret_cast<unsigned char*>(&ptr),
535
                               salient_ptm_size(sizeof ptr));
536
}
537
538
// -----------------------------------------------------------------------------
539
// AbslHashValue for Composite Types
540
// -----------------------------------------------------------------------------
541
542
// AbslHashValue() for hashing pairs
543
template <typename H, typename T1, typename T2>
544
typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
545
                        H>::type
546
AbslHashValue(H hash_state, const std::pair<T1, T2>& p) {
547
  return H::combine(std::move(hash_state), p.first, p.second);
548
}
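Since the overload above simply combines `.first` and `.second`, pairs of hashable types work with `absl::Hash` out of the box. A small usage sketch:

  #include <cstddef>
  #include <string>
  #include <utility>
  #include "absl/hash/hash.h"

  std::size_t HashAPair() {
    std::pair<int, std::string> key{42, "answer"};
    // Same hash expansion as combining key.first and then key.second.
    return absl::Hash<std::pair<int, std::string>>{}(key);
  }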
549
550
// Helper function for hashing a tuple. The third argument should
551
// be an index_sequence running from 0 to tuple_size<Tuple> - 1.
552
template <typename H, typename Tuple, size_t... Is>
553
0
H hash_tuple(H hash_state, const Tuple& t, absl::index_sequence<Is...>) {
554
0
  return H::combine(std::move(hash_state), std::get<Is>(t)...);
555
0
}
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::hash_tuple<absl::hash_internal::MixingHashState, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&>, 0ul, 1ul>(absl::hash_internal::MixingHashState, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> const&, std::__1::integer_sequence<unsigned long, 0ul, 1ul>)
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::hash_tuple<absl::hash_internal::MixingHashState, std::__1::tuple<unsigned long const&>, 0ul>(absl::hash_internal::MixingHashState, std::__1::tuple<unsigned long const&> const&, std::__1::integer_sequence<unsigned long, 0ul>)
556
557
// AbslHashValue for hashing tuples
558
template <typename H, typename... Ts>
559
#if defined(_MSC_VER)
560
// This SFINAE gets MSVC confused under some conditions. Let's just disable it
561
// for now.
562
H
563
#else   // _MSC_VER
564
typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value, H>::type
565
#endif  // _MSC_VER
566
0
AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
567
0
  return hash_internal::hash_tuple(std::move(hash_state), t,
568
0
                                   absl::make_index_sequence<sizeof...(Ts)>());
569
0
}
Unexecuted instantiation: _ZN4absl13hash_internal13AbslHashValueINS0_15MixingHashStateEJRKNSt3__117basic_string_viewIcNS3_11char_traitsIcEEEERKiEEENS3_9enable_ifIXsr4absl11conjunctionIDpNS0_11is_hashableIT0_EEEE5valueET_E4typeESH_RKNS3_5tupleIJDpSE_EEE
Unexecuted instantiation: _ZN4absl13hash_internal13AbslHashValueINS0_15MixingHashStateEJRKmEEENSt3__19enable_ifIXsr4absl11conjunctionIDpNS0_11is_hashableIT0_EEEE5valueET_E4typeESB_RKNS5_5tupleIJDpS8_EEE
570
571
// -----------------------------------------------------------------------------
572
// AbslHashValue for Pointers
573
// -----------------------------------------------------------------------------
574
575
// AbslHashValue for hashing unique_ptr
576
template <typename H, typename T, typename D>
577
H AbslHashValue(H hash_state, const std::unique_ptr<T, D>& ptr) {
578
  return H::combine(std::move(hash_state), ptr.get());
579
}
580
581
// AbslHashValue for hashing shared_ptr
582
template <typename H, typename T>
583
H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) {
584
  return H::combine(std::move(hash_state), ptr.get());
585
}
586
587
// -----------------------------------------------------------------------------
588
// AbslHashValue for String-Like Types
589
// -----------------------------------------------------------------------------
590
591
// AbslHashValue for hashing strings
592
//
593
// All the string-like types supported here provide the same hash expansion for
594
// the same character sequence. These types are:
595
//
596
//  - `absl::Cord`
597
//  - `std::string` (and std::basic_string<T, std::char_traits<T>, A> for
598
//      any allocator A and any T in {char, wchar_t, char16_t, char32_t})
599
//  - `absl::string_view`, `std::string_view`, `std::wstring_view`,
600
//    `std::u16string_view`, and `std::u32string_view`.
601
//
602
// For simplicity, we currently support only strings built on `char`, `wchar_t`,
603
// `char16_t`, or `char32_t`. This support may be broadened, if necessary, but
604
// with some caution - this overload would misbehave in cases where the traits'
605
// `eq()` member isn't equivalent to `==` on the underlying character type.
606
template <typename H>
607
289k
H AbslHashValue(H hash_state, absl::string_view str) {
608
289k
  return H::combine_contiguous(std::move(hash_state), str.data(), str.size());
609
289k
}
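Because all supported string-like types share one hash expansion per character sequence, equal contents hash equally across them within a process, which is what heterogeneous string lookups rely on. A small sketch:

  #include <cassert>
  #include <string>
  #include "absl/hash/hash.h"
  #include "absl/strings/string_view.h"

  void SameExpansionAcrossStringTypes() {
    std::string s = "abc";
    absl::string_view sv = s;
    assert(absl::Hash<std::string>{}(s) == absl::Hash<absl::string_view>{}(sv));
  }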
610
611
// Support std::wstring, std::u16string and std::u32string.
612
template <typename Char, typename Alloc, typename H,
613
          typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
614
                                       std::is_same<Char, char16_t>::value ||
615
                                       std::is_same<Char, char32_t>::value>>
616
H AbslHashValue(
617
    H hash_state,
618
    const std::basic_string<Char, std::char_traits<Char>, Alloc>& str) {
619
  return H::combine_contiguous(std::move(hash_state), str.data(), str.size());
620
}
621
622
// Support std::wstring_view, std::u16string_view and std::u32string_view.
623
template <typename Char, typename H,
624
          typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
625
                                       std::is_same<Char, char16_t>::value ||
626
                                       std::is_same<Char, char32_t>::value>>
627
H AbslHashValue(H hash_state, std::basic_string_view<Char> str) {
628
  return H::combine_contiguous(std::move(hash_state), str.data(), str.size());
629
}
630
631
#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \
632
    (!defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ||        \
633
     __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000) &&       \
634
    (!defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ||         \
635
     __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101500)
636
637
#define ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE 1
638
639
// Support std::filesystem::path. The SFINAE is required because some string
640
// types are implicitly convertible to std::filesystem::path.
641
template <typename Path, typename H,
642
          typename = absl::enable_if_t<
643
              std::is_same_v<Path, std::filesystem::path>>>
644
H AbslHashValue(H hash_state, const Path& path) {
645
  // This is implemented by deferring to the standard library to compute the
646
  // hash.  The standard library requires that for two paths, `p1 == p2`, then
647
  // `hash_value(p1) == hash_value(p2)`. `AbslHashValue` has the same
648
  // requirement. Since `operator==` does platform specific matching, deferring
649
  // to the standard library is the simplest approach.
650
  return H::combine(std::move(hash_state), std::filesystem::hash_value(path));
651
}
652
653
#endif  // ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE
654
655
// -----------------------------------------------------------------------------
656
// AbslHashValue for Sequence Containers
657
// -----------------------------------------------------------------------------
658
659
// AbslHashValue for hashing std::array
660
template <typename H, typename T, size_t N>
661
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
662
    H hash_state, const std::array<T, N>& array) {
663
  return H::combine_contiguous(std::move(hash_state), array.data(),
664
                               array.size());
665
}
666
667
// AbslHashValue for hashing std::deque
668
template <typename H, typename T, typename Allocator>
669
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
670
    H hash_state, const std::deque<T, Allocator>& deque) {
671
  // TODO(gromer): investigate a more efficient implementation taking
672
  // advantage of the chunk structure.
673
  for (const auto& t : deque) {
674
    hash_state = H::combine(std::move(hash_state), t);
675
  }
676
  return H::combine(std::move(hash_state), WeaklyMixedInteger{deque.size()});
677
}
678
679
// AbslHashValue for hashing std::forward_list
680
template <typename H, typename T, typename Allocator>
681
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
682
    H hash_state, const std::forward_list<T, Allocator>& list) {
683
  size_t size = 0;
684
  for (const T& t : list) {
685
    hash_state = H::combine(std::move(hash_state), t);
686
    ++size;
687
  }
688
  return H::combine(std::move(hash_state), WeaklyMixedInteger{size});
689
}
690
691
// AbslHashValue for hashing std::list
692
template <typename H, typename T, typename Allocator>
693
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
694
    H hash_state, const std::list<T, Allocator>& list) {
695
  for (const auto& t : list) {
696
    hash_state = H::combine(std::move(hash_state), t);
697
  }
698
  return H::combine(std::move(hash_state), WeaklyMixedInteger{list.size()});
699
}
700
701
// AbslHashValue for hashing std::vector
702
//
703
// Do not use this for vector<bool> on platforms that have a working
704
// implementation of std::hash. It does not have a .data(), and a fallback for
705
// std::hash<> is most likely faster.
706
template <typename H, typename T, typename Allocator>
707
typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,
708
                        H>::type
709
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
710
  return H::combine_contiguous(std::move(hash_state), vector.data(),
711
                               vector.size());
712
}
713
714
// AbslHashValue special cases for hashing std::vector<bool>
715
716
#if defined(ABSL_IS_BIG_ENDIAN) && \
717
    (defined(__GLIBCXX__) || defined(__GLIBCPP__))
718
719
// std::hash in libstdc++ does not work correctly with vector<bool> on Big
720
// Endian platforms therefore we need to implement a custom AbslHashValue for
721
// it. More details on the bug:
722
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
723
template <typename H, typename T, typename Allocator>
724
typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
725
                        H>::type
726
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
727
  typename H::AbslInternalPiecewiseCombiner combiner;
728
  for (const auto& i : vector) {
729
    unsigned char c = static_cast<unsigned char>(i);
730
    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
731
  }
732
  return H::combine(combiner.finalize(std::move(hash_state)),
733
                    WeaklyMixedInteger{vector.size()});
734
}
735
#else
736
// When not working around the libstdc++ bug above, we still have to contend
737
// with the fact that std::hash<vector<bool>> is often poor quality, hashing
738
// directly on the internal words and on no other state.  On these platforms,
739
// vector<bool>{1, 1} and vector<bool>{1, 1, 0} hash to the same value.
740
//
741
// Mixing in the size (as we do in our other vector<> implementations) on top
742
// of the library-provided hash implementation avoids this QOI issue.
743
template <typename H, typename T, typename Allocator>
744
typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
745
                        H>::type
746
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
747
  return H::combine(std::move(hash_state),
748
                    std::hash<std::vector<T, Allocator>>{}(vector),
749
                    WeaklyMixedInteger{vector.size()});
750
}
751
#endif
752
753
// -----------------------------------------------------------------------------
754
// AbslHashValue for Ordered Associative Containers
755
// -----------------------------------------------------------------------------
756
757
// AbslHashValue for hashing std::map
758
template <typename H, typename Key, typename T, typename Compare,
759
          typename Allocator>
760
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
761
                        H>::type
762
AbslHashValue(H hash_state, const std::map<Key, T, Compare, Allocator>& map) {
763
  for (const auto& t : map) {
764
    hash_state = H::combine(std::move(hash_state), t);
765
  }
766
  return H::combine(std::move(hash_state), WeaklyMixedInteger{map.size()});
767
}
768
769
// AbslHashValue for hashing std::multimap
770
template <typename H, typename Key, typename T, typename Compare,
771
          typename Allocator>
772
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
773
                        H>::type
774
AbslHashValue(H hash_state,
775
              const std::multimap<Key, T, Compare, Allocator>& map) {
776
  for (const auto& t : map) {
777
    hash_state = H::combine(std::move(hash_state), t);
778
  }
779
  return H::combine(std::move(hash_state), WeaklyMixedInteger{map.size()});
780
}
781
782
// AbslHashValue for hashing std::set
783
template <typename H, typename Key, typename Compare, typename Allocator>
784
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
785
    H hash_state, const std::set<Key, Compare, Allocator>& set) {
786
  for (const auto& t : set) {
787
    hash_state = H::combine(std::move(hash_state), t);
788
  }
789
  return H::combine(std::move(hash_state), WeaklyMixedInteger{set.size()});
790
}
791
792
// AbslHashValue for hashing std::multiset
793
template <typename H, typename Key, typename Compare, typename Allocator>
794
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
795
    H hash_state, const std::multiset<Key, Compare, Allocator>& set) {
796
  for (const auto& t : set) {
797
    hash_state = H::combine(std::move(hash_state), t);
798
  }
799
  return H::combine(std::move(hash_state), WeaklyMixedInteger{set.size()});
800
}
801
802
// -----------------------------------------------------------------------------
803
// AbslHashValue for Unordered Associative Containers
804
// -----------------------------------------------------------------------------
805
806
// AbslHashValue for hashing std::unordered_set
807
template <typename H, typename Key, typename Hash, typename KeyEqual,
808
          typename Alloc>
809
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
810
    H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) {
811
  return H::combine(
812
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
813
      WeaklyMixedInteger{s.size()});
814
}
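Because `combine_unordered` mixes elements in an order-independent way, two unordered containers that compare equal hash equally even if their internal iteration order differs. A small sketch:

  #include <cassert>
  #include <unordered_set>
  #include "absl/hash/hash.h"

  void UnorderedHashingIsOrderIndependent() {
    std::unordered_set<int> a{1, 2, 3};
    std::unordered_set<int> b{3, 2, 1};  // same elements, possibly different iteration order
    assert(a == b);
    assert(absl::Hash<std::unordered_set<int>>{}(a) ==
           absl::Hash<std::unordered_set<int>>{}(b));
  }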
815
816
// AbslHashValue for hashing std::unordered_multiset
817
template <typename H, typename Key, typename Hash, typename KeyEqual,
818
          typename Alloc>
819
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
820
    H hash_state,
821
    const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) {
822
  return H::combine(
823
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
824
      WeaklyMixedInteger{s.size()});
825
}
826
827
// AbslHashValue for hashing std::unordered_set
828
template <typename H, typename Key, typename T, typename Hash,
829
          typename KeyEqual, typename Alloc>
830
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
831
                        H>::type
832
AbslHashValue(H hash_state,
833
              const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) {
834
  return H::combine(
835
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
836
      WeaklyMixedInteger{s.size()});
837
}
838
839
// AbslHashValue for hashing std::unordered_multiset
840
template <typename H, typename Key, typename T, typename Hash,
841
          typename KeyEqual, typename Alloc>
842
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
843
                        H>::type
844
AbslHashValue(H hash_state,
845
              const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) {
846
  return H::combine(
847
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
848
      WeaklyMixedInteger{s.size()});
849
}
850
851
// -----------------------------------------------------------------------------
852
// AbslHashValue for Wrapper Types
853
// -----------------------------------------------------------------------------
854
855
// AbslHashValue for hashing std::reference_wrapper
856
template <typename H, typename T>
857
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
858
    H hash_state, std::reference_wrapper<T> opt) {
859
  return H::combine(std::move(hash_state), opt.get());
860
}
861
862
// AbslHashValue for hashing absl::optional
863
template <typename H, typename T>
864
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
865
    H hash_state, const absl::optional<T>& opt) {
866
  if (opt) hash_state = H::combine(std::move(hash_state), *opt);
867
  return H::combine(std::move(hash_state), opt.has_value());
868
}
869
870
template <typename H>
871
struct VariantVisitor {
872
  H&& hash_state;
873
  template <typename T>
874
  H operator()(const T& t) const {
875
    return H::combine(std::move(hash_state), t);
876
  }
877
};
878
879
// AbslHashValue for hashing absl::variant
880
template <typename H, typename... T>
881
typename std::enable_if<conjunction<is_hashable<T>...>::value, H>::type
882
AbslHashValue(H hash_state, const absl::variant<T...>& v) {
883
  if (!v.valueless_by_exception()) {
884
    hash_state = absl::visit(VariantVisitor<H>{std::move(hash_state)}, v);
885
  }
886
  return H::combine(std::move(hash_state), v.index());
887
}
888
889
// -----------------------------------------------------------------------------
890
// AbslHashValue for Other Types
891
// -----------------------------------------------------------------------------
892
893
// AbslHashValue for hashing std::bitset is not defined on Little Endian
894
// platforms, for the same reason as for vector<bool> (see std::vector above):
895
// It does not expose the raw bytes, and a fallback to std::hash<> is most
896
// likely faster.
897
898
#if defined(ABSL_IS_BIG_ENDIAN) && \
899
    (defined(__GLIBCXX__) || defined(__GLIBCPP__))
900
// AbslHashValue for hashing std::bitset
901
//
902
// std::hash in libstdc++ does not work correctly with std::bitset on Big Endian
903
// platforms therefore we need to implement a custom AbslHashValue for it. More
904
// details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
905
template <typename H, size_t N>
906
H AbslHashValue(H hash_state, const std::bitset<N>& set) {
907
  typename H::AbslInternalPiecewiseCombiner combiner;
908
  for (size_t i = 0; i < N; i++) {
909
    unsigned char c = static_cast<unsigned char>(set[i]);
910
    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
911
  }
912
  return H::combine(combiner.finalize(std::move(hash_state)), N);
913
}
914
#endif
915
916
// -----------------------------------------------------------------------------
917
918
// Mixes all values in the range [data, data+size) into the hash state.
919
// This overload accepts only uniquely-represented types, and hashes them by
920
// hashing the entire range of bytes.
921
template <typename H, typename T>
922
typename std::enable_if<is_uniquely_represented<T>::value, H>::type
923
289k
hash_range_or_bytes(H hash_state, const T* data, size_t size) {
924
289k
  const auto* bytes = reinterpret_cast<const unsigned char*>(data);
925
289k
  return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size);
926
289k
}
927
928
template <typename H, typename T>
929
typename std::enable_if<!is_uniquely_represented<T>::value, H>::type
930
hash_range_or_bytes(H hash_state, const T* data, size_t size) {
931
  for (const auto end = data + size; data < end; ++data) {
932
    hash_state = H::combine(std::move(hash_state), *data);
933
  }
934
  return H::combine(std::move(hash_state),
935
                    hash_internal::WeaklyMixedInteger{size});
936
}
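User-defined containers normally reach these two overloads through `H::combine_contiguous`. A hedged sketch with a hypothetical fixed-capacity buffer type:

  #include <cstddef>
  #include <utility>

  struct SmallBuf {
    unsigned char bytes[16];
    std::size_t len;

    template <typename H>
    friend H AbslHashValue(H h, const SmallBuf& b) {
      // unsigned char is uniquely represented, so this takes the raw-bytes path
      // above; the length is also combined, mirroring what the sequence
      // container overloads do with their sizes.
      return H::combine(H::combine_contiguous(std::move(h), b.bytes, b.len),
                        b.len);
    }
  };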
937
938
inline constexpr uint64_t kMul = uint64_t{0x79d5f9e0de1e8cf5};
939
940
// Random data taken from the hexadecimal digits of Pi's fractional component.
941
// https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number
942
ABSL_CACHELINE_ALIGNED inline constexpr uint64_t kStaticRandomData[] = {
943
    0x243f'6a88'85a3'08d3, 0x1319'8a2e'0370'7344, 0xa409'3822'299f'31d0,
944
    0x082e'fa98'ec4e'6c89, 0x4528'21e6'38d0'1377,
945
};
946
947
// Extremely weak mixture of length that is mixed into the state before
948
// combining the data. It is used only for small strings. This also ensures that
949
// we have high entropy in all bits of the state.
950
inline uint64_t PrecombineLengthMix(uint64_t state, size_t len) {
951
  ABSL_ASSUME(len + sizeof(uint64_t) <= sizeof(kStaticRandomData));
952
  uint64_t data = absl::base_internal::UnalignedLoad64(
953
      reinterpret_cast<const unsigned char*>(&kStaticRandomData[0]) + len);
954
  return state ^ data;
955
}
956
957
176M
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t Mix(uint64_t lhs, uint64_t rhs) {
958
  // Though the 128-bit product needs multiple instructions on non-x86-64
959
  // platforms, it is still a good balance between speed and hash quality.
960
176M
  absl::uint128 m = lhs;
961
176M
  m *= rhs;
962
176M
  return Uint128High64(m) ^ Uint128Low64(m);
963
176M
}
964
965
// Reads 8 bytes from p.
966
399k
inline uint64_t Read8(const unsigned char* p) {
967
// Suppress erroneous array bounds errors on GCC.
968
#if defined(__GNUC__) && !defined(__clang__)
969
#pragma GCC diagnostic push
970
#pragma GCC diagnostic ignored "-Warray-bounds"
971
#endif
972
399k
  return absl::base_internal::UnalignedLoad64(p);
973
#if defined(__GNUC__) && !defined(__clang__)
974
#pragma GCC diagnostic pop
975
#endif
976
399k
}
977
978
// Reads 9 to 16 bytes from p.
979
// The first 8 bytes are in .first, and the rest of the bytes are in .second
980
// along with duplicated bytes from .first if len<16.
981
inline std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
982
82.4k
                                               size_t len) {
983
82.4k
  return {Read8(p), Read8(p + len - 8)};
984
82.4k
}
985
986
// Reads 4 to 8 bytes from p.
987
// Bytes are permuted and some input bytes may be duplicated in output.
988
250k
inline uint64_t Read4To8(const unsigned char* p, size_t len) {
989
  // If `len < 8`, we duplicate bytes. We always put low memory at the end.
990
  // E.g., on little endian platforms:
991
  // `ABCD` will be read as `ABCDABCD`.
992
  // `ABCDE` will be read as `BCDEABCD`.
993
  // `ABCDEF` will be read as `CDEFABCD`.
994
  // `ABCDEFG` will be read as `DEFGABCD`.
995
  // `ABCDEFGH` will be read as `EFGHABCD`.
996
  // We also do not care about endianness. On big-endian platforms, bytes will
997
  // be permuted differently. We always shift low memory by 32, because that
998
  // can be pipelined earlier. Reading high memory requires computing
999
  // `p + len - 4`.
1000
250k
  uint64_t most_significant =
1001
250k
      static_cast<uint64_t>(absl::base_internal::UnalignedLoad32(p)) << 32;
1002
250k
  uint64_t least_significant =
1003
250k
      absl::base_internal::UnalignedLoad32(p + len - 4);
1004
250k
  return most_significant | least_significant;
1005
250k
}
1006
1007
// Reads 1 to 3 bytes from p. Some input bytes may be duplicated in output.
1008
85.5k
inline uint32_t Read1To3(const unsigned char* p, size_t len) {
1009
  // The trick used by this implementation is to avoid branches.
1010
  // We always read three bytes by duplicating.
1011
  // E.g.,
1012
  // `A` is read as `AAA`.
1013
  // `AB` is read as `ABB`.
1014
  // `ABC` is read as `ABC`.
1015
  // We always shift `p[0]` so that it can be pipelined better.
1016
  // Other bytes require extra computation to find indices.
1017
85.5k
  uint32_t mem0 = (static_cast<uint32_t>(p[0]) << 16) | p[len - 1];
1018
85.5k
  uint32_t mem1 = static_cast<uint32_t>(p[len / 2]) << 8;
1019
85.5k
  return mem0 | mem1;
1020
85.5k
}
1021
1022
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineRawImpl(uint64_t state,
1023
63.5M
                                                            uint64_t value) {
1024
63.5M
  return Mix(state ^ value, kMul);
1025
63.5M
}
1026
1027
// Slow dispatch path for calls to CombineContiguousImpl with a size argument
1028
// larger than inlined size. Has the same effect as calling
1029
// CombineContiguousImpl() repeatedly with the chunk stride size.
1030
uint64_t CombineLargeContiguousImplOn32BitLengthGt8(const unsigned char* first,
1031
                                                    size_t len, uint64_t state);
1032
uint64_t CombineLargeContiguousImplOn64BitLengthGt32(const unsigned char* first,
1033
                                                     size_t len,
1034
                                                     uint64_t state);
1035
1036
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineSmallContiguousImpl(
1037
    uint64_t state, const unsigned char* first, size_t len) {
1038
  ABSL_ASSUME(len <= 8);
1039
  uint64_t v;
1040
  if (len >= 4) {
1041
    v = Read4To8(first, len);
1042
  } else if (len > 0) {
1043
    v = Read1To3(first, len);
1044
  } else {
1045
    // Empty string must modify the state.
1046
    v = 0x57;
1047
  }
1048
  return CombineRawImpl(state, v);
1049
}
1050
1051
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineContiguousImpl9to16(
1052
    uint64_t state, const unsigned char* first, size_t len) {
1053
  ABSL_ASSUME(len >= 9);
1054
  ABSL_ASSUME(len <= 16);
1055
  // Note: any time one half of the mix function becomes zero it will fail to
1056
  // incorporate any bits from the other half. However, there is exactly 1 in
1057
  // 2^64 values for each side that achieve this, and only when the size is
1058
  // exactly 16 -- for smaller sizes there is an overlapping byte that makes
1059
  // this impossible unless the seed is *also* incredibly unlucky.
1060
  auto p = Read9To16(first, len);
1061
  return Mix(state ^ p.first, kMul ^ p.second);
1062
}
1063
1064
ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineContiguousImpl17to32(
1065
    uint64_t state, const unsigned char* first, size_t len) {
1066
  ABSL_ASSUME(len >= 17);
1067
  ABSL_ASSUME(len <= 32);
1068
  // Do two mixes of overlapping 16-byte ranges in parallel to minimize
1069
  // latency.
1070
  const uint64_t m0 =
1071
      Mix(Read8(first) ^ kStaticRandomData[1], Read8(first + 8) ^ state);
1072
1073
  const unsigned char* tail_16b_ptr = first + (len - 16);
1074
  const uint64_t m1 = Mix(Read8(tail_16b_ptr) ^ kStaticRandomData[3],
1075
                          Read8(tail_16b_ptr + 8) ^ state);
1076
  return m0 ^ m1;
1077
}
1078
1079
// Implementation of the base case for combine_contiguous where we actually
1080
// mix the bytes into the state.
1081
// Dispatch to different implementations of combine_contiguous depending
1082
// on the value of `sizeof(size_t)`.
1083
inline uint64_t CombineContiguousImpl(
1084
    uint64_t state, const unsigned char* first, size_t len,
1085
0
    std::integral_constant<int, 4> /* sizeof_size_t */) {
1086
0
  // For large values we use CityHash, for small ones we use a custom low-latency
1087
0
  // hash.
1088
0
  if (len <= 8) {
1089
0
    return CombineSmallContiguousImpl(PrecombineLengthMix(state, len), first,
1090
0
                                      len);
1091
0
  }
1092
0
  return CombineLargeContiguousImplOn32BitLengthGt8(first, len, state);
1093
0
}
1094
1095
inline uint64_t CombineContiguousImpl(
1096
    uint64_t state, const unsigned char* first, size_t len,
1097
1.05M
    std::integral_constant<int, 8> /* sizeof_size_t */) {
1098
  // For large values we use LowLevelHash or CityHash depending on the platform,
1099
  // for small ones we use a custom low-latency hash.
1100
1.05M
  if (len <= 8) {
1101
392k
    return CombineSmallContiguousImpl(PrecombineLengthMix(state, len), first,
1102
392k
                                      len);
1103
392k
  }
1104
661k
  if (len <= 16) {
1105
82.4k
    return CombineContiguousImpl9to16(PrecombineLengthMix(state, len), first,
1106
82.4k
                                      len);
1107
82.4k
  }
1108
578k
  if (len <= 32) {
1109
58.5k
    return CombineContiguousImpl17to32(PrecombineLengthMix(state, len), first,
1110
58.5k
                                       len);
1111
58.5k
  }
1112
  // We must not mix length into the state here because calling
1113
  // CombineContiguousImpl twice with PiecewiseChunkSize() must be equivalent
1114
  // to calling CombineLargeContiguousImpl once with 2 * PiecewiseChunkSize().
1115
520k
  return CombineLargeContiguousImplOn64BitLengthGt32(first, len, state);
1116
578k
}
1117
1118
#if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \
1119
    ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
1120
#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 1
1121
#else
1122
#define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 0
1123
#endif
1124
1125
// Type trait to select the appropriate hash implementation to use.
1126
// HashSelect::type<T> will give the proper hash implementation, to be invoked
1127
// as:
1128
//   HashSelect::type<T>::Invoke(state, value)
1129
// Also, HashSelect::type<T>::value is a boolean equal to `true` if there is a
1130
// valid `Invoke` function. Types that are not hashable will have a ::value of
1131
// `false`.
1132
struct HashSelect {
1133
 private:
1134
  struct WeaklyMixedIntegerProbe {
1135
    template <typename H>
1136
    static H Invoke(H state, WeaklyMixedInteger value) {
1137
      return hash_internal::hash_weakly_mixed_integer(std::move(state), value);
1138
    }
1139
  };
1140
1141
  struct State : HashStateBase<State> {
1142
    static State combine_contiguous(State hash_state, const unsigned char*,
1143
                                    size_t);
1144
    using State::HashStateBase::combine_contiguous;
1145
    static State combine_raw(State state, uint64_t value);
1146
    static State combine_weakly_mixed_integer(State hash_state,
1147
                                              WeaklyMixedInteger value);
1148
  };
1149
1150
  struct UniquelyRepresentedProbe {
1151
    template <typename H, typename T>
1152
    static auto Invoke(H state, const T& value)
1153
640k
        -> absl::enable_if_t<is_uniquely_represented<T>::value, H> {
1154
640k
      return hash_internal::hash_bytes(std::move(state), value);
1155
640k
    }
_ZN4absl13hash_internal10HashSelect24UniquelyRepresentedProbe6InvokeINS0_15MixingHashStateEiEENSt3__19enable_ifIXsr23is_uniquely_representedIT0_EE5valueET_E4typeES8_RKS7_
Line
Count
Source
1153
376k
        -> absl::enable_if_t<is_uniquely_represented<T>::value, H> {
1154
376k
      return hash_internal::hash_bytes(std::move(state), value);
1155
376k
    }
_ZN4absl13hash_internal10HashSelect24UniquelyRepresentedProbe6InvokeINS0_15MixingHashStateEmEENSt3__19enable_ifIXsr23is_uniquely_representedIT0_EE5valueET_E4typeES8_RKS7_
Line
Count
Source
1153
263k
        -> absl::enable_if_t<is_uniquely_represented<T>::value, H> {
1154
263k
      return hash_internal::hash_bytes(std::move(state), value);
1155
263k
    }
1156
  };
1157
1158
  struct HashValueProbe {
1159
    template <typename H, typename T>
1160
    static auto Invoke(H state, const T& value) -> absl::enable_if_t<
1161
        std::is_same<H,
1162
                     decltype(AbslHashValue(std::move(state), value))>::value,
1163
0
        H> {
1164
0
      return AbslHashValue(std::move(state), value);
1165
0
    }
Unexecuted instantiation: _ZN4absl13hash_internal10HashSelect14HashValueProbe6InvokeINS0_15MixingHashStateENSt3__15tupleIJRKNS5_17basic_string_viewIcNS5_11char_traitsIcEEEERKiEEEEENS5_9enable_ifIXsr3std7is_sameIT_DTcl13AbslHashValueclsr3stdE4movefp_Efp0_EEEE5valueESH_E4typeESH_RKT0_
Unexecuted instantiation: _ZN4absl13hash_internal10HashSelect14HashValueProbe6InvokeINS0_15MixingHashStateENSt3__117basic_string_viewIcNS5_11char_traitsIcEEEEEENS5_9enable_ifIXsr3std7is_sameIT_DTcl13AbslHashValueclsr3stdE4movefp_Efp0_EEEE5valueESB_E4typeESB_RKT0_
Unexecuted instantiation: _ZN4absl13hash_internal10HashSelect14HashValueProbe6InvokeINS0_15MixingHashStateENSt3__15tupleIJRKmEEEEENS5_9enable_ifIXsr3std7is_sameIT_DTcl13AbslHashValueclsr3stdE4movefp_Efp0_EEEE5valueESB_E4typeESB_RKT0_
Unexecuted instantiation: _ZN4absl13hash_internal10HashSelect14HashValueProbe6InvokeINS0_15MixingHashStateENS_4CordEEENSt3__19enable_ifIXsr3std7is_sameIT_DTcl13AbslHashValueclsr3stdE4movefp_Efp0_EEEE5valueES8_E4typeES8_RKT0_
1166
  };
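HashValueProbe is the probe that picks up the `AbslHashValue` extension point. A minimal sketch of a user type opting into hashing through that hook; the `Song` type and its fields are illustrative, not part of this header:

#include <string>
#include <utility>

#include "absl/hash/hash.h"

struct Song {
  std::string artist;
  std::string title;
  int year = 0;

  // Friend function found by ADL; HashValueProbe::Invoke resolves to it.
  template <typename H>
  friend H AbslHashValue(H h, const Song& s) {
    return H::combine(std::move(h), s.artist, s.title, s.year);
  }
};

// Usage: size_t h = absl::Hash<Song>{}(song);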
1167
1168
  struct LegacyHashProbe {
1169
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
1170
    template <typename H, typename T>
1171
    static auto Invoke(H state, const T& value) -> absl::enable_if_t<
1172
        std::is_convertible<
1173
            decltype(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>()(value)),
1174
            size_t>::value,
1175
        H> {
1176
      return hash_internal::hash_bytes(
1177
          std::move(state),
1178
          ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>{}(value));
1179
    }
1180
#endif  // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
1181
  };
1182
1183
  struct StdHashProbe {
1184
    template <typename H, typename T>
1185
    static auto Invoke(H state, const T& value)
1186
        -> absl::enable_if_t<type_traits_internal::IsHashable<T>::value, H> {
1187
      return hash_internal::hash_bytes(std::move(state), std::hash<T>{}(value));
1188
    }
1189
  };
1190
1191
  template <typename Hash, typename T>
1192
  struct Probe : Hash {
1193
   private:
1194
    template <typename H, typename = decltype(H::Invoke(
1195
                              std::declval<State>(), std::declval<const T&>()))>
1196
    static std::true_type Test(int);
1197
    template <typename U>
1198
    static std::false_type Test(char);
1199
1200
   public:
1201
    static constexpr bool value = decltype(Test<Hash>(0))::value;
1202
  };
1203
1204
 public:
1205
  // Probe each implementation in order.
1206
  // disjunction provides short-circuiting with respect to instantiation.
1207
  template <typename T>
1208
  using Apply = absl::disjunction<         //
1209
      Probe<WeaklyMixedIntegerProbe, T>,   //
1210
      Probe<UniquelyRepresentedProbe, T>,  //
1211
      Probe<HashValueProbe, T>,            //
1212
      Probe<LegacyHashProbe, T>,           //
1213
      Probe<StdHashProbe, T>,              //
1214
      std::false_type>;
1215
};
1216
1217
template <typename T>
1218
struct is_hashable
1219
    : std::integral_constant<bool, HashSelect::template Apply<T>::value> {};
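A test-only sketch of what this trait reports, assuming the probes above; `NotHashable` is a hypothetical type with no `AbslHashValue` overload, no `std::hash` specialization, and no unique representation:

#include <string>

#include "absl/hash/hash.h"

struct NotHashable {};

static_assert(absl::hash_internal::is_hashable<int>::value,
              "int matches a probe (uniquely represented)");
static_assert(absl::hash_internal::is_hashable<std::string>::value,
              "std::string is hashable through AbslHashValue");
static_assert(!absl::hash_internal::is_hashable<NotHashable>::value,
              "no probe matches, so Apply<T> ends at std::false_type");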
1220
1221
class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
1222
  template <typename T>
1223
  using IntegralFastPath =
1224
      conjunction<std::is_integral<T>, is_uniquely_represented<T>,
1225
                  FitsIn64Bits<T>>;
1226
1227
 public:
1228
  // Move only
1229
  MixingHashState(MixingHashState&&) = default;
1230
  MixingHashState& operator=(MixingHashState&&) = default;
1231
1232
  // Fundamental base case for hash recursion: mixes the given range of bytes
1233
  // into the hash state.
1234
  static MixingHashState combine_contiguous(MixingHashState hash_state,
1235
                                            const unsigned char* first,
1236
902k
                                            size_t size) {
1237
902k
    return MixingHashState(
1238
902k
        CombineContiguousImpl(hash_state.state_, first, size,
1239
902k
                              std::integral_constant<int, sizeof(size_t)>{}));
1240
902k
  }
1241
  using MixingHashState::HashStateBase::combine_contiguous;
1242
1243
  template <typename T>
1244
0
  static size_t hash(const T& value) {
1245
0
    return hash_with_seed(value, Seed());
1246
0
  }
Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> >(std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> const&)
Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<std::__1::tuple<unsigned long const&> >(std::__1::tuple<unsigned long const&> const&)
Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<std::__1::basic_string_view<char, std::__1::char_traits<char> > >(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&)
Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<absl::Cord>(absl::Cord const&)
1247
1248
  // For performance reasons in non-opt mode, we specialize this for
1249
  // integral types.
1250
  // Otherwise we would be instantiating and calling dozens of functions for
1251
  // something that is just one multiplication and a couple of XORs.
1252
  // The result should be the same as running the whole algorithm, but faster.
1253
  template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
1254
  static size_t hash_with_seed(T value, size_t seed) {
1255
    return static_cast<size_t>(
1256
        CombineRawImpl(seed, static_cast<std::make_unsigned_t<T>>(value)));
1257
  }
1258
1259
  template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
1260
0
  static size_t hash_with_seed(const T& value, size_t seed) {
1261
0
    return static_cast<size_t>(combine(MixingHashState{seed}, value).state_);
1262
0
  }
Unexecuted instantiation: _ZN4absl13hash_internal15MixingHashState14hash_with_seedINSt3__15tupleIJRKNS3_17basic_string_viewIcNS3_11char_traitsIcEEEERKiEEETnNS3_9enable_ifIXntsr16IntegralFastPathIT_EE5valueEiE4typeELi0EEEmRKSF_m
Unexecuted instantiation: _ZN4absl13hash_internal15MixingHashState14hash_with_seedINSt3__15tupleIJRKmEEETnNS3_9enable_ifIXntsr16IntegralFastPathIT_EE5valueEiE4typeELi0EEEmRKS9_m
Unexecuted instantiation: _ZN4absl13hash_internal15MixingHashState14hash_with_seedINSt3__117basic_string_viewIcNS3_11char_traitsIcEEEETnNS3_9enable_ifIXntsr16IntegralFastPathIT_EE5valueEiE4typeELi0EEEmRKS9_m
Unexecuted instantiation: _ZN4absl13hash_internal15MixingHashState14hash_with_seedINS_4CordETnNSt3__19enable_ifIXntsr16IntegralFastPathIT_EE5valueEiE4typeELi0EEEmRKS6_m
1263
1264
 private:
1265
  friend class MixingHashState::HashStateBase;
1266
  template <typename H>
1267
  friend H absl::hash_internal::hash_weakly_mixed_integer(H,
1268
                                                          WeaklyMixedInteger);
1269
  // Allow the HashState type-erasure implementation to invoke
1270
  // RunCombinedUnordered() directly.
1271
  friend class absl::HashState;
1272
  friend struct CombineRaw;
1273
1274
  // For use in Seed().
1275
  static const void* const kSeed;
1276
1277
  // Invoked only once for a given argument; that plus the fact that this is
1278
  // move-only ensures that there is only one non-moved-from object.
1279
0
  MixingHashState() : state_(Seed()) {}
1280
1281
  // Workaround for MSVC bug.
1282
  // We make the type copyable to fix the calling convention, even though we
1283
  // never actually copy it. Keep it private to not affect the public API of the
1284
  // type.
1285
  MixingHashState(const MixingHashState&) = default;
1286
1287
3.56M
  explicit MixingHashState(uint64_t state) : state_(state) {}
1288
1289
  // Combines a raw value from e.g. integrals/floats/pointers/etc. This allows
1290
  // us to be consistent with IntegralFastPath when combining raw types, but
1291
  // optimize Read1To3 and Read4To8 differently for the string case.
1292
  static MixingHashState combine_raw(MixingHashState hash_state,
1293
1.59M
                                     uint64_t value) {
1294
1.59M
    return MixingHashState(CombineRawImpl(hash_state.state_, value));
1295
1.59M
  }
1296
1297
  static MixingHashState combine_weakly_mixed_integer(
1298
0
      MixingHashState hash_state, WeaklyMixedInteger value) {
1299
0
    // Some transformation of the value is needed so that an empty
1300
0
    // string/container change the mixing hash state.
1301
0
    // We use a constant smaller than 8 bits to make the compiler use
1302
0
    // `add` with an immediate operand with 1 byte value.
1303
0
    return MixingHashState{hash_state.state_ + (0x57 + value.value)};
1304
0
  }
1305
1306
  template <typename CombinerT>
1307
  static MixingHashState RunCombineUnordered(MixingHashState state,
1308
                                             CombinerT combiner) {
1309
    uint64_t unordered_state = 0;
1310
    combiner(MixingHashState{}, [&](MixingHashState& inner_state) {
1311
      // Add the hash state of the element to the running total, but mix the
1312
      // carry bit back into the low bit.  This is intended to avoid losing
1313
      // entropy to overflow, especially when unordered_multisets contain
1314
      // multiple copies of the same value.
1315
      auto element_state = inner_state.state_;
1316
      unordered_state += element_state;
1317
      if (unordered_state < element_state) {
1318
        ++unordered_state;
1319
      }
1320
      inner_state = MixingHashState{};
1321
    });
1322
    return MixingHashState::combine(std::move(state), unordered_state);
1323
  }
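A standalone sketch of the accumulation trick used in the inner lambda above: folding the carry back into the low bit keeps the running total commutative across elements without silently discarding the overflow bit (the function name is illustrative):

#include <cstdint>

// Adds `element` into `total` with end-around carry, mirroring the logic in
// RunCombineUnordered's callback.
uint64_t AccumulateUnordered(uint64_t total, uint64_t element) {
  total += element;
  if (total < element) {  // unsigned wrap-around occurred
    ++total;              // fold the lost carry back into the low bit
  }
  return total;
}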
1324
1325
  // A non-deterministic seed.
1326
  //
1327
  // The current purpose of this seed is to generate non-deterministic results
1328
  // and prevent having users depend on the particular hash values.
1329
  // It is not meant as a security feature right now, but it leaves the door
1330
  // open to upgrade it to a true per-process random seed. A true random seed
1331
  // costs more and we don't need to pay for that right now.
1332
  //
1333
  // On platforms with ASLR, we take advantage of it to make a per-process
1334
  // random value.
1335
  // See https://en.wikipedia.org/wiki/Address_space_layout_randomization
1336
  //
1337
  // On other platforms this is still going to be non-deterministic but most
1338
  // probably per-build and not per-process.
1339
1.06M
  ABSL_ATTRIBUTE_ALWAYS_INLINE static size_t Seed() {
1340
1.06M
#if (!defined(__clang__) || __clang_major__ > 11) && \
1341
1.06M
    (!defined(__apple_build_version__) ||            \
1342
1.06M
     __apple_build_version__ >= 19558921)  // Xcode 12
1343
1.06M
    return static_cast<size_t>(reinterpret_cast<uintptr_t>(&kSeed));
1344
#else
1345
    // Workaround the absence of
1346
    // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.
1347
    return static_cast<size_t>(reinterpret_cast<uintptr_t>(kSeed));
1348
#endif
1349
1.06M
  }
1350
1351
  uint64_t state_;
1352
};
1353
1354
struct AggregateBarrier {};
1355
1356
// Add a private base class to make sure this type is not an aggregate.
1357
// Aggregates can be aggregate initialized even if the default constructor is
1358
// deleted.
1359
struct PoisonedHash : private AggregateBarrier {
1360
  PoisonedHash() = delete;
1361
  PoisonedHash(const PoisonedHash&) = delete;
1362
  PoisonedHash& operator=(const PoisonedHash&) = delete;
1363
};
1364
1365
template <typename T>
1366
struct HashImpl {
1367
0
  size_t operator()(const T& value) const {
1368
0
    return MixingHashState::hash(value);
1369
0
  }
Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> >::operator()(std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> const&) const
Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::tuple<unsigned long const&> >::operator()(std::__1::tuple<unsigned long const&> const&) const
Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::basic_string_view<char, std::__1::char_traits<char> > >::operator()(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&) const
Unexecuted instantiation: absl::hash_internal::HashImpl<absl::Cord>::operator()(absl::Cord const&) const
1370
1371
 private:
1372
  friend struct HashWithSeed;
1373
1374
0
  size_t hash_with_seed(const T& value, size_t seed) const {
1375
0
    return MixingHashState::hash_with_seed(value, seed);
1376
0
  }
Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::basic_string_view<char, std::__1::char_traits<char> > >::hash_with_seed(std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, unsigned long) const
Unexecuted instantiation: absl::hash_internal::HashImpl<absl::Cord>::hash_with_seed(absl::Cord const&, unsigned long) const
1377
};
1378
1379
template <typename T>
1380
struct Hash
1381
    : absl::conditional_t<is_hashable<T>::value, HashImpl<T>, PoisonedHash> {};
1382
1383
template <typename H>
1384
template <typename T, typename... Ts>
1385
536k
H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
1386
536k
  return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(
1387
536k
                        std::move(state), value),
1388
536k
                    values...);
1389
536k
}
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&>>(absl::hash_internal::MixingHashState, std::__1::tuple<std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&> const&)
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::basic_string_view<char, std::__1::char_traits<char> >, int>(absl::hash_internal::MixingHashState, std::__1::basic_string_view<char, std::__1::char_traits<char> > const&, int const&)
absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<int>(absl::hash_internal::MixingHashState, int const&)
Line
Count
Source
1385
376k
H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
1386
376k
  return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(
1387
376k
                        std::move(state), value),
1388
376k
                    values...);
1389
376k
}
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::tuple<unsigned long const&>>(absl::hash_internal::MixingHashState, std::__1::tuple<unsigned long const&> const&)
absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<unsigned long>(absl::hash_internal::MixingHashState, unsigned long const&)
Line
Count
Source
1385
159k
H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
1386
159k
  return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(
1387
159k
                        std::move(state), value),
1388
159k
                    values...);
1389
159k
}
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::basic_string_view<char, std::__1::char_traits<char> >>(absl::hash_internal::MixingHashState, std::__1::basic_string_view<char, std::__1::char_traits<char> > const&)
Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<absl::Cord>(absl::hash_internal::MixingHashState, absl::Cord const&)
1390
1391
template <typename H>
1392
template <typename T>
1393
289k
H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
1394
289k
  return hash_internal::hash_range_or_bytes(std::move(state), data, size);
1395
289k
}
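combine_contiguous is the member a range-like type would call from its own `AbslHashValue`. A hedged sketch; `ByteSpanLike` and its members are illustrative:

#include <cstddef>
#include <utility>

#include "absl/hash/hash.h"

struct ByteSpanLike {
  const unsigned char* data = nullptr;
  size_t size = 0;

  template <typename H>
  friend H AbslHashValue(H h, const ByteSpanLike& s) {
    // Hash the raw bytes, then mix in the length so that spans sharing a
    // prefix but differing in size hash differently.
    return H::combine(H::combine_contiguous(std::move(h), s.data, s.size),
                      s.size);
  }
};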
1396
1397
template <typename H>
1398
template <typename I>
1399
H HashStateBase<H>::combine_unordered(H state, I begin, I end) {
1400
  return H::RunCombineUnordered(std::move(state),
1401
                                CombineUnorderedCallback<I>{begin, end});
1402
}
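combine_unordered is the hook for containers whose iteration order is unspecified; for MixingHashState it is backed by RunCombineUnordered above. A sketch of calling it from an `AbslHashValue` overload (absl::Hash already supports std::unordered_set directly; `TagSet` exists only to illustrate the call):

#include <string>
#include <unordered_set>
#include <utility>

#include "absl/hash/hash.h"

struct TagSet {
  std::unordered_set<std::string> tags;

  template <typename H>
  friend H AbslHashValue(H h, const TagSet& t) {
    // Combine the elements order-independently, then mix in the size.
    return H::combine(
        H::combine_unordered(std::move(h), t.tags.begin(), t.tags.end()),
        t.tags.size());
  }
};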
1403
1404
template <typename H>
1405
H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,
1406
0
                                size_t size) {
1407
0
  if (position_ + size < PiecewiseChunkSize()) {
1408
0
    // This partial chunk does not fill our existing buffer
1409
0
    memcpy(buf_ + position_, data, size);
1410
0
    position_ += size;
1411
0
    return state;
1412
0
  }
1413
0
  added_something_ = true;
1414
0
  // If the buffer is partially filled, we need to complete the buffer
1415
0
  // and hash it.
1416
0
  if (position_ != 0) {
1417
0
    const size_t bytes_needed = PiecewiseChunkSize() - position_;
1418
0
    memcpy(buf_ + position_, data, bytes_needed);
1419
0
    state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize());
1420
0
    data += bytes_needed;
1421
0
    size -= bytes_needed;
1422
0
  }
1423
0
1424
0
  // Hash whatever chunks we can without copying
1425
0
  while (size >= PiecewiseChunkSize()) {
1426
0
    state = H::combine_contiguous(std::move(state), data, PiecewiseChunkSize());
1427
0
    data += PiecewiseChunkSize();
1428
0
    size -= PiecewiseChunkSize();
1429
0
  }
1430
0
  // Fill the buffer with the remainder
1431
0
  memcpy(buf_, data, size);
1432
0
  position_ = size;
1433
0
  return state;
1434
0
}
1435
1436
template <typename H>
1437
0
H PiecewiseCombiner::finalize(H state) {
1438
0
  // Do not call combine_contiguous with an empty remainder, since doing so
1439
0
  // would modify the state.
1440
0
  if (added_something_ && position_ == 0) {
1441
0
    return state;
1442
0
  }
1443
0
  // We still call combine_contiguous for the entirely empty buffer.
1444
0
  return H::combine_contiguous(std::move(state), buf_, position_);
1445
0
}
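A hedged sketch of driving PiecewiseCombiner with chunked input. PiecewiseCombiner is internal-only, so ordinary user code would not reach into hash_internal; `ChunkedBytes` and the assumption that the combiner is default-constructed are illustrative. Because the large-buffer path does not mix the length per chunk, feeding the same bytes in any chunking is intended to produce the same state, and finalize() flushes whatever remains in the internal buffer:

#include <utility>
#include <vector>

#include "absl/hash/internal/hash.h"

struct ChunkedBytes {
  std::vector<std::vector<unsigned char>> chunks;

  template <typename H>
  friend H AbslHashValue(H state, const ChunkedBytes& v) {
    absl::hash_internal::PiecewiseCombiner combiner;
    for (const auto& chunk : v.chunks) {
      state =
          combiner.add_buffer(std::move(state), chunk.data(), chunk.size());
    }
    return combiner.finalize(std::move(state));
  }
};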
1446
1447
}  // namespace hash_internal
1448
ABSL_NAMESPACE_END
1449
}  // namespace absl
1450
1451
#endif  // ABSL_HASH_INTERNAL_HASH_H_