/src/abseil-cpp/absl/hash/internal/hash.h
Line  | Count  | Source  | 
1  |  | // Copyright 2018 The Abseil Authors.  | 
2  |  | //  | 
3  |  | // Licensed under the Apache License, Version 2.0 (the "License");  | 
4  |  | // you may not use this file except in compliance with the License.  | 
5  |  | // You may obtain a copy of the License at  | 
6  |  | //  | 
7  |  | //      https://www.apache.org/licenses/LICENSE-2.0  | 
8  |  | //  | 
9  |  | // Unless required by applicable law or agreed to in writing, software  | 
10  |  | // distributed under the License is distributed on an "AS IS" BASIS,  | 
11  |  | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  | 
12  |  | // See the License for the specific language governing permissions and  | 
13  |  | // limitations under the License.  | 
14  |  | //  | 
15  |  | // -----------------------------------------------------------------------------  | 
16  |  | // File: hash.h  | 
17  |  | // -----------------------------------------------------------------------------  | 
18  |  | //  | 
19  |  | #ifndef ABSL_HASH_INTERNAL_HASH_H_  | 
20  |  | #define ABSL_HASH_INTERNAL_HASH_H_  | 
21  |  |  | 
22  |  | #ifdef __APPLE__  | 
23  |  | #include <Availability.h>  | 
24  |  | #include <TargetConditionals.h>  | 
25  |  | #endif  | 
26  |  |  | 
27  |  | // We include config.h here to make sure that ABSL_INTERNAL_CPLUSPLUS_LANG is  | 
28  |  | // defined.  | 
29  |  | #include "absl/base/config.h"  | 
30  |  |  | 
31  |  | // GCC 15 warns that <ciso646> is deprecated in C++17 and suggests using  | 
32  |  | // <version> instead, even though <version> is not available in C++17 mode  | 
33  |  | // prior to GCC 9.  | 
34  |  | #if defined(__has_include)  | 
35  |  | #if __has_include(<version>)  | 
36  |  | #define ABSL_INTERNAL_VERSION_HEADER_AVAILABLE 1  | 
37  |  | #endif  | 
38  |  | #endif  | 
39  |  |  | 
40  |  | // For feature testing and determining which headers can be included.  | 
41  |  | #if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L || \  | 
42  |  |     defined(ABSL_INTERNAL_VERSION_HEADER_AVAILABLE)  | 
43  |  | #include <version>  | 
44  |  | #else  | 
45  |  | #include <ciso646>  | 
46  |  | #endif  | 
47  |  |  | 
48  |  | #undef ABSL_INTERNAL_VERSION_HEADER_AVAILABLE  | 
49  |  |  | 
50  |  | #include <algorithm>  | 
51  |  | #include <array>  | 
52  |  | #include <bitset>  | 
53  |  | #include <cassert>  | 
54  |  | #include <cmath>  | 
55  |  | #include <cstddef>  | 
56  |  | #include <cstdint>  | 
57  |  | #include <cstring>  | 
58  |  | #include <deque>  | 
59  |  | #include <forward_list>  | 
60  |  | #include <functional>  | 
61  |  | #include <iterator>  | 
62  |  | #include <limits>  | 
63  |  | #include <list>  | 
64  |  | #include <map>  | 
65  |  | #include <memory>  | 
66  |  | #include <set>  | 
67  |  | #include <string>  | 
68  |  | #include <string_view>  | 
69  |  | #include <tuple>  | 
70  |  | #include <type_traits>  | 
71  |  | #include <unordered_map>  | 
72  |  | #include <unordered_set>  | 
73  |  | #include <utility>  | 
74  |  | #include <vector>  | 
75  |  |  | 
76  |  | #include "absl/base/attributes.h"  | 
77  |  | #include "absl/base/internal/unaligned_access.h"  | 
78  |  | #include "absl/base/optimization.h"  | 
79  |  | #include "absl/base/port.h"  | 
80  |  | #include "absl/container/fixed_array.h"  | 
81  |  | #include "absl/hash/internal/city.h"  | 
82  |  | #include "absl/hash/internal/weakly_mixed_integer.h"  | 
83  |  | #include "absl/meta/type_traits.h"  | 
84  |  | #include "absl/numeric/bits.h"  | 
85  |  | #include "absl/numeric/int128.h"  | 
86  |  | #include "absl/strings/string_view.h"  | 
87  |  | #include "absl/types/optional.h"  | 
88  |  | #include "absl/types/variant.h"  | 
89  |  | #include "absl/utility/utility.h"  | 
90  |  |  | 
91  |  | #if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \  | 
92  |  |     !defined(__XTENSA__)  | 
93  |  | #include <filesystem>  // NOLINT  | 
94  |  | #endif  | 
95  |  |  | 
96  |  | namespace absl { | 
97  |  | ABSL_NAMESPACE_BEGIN  | 
98  |  |  | 
99  |  | class HashState;  | 
100  |  |  | 
101  |  | namespace hash_internal { | 
102  |  |  | 
103  |  | // Internal detail: Large buffers are hashed in smaller chunks.  This function  | 
104  |  | // returns the size of these chunks.  | 
105  | 18  | constexpr size_t PiecewiseChunkSize() { return 1024; } | 
106  |  |  | 
107  |  | // PiecewiseCombiner is an internal-only helper class for hashing a piecewise  | 
108  |  | // buffer of `char` or `unsigned char` as though it were contiguous.  This class  | 
109  |  | // provides two methods:  | 
110  |  | //  | 
111  |  | //   H add_buffer(state, data, size)  | 
112  |  | //   H finalize(state)  | 
113  |  | //  | 
114  |  | // `add_buffer` can be called zero or more times, followed by a single call to  | 
115  |  | // `finalize`.  This will produce the same hash expansion as concatenating each  | 
116  |  | // buffer piece into a single contiguous buffer, and passing this to  | 
117  |  | // `H::combine_contiguous`.  | 
118  |  | //  | 
119  |  | //  Example usage:  | 
120  |  | //    PiecewiseCombiner combiner;  | 
121  |  | //    for (const auto& piece : pieces) { | 
122  |  | //      state = combiner.add_buffer(std::move(state), piece.data, piece.size);  | 
123  |  | //    }  | 
124  |  | //    return combiner.finalize(std::move(state));  | 
125  |  | class PiecewiseCombiner { | 
126  |  |  public:  | 
127  |  |   PiecewiseCombiner() = default;  | 
128  |  |   PiecewiseCombiner(const PiecewiseCombiner&) = delete;  | 
129  |  |   PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete;  | 
130  |  |  | 
131  |  |   // Appends the given range of bytes to the sequence to be hashed, which may  | 
132  |  |   // modify the provided hash state.  | 
133  |  |   template <typename H>  | 
134  |  |   H add_buffer(H state, const unsigned char* data, size_t size);  | 
135  |  |   template <typename H>  | 
136  | 0  |   H add_buffer(H state, const char* data, size_t size) { | 
137  | 0  |     return add_buffer(std::move(state),  | 
138  | 0  |                       reinterpret_cast<const unsigned char*>(data), size);  | 
139  | 0  |   }  | 
140  |  |  | 
141  |  |   // Finishes combining the hash sequence, which may modify the provided  | 
142  |  |   // hash state.  | 
143  |  |   //  | 
144  |  |   // Once finalize() is called, add_buffer() may no longer be called. The  | 
145  |  |   // resulting hash state will be the same as if the pieces passed to  | 
146  |  |   // add_buffer() were concatenated into a single flat buffer, and then provided  | 
147  |  |   // to H::combine_contiguous().  | 
148  |  |   template <typename H>  | 
149  |  |   H finalize(H state);  | 
150  |  |  | 
151  |  |  private:  | 
152  |  |   unsigned char buf_[PiecewiseChunkSize()];  | 
153  |  |   size_t position_ = 0;  | 
154  |  |   bool added_something_ = false;  | 
155  |  | };  | 
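For illustration, a minimal sketch of how a chunked (non-contiguous) type could use PiecewiseCombiner from its AbslHashValue, following the "Example usage" above. `MyRope` and its `Chunks()` accessor are hypothetical placeholders, not part of this header:

  template <typename H>
  H AbslHashValue(H state, const MyRope& rope) {  // MyRope is hypothetical
    absl::hash_internal::PiecewiseCombiner combiner;
    for (absl::string_view piece : rope.Chunks()) {
      // Feeding each piece to the same combiner yields the same hash expansion
      // as calling H::combine_contiguous on the concatenated bytes.
      state = combiner.add_buffer(std::move(state), piece.data(), piece.size());
    }
    return combiner.finalize(std::move(state));
  }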
156  |  |  | 
157  |  | // Trait class which returns true if T is hashable by the absl::Hash framework.  | 
158  |  | // Used for the AbslHashValue implementations for composite types below.  | 
159  |  | template <typename T>  | 
160  |  | struct is_hashable;  | 
161  |  |  | 
162  |  | // HashStateBase is an internal implementation detail that contains common  | 
163  |  | // implementation details for all of the "hash state" objects generated  | 
164  |  | // by Abseil.  This is not a public API; users should not create classes that  | 
165  |  | // inherit from this.  | 
166  |  | //  | 
167  |  | // A hash state object is the template argument `H` passed to `AbslHashValue`.  | 
168  |  | // It represents an intermediate state in the computation of an unspecified hash  | 
169  |  | // algorithm. `HashStateBase` provides a CRTP style base class for hash state  | 
170  |  | // implementations. Developers adding type support for `absl::Hash` should not  | 
171  |  | // rely on any parts of the state object other than the following member  | 
172  |  | // functions:  | 
173  |  | //  | 
174  |  | //   * HashStateBase::combine()  | 
175  |  | //   * HashStateBase::combine_contiguous()  | 
176  |  | //   * HashStateBase::combine_unordered()  | 
177  |  | //  | 
178  |  | // A derived hash state class of type `H` must provide a public member function  | 
179  |  | // with a signature similar to the following:  | 
180  |  | //  | 
181  |  | //    `static H combine_contiguous(H state, const unsigned char*, size_t)`.  | 
182  |  | //  | 
183  |  | // It must also provide a private template method named RunCombineUnordered.  | 
184  |  | //  | 
185  |  | // A "consumer" is a 1-arg functor returning void.  Its argument is a reference  | 
186  |  | // to an inner hash state object, and it may be called multiple times.  When  | 
187  |  | // called, the functor consumes the entropy from the provided state object,  | 
188  |  | // and resets that object to its empty state.  | 
189  |  | //  | 
190  |  | // A "combiner" is a stateless 2-arg functor returning void.  Its arguments are  | 
191  |  | // an inner hash state object and an ElementStateConsumer functor.  A combiner  | 
192  |  | // uses the provided inner hash state object to hash each element of the  | 
193  |  | // container, passing the inner hash state object to the consumer after hashing  | 
194  |  | // each element.  | 
195  |  | //  | 
196  |  | // Given these definitions, a derived hash state class of type H  | 
197  |  | // must provide a private template method with a signature similar to the  | 
198  |  | // following:  | 
199  |  | //  | 
200  |  | //    `template <typename CombinerT>`  | 
201  |  | //    `static H RunCombineUnordered(H outer_state, CombinerT combiner)`  | 
202  |  | //  | 
203  |  | // This function is responsible for constructing the inner state object and  | 
204  |  | // providing a consumer to the combiner.  It uses side effects of the consumer  | 
205  |  | // and combiner to mix the state of each element in an order-independent manner,  | 
206  |  | // and uses this to return an updated value of `outer_state`.  | 
207  |  | //  | 
208  |  | // This inside-out approach generates efficient object code in the normal case,  | 
209  |  | // but allows us to use stack storage to implement the absl::HashState type  | 
210  |  | // erasure mechanism (avoiding heap allocations while hashing).  | 
211  |  | //  | 
212  |  | // `HashStateBase` will provide a complete implementation for a hash state  | 
213  |  | // object in terms of these two methods.  | 
214  |  | //  | 
215  |  | // Example:  | 
216  |  | //  | 
217  |  | //   // Use CRTP to define your derived class.  | 
218  |  | //   struct MyHashState : HashStateBase<MyHashState> { | 
219  |  | //       static H combine_contiguous(H state, const unsigned char*, size_t);  | 
220  |  | //       using MyHashState::HashStateBase::combine;  | 
221  |  | //       using MyHashState::HashStateBase::combine_contiguous;  | 
222  |  | //       using MyHashState::HashStateBase::combine_unordered;  | 
223  |  | //     private:  | 
224  |  | //       template <typename CombinerT>  | 
225  |  | //       static H RunCombineUnordered(H state, CombinerT combiner);  | 
226  |  | //   };  | 
227  |  | template <typename H>  | 
228  |  | class HashStateBase { | 
229  |  |  public:  | 
230  |  |   // Combines an arbitrary number of values into a hash state, returning the  | 
231  |  |   // updated state.  | 
232  |  |   //  | 
233  |  |   // Each of the value types `T` must be separately hashable by the Abseil  | 
234  |  |   // hashing framework.  | 
235  |  |   //  | 
236  |  |   // NOTE:  | 
237  |  |   //  | 
238  |  |   //   state = H::combine(std::move(state), value1, value2, value3);  | 
239  |  |   //  | 
240  |  |   // is guaranteed to produce the same hash expansion as:  | 
241  |  |   //  | 
242  |  |   //   state = H::combine(std::move(state), value1);  | 
243  |  |   //   state = H::combine(std::move(state), value2);  | 
244  |  |   //   state = H::combine(std::move(state), value3);  | 
245  |  |   template <typename T, typename... Ts>  | 
246  |  |   static H combine(H state, const T& value, const Ts&... values);  | 
247  | 30  |   static H combine(H state) { return state; } | 
248  |  |  | 
249  |  |   // Combines a contiguous array of `size` elements into a hash state, returning  | 
250  |  |   // the updated state.  | 
251  |  |   //  | 
252  |  |   // NOTE:  | 
253  |  |   //  | 
254  |  |   //   state = H::combine_contiguous(std::move(state), data, size);  | 
255  |  |   //  | 
256  |  |   // is NOT guaranteed to produce the same hash expansion as a for-loop (it may  | 
257  |  |   // perform internal optimizations).  If you need this guarantee, use the  | 
258  |  |   // for-loop instead.  | 
259  |  |   template <typename T>  | 
260  |  |   static H combine_contiguous(H state, const T* data, size_t size);  | 
261  |  |  | 
262  |  |   template <typename I>  | 
263  |  |   static H combine_unordered(H state, I begin, I end);  | 
264  |  |  | 
265  |  |   using AbslInternalPiecewiseCombiner = PiecewiseCombiner;  | 
266  |  |  | 
267  |  |   template <typename T>  | 
268  |  |   using is_hashable = absl::hash_internal::is_hashable<T>;  | 
269  |  |  | 
270  |  |  private:  | 
271  |  |   // Common implementation of the iteration step of a "combiner", as described  | 
272  |  |   // above.  | 
273  |  |   template <typename I>  | 
274  |  |   struct CombineUnorderedCallback { | 
275  |  |     I begin;  | 
276  |  |     I end;  | 
277  |  |  | 
278  |  |     template <typename InnerH, typename ElementStateConsumer>  | 
279  |  |     void operator()(InnerH inner_state, ElementStateConsumer cb) { | 
280  |  |       for (; begin != end; ++begin) { | 
281  |  |         inner_state = H::combine(std::move(inner_state), *begin);  | 
282  |  |         cb(inner_state);  | 
283  |  |       }  | 
284  |  |     }  | 
285  |  |   };  | 
286  |  | };  | 
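As a usage sketch of `combine_unordered` described above (it mirrors the std::unordered_set overload later in this file; `MyUnorderedSet` is a hypothetical container, not part of this header):

  template <typename H, typename T>
  H AbslHashValue(H state, const MyUnorderedSet<T>& set) {
    // Order-independent: each element is hashed into a fresh inner state, so
    // iterating the elements in any order produces the same result.
    return H::combine(
        H::combine_unordered(std::move(state), set.begin(), set.end()),
        set.size());
  }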
287  |  |  | 
288  |  | // `is_uniquely_represented<T>` is a trait class that indicates whether `T`  | 
289  |  | // is uniquely represented.  | 
290  |  | //  | 
291  |  | // A type is "uniquely represented" if two equal values of that type are  | 
292  |  | // guaranteed to have the same bytes in their underlying storage. In other  | 
293  |  | // words, if `a == b`, then `memcmp(&a, &b, sizeof(T))` is guaranteed to be  | 
294  |  | // zero. This property cannot be detected automatically, so this trait is false  | 
295  |  | // by default, but can be specialized by types that wish to assert that they are  | 
296  |  | // uniquely represented. This makes them eligible for certain optimizations.  | 
297  |  | //  | 
298  |  | // If you have any doubt whatsoever, do not specialize this template.  | 
299  |  | // The default is completely safe, and merely disables some optimizations  | 
300  |  | // that will not matter for most types. Specializing this template,  | 
301  |  | // on the other hand, can be very hazardous.  | 
302  |  | //  | 
303  |  | // To be uniquely represented, a type must not have multiple ways of  | 
304  |  | // representing the same value; for example, float and double are not  | 
305  |  | // uniquely represented, because they have distinct representations for  | 
306  |  | // +0 and -0. Furthermore, the type's byte representation must consist  | 
307  |  | // solely of user-controlled data, with no padding bits and no compiler-  | 
308  |  | // controlled data such as vptrs or sanitizer metadata. This is usually  | 
309  |  | // very difficult to guarantee, because in most cases the compiler can  | 
310  |  | // insert data and padding bits at its own discretion.  | 
311  |  | //  | 
312  |  | // If you specialize this template for a type `T`, you must do so in the file  | 
313  |  | // that defines that type (or in this file). If you define that specialization  | 
314  |  | // anywhere else, `is_uniquely_represented<T>` could have different meanings  | 
315  |  | // in different places.  | 
316  |  | //  | 
317  |  | // The Enable parameter is meaningless; it is provided as a convenience,  | 
318  |  | // to support certain SFINAE techniques when defining specializations.  | 
319  |  | template <typename T, typename Enable = void>  | 
320  |  | struct is_uniquely_represented : std::false_type {}; | 
321  |  |  | 
322  |  | // unsigned char is a synonym for "byte", so it is guaranteed to be  | 
323  |  | // uniquely represented.  | 
324  |  | template <>  | 
325  |  | struct is_uniquely_represented<unsigned char> : std::true_type {}; | 
326  |  |  | 
327  |  | // is_uniquely_represented for integral types  | 
328  |  | //  | 
329  |  | // Integral types other than bool should be uniquely represented on any  | 
330  |  | // platform that this will plausibly be ported to.  | 
331  |  | template <typename Integral>  | 
332  |  | struct is_uniquely_represented<  | 
333  |  |     Integral, typename std::enable_if<std::is_integral<Integral>::value>::type>  | 
334  |  |     : std::true_type {}; | 
335  |  |  | 
336  |  | template <>  | 
337  |  | struct is_uniquely_represented<bool> : std::false_type {}; | 
338  |  |  | 
339  |  | #ifdef ABSL_HAVE_INTRINSIC_INT128  | 
340  |  | // Specialize the trait for GNU extension types.  | 
341  |  | template <>  | 
342  |  | struct is_uniquely_represented<__int128> : std::true_type {}; | 
343  |  | template <>  | 
344  |  | struct is_uniquely_represented<unsigned __int128> : std::true_type {}; | 
345  |  | #endif  // ABSL_HAVE_INTRINSIC_INT128  | 
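For illustration, a hypothetical type opting into this trait (`MyPackedId` is an assumption, not part of Abseil). It has no padding and its equality is bytewise, so the specialization is safe and enables the bytewise fast path:

  struct MyPackedId {
    uint32_t hi;
    uint32_t lo;
    friend bool operator==(MyPackedId a, MyPackedId b) {
      return a.hi == b.hi && a.lo == b.lo;
    }
  };
  // Per the note above, the specialization must live next to the type's
  // definition (or in this file).
  template <>
  struct is_uniquely_represented<MyPackedId> : std::true_type {};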
346  |  |  | 
347  |  | template <typename T>  | 
348  |  | struct FitsIn64Bits : std::integral_constant<bool, sizeof(T) <= 8> {}; | 
349  |  |  | 
350  |  | struct CombineRaw { | 
351  |  |   template <typename H>  | 
352  | 0  |   H operator()(H state, uint64_t value) const { | 
353  | 0  |     return H::combine_raw(std::move(state), value);  | 
354  | 0  |   }  | 
355  |  | };  | 
356  |  |  | 
357  |  | // For use in `raw_hash_set` to pass a seed to the hash function.  | 
358  |  | struct HashWithSeed { | 
359  |  |   template <typename Hasher, typename T>  | 
360  | 60  |   size_t hash(const Hasher& hasher, const T& value, size_t seed) const { | 
361  |  |     // NOLINTNEXTLINE(clang-diagnostic-sign-conversion)  | 
362  | 60  |     return hasher.hash_with_seed(value, seed);  | 
363  | 60  |   }  | 
364  |  | };  | 
365  |  |  | 
366  |  | // Convenience function that combines `hash_state` with the byte representation  | 
367  |  | // of `value`.  | 
368  |  | template <typename H, typename T,  | 
369  |  |           absl::enable_if_t<FitsIn64Bits<T>::value, int> = 0>  | 
370  | 0  | H hash_bytes(H hash_state, const T& value) { | 
371  | 0  |   const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);  | 
372  | 0  |   uint64_t v;  | 
373  |  |   if constexpr (sizeof(T) == 1) { | 
374  |  |     v = *start;  | 
375  |  |   } else if constexpr (sizeof(T) == 2) { | 
376  |  |     v = absl::base_internal::UnalignedLoad16(start);  | 
377  | 0  |   } else if constexpr (sizeof(T) == 4) { | 
378  | 0  |     v = absl::base_internal::UnalignedLoad32(start);  | 
379  | 0  |   } else { | 
380  | 0  |     static_assert(sizeof(T) == 8);  | 
381  | 0  |     v = absl::base_internal::UnalignedLoad64(start);  | 
382  | 0  |   }  | 
383  | 0  |   return CombineRaw()(std::move(hash_state), v);  | 
384  | 0  | }  | 
385  |  | template <typename H, typename T,  | 
386  |  |           absl::enable_if_t<!FitsIn64Bits<T>::value, int> = 0>  | 
387  |  | H hash_bytes(H hash_state, const T& value) { | 
388  |  |   const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);  | 
389  |  |   return H::combine_contiguous(std::move(hash_state), start, sizeof(value));  | 
390  |  | }  | 
391  |  |  | 
392  |  | template <typename H>  | 
393  |  | H hash_weakly_mixed_integer(H hash_state, WeaklyMixedInteger value) { | 
394  |  |   return H::combine_weakly_mixed_integer(std::move(hash_state), value);  | 
395  |  | }  | 
396  |  |  | 
397  |  | // -----------------------------------------------------------------------------  | 
398  |  | // AbslHashValue for Basic Types  | 
399  |  | // -----------------------------------------------------------------------------  | 
400  |  |  | 
401  |  | // Note: Default `AbslHashValue` implementations live in `hash_internal`. This  | 
402  |  | // allows us to block lexical scope lookup when doing an unqualified call to  | 
403  |  | // `AbslHashValue` below. User-defined implementations of `AbslHashValue` can  | 
404  |  | // only be found via ADL.  | 
405  |  |  | 
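For reference, the user-side pattern this lookup scheme supports: a type becomes hashable by providing an ADL-visible `AbslHashValue`. `MyPoint` below is a hypothetical example, not part of this header:

  struct MyPoint {
    int x;
    int y;
    // Found via ADL; forwards the salient fields to H::combine().
    template <typename H>
    friend H AbslHashValue(H h, const MyPoint& p) {
      return H::combine(std::move(h), p.x, p.y);
    }
  };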
406  |  | // AbslHashValue() for hashing bool values  | 
407  |  | //  | 
408  |  | // We use SFINAE to ensure that this overload only accepts bool, not types that  | 
409  |  | // are convertible to bool.  | 
410  |  | template <typename H, typename B>  | 
411  |  | typename std::enable_if<std::is_same<B, bool>::value, H>::type AbslHashValue(  | 
412  |  |     H hash_state, B value) { | 
413  |  |   // We use ~size_t{} instead of 1 so that all bits are different between | 
414  |  |   // true/false, instead of only one bit.  | 
415  |  |   return H::combine(std::move(hash_state),  | 
416  |  |                     static_cast<size_t>(value ? ~size_t{} : 0)); | 
417  |  | }  | 
418  |  |  | 
419  |  | // AbslHashValue() for hashing enum values  | 
420  |  | template <typename H, typename Enum>  | 
421  |  | typename std::enable_if<std::is_enum<Enum>::value, H>::type AbslHashValue(  | 
422  |  |     H hash_state, Enum e) { | 
423  |  |   // In practice, we could almost certainly just invoke hash_bytes directly,  | 
424  |  |   // but it's possible that a sanitizer might one day want to  | 
425  |  |   // store data in the unused bits of an enum. To avoid that risk, we  | 
426  |  |   // convert to the underlying type before hashing. Hopefully this will get  | 
427  |  |   // optimized away; if not, we can reopen discussion with c-toolchain-team.  | 
428  |  |   return H::combine(std::move(hash_state),  | 
429  |  |                     static_cast<typename std::underlying_type<Enum>::type>(e));  | 
430  |  | }  | 
431  |  | // AbslHashValue() for hashing floating-point values  | 
432  |  | template <typename H, typename Float>  | 
433  |  | typename std::enable_if<std::is_same<Float, float>::value ||  | 
434  |  |                             std::is_same<Float, double>::value,  | 
435  |  |                         H>::type  | 
436  |  | AbslHashValue(H hash_state, Float value) { | 
437  |  |   return hash_internal::hash_bytes(std::move(hash_state),  | 
438  |  |                                    value == 0 ? 0 : value);  | 
439  |  | }  | 
440  |  |  | 
441  |  | // Long double has the property that it might have extra unused bytes in it.  | 
442  |  | // For example, on x86 sizeof(long double)==16 but it only really uses 80 bits  | 
443  |  | // of it. This means we can't use hash_bytes on a long double and have to  | 
444  |  | // convert it to something else first.  | 
445  |  | template <typename H, typename LongDouble>  | 
446  |  | typename std::enable_if<std::is_same<LongDouble, long double>::value, H>::type  | 
447  |  | AbslHashValue(H hash_state, LongDouble value) { | 
448  |  |   const int category = std::fpclassify(value);  | 
449  |  |   switch (category) { | 
450  |  |     case FP_INFINITE:  | 
451  |  |       // Add the sign bit to differentiate between +Inf and -Inf  | 
452  |  |       hash_state = H::combine(std::move(hash_state), std::signbit(value));  | 
453  |  |       break;  | 
454  |  |  | 
455  |  |     case FP_NAN:  | 
456  |  |     case FP_ZERO:  | 
457  |  |     default:  | 
458  |  |       // Category is enough for these.  | 
459  |  |       break;  | 
460  |  |  | 
461  |  |     case FP_NORMAL:  | 
462  |  |     case FP_SUBNORMAL:  | 
463  |  |       // We can't convert `value` directly to double because this would have  | 
464  |  |       // undefined behavior if the value is out of range.  | 
465  |  |       // std::frexp gives us a value in the range (-1, -.5] or [.5, 1) that is  | 
466  |  |       // guaranteed to be in range for `double`. The truncation is  | 
467  |  |       // implementation defined, but that works as long as it is deterministic.  | 
468  |  |       int exp;  | 
469  |  |       auto mantissa = static_cast<double>(std::frexp(value, &exp));  | 
470  |  |       hash_state = H::combine(std::move(hash_state), mantissa, exp);  | 
471  |  |   }  | 
472  |  |  | 
473  |  |   return H::combine(std::move(hash_state), category);  | 
474  |  | }  | 
475  |  |  | 
476  |  | // Without this overload, an array decays to a pointer and we hash that, which  | 
477  |  | // is not likely to be what the caller intended.  | 
478  |  | template <typename H, typename T, size_t N>  | 
479  |  | H AbslHashValue(H hash_state, T (&)[N]) { | 
480  |  |   static_assert(  | 
481  |  |       sizeof(T) == -1,  | 
482  |  |       "Hashing C arrays is not allowed. For string literals, wrap the literal "  | 
483  |  |       "in absl::string_view(). To hash the array contents, use "  | 
484  |  |       "absl::MakeSpan() or make the array an std::array. To hash the array "  | 
485  |  |       "address, use &array[0].");  | 
486  |  |   return hash_state;  | 
487  |  | }  | 
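A brief caller-side sketch of the alternatives named in the message above (the values are hypothetical):

  // String literal contents: wrap the literal in absl::string_view.
  size_t h1 = absl::Hash<absl::string_view>{}(absl::string_view("literal"));
  // Array contents: use std::array (hashable via the overload further below).
  std::array<int, 3> arr = {1, 2, 3};
  size_t h2 = absl::Hash<std::array<int, 3>>{}(arr);
  // Array address: hash the pointer to the first element instead.
  size_t h3 = absl::Hash<const int*>{}(&arr[0]);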
488  |  |  | 
489  |  | // AbslHashValue() for hashing pointers  | 
490  |  | template <typename H, typename T>  | 
491  |  | std::enable_if_t<std::is_pointer<T>::value, H> AbslHashValue(H hash_state,  | 
492  |  |                                                              T ptr) { | 
493  |  |   auto v = reinterpret_cast<uintptr_t>(ptr);  | 
494  |  |   // Due to alignment, pointers tend to have low bits as zero, and the next few  | 
495  |  |   // bits follow a pattern since they are also multiples of some base value.  | 
496  |  |   // The PointerAlignment test verifies that our mixing is good enough to handle  | 
497  |  |   // these cases.  | 
498  |  |   return H::combine(std::move(hash_state), v);  | 
499  |  | }  | 
500  |  |  | 
501  |  | // AbslHashValue() for hashing nullptr_t  | 
502  |  | template <typename H>  | 
503  |  | H AbslHashValue(H hash_state, std::nullptr_t) { | 
504  |  |   return H::combine(std::move(hash_state), static_cast<void*>(nullptr));  | 
505  |  | }  | 
506  |  |  | 
507  |  | // AbslHashValue() for hashing pointers-to-member  | 
508  |  | template <typename H, typename T, typename C>  | 
509  |  | H AbslHashValue(H hash_state, T C::*ptr) { | 
510  |  |   auto salient_ptm_size = [](std::size_t n) -> std::size_t { | 
511  |  | #if defined(_MSC_VER)  | 
512  |  |     // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,  | 
513  |  |     // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain  | 
514  |  |     // padding (namely when they have 1 or 3 ints). The value below is a lower  | 
515  |  |     // bound on the number of salient, non-padding bytes that we use for  | 
516  |  |     // hashing.  | 
517  |  |     if constexpr (alignof(T C::*) == alignof(int)) { | 
518  |  |       // No padding when all subobjects have the same size as the total  | 
519  |  |       // alignment. This happens in 32-bit mode.  | 
520  |  |       return n;  | 
521  |  |     } else { | 
522  |  |       // Padding for 1 int (size 16) or 3 ints (size 24).  | 
523  |  |       // With 2 ints, the size is 16 with no padding, which we pessimize.  | 
524  |  |       return n == 24 ? 20 : n == 16 ? 12 : n;  | 
525  |  |     }  | 
526  |  | #else  | 
527  |  |   // On other platforms, we assume that pointers-to-members do not have  | 
528  |  |   // padding.  | 
529  |  | #ifdef __cpp_lib_has_unique_object_representations  | 
530  |  |     static_assert(std::has_unique_object_representations<T C::*>::value);  | 
531  |  | #endif  // __cpp_lib_has_unique_object_representations  | 
532  |  |     return n;  | 
533  |  | #endif  | 
534  |  |   };  | 
535  |  |   return H::combine_contiguous(std::move(hash_state),  | 
536  |  |                                reinterpret_cast<unsigned char*>(&ptr),  | 
537  |  |                                salient_ptm_size(sizeof ptr));  | 
538  |  | }  | 
539  |  |  | 
540  |  | // -----------------------------------------------------------------------------  | 
541  |  | // AbslHashValue for Composite Types  | 
542  |  | // -----------------------------------------------------------------------------  | 
543  |  |  | 
544  |  | // AbslHashValue() for hashing pairs  | 
545  |  | template <typename H, typename T1, typename T2>  | 
546  |  | typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,  | 
547  |  |                         H>::type  | 
548  |  | AbslHashValue(H hash_state, const std::pair<T1, T2>& p) { | 
549  |  |   return H::combine(std::move(hash_state), p.first, p.second);  | 
550  |  | }  | 
551  |  |  | 
552  |  | // Helper function for hashing a tuple. The third argument should  | 
553  |  | // be an index_sequence running from 0 to tuple_size<Tuple> - 1.  | 
554  |  | template <typename H, typename Tuple, size_t... Is>  | 
555  | 0  | H hash_tuple(H hash_state, const Tuple& t, absl::index_sequence<Is...>) { | 
556  | 0  |   return H::combine(std::move(hash_state), std::get<Is>(t)...);  | 
557  | 0  | }  | 
558  |  |  | 
559  |  | // AbslHashValue for hashing tuples  | 
560  |  | template <typename H, typename... Ts>  | 
561  |  | #if defined(_MSC_VER)  | 
562  |  | // This SFINAE gets MSVC confused under some conditions. Let's just disable it  | 
563  |  | // for now.  | 
564  |  | H  | 
565  |  | #else   // _MSC_VER  | 
566  |  | typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value, H>::type  | 
567  |  | #endif  // _MSC_VER  | 
568  | 0  | AbslHashValue(H hash_state, const std::tuple<Ts...>& t) { | 
569  | 0  |   return hash_internal::hash_tuple(std::move(hash_state), t,  | 
570  | 0  |                                    absl::make_index_sequence<sizeof...(Ts)>());  | 
571  | 0  | }  | 
572  |  |  | 
573  |  | // -----------------------------------------------------------------------------  | 
574  |  | // AbslHashValue for Pointers  | 
575  |  | // -----------------------------------------------------------------------------  | 
576  |  |  | 
577  |  | // AbslHashValue for hashing unique_ptr  | 
578  |  | template <typename H, typename T, typename D>  | 
579  |  | H AbslHashValue(H hash_state, const std::unique_ptr<T, D>& ptr) { | 
580  |  |   return H::combine(std::move(hash_state), ptr.get());  | 
581  |  | }  | 
582  |  |  | 
583  |  | // AbslHashValue for hashing shared_ptr  | 
584  |  | template <typename H, typename T>  | 
585  |  | H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) { | 
586  |  |   return H::combine(std::move(hash_state), ptr.get());  | 
587  |  | }  | 
588  |  |  | 
589  |  | // -----------------------------------------------------------------------------  | 
590  |  | // AbslHashValue for String-Like Types  | 
591  |  | // -----------------------------------------------------------------------------  | 
592  |  |  | 
593  |  | // AbslHashValue for hashing strings  | 
594  |  | //  | 
595  |  | // All the string-like types supported here provide the same hash expansion for  | 
596  |  | // the same character sequence. These types are:  | 
597  |  | //  | 
598  |  | //  - `absl::Cord`  | 
599  |  | //  - `std::string` (and std::basic_string<T, std::char_traits<T>, A> for  | 
600  |  | //      any allocator A and any T in {char, wchar_t, char16_t, char32_t}) | 
601  |  | //  - `absl::string_view`, `std::string_view`, `std::wstring_view`,  | 
602  |  | //    `std::u16string_view`, and `std::u32string_view`.  | 
603  |  | //  | 
604  |  | // For simplicity, we currently support only strings built on `char`, `wchar_t`,  | 
605  |  | // `char16_t`, or `char32_t`. This support may be broadened, if necessary, but  | 
606  |  | // with some caution - this overload would misbehave in cases where the traits'  | 
607  |  | // `eq()` member isn't equivalent to `==` on the underlying character type.  | 
608  |  | template <typename H>  | 
609  | 30  | H AbslHashValue(H hash_state, absl::string_view str) { | 
610  | 30  |   return H::combine_contiguous(std::move(hash_state), str.data(), str.size());  | 
611  | 30  | }  | 
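A usage sketch (not code from this header) of the equal-expansion guarantee described above: the same character sequence hashes identically through any of the supported string types, e.g.

  assert(absl::Hash<std::string>{}(std::string("abc")) ==
         absl::Hash<absl::string_view>{}(absl::string_view("abc")));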
612  |  |  | 
613  |  | // Support std::wstring, std::u16string and std::u32string.  | 
614  |  | template <typename Char, typename Alloc, typename H,  | 
615  |  |           typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||  | 
616  |  |                                        std::is_same<Char, char16_t>::value ||  | 
617  |  |                                        std::is_same<Char, char32_t>::value>>  | 
618  |  | H AbslHashValue(  | 
619  |  |     H hash_state,  | 
620  |  |     const std::basic_string<Char, std::char_traits<Char>, Alloc>& str) { | 
621  |  |   return H::combine_contiguous(std::move(hash_state), str.data(), str.size());  | 
622  |  | }  | 
623  |  |  | 
624  |  | // Support std::wstring_view, std::u16string_view and std::u32string_view.  | 
625  |  | template <typename Char, typename H,  | 
626  |  |           typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||  | 
627  |  |                                        std::is_same<Char, char16_t>::value ||  | 
628  |  |                                        std::is_same<Char, char32_t>::value>>  | 
629  |  | H AbslHashValue(H hash_state, std::basic_string_view<Char> str) { | 
630  |  |   return H::combine_contiguous(std::move(hash_state), str.data(), str.size());  | 
631  |  | }  | 
632  |  |  | 
633  |  | #if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \  | 
634  |  |     (!defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) ||        \  | 
635  |  |      __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000) &&       \  | 
636  |  |     (!defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) ||         \  | 
637  |  |      __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101500) &&        \  | 
638  |  |     (!defined(__XTENSA__))  | 
639  |  |  | 
640  |  | #define ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE 1  | 
641  |  |  | 
642  |  | // Support std::filesystem::path. The SFINAE is required because some string  | 
643  |  | // types are implicitly convertible to std::filesystem::path.  | 
644  |  | template <typename Path, typename H,  | 
645  |  |           typename = absl::enable_if_t<  | 
646  |  |               std::is_same_v<Path, std::filesystem::path>>>  | 
647  |  | H AbslHashValue(H hash_state, const Path& path) { | 
648  |  |   // This is implemented by deferring to the standard library to compute the  | 
649  |  |   // hash.  The standard library requires that for two paths, `p1 == p2`, then  | 
650  |  |   // `hash_value(p1) == hash_value(p2)`. `AbslHashValue` has the same  | 
651  |  |   // requirement. Since `operator==` does platform specific matching, deferring  | 
652  |  |   // to the standard library is the simplest approach.  | 
653  |  |   return H::combine(std::move(hash_state), std::filesystem::hash_value(path));  | 
654  |  | }  | 
655  |  |  | 
656  |  | #endif  // ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE  | 
657  |  |  | 
658  |  | // -----------------------------------------------------------------------------  | 
659  |  | // AbslHashValue for Sequence Containers  | 
660  |  | // -----------------------------------------------------------------------------  | 
661  |  |  | 
662  |  | // AbslHashValue for hashing std::array  | 
663  |  | template <typename H, typename T, size_t N>  | 
664  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
665  |  |     H hash_state, const std::array<T, N>& array) { | 
666  |  |   return H::combine_contiguous(std::move(hash_state), array.data(),  | 
667  |  |                                array.size());  | 
668  |  | }  | 
669  |  |  | 
670  |  | // AbslHashValue for hashing std::deque  | 
671  |  | template <typename H, typename T, typename Allocator>  | 
672  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
673  |  |     H hash_state, const std::deque<T, Allocator>& deque) { | 
674  |  |   // TODO(gromer): investigate a more efficient implementation taking  | 
675  |  |   // advantage of the chunk structure.  | 
676  |  |   for (const auto& t : deque) { | 
677  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
678  |  |   }  | 
679  |  |   return H::combine(std::move(hash_state), WeaklyMixedInteger{deque.size()}); | 
680  |  | }  | 
681  |  |  | 
682  |  | // AbslHashValue for hashing std::forward_list  | 
683  |  | template <typename H, typename T, typename Allocator>  | 
684  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
685  |  |     H hash_state, const std::forward_list<T, Allocator>& list) { | 
686  |  |   size_t size = 0;  | 
687  |  |   for (const T& t : list) { | 
688  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
689  |  |     ++size;  | 
690  |  |   }  | 
691  |  |   return H::combine(std::move(hash_state), WeaklyMixedInteger{size}); | 
692  |  | }  | 
693  |  |  | 
694  |  | // AbslHashValue for hashing std::list  | 
695  |  | template <typename H, typename T, typename Allocator>  | 
696  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
697  |  |     H hash_state, const std::list<T, Allocator>& list) { | 
698  |  |   for (const auto& t : list) { | 
699  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
700  |  |   }  | 
701  |  |   return H::combine(std::move(hash_state), WeaklyMixedInteger{list.size()}); | 
702  |  | }  | 
703  |  |  | 
704  |  | // AbslHashValue for hashing std::vector  | 
705  |  | //  | 
706  |  | // Do not use this for vector<bool> on platforms that have a working  | 
707  |  | // implementation of std::hash. It does not have a .data(), and a fallback to  | 
708  |  | // std::hash<> is most likely faster.  | 
709  |  | template <typename H, typename T, typename Allocator>  | 
710  |  | typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,  | 
711  |  |                         H>::type  | 
712  |  | AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) { | 
713  |  |   return H::combine_contiguous(std::move(hash_state), vector.data(),  | 
714  |  |                                vector.size());  | 
715  |  | }  | 
716  |  |  | 
717  |  | // AbslHashValue special cases for hashing std::vector<bool>  | 
718  |  |  | 
719  |  | #if defined(ABSL_IS_BIG_ENDIAN) && \  | 
720  |  |     (defined(__GLIBCXX__) || defined(__GLIBCPP__))  | 
721  |  |  | 
722  |  | // std::hash in libstdc++ does not work correctly with vector<bool> on Big  | 
723  |  | // Endian platforms therefore we need to implement a custom AbslHashValue for  | 
724  |  | // it. More details on the bug:  | 
725  |  | // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531  | 
726  |  | template <typename H, typename T, typename Allocator>  | 
727  |  | typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,  | 
728  |  |                         H>::type  | 
729  |  | AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) { | 
730  |  |   typename H::AbslInternalPiecewiseCombiner combiner;  | 
731  |  |   for (const auto& i : vector) { | 
732  |  |     unsigned char c = static_cast<unsigned char>(i);  | 
733  |  |     hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));  | 
734  |  |   }  | 
735  |  |   return H::combine(combiner.finalize(std::move(hash_state)),  | 
736  |  |                     WeaklyMixedInteger{vector.size()}); | 
737  |  | }  | 
738  |  | #else  | 
739  |  | // When not working around the libstdc++ bug above, we still have to contend  | 
740  |  | // with the fact that std::hash<vector<bool>> is often poor quality, hashing  | 
741  |  | // directly on the internal words and on no other state.  On these platforms,  | 
742  |  | // vector<bool>{1, 1} and vector<bool>{1, 1, 0} hash to the same value. | 
743  |  | //  | 
744  |  | // Mixing in the size (as we do in our other vector<> implementations) on top  | 
745  |  | // of the library-provided hash implementation avoids this QOI issue.  | 
746  |  | template <typename H, typename T, typename Allocator>  | 
747  |  | typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,  | 
748  |  |                         H>::type  | 
749  |  | AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) { | 
750  |  |   return H::combine(std::move(hash_state),  | 
751  |  |                     std::hash<std::vector<T, Allocator>>{}(vector), | 
752  |  |                     WeaklyMixedInteger{vector.size()}); | 
753  |  | }  | 
754  |  | #endif  | 
755  |  |  | 
756  |  | // -----------------------------------------------------------------------------  | 
757  |  | // AbslHashValue for Ordered Associative Containers  | 
758  |  | // -----------------------------------------------------------------------------  | 
759  |  |  | 
760  |  | // AbslHashValue for hashing std::map  | 
761  |  | template <typename H, typename Key, typename T, typename Compare,  | 
762  |  |           typename Allocator>  | 
763  |  | typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,  | 
764  |  |                         H>::type  | 
765  |  | AbslHashValue(H hash_state, const std::map<Key, T, Compare, Allocator>& map) { | 
766  |  |   for (const auto& t : map) { | 
767  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
768  |  |   }  | 
769  |  |   return H::combine(std::move(hash_state), WeaklyMixedInteger{map.size()}); | 
770  |  | }  | 
771  |  |  | 
772  |  | // AbslHashValue for hashing std::multimap  | 
773  |  | template <typename H, typename Key, typename T, typename Compare,  | 
774  |  |           typename Allocator>  | 
775  |  | typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,  | 
776  |  |                         H>::type  | 
777  |  | AbslHashValue(H hash_state,  | 
778  |  |               const std::multimap<Key, T, Compare, Allocator>& map) { | 
779  |  |   for (const auto& t : map) { | 
780  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
781  |  |   }  | 
782  |  |   return H::combine(std::move(hash_state), WeaklyMixedInteger{map.size()}); | 
783  |  | }  | 
784  |  |  | 
785  |  | // AbslHashValue for hashing std::set  | 
786  |  | template <typename H, typename Key, typename Compare, typename Allocator>  | 
787  |  | typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(  | 
788  |  |     H hash_state, const std::set<Key, Compare, Allocator>& set) { | 
789  |  |   for (const auto& t : set) { | 
790  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
791  |  |   }  | 
792  |  |   return H::combine(std::move(hash_state), WeaklyMixedInteger{set.size()}); | 
793  |  | }  | 
794  |  |  | 
795  |  | // AbslHashValue for hashing std::multiset  | 
796  |  | template <typename H, typename Key, typename Compare, typename Allocator>  | 
797  |  | typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(  | 
798  |  |     H hash_state, const std::multiset<Key, Compare, Allocator>& set) { | 
799  |  |   for (const auto& t : set) { | 
800  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
801  |  |   }  | 
802  |  |   return H::combine(std::move(hash_state), WeaklyMixedInteger{set.size()}); | 
803  |  | }  | 
804  |  |  | 
805  |  | // -----------------------------------------------------------------------------  | 
806  |  | // AbslHashValue for Unordered Associative Containers  | 
807  |  | // -----------------------------------------------------------------------------  | 
808  |  |  | 
809  |  | // AbslHashValue for hashing std::unordered_set  | 
810  |  | template <typename H, typename Key, typename Hash, typename KeyEqual,  | 
811  |  |           typename Alloc>  | 
812  |  | typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(  | 
813  |  |     H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) { | 
814  |  |   return H::combine(  | 
815  |  |       H::combine_unordered(std::move(hash_state), s.begin(), s.end()),  | 
816  |  |       WeaklyMixedInteger{s.size()}); | 
817  |  | }  | 
818  |  |  | 
819  |  | // AbslHashValue for hashing std::unordered_multiset  | 
820  |  | template <typename H, typename Key, typename Hash, typename KeyEqual,  | 
821  |  |           typename Alloc>  | 
822  |  | typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(  | 
823  |  |     H hash_state,  | 
824  |  |     const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) { | 
825  |  |   return H::combine(  | 
826  |  |       H::combine_unordered(std::move(hash_state), s.begin(), s.end()),  | 
827  |  |       WeaklyMixedInteger{s.size()}); | 
828  |  | }  | 
829  |  |  | 
830  |  | // AbslHashValue for hashing std::unordered_map  | 
831  |  | template <typename H, typename Key, typename T, typename Hash,  | 
832  |  |           typename KeyEqual, typename Alloc>  | 
833  |  | typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,  | 
834  |  |                         H>::type  | 
835  |  | AbslHashValue(H hash_state,  | 
836  |  |               const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) { | 
837  |  |   return H::combine(  | 
838  |  |       H::combine_unordered(std::move(hash_state), s.begin(), s.end()),  | 
839  |  |       WeaklyMixedInteger{s.size()}); | 
840  |  | }  | 
841  |  |  | 
842  |  | // AbslHashValue for hashing std::unordered_multimap  | 
843  |  | template <typename H, typename Key, typename T, typename Hash,  | 
844  |  |           typename KeyEqual, typename Alloc>  | 
845  |  | typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,  | 
846  |  |                         H>::type  | 
847  |  | AbslHashValue(H hash_state,  | 
848  |  |               const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) { | 
849  |  |   return H::combine(  | 
850  |  |       H::combine_unordered(std::move(hash_state), s.begin(), s.end()),  | 
851  |  |       WeaklyMixedInteger{s.size()}); | 
852  |  | }  | 
853  |  |  | 
854  |  | // -----------------------------------------------------------------------------  | 
855  |  | // AbslHashValue for Wrapper Types  | 
856  |  | // -----------------------------------------------------------------------------  | 
857  |  |  | 
858  |  | // AbslHashValue for hashing std::reference_wrapper  | 
859  |  | template <typename H, typename T>  | 
860  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
861  |  |     H hash_state, std::reference_wrapper<T> opt) { | 
862  |  |   return H::combine(std::move(hash_state), opt.get());  | 
863  |  | }  | 
864  |  |  | 
865  |  | // AbslHashValue for hashing absl::optional  | 
866  |  | template <typename H, typename T>  | 
867  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
868  |  |     H hash_state, const absl::optional<T>& opt) { | 
869  |  |   if (opt) hash_state = H::combine(std::move(hash_state), *opt);  | 
870  |  |   return H::combine(std::move(hash_state), opt.has_value());  | 
871  |  | }  | 
872  |  |  | 
873  |  | template <typename H>  | 
874  |  | struct VariantVisitor { | 
875  |  |   H&& hash_state;  | 
876  |  |   template <typename T>  | 
877  |  |   H operator()(const T& t) const { | 
878  |  |     return H::combine(std::move(hash_state), t);  | 
879  |  |   }  | 
880  |  | };  | 
881  |  |  | 
882  |  | // AbslHashValue for hashing absl::variant  | 
883  |  | template <typename H, typename... T>  | 
884  |  | typename std::enable_if<conjunction<is_hashable<T>...>::value, H>::type  | 
885  |  | AbslHashValue(H hash_state, const absl::variant<T...>& v) { | 
886  |  |   if (!v.valueless_by_exception()) { | 
887  |  |     hash_state = absl::visit(VariantVisitor<H>{std::move(hash_state)}, v); | 
888  |  |   }  | 
889  |  |   return H::combine(std::move(hash_state), v.index());  | 
890  |  | }  | 
891  |  |  | 
892  |  | // -----------------------------------------------------------------------------  | 
893  |  | // AbslHashValue for Other Types  | 
894  |  | // -----------------------------------------------------------------------------  | 
895  |  |  | 
896  |  | // AbslHashValue for hashing std::bitset is not defined on Little Endian  | 
897  |  | // platforms, for the same reason as for vector<bool> (see std::vector above):  | 
898  |  | // It does not expose the raw bytes, and a fallback to std::hash<> is most  | 
899  |  | // likely faster.  | 
900  |  |  | 
901  |  | #if defined(ABSL_IS_BIG_ENDIAN) && \  | 
902  |  |     (defined(__GLIBCXX__) || defined(__GLIBCPP__))  | 
903  |  | // AbslHashValue for hashing std::bitset  | 
904  |  | //  | 
905  |  | // std::hash in libstdc++ does not work correctly with std::bitset on Big Endian  | 
906  |  | // platforms therefore we need to implement a custom AbslHashValue for it. More  | 
907  |  | // details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531  | 
908  |  | template <typename H, size_t N>  | 
909  |  | H AbslHashValue(H hash_state, const std::bitset<N>& set) { | 
910  |  |   typename H::AbslInternalPiecewiseCombiner combiner;  | 
911  |  |   for (size_t i = 0; i < N; i++) { | 
912  |  |     unsigned char c = static_cast<unsigned char>(set[i]);  | 
913  |  |     hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));  | 
914  |  |   }  | 
915  |  |   return H::combine(combiner.finalize(std::move(hash_state)), N);  | 
916  |  | }  | 
917  |  | #endif  | 
918  |  |  | 
919  |  | // -----------------------------------------------------------------------------  | 
920  |  |  | 
921  |  | // Mixes all values in the range [data, data+size) into the hash state.  | 
922  |  | // This overload accepts only uniquely-represented types, and hashes them by  | 
923  |  | // hashing the entire range of bytes.  | 
924  |  | template <typename H, typename T>  | 
925  |  | typename std::enable_if<is_uniquely_represented<T>::value, H>::type  | 
926  | 30  | hash_range_or_bytes(H hash_state, const T* data, size_t size) { | 
927  | 30  |   const auto* bytes = reinterpret_cast<const unsigned char*>(data);  | 
928  | 30  |   return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size);  | 
929  | 30  | }  | 
930  |  |  | 
931  |  | template <typename H, typename T>  | 
932  |  | typename std::enable_if<!is_uniquely_represented<T>::value, H>::type  | 
933  |  | hash_range_or_bytes(H hash_state, const T* data, size_t size) { | 
934  |  |   for (const auto end = data + size; data < end; ++data) { | 
935  |  |     hash_state = H::combine(std::move(hash_state), *data);  | 
936  |  |   }  | 
937  |  |   return H::combine(std::move(hash_state),  | 
938  |  |                     hash_internal::WeaklyMixedInteger{size}); | 
939  |  | }  | 
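A hedged sketch of which overload is selected, based on the trait specializations earlier in this file: a range of uint32_t takes the bytewise path, while a range of bool (not uniquely represented) is hashed element by element. `HashBothRanges` is a hypothetical helper used only for illustration:

  template <typename H>
  H HashBothRanges(H state) {
    uint32_t ints[4] = {1, 2, 3, 4};
    bool flags[2] = {true, false};
    // uint32_t is uniquely represented -> one contiguous 16-byte combine.
    state = hash_range_or_bytes(std::move(state), ints, 4);
    // bool is not -> per-element combine, followed by a weak mix of the size.
    return hash_range_or_bytes(std::move(state), flags, 2);
  }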
940  |  |  | 
941  |  | inline constexpr uint64_t kMul = uint64_t{0x79d5f9e0de1e8cf5}; | 
942  |  |  | 
943  |  | // Random data taken from the hexadecimal digits of Pi's fractional component.  | 
944  |  | // https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number  | 
945  |  | ABSL_CACHELINE_ALIGNED inline constexpr uint64_t kStaticRandomData[] = { | 
946  |  |     0x243f'6a88'85a3'08d3, 0x1319'8a2e'0370'7344, 0xa409'3822'299f'31d0,  | 
947  |  |     0x082e'fa98'ec4e'6c89, 0x4528'21e6'38d0'1377,  | 
948  |  | };  | 
949  |  |  | 
950  |  | // Extremely weak mixture of length that is mixed into the state before  | 
951  |  | // combining the data. It is used only for small strings. This also ensures that  | 
952  |  | // we have high entropy in all bits of the state.  | 
953  | 12  | inline uint64_t PrecombineLengthMix(uint64_t state, size_t len) { | 
954  | 12  |   ABSL_ASSUME(len + sizeof(uint64_t) <= sizeof(kStaticRandomData));  | 
955  | 12  |   uint64_t data = absl::base_internal::UnalignedLoad64(  | 
956  | 12  |       reinterpret_cast<const unsigned char*>(&kStaticRandomData[0]) + len);  | 
957  | 12  |   return state ^ data;  | 
958  | 12  | }  | 
959  |  |  | 
960  | 92  | ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t Mix(uint64_t lhs, uint64_t rhs) { | 
961  |  |   // Though the 128-bit product needs multiple instructions on non-x86-64  | 
962  |  |   // platforms, it is still a good balance between speed and hash quality.  | 
963  | 92  |   absl::uint128 m = lhs;  | 
964  | 92  |   m *= rhs;  | 
965  | 92  |   return Uint128High64(m) ^ Uint128Low64(m);  | 
966  | 92  | }  | 
967  |  |  | 
968  |  | // Reads 8 bytes from p.  | 
969  | 32  | inline uint64_t Read8(const unsigned char* p) { | 
970  |  | // Suppress erroneous array bounds errors on GCC.  | 
971  |  | #if defined(__GNUC__) && !defined(__clang__)  | 
972  |  | #pragma GCC diagnostic push  | 
973  |  | #pragma GCC diagnostic ignored "-Warray-bounds"  | 
974  |  | #endif  | 
975  | 32  |   return absl::base_internal::UnalignedLoad64(p);  | 
976  |  | #if defined(__GNUC__) && !defined(__clang__)  | 
977  |  | #pragma GCC diagnostic pop  | 
978  |  | #endif  | 
979  | 32  | }  | 
980  |  |  | 
981  |  | // Reads 9 to 16 bytes from p.  | 
982  |  | // The first 8 bytes are in .first, and the rest of the bytes are in .second  | 
983  |  | // along with duplicated bytes from .first if len<16.  | 
984  |  | inline std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,  | 
985  | 0  |                                                size_t len) { | 
986  | 0  |   return {Read8(p), Read8(p + len - 8)}; | 
987  | 0  | }  | 
988  |  |  | 
989  |  | // Reads 4 to 8 bytes from p.  | 
990  |  | // Bytes are permuted and some input bytes may be duplicated in output.  | 
991  | 4  | inline uint64_t Read4To8(const unsigned char* p, size_t len) { | 
992  |  |   // If `len < 8`, we duplicate bytes. We always put low memory at the end.  | 
993  |  |   // E.g., on little endian platforms:  | 
994  |  |   // `ABCD` will be read as `ABCDABCD`.  | 
995  |  |   // `ABCDE` will be read as `BCDEABCD`.  | 
996  |  |   // `ABCDEF` will be read as `CDEFABCD`.  | 
997  |  |   // `ABCDEFG` will be read as `DEFGABCD`.  | 
998  |  |   // `ABCDEFGH` will be read as `EFGHABCD`.  | 
999  |  |   // We also do not care about endianness. On big-endian platforms, bytes will  | 
1000  |  |   // be permuted differently. We always shift low memory by 32, because that  | 
1001  |  |   // can be pipelined earlier. Reading high memory requires computing  | 
1002  |  |   // `p + len - 4`.  | 
1003  | 4  |   uint64_t most_significant =  | 
1004  | 4  |       static_cast<uint64_t>(absl::base_internal::UnalignedLoad32(p)) << 32;  | 
1005  | 4  |   uint64_t least_significant =  | 
1006  | 4  |       absl::base_internal::UnalignedLoad32(p + len - 4);  | 
1007  | 4  |   return most_significant | least_significant;  | 
1008  | 4  | }  | 
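A sketch of the two overlapping 32-bit loads above, written with memcpy for clarity (illustration only). For len == 5 over the bytes "ABCDE", the high half is loaded from p ("ABCD") and the low half from p + len - 4 ("BCDE"), which on a little-endian machine is exactly the "BCDEABCD" permutation described in the comments.

#include <cstddef>
#include <cstdint>
#include <cstring>

uint64_t Read4To8Sketch(const unsigned char* p, size_t len) {
  uint32_t hi, lo;
  std::memcpy(&hi, p, sizeof(hi));            // first four bytes
  std::memcpy(&lo, p + len - 4, sizeof(lo));  // last four bytes (may overlap)
  return (static_cast<uint64_t>(hi) << 32) | lo;
}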
1009  |  |  | 
1010  |  | // Reads 1 to 3 bytes from p. Some input bytes may be duplicated in output.  | 
1011  | 0  | inline uint32_t Read1To3(const unsigned char* p, size_t len) { | 
1012  |  |   // The trick used by this implementation is to avoid branches.  | 
1013  |  |   // We always read three bytes by duplicating.  | 
1014  |  |   // E.g.,  | 
1015  |  |   // `A` is read as `AAA`.  | 
1016  |  |   // `AB` is read as `ABB`.  | 
1017  |  |   // `ABC` is read as `ABC`.  | 
1018  |  |   // We always shift `p[0]` so that it can be pipelined better.  | 
1019  |  |   // Other bytes require extra computation to find indices.  | 
1020  | 0  |   uint32_t mem0 = (static_cast<uint32_t>(p[0]) << 16) | p[len - 1];  | 
1021  | 0  |   uint32_t mem1 = static_cast<uint32_t>(p[len / 2]) << 8;  | 
1022  | 0  |   return mem0 | mem1;  | 
1023  | 0  | }  | 
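Worked example of the branch-free read above for the two-byte input "AB": p[0] lands in the top byte, p[len - 1] in the bottom byte, and p[len / 2] (here also 'B') in the middle byte, giving the "ABB" duplication from the comments. Illustration only:

#include <cassert>
#include <cstddef>
#include <cstdint>

void Read1To3Example() {
  const unsigned char ab[] = {'A', 'B'};
  const size_t len = 2;
  // Same arithmetic as Read1To3 above, spelled out for len == 2.
  uint32_t mem0 = (static_cast<uint32_t>(ab[0]) << 16) | ab[len - 1];
  uint32_t mem1 = static_cast<uint32_t>(ab[len / 2]) << 8;
  assert((mem0 | mem1) ==
         ((uint32_t{'A'} << 16) | (uint32_t{'B'} << 8) | uint32_t{'B'}));
}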
1024  |  |  | 
1025  |  | ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineRawImpl(uint64_t state,  | 
1026  | 4  |                                                             uint64_t value) { | 
1027  | 4  |   return Mix(state ^ value, kMul);  | 
1028  | 4  | }  | 
1029  |  |  | 
1030  |  | // Slow dispatch path for calls to CombineContiguousImpl with a size argument  | 
1031  |  | // larger than the sizes handled inline. Has the same effect as calling  | 
1032  |  | // CombineContiguousImpl() repeatedly with the chunk stride size.  | 
1033  |  | uint64_t CombineLargeContiguousImplOn32BitLengthGt8(const unsigned char* first,  | 
1034  |  |                                                     size_t len, uint64_t state);  | 
1035  |  | uint64_t CombineLargeContiguousImplOn64BitLengthGt32(const unsigned char* first,  | 
1036  |  |                                                      size_t len,  | 
1037  |  |                                                      uint64_t state);  | 
1038  |  |  | 
1039  |  | ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineSmallContiguousImpl(  | 
1040  | 4  |     uint64_t state, const unsigned char* first, size_t len) { | 
1041  | 4  |   ABSL_ASSUME(len <= 8);  | 
1042  | 4  |   uint64_t v;  | 
1043  | 4  |   if (len >= 4) { | 
1044  | 4  |     v = Read4To8(first, len);  | 
1045  | 4  |   } else if (len > 0) { | 
1046  | 0  |     v = Read1To3(first, len);  | 
1047  | 0  |   } else { | 
1048  |  |     // Empty string must modify the state.  | 
1049  | 0  |     v = 0x57;  | 
1050  | 0  |   }  | 
1051  | 4  |   return CombineRawImpl(state, v);  | 
1052  | 4  | }  | 
1053  |  |  | 
1054  |  | ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineContiguousImpl9to16(  | 
1055  | 0  |     uint64_t state, const unsigned char* first, size_t len) { | 
1056  | 0  |   ABSL_ASSUME(len >= 9);  | 
1057  | 0  |   ABSL_ASSUME(len <= 16);  | 
1058  |  |   // Note: any time one half of the mix function becomes zero it will fail to  | 
1059  |  |   // incorporate any bits from the other half. However, only 1 in 2^64 values  | 
1060  |  |   // on each side achieves this, and only when the size is exactly 16 -- for  | 
1061  |  |   // smaller sizes there is an overlapping byte that makes this impossible  | 
1062  |  |   // unless the seed is *also* incredibly unlucky.  | 
1063  | 0  |   auto p = Read9To16(first, len);  | 
1064  | 0  |   return Mix(state ^ p.first, kMul ^ p.second);  | 
1065  | 0  | }  | 
1066  |  |  | 
1067  |  | ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineContiguousImpl17to32(  | 
1068  | 8  |     uint64_t state, const unsigned char* first, size_t len) { | 
1069  | 8  |   ABSL_ASSUME(len >= 17);  | 
1070  | 8  |   ABSL_ASSUME(len <= 32);  | 
1071  |  |   // Do two mixes of overlapping 16-byte ranges in parallel to minimize  | 
1072  |  |   // latency.  | 
1073  | 8  |   const uint64_t m0 =  | 
1074  | 8  |       Mix(Read8(first) ^ kStaticRandomData[1], Read8(first + 8) ^ state);  | 
1075  |  |  | 
1076  | 8  |   const unsigned char* tail_16b_ptr = first + (len - 16);  | 
1077  | 8  |   const uint64_t m1 = Mix(Read8(tail_16b_ptr) ^ kStaticRandomData[3],  | 
1078  | 8  |                           Read8(tail_16b_ptr + 8) ^ state);  | 
1079  | 8  |   return m0 ^ m1;  | 
1080  | 8  | }  | 
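The two Mix calls above read overlapping 16-byte windows: one anchored at the start of the input and one at the end. A small sketch of the window placement, assuming 17 <= len <= 32 as the ABSL_ASSUMEs state:

#include <cstddef>
#include <utility>

// Returns the start offsets of the two 16-byte windows. They overlap by
// 32 - len bytes, which is why together they cover every input byte.
std::pair<size_t, size_t> WindowStarts17To32(size_t len) {
  return {0, len - 16};
}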
1081  |  |  | 
1082  |  | // Implementation of the base case for combine_contiguous where we actually  | 
1083  |  | // mix the bytes into the state.  | 
1084  |  | // Dispatch to different implementations of combine_contiguous depending  | 
1085  |  | // on the value of `sizeof(size_t)`.  | 
1086  |  | inline uint64_t CombineContiguousImpl(  | 
1087  |  |     uint64_t state, const unsigned char* first, size_t len,  | 
1088  | 0  |     std::integral_constant<int, 4> /* sizeof_size_t */) { | 
1089  | 0  |   // For large values we use CityHash; for small ones we use a custom  | 
1090  | 0  |   // low-latency hash.  | 
1091  | 0  |   if (len <= 8) { | 
1092  | 0  |     return CombineSmallContiguousImpl(PrecombineLengthMix(state, len), first,  | 
1093  | 0  |                                       len);  | 
1094  | 0  |   }  | 
1095  | 0  |   return CombineLargeContiguousImplOn32BitLengthGt8(first, len, state);  | 
1096  | 0  | }  | 
1097  |  |  | 
1098  |  | inline uint64_t CombineContiguousImpl(  | 
1099  |  |     uint64_t state, const unsigned char* first, size_t len,  | 
1100  | 30  |     std::integral_constant<int, 8> /* sizeof_size_t */) { | 
1101  |  |   // For large values we use LowLevelHash or CityHash depending on the  | 
1102  |  |   // platform; for small ones we use a custom low-latency hash.  | 
1103  | 30  |   if (len <= 8) { | 
1104  | 4  |     return CombineSmallContiguousImpl(PrecombineLengthMix(state, len), first,  | 
1105  | 4  |                                       len);  | 
1106  | 4  |   }  | 
1107  | 26  |   if (len <= 16) { | 
1108  | 0  |     return CombineContiguousImpl9to16(PrecombineLengthMix(state, len), first,  | 
1109  | 0  |                                       len);  | 
1110  | 0  |   }  | 
1111  | 26  |   if (len <= 32) { | 
1112  | 8  |     return CombineContiguousImpl17to32(PrecombineLengthMix(state, len), first,  | 
1113  | 8  |                                        len);  | 
1114  | 8  |   }  | 
1115  |  |   // We must not mix length into the state here because calling  | 
1116  |  |   // CombineContiguousImpl twice with PiecewiseChunkSize() must be equivalent  | 
1117  |  |   // to calling CombineLargeContiguousImpl once with 2 * PiecewiseChunkSize().  | 
1118  | 18  |   return CombineLargeContiguousImplOn64BitLengthGt32(first, len, state);  | 
1119  | 26  | }  | 
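A usage-level illustration of the dispatch above (assuming a 64-bit size_t build and absl::Hash; the contents are arbitrary): the four lengths below land in the <= 8, 9-16, 17-32, and > 32 buckets respectively.

#include <string>
#include "absl/hash/hash.h"

void DispatchBucketsExample() {
  absl::Hash<std::string> hasher;
  (void)hasher(std::string(5, 'x'));     // CombineSmallContiguousImpl
  (void)hasher(std::string(12, 'x'));    // CombineContiguousImpl9to16
  (void)hasher(std::string(24, 'x'));    // CombineContiguousImpl17to32
  (void)hasher(std::string(1000, 'x'));  // large-input path
}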
1120  |  |  | 
1121  |  | #if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \  | 
1122  |  |     ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_  | 
1123  |  | #define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 1  | 
1124  |  | #else  | 
1125  |  | #define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 0  | 
1126  |  | #endif  | 
1127  |  |  | 
1128  |  | // Type trait to select the appropriate hash implementation to use.  | 
1129  |  | // HashSelect::Apply<T> gives the proper hash implementation, to be invoked  | 
1130  |  | // as:  | 
1131  |  | //   HashSelect::Apply<T>::Invoke(state, value)  | 
1132  |  | // Also, HashSelect::Apply<T>::value is a boolean equal to `true` if there is  | 
1133  |  | // a valid `Invoke` function. Types that are not hashable have a ::value of  | 
1134  |  | // `false`.  | 
1135  |  | struct HashSelect { | 
1136  |  |  private:  | 
1137  |  |   struct WeaklyMixedIntegerProbe { | 
1138  |  |     template <typename H>  | 
1139  |  |     static H Invoke(H state, WeaklyMixedInteger value) { | 
1140  |  |       return hash_internal::hash_weakly_mixed_integer(std::move(state), value);  | 
1141  |  |     }  | 
1142  |  |   };  | 
1143  |  |  | 
1144  |  |   struct State : HashStateBase<State> { | 
1145  |  |     static State combine_contiguous(State hash_state, const unsigned char*,  | 
1146  |  |                                     size_t);  | 
1147  |  |     using State::HashStateBase::combine_contiguous;  | 
1148  |  |     static State combine_raw(State state, uint64_t value);  | 
1149  |  |     static State combine_weakly_mixed_integer(State hash_state,  | 
1150  |  |                                               WeaklyMixedInteger value);  | 
1151  |  |   };  | 
1152  |  |  | 
1153  |  |   struct UniquelyRepresentedProbe { | 
1154  |  |     template <typename H, typename T>  | 
1155  |  |     static auto Invoke(H state, const T& value)  | 
1156  | 0  |         -> absl::enable_if_t<is_uniquely_represented<T>::value, H> { | 
1157  | 0  |       return hash_internal::hash_bytes(std::move(state), value);  | 
1158  | 0  |     }  Unexecuted instantiations: UniquelyRepresentedProbe::Invoke<MixingHashState, unsigned long>, UniquelyRepresentedProbe::Invoke<MixingHashState, int>  | 
1159  |  |   };  | 
1160  |  |  | 
1161  |  |   struct HashValueProbe { | 
1162  |  |     template <typename H, typename T>  | 
1163  |  |     static auto Invoke(H state, const T& value) -> absl::enable_if_t<  | 
1164  |  |         std::is_same<H,  | 
1165  |  |                      decltype(AbslHashValue(std::move(state), value))>::value,  | 
1166  | 30  |         H> { | 
1167  | 30  |       return AbslHashValue(std::move(state), value);  | 
1168  | 30  |     }  HashValueProbe::Invoke<MixingHashState, std::string_view>: 30 executions. Unexecuted instantiations: absl::Cord, std::tuple<unsigned long const&>, std::tuple<std::string_view const&, int const&>  | 
1169  |  |   };  | 
1170  |  |  | 
1171  |  |   struct LegacyHashProbe { | 
1172  |  | #if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_  | 
1173  |  |     template <typename H, typename T>  | 
1174  |  |     static auto Invoke(H state, const T& value) -> absl::enable_if_t<  | 
1175  |  |         std::is_convertible<  | 
1176  |  |             decltype(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>()(value)),  | 
1177  |  |             size_t>::value,  | 
1178  |  |         H> { | 
1179  |  |       return hash_internal::hash_bytes(  | 
1180  |  |           std::move(state),  | 
1181  |  |           ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>{}(value)); | 
1182  |  |     }  | 
1183  |  | #endif  // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_  | 
1184  |  |   };  | 
1185  |  |  | 
1186  |  |   struct StdHashProbe { | 
1187  |  |     template <typename H, typename T>  | 
1188  |  |     static auto Invoke(H state, const T& value)  | 
1189  |  |         -> absl::enable_if_t<type_traits_internal::IsHashable<T>::value, H> { | 
1190  |  |       return hash_internal::hash_bytes(std::move(state), std::hash<T>{}(value)); | 
1191  |  |     }  | 
1192  |  |   };  | 
1193  |  |  | 
1194  |  |   template <typename Hash, typename T>  | 
1195  |  |   struct Probe : Hash { | 
1196  |  |    private:  | 
1197  |  |     template <typename H, typename = decltype(H::Invoke(  | 
1198  |  |                               std::declval<State>(), std::declval<const T&>()))>  | 
1199  |  |     static std::true_type Test(int);  | 
1200  |  |     template <typename U>  | 
1201  |  |     static std::false_type Test(char);  | 
1202  |  |  | 
1203  |  |    public:  | 
1204  |  |     static constexpr bool value = decltype(Test<Hash>(0))::value;  | 
1205  |  |   };  | 
1206  |  |  | 
1207  |  |  public:  | 
1208  |  |   // Probe each implementation in order.  | 
1209  |  |   // absl::disjunction provides short-circuiting with respect to instantiation.  | 
1210  |  |   template <typename T>  | 
1211  |  |   using Apply = absl::disjunction<         //  | 
1212  |  |       Probe<WeaklyMixedIntegerProbe, T>,   //  | 
1213  |  |       Probe<UniquelyRepresentedProbe, T>,  //  | 
1214  |  |       Probe<HashValueProbe, T>,            //  | 
1215  |  |       Probe<LegacyHashProbe, T>,           //  | 
1216  |  |       Probe<StdHashProbe, T>,              //  | 
1217  |  |       std::false_type>;  | 
1218  |  | };  | 
1219  |  |  | 
1220  |  | template <typename T>  | 
1221  |  | struct is_hashable  | 
1222  |  |     : std::integral_constant<bool, HashSelect::template Apply<T>::value> {}; | 
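As an illustration of the probe order, a hypothetical user type that provides a friend AbslHashValue overload is matched by HashValueProbe, which is what makes is_hashable<MyPair> true and absl::Hash<MyPair> usable:

#include <cstddef>
#include <cstdint>
#include <utility>
#include "absl/hash/hash.h"

struct MyPair {
  int64_t a;
  int64_t b;
  // Detected by HashValueProbe; the integral members themselves go through
  // the uniquely-represented path when combined.
  template <typename H>
  friend H AbslHashValue(H state, const MyPair& p) {
    return H::combine(std::move(state), p.a, p.b);
  }
};

size_t HashMyPair(const MyPair& p) { return absl::Hash<MyPair>{}(p); }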
1223  |  |  | 
1224  |  | class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> { | 
1225  |  |   template <typename T>  | 
1226  |  |   using IntegralFastPath =  | 
1227  |  |       conjunction<std::is_integral<T>, is_uniquely_represented<T>,  | 
1228  |  |                   FitsIn64Bits<T>>;  | 
1229  |  |  | 
1230  |  |  public:  | 
1231  |  |   // Move only  | 
1232  |  |   MixingHashState(MixingHashState&&) = default;  | 
1233  |  |   MixingHashState& operator=(MixingHashState&&) = default;  | 
1234  |  |  | 
1235  |  |   // Fundamental base case for hash recursion: mixes the given range of bytes  | 
1236  |  |   // into the hash state.  | 
1237  |  |   static MixingHashState combine_contiguous(MixingHashState hash_state,  | 
1238  |  |                                             const unsigned char* first,  | 
1239  | 30  |                                             size_t size) { | 
1240  | 30  |     return MixingHashState(  | 
1241  | 30  |         CombineContiguousImpl(hash_state.state_, first, size,  | 
1242  | 30  |                               std::integral_constant<int, sizeof(size_t)>{})); | 
1243  | 30  |   }  | 
1244  |  |   using MixingHashState::HashStateBase::combine_contiguous;  | 
1245  |  |  | 
1246  |  |   template <typename T>  | 
1247  | 0  |   static size_t hash(const T& value) { | 
1248  | 0  |     return hash_with_seed(value, Seed());  | 
1249  | 0  |   }  Unexecuted instantiations of MixingHashState::hash<T> for T in: std::string_view, absl::Cord, std::tuple<unsigned long const&>, std::tuple<std::string_view const&, int const&>  | 
1250  |  |  | 
1251  |  |   // For performance reasons in non-opt mode, we specialize this for  | 
1252  |  |   // integral types.  | 
1253  |  |   // Otherwise we would be instantiating and calling dozens of functions for  | 
1254  |  |   // something that is just one multiplication and a couple of XORs.  | 
1255  |  |   // The result should be the same as running the whole algorithm, but faster.  | 
1256  |  |   template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>  | 
1257  |  |   static size_t hash_with_seed(T value, size_t seed) { | 
1258  |  |     return static_cast<size_t>(  | 
1259  |  |         CombineRawImpl(seed, static_cast<std::make_unsigned_t<T>>(value)));  | 
1260  |  |   }  | 
1261  |  |  | 
1262  |  |   template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>  | 
1263  | 30  |   static size_t hash_with_seed(const T& value, size_t seed) { | 
1264  | 30  |     return static_cast<size_t>(combine(MixingHashState{seed}, value).state_); | 
1265  | 30  |   }  hash_with_seed<std::string_view>: 30 executions. Unexecuted instantiations: absl::Cord, std::tuple<unsigned long const&>, std::tuple<std::string_view const&, int const&>  | 
1266  |  |  | 
1267  |  |  private:  | 
1268  |  |   friend class MixingHashState::HashStateBase;  | 
1269  |  |   template <typename H>  | 
1270  |  |   friend H absl::hash_internal::hash_weakly_mixed_integer(H,  | 
1271  |  |                                                           WeaklyMixedInteger);  | 
1272  |  |   // Allow the HashState type-erasure implementation to invoke  | 
1273  |  |   // RunCombinedUnordered() directly.  | 
1274  |  |   friend class absl::HashState;  | 
1275  |  |   friend struct CombineRaw;  | 
1276  |  |  | 
1277  |  |   // For use in Seed().  | 
1278  |  |   static const void* const kSeed;  | 
1279  |  |  | 
1280  |  |   // Invoked only once for a given argument; that plus the fact that this is  | 
1281  |  |   // move-only ensures that there is only one non-moved-from object.  | 
1282  | 0  |   MixingHashState() : state_(Seed()) {} | 
1283  |  |  | 
1284  |  |   // Workaround for MSVC bug.  | 
1285  |  |   // We make the type copyable to fix the calling convention, even though we  | 
1286  |  |   // never actually copy it. Keep it private to not affect the public API of the  | 
1287  |  |   // type.  | 
1288  |  |   MixingHashState(const MixingHashState&) = default;  | 
1289  |  |  | 
1290  | 60  |   explicit MixingHashState(uint64_t state) : state_(state) {} | 
1291  |  |  | 
1292  |  |   // Combines a raw value from e.g. integrals/floats/pointers/etc. This allows  | 
1293  |  |   // us to be consistent with IntegralFastPath when combining raw types, but  | 
1294  |  |   // optimize Read1To3 and Read4To8 differently for the string case.  | 
1295  |  |   static MixingHashState combine_raw(MixingHashState hash_state,  | 
1296  | 0  |                                      uint64_t value) { | 
1297  | 0  |     return MixingHashState(CombineRawImpl(hash_state.state_, value));  | 
1298  | 0  |   }  | 
1299  |  |  | 
1300  |  |   static MixingHashState combine_weakly_mixed_integer(  | 
1301  | 0  |       MixingHashState hash_state, WeaklyMixedInteger value) { | 
1302  | 0  |     // Some transformation of the value is needed so that even an empty  | 
1303  | 0  |     // string/container changes the mixing hash state.  | 
1304  | 0  |     // We use a constant that fits in 8 bits so the compiler can emit  | 
1305  | 0  |     // `add` with a one-byte immediate operand.  | 
1306  | 0  |     return MixingHashState{hash_state.state_ + (0x57 + value.value)}; | 
1307  | 0  |   }  | 
1308  |  |  | 
1309  |  |   template <typename CombinerT>  | 
1310  |  |   static MixingHashState RunCombineUnordered(MixingHashState state,  | 
1311  |  |                                              CombinerT combiner) { | 
1312  |  |     uint64_t unordered_state = 0;  | 
1313  |  |     combiner(MixingHashState{}, [&](MixingHashState& inner_state) { | 
1314  |  |       // Add the hash state of the element to the running total, but mix the  | 
1315  |  |       // carry bit back into the low bit.  This is intended to avoid losing  | 
1316  |  |       // entropy to overflow, especially when unordered_multisets contain  | 
1317  |  |       // multiple copies of the same value.  | 
1318  |  |       auto element_state = inner_state.state_;  | 
1319  |  |       unordered_state += element_state;  | 
1320  |  |       if (unordered_state < element_state) { | 
1321  |  |         ++unordered_state;  | 
1322  |  |       }  | 
1323  |  |       inner_state = MixingHashState{}; | 
1324  |  |     });  | 
1325  |  |     return MixingHashState::combine(std::move(state), unordered_state);  | 
1326  |  |   }  | 
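The accumulation inside the callback above is an add-with-carry-fold: element hashes are summed, and when the 64-bit addition wraps, the lost carry is folded back into the low bit so that duplicate elements (common in unordered_multiset) still perturb the result. A minimal sketch of just that step:

#include <cstdint>

uint64_t AddWithCarryFold(uint64_t unordered_state, uint64_t element_state) {
  unordered_state += element_state;
  if (unordered_state < element_state) {  // unsigned overflow occurred
    ++unordered_state;                    // fold the carry into the low bit
  }
  return unordered_state;
}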
1327  |  |  | 
1328  |  |   // A non-deterministic seed.  | 
1329  |  |   //  | 
1330  |  |   // The current purpose of this seed is to generate non-deterministic results  | 
1331  |  |   // and prevent having users depend on the particular hash values.  | 
1332  |  |   // It is not meant as a security feature right now, but it leaves the door  | 
1333  |  |   // open to upgrade it to a true per-process random seed. A true random seed  | 
1334  |  |   // costs more and we don't need to pay for that right now.  | 
1335  |  |   //  | 
1336  |  |   // On platforms with ASLR, we take advantage of it to make a per-process  | 
1337  |  |   // random value.  | 
1338  |  |   // See https://en.wikipedia.org/wiki/Address_space_layout_randomization  | 
1339  |  |   //  | 
1340  |  |   // On other platforms this is still going to be non-deterministic but most  | 
1341  |  |   // probably per-build and not per-process.  | 
1342  | 0  |   ABSL_ATTRIBUTE_ALWAYS_INLINE static size_t Seed() { | 
1343  | 0  | #if (!defined(__clang__) || __clang_major__ > 11) && \  | 
1344  | 0  |     (!defined(__apple_build_version__) ||            \  | 
1345  | 0  |      __apple_build_version__ >= 19558921)  // Xcode 12  | 
1346  | 0  |     return static_cast<size_t>(reinterpret_cast<uintptr_t>(&kSeed));  | 
1347  |  | #else  | 
1348  |  |     // Work around the absence of  | 
1349  |  |     // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.  | 
1350  |  |     return static_cast<size_t>(reinterpret_cast<uintptr_t>(kSeed));  | 
1351  |  | #endif  | 
1352  | 0  |   }  | 
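A sketch of the seeding idea described above, not the library's seed: under ASLR the load address of any object with static storage duration varies per process, so its address doubles as a cheap per-process value.

#include <cstddef>
#include <cstdint>

namespace {
const char kSeedAnchor = 0;  // its address shifts with the image base under ASLR
}  // namespace

size_t ProcessLocalValueSketch() {
  return static_cast<size_t>(reinterpret_cast<uintptr_t>(&kSeedAnchor));
}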
1353  |  |  | 
1354  |  |   uint64_t state_;  | 
1355  |  | };  | 
1356  |  |  | 
1357  |  | struct AggregateBarrier {}; | 
1358  |  |  | 
1359  |  | // Add a private base class to make sure this type is not an aggregate.  | 
1360  |  | // Aggregates can be aggregate-initialized even if the default constructor is  | 
1361  |  | // deleted.  | 
1362  |  | struct PoisonedHash : private AggregateBarrier { | 
1363  |  |   PoisonedHash() = delete;  | 
1364  |  |   PoisonedHash(const PoisonedHash&) = delete;  | 
1365  |  |   PoisonedHash& operator=(const PoisonedHash&) = delete;  | 
1366  |  | };  | 
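Why the private base is needed (illustration, pre-C++20 rules): a struct whose constructors are merely deleted is still an aggregate, so brace initialization would sidestep the deleted constructors; any private base disqualifies the type from being an aggregate.

struct DeletedCtorOnly {
  DeletedCtorOnly() = delete;
};
// DeletedCtorOnly x{};   // compiles in C++14/17: aggregate initialization.

struct NotAnAggregate : private DeletedCtorOnly {
  NotAnAggregate() = delete;
};
// NotAnAggregate y{};    // error: not an aggregate, and the ctor is deleted.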
1367  |  |  | 
1368  |  | template <typename T>  | 
1369  |  | struct HashImpl { | 
1370  | 0  |   size_t operator()(const T& value) const { | 
1371  | 0  |     return MixingHashState::hash(value);  | 
1372  | 0  |   }  Unexecuted instantiations of HashImpl<T>::operator() for T in: std::string_view, absl::Cord, std::tuple<unsigned long const&>, std::tuple<std::string_view const&, int const&>  | 
1373  |  |  | 
1374  |  |  private:  | 
1375  |  |   friend struct HashWithSeed;  | 
1376  |  |  | 
1377  | 30  |   size_t hash_with_seed(const T& value, size_t seed) const { | 
1378  | 30  |     return MixingHashState::hash_with_seed(value, seed);  | 
1379  | 30  |   }  HashImpl<std::string_view>::hash_with_seed: 30 executions. Unexecuted instantiation: HashImpl<absl::Cord>::hash_with_seed  | 
1380  |  | };  | 
1381  |  |  | 
1382  |  | template <typename T>  | 
1383  |  | struct Hash  | 
1384  |  |     : absl::conditional_t<is_hashable<T>::value, HashImpl<T>, PoisonedHash> {}; | 
1385  |  |  | 
1386  |  | template <typename H>  | 
1387  |  | template <typename T, typename... Ts>  | 
1388  | 30  | H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) { | 
1389  | 30  |   return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(  | 
1390  | 30  |                         std::move(state), value),  | 
1391  | 30  |                     values...);  | 
1392  | 30  | }  HashStateBase<MixingHashState>::combine<std::string_view>: 30 executions. Unexecuted instantiations: combine<absl::Cord>, combine<std::tuple<unsigned long const&>>, combine<unsigned long>, combine<std::tuple<std::string_view const&, int const&>>, combine<std::string_view, int>, combine<int>  | 
1393  |  |  | 
1394  |  | template <typename H>  | 
1395  |  | template <typename T>  | 
1396  | 30  | H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) { | 
1397  | 30  |   return hash_internal::hash_range_or_bytes(std::move(state), data, size);  | 
1398  | 30  | }  | 
1399  |  |  | 
1400  |  | template <typename H>  | 
1401  |  | template <typename I>  | 
1402  |  | H HashStateBase<H>::combine_unordered(H state, I begin, I end) { | 
1403  |  |   return H::RunCombineUnordered(std::move(state),  | 
1404  |  |                                 CombineUnorderedCallback<I>{begin, end}); | 
1405  |  | }  | 
1406  |  |  | 
1407  |  | template <typename H>  | 
1408  |  | H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,  | 
1409  | 0  |                                 size_t size) { | 
1410  | 0  |   if (position_ + size < PiecewiseChunkSize()) { | 
1411  | 0  |     // This partial chunk does not fill our existing buffer  | 
1412  | 0  |     memcpy(buf_ + position_, data, size);  | 
1413  | 0  |     position_ += size;  | 
1414  | 0  |     return state;  | 
1415  | 0  |   }  | 
1416  | 0  |   added_something_ = true;  | 
1417  | 0  |   // If the buffer is partially filled we need to complete the buffer  | 
1418  | 0  |   // and hash it.  | 
1419  | 0  |   if (position_ != 0) { | 
1420  | 0  |     const size_t bytes_needed = PiecewiseChunkSize() - position_;  | 
1421  | 0  |     memcpy(buf_ + position_, data, bytes_needed);  | 
1422  | 0  |     state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize());  | 
1423  | 0  |     data += bytes_needed;  | 
1424  | 0  |     size -= bytes_needed;  | 
1425  | 0  |   }  | 
1426  | 0  |  | 
1427  | 0  |   // Hash whatever chunks we can without copying  | 
1428  | 0  |   while (size >= PiecewiseChunkSize()) { | 
1429  | 0  |     state = H::combine_contiguous(std::move(state), data, PiecewiseChunkSize());  | 
1430  | 0  |     data += PiecewiseChunkSize();  | 
1431  | 0  |     size -= PiecewiseChunkSize();  | 
1432  | 0  |   }  | 
1433  | 0  |   // Fill the buffer with the remainder  | 
1434  | 0  |   memcpy(buf_, data, size);  | 
1435  | 0  |   position_ = size;  | 
1436  | 0  |   return state;  | 
1437  | 0  | }  | 
1438  |  |  | 
1439  |  | template <typename H>  | 
1440  | 0  | H PiecewiseCombiner::finalize(H state) { | 
1441  | 0  |   // Do not call combine_contiguous with an empty remainder, since doing so  | 
1442  | 0  |   // would modify the state.  | 
1443  | 0  |   if (added_something_ && position_ == 0) { | 
1444  | 0  |     return state;  | 
1445  | 0  |   }  | 
1446  | 0  |   // For an entirely empty input we still call combine_contiguous once.  | 
1447  | 0  |   return H::combine_contiguous(std::move(state), buf_, position_);  | 
1448  | 0  | }  | 
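A hedged usage sketch of the combiner defined above: a hypothetical type whose bytes are stored in several pieces can feed them to the hash state chunk by chunk and still get a result consistent with hashing one contiguous buffer. ChunkedBuffer and its member names are illustrative, not part of the library.

#include <string>
#include <utility>
#include <vector>
#include "absl/hash/hash.h"

struct ChunkedBuffer {
  std::vector<std::string> chunks;

  template <typename H>
  friend H AbslHashValue(H state, const ChunkedBuffer& buffer) {
    absl::hash_internal::PiecewiseCombiner combiner;
    for (const std::string& chunk : buffer.chunks) {
      state = combiner.add_buffer(
          std::move(state),
          reinterpret_cast<const unsigned char*>(chunk.data()), chunk.size());
    }
    // Flushes whatever remains in the combiner's internal buffer.
    return combiner.finalize(std::move(state));
  }
};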
1449  |  |  | 
1450  |  | }  // namespace hash_internal  | 
1451  |  | ABSL_NAMESPACE_END  | 
1452  |  | }  // namespace absl  | 
1453  |  |  | 
1454  |  | #endif  // ABSL_HASH_INTERNAL_HASH_H_  |