/src/abseil-cpp/absl/hash/internal/hash.h
Line  | Count  | Source (jump to first uncovered line)  | 
1  |  | // Copyright 2018 The Abseil Authors.  | 
2  |  | //  | 
3  |  | // Licensed under the Apache License, Version 2.0 (the "License");  | 
4  |  | // you may not use this file except in compliance with the License.  | 
5  |  | // You may obtain a copy of the License at  | 
6  |  | //  | 
7  |  | //      https://www.apache.org/licenses/LICENSE-2.0  | 
8  |  | //  | 
9  |  | // Unless required by applicable law or agreed to in writing, software  | 
10  |  | // distributed under the License is distributed on an "AS IS" BASIS,  | 
11  |  | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  | 
12  |  | // See the License for the specific language governing permissions and  | 
13  |  | // limitations under the License.  | 
14  |  | //  | 
15  |  | // -----------------------------------------------------------------------------  | 
16  |  | // File: hash.h  | 
17  |  | // -----------------------------------------------------------------------------  | 
18  |  | //  | 
19  |  | #ifndef ABSL_HASH_INTERNAL_HASH_H_  | 
20  |  | #define ABSL_HASH_INTERNAL_HASH_H_  | 
21  |  |  | 
22  |  | #include <algorithm>  | 
23  |  | #include <array>  | 
24  |  | #include <bitset>  | 
25  |  | #include <cmath>  | 
26  |  | #include <cstddef>  | 
27  |  | #include <cstring>  | 
28  |  | #include <deque>  | 
29  |  | #include <forward_list>  | 
30  |  | #include <functional>  | 
31  |  | #include <iterator>  | 
32  |  | #include <limits>  | 
33  |  | #include <list>  | 
34  |  | #include <map>  | 
35  |  | #include <memory>  | 
36  |  | #include <set>  | 
37  |  | #include <string>  | 
38  |  | #include <tuple>  | 
39  |  | #include <type_traits>  | 
40  |  | #include <unordered_map>  | 
41  |  | #include <unordered_set>  | 
42  |  | #include <utility>  | 
43  |  | #include <vector>  | 
44  |  |  | 
45  |  | #include "absl/base/config.h"  | 
46  |  | #include "absl/base/internal/unaligned_access.h"  | 
47  |  | #include "absl/base/port.h"  | 
48  |  | #include "absl/container/fixed_array.h"  | 
49  |  | #include "absl/hash/internal/city.h"  | 
50  |  | #include "absl/hash/internal/low_level_hash.h"  | 
51  |  | #include "absl/meta/type_traits.h"  | 
52  |  | #include "absl/numeric/bits.h"  | 
53  |  | #include "absl/numeric/int128.h"  | 
54  |  | #include "absl/strings/string_view.h"  | 
55  |  | #include "absl/types/optional.h"  | 
56  |  | #include "absl/types/variant.h"  | 
57  |  | #include "absl/utility/utility.h"  | 
58  |  |  | 
59  |  | #ifdef ABSL_HAVE_STD_STRING_VIEW  | 
60  |  | #include <string_view>  | 
61  |  | #endif  | 
62  |  |  | 
63  |  | namespace absl { | 
64  |  | ABSL_NAMESPACE_BEGIN  | 
65  |  |  | 
66  |  | class HashState;  | 
67  |  |  | 
68  |  | namespace hash_internal { | 
69  |  |  | 
70  |  | // Internal detail: Large buffers are hashed in smaller chunks.  This function  | 
71  |  | // returns the size of these chunks.  | 
72  | 56  | constexpr size_t PiecewiseChunkSize() { return 1024; } | 
73  |  |  | 
74  |  | // PiecewiseCombiner  | 
75  |  | //  | 
76  |  | // PiecewiseCombiner is an internal-only helper class for hashing a piecewise  | 
77  |  | // buffer of `char` or `unsigned char` as though it were contiguous.  This class  | 
78  |  | // provides two methods:  | 
79  |  | //  | 
80  |  | //   H add_buffer(state, data, size)  | 
81  |  | //   H finalize(state)  | 
82  |  | //  | 
83  |  | // `add_buffer` can be called zero or more times, followed by a single call to  | 
84  |  | // `finalize`.  This will produce the same hash expansion as concatenating each  | 
85  |  | // buffer piece into a single contiguous buffer, and passing this to  | 
86  |  | // `H::combine_contiguous`.  | 
87  |  | //  | 
88  |  | //  Example usage:  | 
89  |  | //    PiecewiseCombiner combiner;  | 
90  |  | //    for (const auto& piece : pieces) { | 
91  |  | //      state = combiner.add_buffer(std::move(state), piece.data, piece.size);  | 
92  |  | //    }  | 
93  |  | //    return combiner.finalize(std::move(state));  | 
94  |  | class PiecewiseCombiner { | 
95  |  |  public:  | 
96  | 0  |   PiecewiseCombiner() : position_(0) {} | 
97  |  |   PiecewiseCombiner(const PiecewiseCombiner&) = delete;  | 
98  |  |   PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete;  | 
99  |  |  | 
100  |  |   // PiecewiseCombiner::add_buffer()  | 
101  |  |   //  | 
102  |  |   // Appends the given range of bytes to the sequence to be hashed, which may  | 
103  |  |   // modify the provided hash state.  | 
104  |  |   template <typename H>  | 
105  |  |   H add_buffer(H state, const unsigned char* data, size_t size);  | 
106  |  |   template <typename H>  | 
107  | 0  |   H add_buffer(H state, const char* data, size_t size) { | 
108  | 0  |     return add_buffer(std::move(state),  | 
109  | 0  |                       reinterpret_cast<const unsigned char*>(data), size);  | 
110  | 0  |   }  | 
111  |  |  | 
112  |  |   // PiecewiseCombiner::finalize()  | 
113  |  |   //  | 
114  |  |   // Finishes combining the hash sequence, which may may modify the provided  | 
115  |  |   // hash state.  | 
116  |  |   //  | 
117  |  |   // Once finalize() is called, add_buffer() may no longer be called. The  | 
118  |  |   // resulting hash state will be the same as if the pieces passed to  | 
119  |  |   // add_buffer() were concatenated into a single flat buffer, and then provided  | 
120  |  |   // to H::combine_contiguous().  | 
121  |  |   template <typename H>  | 
122  |  |   H finalize(H state);  | 
123  |  |  | 
124  |  |  private:  | 
125  |  |   unsigned char buf_[PiecewiseChunkSize()];  | 
126  |  |   size_t position_;  | 
127  |  | };  | 
128  |  |  | 
129  |  | // is_hashable()  | 
130  |  | //  | 
131  |  | // Trait class which returns true if T is hashable by the absl::Hash framework.  | 
132  |  | // Used for the AbslHashValue implementations for composite types below.  | 
133  |  | template <typename T>  | 
134  |  | struct is_hashable;  | 
135  |  |  | 
136  |  | // HashStateBase  | 
137  |  | //  | 
138  |  | // An internal implementation detail that contains common implementation details  | 
139  |  | // for all of the "hash state objects" objects generated by Abseil.  This is not  | 
140  |  | // a public API; users should not create classes that inherit from this.  | 
141  |  | //  | 
142  |  | // A hash state object is the template argument `H` passed to `AbslHashValue`.  | 
143  |  | // It represents an intermediate state in the computation of an unspecified hash  | 
144  |  | // algorithm. `HashStateBase` provides a CRTP style base class for hash state  | 
145  |  | // implementations. Developers adding type support for `absl::Hash` should not  | 
146  |  | // rely on any parts of the state object other than the following member  | 
147  |  | // functions:  | 
148  |  | //  | 
149  |  | //   * HashStateBase::combine()  | 
150  |  | //   * HashStateBase::combine_contiguous()  | 
151  |  | //   * HashStateBase::combine_unordered()  | 
152  |  | //  | 
153  |  | // A derived hash state class of type `H` must provide a public member function  | 
154  |  | // with a signature similar to the following:  | 
155  |  | //  | 
156  |  | //    `static H combine_contiguous(H state, const unsigned char*, size_t)`.  | 
157  |  | //  | 
158  |  | // It must also provide a private template method named RunCombineUnordered.  | 
159  |  | //  | 
160  |  | // A "consumer" is a 1-arg functor returning void.  Its argument is a reference  | 
161  |  | // to an inner hash state object, and it may be called multiple times.  When  | 
162  |  | // called, the functor consumes the entropy from the provided state object,  | 
163  |  | // and resets that object to its empty state.  | 
164  |  | //  | 
165  |  | // A "combiner" is a stateless 2-arg functor returning void.  Its arguments are  | 
166  |  | // an inner hash state object and an ElementStateConsumer functor.  A combiner  | 
167  |  | // uses the provided inner hash state object to hash each element of the  | 
168  |  | // container, passing the inner hash state object to the consumer after hashing  | 
169  |  | // each element.  | 
170  |  | //  | 
171  |  | // Given these definitions, a derived hash state class of type H  | 
172  |  | // must provide a private template method with a signature similar to the  | 
173  |  | // following:  | 
174  |  | //  | 
175  |  | //    `template <typename CombinerT>`  | 
176  |  | //    `static H RunCombineUnordered(H outer_state, CombinerT combiner)`  | 
177  |  | //  | 
178  |  | // This function is responsible for constructing the inner state object and  | 
179  |  | // providing a consumer to the combiner.  It uses side effects of the consumer  | 
180  |  | // and combiner to mix the state of each element in an order-independent manner,  | 
181  |  | // and uses this to return an updated value of `outer_state`.  | 
182  |  | //  | 
183  |  | // This inside-out approach generates efficient object code in the normal case,  | 
184  |  | // but allows us to use stack storage to implement the absl::HashState type  | 
185  |  | // erasure mechanism (avoiding heap allocations while hashing).  | 
186  |  | //  | 
187  |  | // `HashStateBase` will provide a complete implementation for a hash state  | 
188  |  | // object in terms of these two methods.  | 
189  |  | //  | 
190  |  | // Example:  | 
191  |  | //  | 
192  |  | //   // Use CRTP to define your derived class.  | 
193  |  | //   struct MyHashState : HashStateBase<MyHashState> { | 
194  |  | //       static H combine_contiguous(H state, const unsigned char*, size_t);  | 
195  |  | //       using MyHashState::HashStateBase::combine;  | 
196  |  | //       using MyHashState::HashStateBase::combine_contiguous;  | 
197  |  | //       using MyHashState::HashStateBase::combine_unordered;  | 
198  |  | //     private:  | 
199  |  | //       template <typename CombinerT>  | 
200  |  | //       static H RunCombineUnordered(H state, CombinerT combiner);  | 
201  |  | //   };  | 
202  |  | template <typename H>  | 
203  |  | class HashStateBase { | 
204  |  |  public:  | 
205  |  |   // HashStateBase::combine()  | 
206  |  |   //  | 
207  |  |   // Combines an arbitrary number of values into a hash state, returning the  | 
208  |  |   // updated state.  | 
209  |  |   //  | 
210  |  |   // Each of the value types `T` must be separately hashable by the Abseil  | 
211  |  |   // hashing framework.  | 
212  |  |   //  | 
213  |  |   // NOTE:  | 
214  |  |   //  | 
215  |  |   //   state = H::combine(std::move(state), value1, value2, value3);  | 
216  |  |   //  | 
217  |  |   // is guaranteed to produce the same hash expansion as:  | 
218  |  |   //  | 
219  |  |   //   state = H::combine(std::move(state), value1);  | 
220  |  |   //   state = H::combine(std::move(state), value2);  | 
221  |  |   //   state = H::combine(std::move(state), value3);  | 
222  |  |   template <typename T, typename... Ts>  | 
223  |  |   static H combine(H state, const T& value, const Ts&... values);  | 
224  | 124  |   static H combine(H state) { return state; } | 
225  |  |  | 
226  |  |   // HashStateBase::combine_contiguous()  | 
227  |  |   //  | 
228  |  |   // Combines a contiguous array of `size` elements into a hash state, returning  | 
229  |  |   // the updated state.  | 
230  |  |   //  | 
231  |  |   // NOTE:  | 
232  |  |   //  | 
233  |  |   //   state = H::combine_contiguous(std::move(state), data, size);  | 
234  |  |   //  | 
235  |  |   // is NOT guaranteed to produce the same hash expansion as a for-loop (it may  | 
236  |  |   // perform internal optimizations).  If you need this guarantee, use the  | 
237  |  |   // for-loop instead.  | 
238  |  |   template <typename T>  | 
239  |  |   static H combine_contiguous(H state, const T* data, size_t size);  | 
240  |  |  | 
241  |  |   template <typename I>  | 
242  |  |   static H combine_unordered(H state, I begin, I end);  | 
243  |  |  | 
244  |  |   using AbslInternalPiecewiseCombiner = PiecewiseCombiner;  | 
245  |  |  | 
246  |  |   template <typename T>  | 
247  |  |   using is_hashable = absl::hash_internal::is_hashable<T>;  | 
248  |  |  | 
249  |  |  private:  | 
250  |  |   // Common implementation of the iteration step of a "combiner", as described  | 
251  |  |   // above.  | 
252  |  |   template <typename I>  | 
253  |  |   struct CombineUnorderedCallback { | 
254  |  |     I begin;  | 
255  |  |     I end;  | 
256  |  |  | 
257  |  |     template <typename InnerH, typename ElementStateConsumer>  | 
258  |  |     void operator()(InnerH inner_state, ElementStateConsumer cb) { | 
259  |  |       for (; begin != end; ++begin) { | 
260  |  |         inner_state = H::combine(std::move(inner_state), *begin);  | 
261  |  |         cb(inner_state);  | 
262  |  |       }  | 
263  |  |     }  | 
264  |  |   };  | 
265  |  | };  | 
266  |  |  | 
267  |  | // is_uniquely_represented  | 
268  |  | //  | 
269  |  | // `is_uniquely_represented<T>` is a trait class that indicates whether `T`  | 
270  |  | // is uniquely represented.  | 
271  |  | //  | 
272  |  | // A type is "uniquely represented" if two equal values of that type are  | 
273  |  | // guaranteed to have the same bytes in their underlying storage. In other  | 
274  |  | // words, if `a == b`, then `memcmp(&a, &b, sizeof(T))` is guaranteed to be  | 
275  |  | // zero. This property cannot be detected automatically, so this trait is false  | 
276  |  | // by default, but can be specialized by types that wish to assert that they are  | 
277  |  | // uniquely represented. This makes them eligible for certain optimizations.  | 
278  |  | //  | 
279  |  | // If you have any doubt whatsoever, do not specialize this template.  | 
280  |  | // The default is completely safe, and merely disables some optimizations  | 
281  |  | // that will not matter for most types. Specializing this template,  | 
282  |  | // on the other hand, can be very hazardous.  | 
283  |  | //  | 
284  |  | // To be uniquely represented, a type must not have multiple ways of  | 
285  |  | // representing the same value; for example, float and double are not  | 
286  |  | // uniquely represented, because they have distinct representations for  | 
287  |  | // +0 and -0. Furthermore, the type's byte representation must consist  | 
288  |  | // solely of user-controlled data, with no padding bits and no compiler-  | 
289  |  | // controlled data such as vptrs or sanitizer metadata. This is usually  | 
290  |  | // very difficult to guarantee, because in most cases the compiler can  | 
291  |  | // insert data and padding bits at its own discretion.  | 
292  |  | //  | 
293  |  | // If you specialize this template for a type `T`, you must do so in the file  | 
294  |  | // that defines that type (or in this file). If you define that specialization  | 
295  |  | // anywhere else, `is_uniquely_represented<T>` could have different meanings  | 
296  |  | // in different places.  | 
297  |  | //  | 
298  |  | // The Enable parameter is meaningless; it is provided as a convenience,  | 
299  |  | // to support certain SFINAE techniques when defining specializations.  | 
300  |  | template <typename T, typename Enable = void>  | 
301  |  | struct is_uniquely_represented : std::false_type {}; | 
302  |  |  | 
303  |  | // is_uniquely_represented<unsigned char>  | 
304  |  | //  | 
305  |  | // unsigned char is a synonym for "byte", so it is guaranteed to be  | 
306  |  | // uniquely represented.  | 
307  |  | template <>  | 
308  |  | struct is_uniquely_represented<unsigned char> : std::true_type {}; | 
309  |  |  | 
310  |  | // is_uniquely_represented for non-standard integral types  | 
311  |  | //  | 
312  |  | // Integral types other than bool should be uniquely represented on any  | 
313  |  | // platform that this will plausibly be ported to.  | 
314  |  | template <typename Integral>  | 
315  |  | struct is_uniquely_represented<  | 
316  |  |     Integral, typename std::enable_if<std::is_integral<Integral>::value>::type>  | 
317  |  |     : std::true_type {}; | 
318  |  |  | 
319  |  | // is_uniquely_represented<bool>  | 
320  |  | //  | 
321  |  | //  | 
322  |  | template <>  | 
323  |  | struct is_uniquely_represented<bool> : std::false_type {}; | 
324  |  |  | 
325  |  | // hash_bytes()  | 
326  |  | //  | 
327  |  | // Convenience function that combines `hash_state` with the byte representation  | 
328  |  | // of `value`.  | 
329  |  | template <typename H, typename T>  | 
330  | 62  | H hash_bytes(H hash_state, const T& value) { | 
331  | 62  |   const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);  | 
332  | 62  |   return H::combine_contiguous(std::move(hash_state), start, sizeof(value));  | 
333  | 62  | } absl::hash_internal::MixingHashState absl::hash_internal::hash_bytes<absl::hash_internal::MixingHashState, unsigned long>(absl::hash_internal::MixingHashState, unsigned long const&) Line  | Count  | Source  |  330  | 62  | H hash_bytes(H hash_state, const T& value) { |  331  | 62  |   const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);  |  332  | 62  |   return H::combine_contiguous(std::move(hash_state), start, sizeof(value));  |  333  | 62  | }  |  
 Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::hash_bytes<absl::hash_internal::MixingHashState, int>(absl::hash_internal::MixingHashState, int const&)  | 
334  |  |  | 
335  |  | // -----------------------------------------------------------------------------  | 
336  |  | // AbslHashValue for Basic Types  | 
337  |  | // -----------------------------------------------------------------------------  | 
338  |  |  | 
339  |  | // Note: Default `AbslHashValue` implementations live in `hash_internal`. This  | 
340  |  | // allows us to block lexical scope lookup when doing an unqualified call to  | 
341  |  | // `AbslHashValue` below. User-defined implementations of `AbslHashValue` can  | 
342  |  | // only be found via ADL.  | 
343  |  |  | 
344  |  | // AbslHashValue() for hashing bool values  | 
345  |  | //  | 
346  |  | // We use SFINAE to ensure that this overload only accepts bool, not types that  | 
347  |  | // are convertible to bool.  | 
348  |  | template <typename H, typename B>  | 
349  |  | typename std::enable_if<std::is_same<B, bool>::value, H>::type AbslHashValue(  | 
350  |  |     H hash_state, B value) { | 
351  |  |   return H::combine(std::move(hash_state),  | 
352  |  |                     static_cast<unsigned char>(value ? 1 : 0));  | 
353  |  | }  | 
354  |  |  | 
355  |  | // AbslHashValue() for hashing enum values  | 
356  |  | template <typename H, typename Enum>  | 
357  |  | typename std::enable_if<std::is_enum<Enum>::value, H>::type AbslHashValue(  | 
358  |  |     H hash_state, Enum e) { | 
359  |  |   // In practice, we could almost certainly just invoke hash_bytes directly,  | 
360  |  |   // but it's possible that a sanitizer might one day want to  | 
361  |  |   // store data in the unused bits of an enum. To avoid that risk, we  | 
362  |  |   // convert to the underlying type before hashing. Hopefully this will get  | 
363  |  |   // optimized away; if not, we can reopen discussion with c-toolchain-team.  | 
364  |  |   return H::combine(std::move(hash_state),  | 
365  |  |                     static_cast<typename std::underlying_type<Enum>::type>(e));  | 
366  |  | }  | 
367  |  | // AbslHashValue() for hashing floating-point values  | 
368  |  | template <typename H, typename Float>  | 
369  |  | typename std::enable_if<std::is_same<Float, float>::value ||  | 
370  |  |                             std::is_same<Float, double>::value,  | 
371  |  |                         H>::type  | 
372  |  | AbslHashValue(H hash_state, Float value) { | 
373  |  |   return hash_internal::hash_bytes(std::move(hash_state),  | 
374  |  |                                    value == 0 ? 0 : value);  | 
375  |  | }  | 
376  |  |  | 
377  |  | // Long double has the property that it might have extra unused bytes in it.  | 
378  |  | // For example, in x86 sizeof(long double)==16 but it only really uses 80-bits  | 
379  |  | // of it. This means we can't use hash_bytes on a long double and have to  | 
380  |  | // convert it to something else first.  | 
381  |  | template <typename H, typename LongDouble>  | 
382  |  | typename std::enable_if<std::is_same<LongDouble, long double>::value, H>::type  | 
383  |  | AbslHashValue(H hash_state, LongDouble value) { | 
384  |  |   const int category = std::fpclassify(value);  | 
385  |  |   switch (category) { | 
386  |  |     case FP_INFINITE:  | 
387  |  |       // Add the sign bit to differentiate between +Inf and -Inf  | 
388  |  |       hash_state = H::combine(std::move(hash_state), std::signbit(value));  | 
389  |  |       break;  | 
390  |  |  | 
391  |  |     case FP_NAN:  | 
392  |  |     case FP_ZERO:  | 
393  |  |     default:  | 
394  |  |       // Category is enough for these.  | 
395  |  |       break;  | 
396  |  |  | 
397  |  |     case FP_NORMAL:  | 
398  |  |     case FP_SUBNORMAL:  | 
399  |  |       // We can't convert `value` directly to double because this would have  | 
400  |  |       // undefined behavior if the value is out of range.  | 
401  |  |       // std::frexp gives us a value in the range (-1, -.5] or [.5, 1) that is  | 
402  |  |       // guaranteed to be in range for `double`. The truncation is  | 
403  |  |       // implementation defined, but that works as long as it is deterministic.  | 
404  |  |       int exp;  | 
405  |  |       auto mantissa = static_cast<double>(std::frexp(value, &exp));  | 
406  |  |       hash_state = H::combine(std::move(hash_state), mantissa, exp);  | 
407  |  |   }  | 
408  |  |  | 
409  |  |   return H::combine(std::move(hash_state), category);  | 
410  |  | }  | 
411  |  |  | 
412  |  | // AbslHashValue() for hashing pointers  | 
413  |  | template <typename H, typename T>  | 
414  |  | H AbslHashValue(H hash_state, T* ptr) { | 
415  |  |   auto v = reinterpret_cast<uintptr_t>(ptr);  | 
416  |  |   // Due to alignment, pointers tend to have low bits as zero, and the next few  | 
417  |  |   // bits follow a pattern since they are also multiples of some base value.  | 
418  |  |   // Mixing the pointer twice helps prevent stuck low bits for certain alignment  | 
419  |  |   // values.  | 
420  |  |   return H::combine(std::move(hash_state), v, v);  | 
421  |  | }  | 
422  |  |  | 
423  |  | // AbslHashValue() for hashing nullptr_t  | 
424  |  | template <typename H>  | 
425  |  | H AbslHashValue(H hash_state, std::nullptr_t) { | 
426  |  |   return H::combine(std::move(hash_state), static_cast<void*>(nullptr));  | 
427  |  | }  | 
428  |  |  | 
429  |  | // AbslHashValue() for hashing pointers-to-member  | 
430  |  | template <typename H, typename T, typename C>  | 
431  |  | H AbslHashValue(H hash_state, T C::*ptr) { | 
432  |  |   auto salient_ptm_size = [](std::size_t n) -> std::size_t { | 
433  |  | #if defined(_MSC_VER)  | 
434  |  |     // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,  | 
435  |  |     // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain  | 
436  |  |     // padding (namely when they have 1 or 3 ints). The value below is a lower  | 
437  |  |     // bound on the number of salient, non-padding bytes that we use for  | 
438  |  |     // hashing.  | 
439  |  |     if (alignof(T C::*) == alignof(int)) { | 
440  |  |       // No padding when all subobjects have the same size as the total  | 
441  |  |       // alignment. This happens in 32-bit mode.  | 
442  |  |       return n;  | 
443  |  |     } else { | 
444  |  |       // Padding for 1 int (size 16) or 3 ints (size 24).  | 
445  |  |       // With 2 ints, the size is 16 with no padding, which we pessimize.  | 
446  |  |       return n == 24 ? 20 : n == 16 ? 12 : n;  | 
447  |  |     }  | 
448  |  | #else  | 
449  |  |   // On other platforms, we assume that pointers-to-members do not have  | 
450  |  |   // padding.  | 
451  |  | #ifdef __cpp_lib_has_unique_object_representations  | 
452  |  |     static_assert(std::has_unique_object_representations<T C::*>::value);  | 
453  |  | #endif  // __cpp_lib_has_unique_object_representations  | 
454  |  |     return n;  | 
455  |  | #endif  | 
456  |  |   };  | 
457  |  |   return H::combine_contiguous(std::move(hash_state),  | 
458  |  |                                reinterpret_cast<unsigned char*>(&ptr),  | 
459  |  |                                salient_ptm_size(sizeof ptr));  | 
460  |  | }  | 
461  |  |  | 
462  |  | // -----------------------------------------------------------------------------  | 
463  |  | // AbslHashValue for Composite Types  | 
464  |  | // -----------------------------------------------------------------------------  | 
465  |  |  | 
466  |  | // AbslHashValue() for hashing pairs  | 
467  |  | template <typename H, typename T1, typename T2>  | 
468  |  | typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,  | 
469  |  |                         H>::type  | 
470  |  | AbslHashValue(H hash_state, const std::pair<T1, T2>& p) { | 
471  |  |   return H::combine(std::move(hash_state), p.first, p.second);  | 
472  |  | }  | 
473  |  |  | 
474  |  | // hash_tuple()  | 
475  |  | //  | 
476  |  | // Helper function for hashing a tuple. The third argument should  | 
477  |  | // be an index_sequence running from 0 to tuple_size<Tuple> - 1.  | 
478  |  | template <typename H, typename Tuple, size_t... Is>  | 
479  | 0  | H hash_tuple(H hash_state, const Tuple& t, absl::index_sequence<Is...>) { | 
480  | 0  |   return H::combine(std::move(hash_state), std::get<Is>(t)...);  | 
481  | 0  | } Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::hash_tuple<absl::hash_internal::MixingHashState, std::__1::tuple<unsigned long const&>, 0ul>(absl::hash_internal::MixingHashState, std::__1::tuple<unsigned long const&> const&, absl::integer_sequence<unsigned long, 0ul>) Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::hash_tuple<absl::hash_internal::MixingHashState, std::__1::tuple<absl::string_view const&, int const&>, 0ul, 1ul>(absl::hash_internal::MixingHashState, std::__1::tuple<absl::string_view const&, int const&> const&, absl::integer_sequence<unsigned long, 0ul, 1ul>)  | 
482  |  |  | 
483  |  | // AbslHashValue for hashing tuples  | 
484  |  | template <typename H, typename... Ts>  | 
485  |  | #if defined(_MSC_VER)  | 
486  |  | // This SFINAE gets MSVC confused under some conditions. Let's just disable it  | 
487  |  | // for now.  | 
488  |  | H  | 
489  |  | #else   // _MSC_VER  | 
490  |  | typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value, H>::type  | 
491  |  | #endif  // _MSC_VER  | 
492  | 0  | AbslHashValue(H hash_state, const std::tuple<Ts...>& t) { | 
493  | 0  |   return hash_internal::hash_tuple(std::move(hash_state), t,  | 
494  | 0  |                                    absl::make_index_sequence<sizeof...(Ts)>());  | 
495  | 0  | } Unexecuted instantiation: _ZN4absl13hash_internal13AbslHashValueINS0_15MixingHashStateEJRKmEEENSt3__19enable_ifIXsr4absl11conjunctionIDpNS0_11is_hashableIT0_EEEE5valueET_E4typeESB_RKNS5_5tupleIJDpS8_EEE Unexecuted instantiation: _ZN4absl13hash_internal13AbslHashValueINS0_15MixingHashStateEJRKNS_11string_viewERKiEEENSt3__19enable_ifIXsr4absl11conjunctionIDpNS0_11is_hashableIT0_EEEE5valueET_E4typeESE_RKNS8_5tupleIJDpSB_EEE  | 
496  |  |  | 
497  |  | // -----------------------------------------------------------------------------  | 
498  |  | // AbslHashValue for Pointers  | 
499  |  | // -----------------------------------------------------------------------------  | 
500  |  |  | 
501  |  | // AbslHashValue for hashing unique_ptr  | 
502  |  | template <typename H, typename T, typename D>  | 
503  |  | H AbslHashValue(H hash_state, const std::unique_ptr<T, D>& ptr) { | 
504  |  |   return H::combine(std::move(hash_state), ptr.get());  | 
505  |  | }  | 
506  |  |  | 
507  |  | // AbslHashValue for hashing shared_ptr  | 
508  |  | template <typename H, typename T>  | 
509  |  | H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) { | 
510  |  |   return H::combine(std::move(hash_state), ptr.get());  | 
511  |  | }  | 
512  |  |  | 
513  |  | // -----------------------------------------------------------------------------  | 
514  |  | // AbslHashValue for String-Like Types  | 
515  |  | // -----------------------------------------------------------------------------  | 
516  |  |  | 
517  |  | // AbslHashValue for hashing strings  | 
518  |  | //  | 
519  |  | // All the string-like types supported here provide the same hash expansion for  | 
520  |  | // the same character sequence. These types are:  | 
521  |  | //  | 
522  |  | //  - `absl::Cord`  | 
523  |  | //  - `std::string` (and std::basic_string<T, std::char_traits<T>, A> for  | 
524  |  | //      any allocator A and any T in {char, wchar_t, char16_t, char32_t}) | 
525  |  | //  - `absl::string_view`, `std::string_view`, `std::wstring_view`,  | 
526  |  | //    `std::u16string_view`, and `std::u32_string_view`.  | 
527  |  | //  | 
528  |  | // For simplicity, we currently support only strings built on `char`, `wchar_t`,  | 
529  |  | // `char16_t`, or `char32_t`. This support may be broadened, if necessary, but  | 
530  |  | // with some caution - this overload would misbehave in cases where the traits'  | 
531  |  | // `eq()` member isn't equivalent to `==` on the underlying character type.  | 
532  |  | template <typename H>  | 
533  | 62  | H AbslHashValue(H hash_state, absl::string_view str) { | 
534  | 62  |   return H::combine(  | 
535  | 62  |       H::combine_contiguous(std::move(hash_state), str.data(), str.size()),  | 
536  | 62  |       str.size());  | 
537  | 62  | }  | 
538  |  |  | 
539  |  | // Support std::wstring, std::u16string and std::u32string.  | 
540  |  | template <typename Char, typename Alloc, typename H,  | 
541  |  |           typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||  | 
542  |  |                                        std::is_same<Char, char16_t>::value ||  | 
543  |  |                                        std::is_same<Char, char32_t>::value>>  | 
544  |  | H AbslHashValue(  | 
545  |  |     H hash_state,  | 
546  |  |     const std::basic_string<Char, std::char_traits<Char>, Alloc>& str) { | 
547  |  |   return H::combine(  | 
548  |  |       H::combine_contiguous(std::move(hash_state), str.data(), str.size()),  | 
549  |  |       str.size());  | 
550  |  | }  | 
551  |  |  | 
552  |  | #ifdef ABSL_HAVE_STD_STRING_VIEW  | 
553  |  |  | 
554  |  | // Support std::wstring_view, std::u16string_view and std::u32string_view.  | 
555  |  | template <typename Char, typename H,  | 
556  |  |           typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||  | 
557  |  |                                        std::is_same<Char, char16_t>::value ||  | 
558  |  |                                        std::is_same<Char, char32_t>::value>>  | 
559  |  | H AbslHashValue(H hash_state, std::basic_string_view<Char> str) { | 
560  |  |   return H::combine(  | 
561  |  |       H::combine_contiguous(std::move(hash_state), str.data(), str.size()),  | 
562  |  |       str.size());  | 
563  |  | }  | 
564  |  |  | 
565  |  | #endif  // ABSL_HAVE_STD_STRING_VIEW  | 
566  |  |  | 
567  |  | // -----------------------------------------------------------------------------  | 
568  |  | // AbslHashValue for Sequence Containers  | 
569  |  | // -----------------------------------------------------------------------------  | 
570  |  |  | 
571  |  | // AbslHashValue for hashing std::array  | 
572  |  | template <typename H, typename T, size_t N>  | 
573  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
574  |  |     H hash_state, const std::array<T, N>& array) { | 
575  |  |   return H::combine_contiguous(std::move(hash_state), array.data(),  | 
576  |  |                                array.size());  | 
577  |  | }  | 
578  |  |  | 
579  |  | // AbslHashValue for hashing std::deque  | 
580  |  | template <typename H, typename T, typename Allocator>  | 
581  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
582  |  |     H hash_state, const std::deque<T, Allocator>& deque) { | 
583  |  |   // TODO(gromer): investigate a more efficient implementation taking  | 
584  |  |   // advantage of the chunk structure.  | 
585  |  |   for (const auto& t : deque) { | 
586  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
587  |  |   }  | 
588  |  |   return H::combine(std::move(hash_state), deque.size());  | 
589  |  | }  | 
590  |  |  | 
591  |  | // AbslHashValue for hashing std::forward_list  | 
592  |  | template <typename H, typename T, typename Allocator>  | 
593  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
594  |  |     H hash_state, const std::forward_list<T, Allocator>& list) { | 
595  |  |   size_t size = 0;  | 
596  |  |   for (const T& t : list) { | 
597  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
598  |  |     ++size;  | 
599  |  |   }  | 
600  |  |   return H::combine(std::move(hash_state), size);  | 
601  |  | }  | 
602  |  |  | 
603  |  | // AbslHashValue for hashing std::list  | 
604  |  | template <typename H, typename T, typename Allocator>  | 
605  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
606  |  |     H hash_state, const std::list<T, Allocator>& list) { | 
607  |  |   for (const auto& t : list) { | 
608  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
609  |  |   }  | 
610  |  |   return H::combine(std::move(hash_state), list.size());  | 
611  |  | }  | 
612  |  |  | 
613  |  | // AbslHashValue for hashing std::vector  | 
614  |  | //  | 
615  |  | // Do not use this for vector<bool> on platforms that have a working  | 
616  |  | // implementation of std::hash. It does not have a .data(), and a fallback for  | 
617  |  | // std::hash<> is most likely faster.  | 
618  |  | template <typename H, typename T, typename Allocator>  | 
619  |  | typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,  | 
620  |  |                         H>::type  | 
621  |  | AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) { | 
622  |  |   return H::combine(H::combine_contiguous(std::move(hash_state), vector.data(),  | 
623  |  |                                           vector.size()),  | 
624  |  |                     vector.size());  | 
625  |  | }  | 
626  |  |  | 
// AbslHashValue special cases for hashing std::vector<bool>

#if defined(ABSL_IS_BIG_ENDIAN) && \
    (defined(__GLIBCXX__) || defined(__GLIBCPP__))

// std::hash in libstdc++ does not work correctly with vector<bool> on Big
// Endian platforms therefore we need to implement a custom AbslHashValue for
// it. More details on the bug:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
                        H>::type
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
  typename H::AbslInternalPiecewiseCombiner combiner;
  // vector<bool> has no .data() (see the std::vector comment above), so feed
  // the bits one byte at a time through the piecewise combiner.
  for (const auto& i : vector) {
    unsigned char c = static_cast<unsigned char>(i);
    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
  }
  // Mix in the length so prefixes hash differently.
  return H::combine(combiner.finalize(std::move(hash_state)), vector.size());
}
#else
// When not working around the libstdc++ bug above, we still have to contend
// with the fact that std::hash<vector<bool>> is often poor quality, hashing
// directly on the internal words and on no other state.  On these platforms,
// vector<bool>{1, 1} and vector<bool>{1, 1, 0} hash to the same value.
//
// Mixing in the size (as we do in our other vector<> implementations) on top
// of the library-provided hash implementation avoids this QOI issue.
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
                        H>::type
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
  return H::combine(std::move(hash_state),
                    std::hash<std::vector<T, Allocator>>{}(vector),
                    vector.size());
}
#endif
664  |  |  | 
665  |  | // -----------------------------------------------------------------------------  | 
666  |  | // AbslHashValue for Ordered Associative Containers  | 
667  |  | // -----------------------------------------------------------------------------  | 
668  |  |  | 
669  |  | // AbslHashValue for hashing std::map  | 
670  |  | template <typename H, typename Key, typename T, typename Compare,  | 
671  |  |           typename Allocator>  | 
672  |  | typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,  | 
673  |  |                         H>::type  | 
674  |  | AbslHashValue(H hash_state, const std::map<Key, T, Compare, Allocator>& map) { | 
675  |  |   for (const auto& t : map) { | 
676  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
677  |  |   }  | 
678  |  |   return H::combine(std::move(hash_state), map.size());  | 
679  |  | }  | 
680  |  |  | 
681  |  | // AbslHashValue for hashing std::multimap  | 
682  |  | template <typename H, typename Key, typename T, typename Compare,  | 
683  |  |           typename Allocator>  | 
684  |  | typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,  | 
685  |  |                         H>::type  | 
686  |  | AbslHashValue(H hash_state,  | 
687  |  |               const std::multimap<Key, T, Compare, Allocator>& map) { | 
688  |  |   for (const auto& t : map) { | 
689  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
690  |  |   }  | 
691  |  |   return H::combine(std::move(hash_state), map.size());  | 
692  |  | }  | 
693  |  |  | 
694  |  | // AbslHashValue for hashing std::set  | 
695  |  | template <typename H, typename Key, typename Compare, typename Allocator>  | 
696  |  | typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(  | 
697  |  |     H hash_state, const std::set<Key, Compare, Allocator>& set) { | 
698  |  |   for (const auto& t : set) { | 
699  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
700  |  |   }  | 
701  |  |   return H::combine(std::move(hash_state), set.size());  | 
702  |  | }  | 
703  |  |  | 
704  |  | // AbslHashValue for hashing std::multiset  | 
705  |  | template <typename H, typename Key, typename Compare, typename Allocator>  | 
706  |  | typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(  | 
707  |  |     H hash_state, const std::multiset<Key, Compare, Allocator>& set) { | 
708  |  |   for (const auto& t : set) { | 
709  |  |     hash_state = H::combine(std::move(hash_state), t);  | 
710  |  |   }  | 
711  |  |   return H::combine(std::move(hash_state), set.size());  | 
712  |  | }  | 
713  |  |  | 
714  |  | // -----------------------------------------------------------------------------  | 
715  |  | // AbslHashValue for Unordered Associative Containers  | 
716  |  | // -----------------------------------------------------------------------------  | 
717  |  |  | 
718  |  | // AbslHashValue for hashing std::unordered_set  | 
719  |  | template <typename H, typename Key, typename Hash, typename KeyEqual,  | 
720  |  |           typename Alloc>  | 
721  |  | typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(  | 
722  |  |     H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) { | 
723  |  |   return H::combine(  | 
724  |  |       H::combine_unordered(std::move(hash_state), s.begin(), s.end()),  | 
725  |  |       s.size());  | 
726  |  | }  | 
727  |  |  | 
728  |  | // AbslHashValue for hashing std::unordered_multiset  | 
729  |  | template <typename H, typename Key, typename Hash, typename KeyEqual,  | 
730  |  |           typename Alloc>  | 
731  |  | typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(  | 
732  |  |     H hash_state,  | 
733  |  |     const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) { | 
734  |  |   return H::combine(  | 
735  |  |       H::combine_unordered(std::move(hash_state), s.begin(), s.end()),  | 
736  |  |       s.size());  | 
737  |  | }  | 
738  |  |  | 
// AbslHashValue for hashing std::unordered_map
template <typename H, typename Key, typename T, typename Hash,
          typename KeyEqual, typename Alloc>
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
                        H>::type
AbslHashValue(H hash_state,
              const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) {
  // combine_unordered makes the result independent of the container's
  // (unspecified) iteration order; the length is mixed in on top.
  return H::combine(
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
      s.size());
}
750  |  |  | 
// AbslHashValue for hashing std::unordered_multimap
template <typename H, typename Key, typename T, typename Hash,
          typename KeyEqual, typename Alloc>
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
                        H>::type
AbslHashValue(H hash_state,
              const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) {
  // combine_unordered makes the result independent of the container's
  // (unspecified) iteration order; the length is mixed in on top.
  return H::combine(
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
      s.size());
}
762  |  |  | 
763  |  | // -----------------------------------------------------------------------------  | 
764  |  | // AbslHashValue for Wrapper Types  | 
765  |  | // -----------------------------------------------------------------------------  | 
766  |  |  | 
767  |  | // AbslHashValue for hashing std::reference_wrapper  | 
768  |  | template <typename H, typename T>  | 
769  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
770  |  |     H hash_state, std::reference_wrapper<T> opt) { | 
771  |  |   return H::combine(std::move(hash_state), opt.get());  | 
772  |  | }  | 
773  |  |  | 
774  |  | // AbslHashValue for hashing absl::optional  | 
775  |  | template <typename H, typename T>  | 
776  |  | typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(  | 
777  |  |     H hash_state, const absl::optional<T>& opt) { | 
778  |  |   if (opt) hash_state = H::combine(std::move(hash_state), *opt);  | 
779  |  |   return H::combine(std::move(hash_state), opt.has_value());  | 
780  |  | }  | 
781  |  |  | 
// VariantVisitor
//
// Visitor for the absl::variant overload below. Holds the hash state by
// rvalue reference and consumes it (via std::move) when invoked with the
// variant's active alternative.
template <typename H>
struct VariantVisitor {
  H&& hash_state;  // moved-from exactly once, inside operator()
  template <typename T>
  H operator()(const T& t) const {
    return H::combine(std::move(hash_state), t);
  }
};
791  |  |  | 
792  |  | // AbslHashValue for hashing absl::variant  | 
793  |  | template <typename H, typename... T>  | 
794  |  | typename std::enable_if<conjunction<is_hashable<T>...>::value, H>::type  | 
795  |  | AbslHashValue(H hash_state, const absl::variant<T...>& v) { | 
796  |  |   if (!v.valueless_by_exception()) { | 
797  |  |     hash_state = absl::visit(VariantVisitor<H>{std::move(hash_state)}, v); | 
798  |  |   }  | 
799  |  |   return H::combine(std::move(hash_state), v.index());  | 
800  |  | }  | 
801  |  |  | 
802  |  | // -----------------------------------------------------------------------------  | 
803  |  | // AbslHashValue for Other Types  | 
804  |  | // -----------------------------------------------------------------------------  | 
805  |  |  | 
806  |  | // AbslHashValue for hashing std::bitset is not defined on Little Endian  | 
807  |  | // platforms, for the same reason as for vector<bool> (see std::vector above):  | 
808  |  | // It does not expose the raw bytes, and a fallback to std::hash<> is most  | 
809  |  | // likely faster.  | 
810  |  |  | 
#if defined(ABSL_IS_BIG_ENDIAN) && \
    (defined(__GLIBCXX__) || defined(__GLIBCPP__))
// AbslHashValue for hashing std::bitset
//
// std::hash in libstdc++ does not work correctly with std::bitset on Big Endian
// platforms therefore we need to implement a custom AbslHashValue for it. More
// details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
template <typename H, size_t N>
H AbslHashValue(H hash_state, const std::bitset<N>& set) {
  typename H::AbslInternalPiecewiseCombiner combiner;
  // Index with size_t: `int` triggers a signed/unsigned comparison against
  // the size_t non-type parameter N, and would loop forever for
  // N > INT_MAX.
  for (size_t i = 0; i < N; ++i) {
    // Feed the bits one byte at a time; bitset exposes no raw bytes.
    unsigned char c = static_cast<unsigned char>(set[i]);
    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
  }
  // Mix in the (compile-time) size, matching the vector<bool> overload.
  return H::combine(combiner.finalize(std::move(hash_state)), N);
}
#endif
828  |  |  | 
829  |  | // -----------------------------------------------------------------------------  | 
830  |  |  | 
831  |  | // hash_range_or_bytes()  | 
832  |  | //  | 
833  |  | // Mixes all values in the range [data, data+size) into the hash state.  | 
834  |  | // This overload accepts only uniquely-represented types, and hashes them by  | 
835  |  | // hashing the entire range of bytes.  | 
836  |  | template <typename H, typename T>  | 
837  |  | typename std::enable_if<is_uniquely_represented<T>::value, H>::type  | 
838  | 62  | hash_range_or_bytes(H hash_state, const T* data, size_t size) { | 
839  | 62  |   const auto* bytes = reinterpret_cast<const unsigned char*>(data);  | 
840  | 62  |   return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size);  | 
841  | 62  | }  | 
842  |  |  | 
843  |  | // hash_range_or_bytes()  | 
844  |  | template <typename H, typename T>  | 
845  |  | typename std::enable_if<!is_uniquely_represented<T>::value, H>::type  | 
846  |  | hash_range_or_bytes(H hash_state, const T* data, size_t size) { | 
847  |  |   for (const auto end = data + size; data < end; ++data) { | 
848  |  |     hash_state = H::combine(std::move(hash_state), *data);  | 
849  |  |   }  | 
850  |  |   return hash_state;  | 
851  |  | }  | 
852  |  |  | 
853  |  | #if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \  | 
854  |  |     ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_  | 
855  |  | #define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 1  | 
856  |  | #else  | 
857  |  | #define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 0  | 
858  |  | #endif  | 
859  |  |  | 
// HashSelect
//
// Type trait to select the appropriate hash implementation to use.
// HashSelect::type<T> will give the proper hash implementation, to be invoked
// as:
//   HashSelect::type<T>::Invoke(state, value)
// Also, HashSelect::type<T>::value is a boolean equal to `true` if there is a
// valid `Invoke` function. Types that are not hashable will have a ::value of
// `false`.
struct HashSelect {
 private:
  // Minimal hash-state stand-in used only inside the SFINAE probes below;
  // its combine_contiguous is declared but never defined.
  struct State : HashStateBase<State> {
    static State combine_contiguous(State hash_state, const unsigned char*,
                                    size_t);
    using State::HashStateBase::combine_contiguous;
  };

  // Probe for types whose object representation uniquely determines their
  // value; such types are hashed directly from their bytes.
  struct UniquelyRepresentedProbe {
    template <typename H, typename T>
    static auto Invoke(H state, const T& value)
        -> absl::enable_if_t<is_uniquely_represented<T>::value, H> {
      return hash_internal::hash_bytes(std::move(state), value);
    }
  };

  // Probe for types that provide their own AbslHashValue() overload
  // (found via ADL).
  struct HashValueProbe {
    template <typename H, typename T>
    static auto Invoke(H state, const T& value) -> absl::enable_if_t<
        std::is_same<H,
                     decltype(AbslHashValue(std::move(state), value))>::value,
        H> {
      return AbslHashValue(std::move(state), value);
    }
  };

  // Probe for types hashable through the configured legacy hash namespace;
  // compiled out unless legacy-hash support is enabled.
  struct LegacyHashProbe {
#if ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
    template <typename H, typename T>
    static auto Invoke(H state, const T& value) -> absl::enable_if_t<
        std::is_convertible<
            decltype(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>()(value)),
            size_t>::value,
        H> {
      return hash_internal::hash_bytes(
          std::move(state),
          ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>{}(value));
    }
#endif  // ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
  };

  // Last-resort probe: fall back to std::hash<T> where it is usable.
  struct StdHashProbe {
    template <typename H, typename T>
    static auto Invoke(H state, const T& value)
        -> absl::enable_if_t<type_traits_internal::IsHashable<T>::value, H> {
      return hash_internal::hash_bytes(std::move(state), std::hash<T>{}(value));
    }
  };

  // Probe<Hash, T>::value is true iff Hash::Invoke(State, const T&) is
  // well-formed. Probe also inherits Invoke from Hash so the selected probe
  // can be used directly.
  template <typename Hash, typename T>
  struct Probe : Hash {
   private:
    template <typename H, typename = decltype(H::Invoke(
                              std::declval<State>(), std::declval<const T&>()))>
    static std::true_type Test(int);
    template <typename U>
    static std::false_type Test(char);

   public:
    static constexpr bool value = decltype(Test<Hash>(0))::value;
  };

 public:
  // Probe each implementation in order.
  // disjunction provides short circuiting wrt instantiation.
  template <typename T>
  using Apply = absl::disjunction<         //
      Probe<UniquelyRepresentedProbe, T>,  //
      Probe<HashValueProbe, T>,            //
      Probe<LegacyHashProbe, T>,           //
      Probe<StdHashProbe, T>,              //
      std::false_type>;
};
942  |  |  | 
// is_hashable
//
// True iff at least one of the HashSelect probes accepts `T`.
template <typename T>
struct is_hashable
    : std::integral_constant<bool, HashSelect::template Apply<T>::value> {};
946  |  |  | 
947  |  | // MixingHashState  | 
948  |  | class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> { | 
949  |  |   // absl::uint128 is not an alias or a thin wrapper around the intrinsic.  | 
950  |  |   // We use the intrinsic when available to improve performance.  | 
951  |  | #ifdef ABSL_HAVE_INTRINSIC_INT128  | 
952  |  |   using uint128 = __uint128_t;  | 
953  |  | #else   // ABSL_HAVE_INTRINSIC_INT128  | 
954  |  |   using uint128 = absl::uint128;  | 
955  |  | #endif  // ABSL_HAVE_INTRINSIC_INT128  | 
956  |  |  | 
957  |  |   static constexpr uint64_t kMul =  | 
958  |  |   sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51} | 
959  |  |                       : uint64_t{0x9ddfea08eb382d69}; | 
960  |  |  | 
961  |  |   template <typename T>  | 
962  |  |   using IntegralFastPath =  | 
963  |  |       conjunction<std::is_integral<T>, is_uniquely_represented<T>>;  | 
964  |  |  | 
965  |  |  public:  | 
966  |  |   // Move only  | 
967  |  |   MixingHashState(MixingHashState&&) = default;  | 
968  |  |   MixingHashState& operator=(MixingHashState&&) = default;  | 
969  |  |  | 
970  |  |   // MixingHashState::combine_contiguous()  | 
971  |  |   //  | 
972  |  |   // Fundamental base case for hash recursion: mixes the given range of bytes  | 
973  |  |   // into the hash state.  | 
974  |  |   static MixingHashState combine_contiguous(MixingHashState hash_state,  | 
975  |  |                                             const unsigned char* first,  | 
976  | 124  |                                             size_t size) { | 
977  | 124  |     return MixingHashState(  | 
978  | 124  |         CombineContiguousImpl(hash_state.state_, first, size,  | 
979  | 124  |                               std::integral_constant<int, sizeof(size_t)>{})); | 
980  | 124  |   }  | 
981  |  |   using MixingHashState::HashStateBase::combine_contiguous;  | 
982  |  |  | 
983  |  |   // MixingHashState::hash()  | 
984  |  |   //  | 
985  |  |   // For performance reasons in non-opt mode, we specialize this for  | 
986  |  |   // integral types.  | 
987  |  |   // Otherwise we would be instantiating and calling dozens of functions for  | 
988  |  |   // something that is just one multiplication and a couple xor's.  | 
989  |  |   // The result should be the same as running the whole algorithm, but faster.  | 
990  |  |   template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>  | 
991  |  |   static size_t hash(T value) { | 
992  |  |     return static_cast<size_t>(  | 
993  |  |         Mix(Seed(), static_cast<std::make_unsigned_t<T>>(value)));  | 
994  |  |   }  | 
995  |  |  | 
996  |  |   // Overload of MixingHashState::hash()  | 
997  |  |   template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>  | 
998  | 62  |   static size_t hash(const T& value) { | 
999  | 62  |     return static_cast<size_t>(combine(MixingHashState{}, value).state_); | 
1000  | 62  |   } unsigned long absl::hash_internal::MixingHashState::hash<absl::string_view, 0>(absl::string_view const&) Line  | Count  | Source  |  998  | 62  |   static size_t hash(const T& value) { |  999  | 62  |     return static_cast<size_t>(combine(MixingHashState{}, value).state_); |  1000  | 62  |   }  |  
 Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<absl::Cord, 0>(absl::Cord const&) Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<std::__1::tuple<unsigned long const&>, 0>(std::__1::tuple<unsigned long const&> const&) Unexecuted instantiation: unsigned long absl::hash_internal::MixingHashState::hash<std::__1::tuple<absl::string_view const&, int const&>, 0>(std::__1::tuple<absl::string_view const&, int const&> const&)  | 
1001  |  |  | 
 private:
  // Invoked only once for a given argument; that plus the fact that this is
  // move-only ensures that there is only one non-moved-from object.
  MixingHashState() : state_(Seed()) {}

  // Grants the CRTP base class access to private members (e.g. state_).
  friend class MixingHashState::HashStateBase;
1008  |  |  | 
  // Combines element hash states in an order-independent way: each element's
  // state is accumulated into a commutative running sum, and the total is
  // then mixed into `state`.
  template <typename CombinerT>
  static MixingHashState RunCombineUnordered(MixingHashState state,
                                             CombinerT combiner) {
    uint64_t unordered_state = 0;
    combiner(MixingHashState{}, [&](MixingHashState& inner_state) {
      // Add the hash state of the element to the running total, but mix the
      // carry bit back into the low bit.  This is intended to avoid losing
      // entropy to overflow, especially when unordered_multisets contain
      // multiple copies of the same value.
      auto element_state = inner_state.state_;
      unordered_state += element_state;
      if (unordered_state < element_state) {
        ++unordered_state;
      }
      // Reset so the next element starts from a fresh seeded state.
      inner_state = MixingHashState{};
    });
    return MixingHashState::combine(std::move(state), unordered_state);
  }

  // Allow the HashState type-erasure implementation to invoke
  // RunCombineUnordered() directly.
  friend class absl::HashState;
1031  |  |  | 
  // Workaround for MSVC bug.
  // We make the type copyable to fix the calling convention, even though we
  // never actually copy it. Keep it private to not affect the public API of the
  // type.
  MixingHashState(const MixingHashState&) = default;

  // Wraps an already-computed 64-bit state value.
  explicit MixingHashState(uint64_t state) : state_(state) {}

  // Implementation of the base case for combine_contiguous where we actually
  // mix the bytes into the state.
  // Dispatch to different implementations of the combine_contiguous depending
  // on the value of `sizeof(size_t)`.
  static uint64_t CombineContiguousImpl(uint64_t state,
                                        const unsigned char* first, size_t len,
                                        std::integral_constant<int, 4>
                                        /* sizeof_size_t */);
  static uint64_t CombineContiguousImpl(uint64_t state,
                                        const unsigned char* first, size_t len,
                                        std::integral_constant<int, 8>
                                        /* sizeof_size_t */);

  // Slow dispatch path for calls to CombineContiguousImpl with a size argument
  // larger than PiecewiseChunkSize().  Has the same effect as calling
  // CombineContiguousImpl() repeatedly with the chunk stride size.
  static uint64_t CombineLargeContiguousImpl32(uint64_t state,
                                               const unsigned char* first,
                                               size_t len);
  static uint64_t CombineLargeContiguousImpl64(uint64_t state,
                                               const unsigned char* first,
                                               size_t len);
1062  |  |  | 
1063  |  |   // Reads 9 to 16 bytes from p.  | 
1064  |  |   // The least significant 8 bytes are in .first, the rest (zero padded) bytes  | 
1065  |  |   // are in .second.  | 
1066  |  |   static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,  | 
1067  | 0  |                                                  size_t len) { | 
1068  | 0  |     uint64_t low_mem = absl::base_internal::UnalignedLoad64(p);  | 
1069  | 0  |     uint64_t high_mem = absl::base_internal::UnalignedLoad64(p + len - 8);  | 
1070  | 0  | #ifdef ABSL_IS_LITTLE_ENDIAN  | 
1071  | 0  |     uint64_t most_significant = high_mem;  | 
1072  | 0  |     uint64_t least_significant = low_mem;  | 
1073  |  | #else  | 
1074  |  |     uint64_t most_significant = low_mem;  | 
1075  |  |     uint64_t least_significant = high_mem;  | 
1076  |  | #endif  | 
1077  | 0  |     return {least_significant, most_significant}; | 
1078  | 0  |   }  | 
1079  |  |  | 
1080  |  |   // Reads 4 to 8 bytes from p. Zero pads to fill uint64_t.  | 
1081  | 68  |   static uint64_t Read4To8(const unsigned char* p, size_t len) { | 
1082  | 68  |     uint32_t low_mem = absl::base_internal::UnalignedLoad32(p);  | 
1083  | 68  |     uint32_t high_mem = absl::base_internal::UnalignedLoad32(p + len - 4);  | 
1084  | 68  | #ifdef ABSL_IS_LITTLE_ENDIAN  | 
1085  | 68  |     uint32_t most_significant = high_mem;  | 
1086  | 68  |     uint32_t least_significant = low_mem;  | 
1087  |  | #else  | 
1088  |  |     uint32_t most_significant = low_mem;  | 
1089  |  |     uint32_t least_significant = high_mem;  | 
1090  |  | #endif  | 
1091  | 68  |     return (static_cast<uint64_t>(most_significant) << (len - 4) * 8) |  | 
1092  | 68  |            least_significant;  | 
1093  | 68  |   }  | 
1094  |  |  | 
  // Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
  static uint32_t Read1To3(const unsigned char* p, size_t len) {
    // The trick used by this implementation is to avoid branches if possible.
    // The three reads below are valid for any len in [1, 3]: for len == 1 all
    // three index the same byte, and the shifts collapse accordingly.
    unsigned char mem0 = p[0];
    unsigned char mem1 = p[len / 2];
    unsigned char mem2 = p[len - 1];
#ifdef ABSL_IS_LITTLE_ENDIAN
    unsigned char significant2 = mem2;
    unsigned char significant1 = mem1;
    unsigned char significant0 = mem0;
#else
    unsigned char significant2 = mem0;
    unsigned char significant1 = len == 2 ? mem0 : mem1;
    unsigned char significant0 = mem2;
#endif
    return static_cast<uint32_t>(significant0 |                     //
                                 (significant1 << (len / 2 * 8)) |  //
                                 (significant2 << ((len - 1) * 8)));
  }
1114  |  |  | 
  // Mixes `v` into `state` with a widening multiply by kMul, folding the high
  // half of the product back into the low half.
  ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
    // Though the 128-bit product on AArch64 needs two instructions, it is
    // still a good balance between speed and hash quality.
    using MultType =
        absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
    // We do the addition in 64-bit space to make sure the 128-bit
    // multiplication is fast. If we were to do it as MultType the compiler has
    // to assume that the high word is non-zero and needs to perform 2
    // multiplications instead of one.
    MultType m = state + v;
    m *= kMul;
    // XOR the high half of m into the low half (shift is 32 or 64 bits
    // depending on MultType).
    return static_cast<uint64_t>(m ^ (m >> (sizeof(m) * 8 / 2)));
  }
1128  |  |  | 
1129  |  |   // An extern to avoid bloat on a direct call to LowLevelHash() with fixed  | 
1130  |  |   // values for both the seed and salt parameters.  | 
1131  |  |   static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len);  | 
1132  |  |  | 
1133  |  |   ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data,  | 
1134  | 56  |                                                       size_t len) { | 
1135  | 56  | #ifdef ABSL_HAVE_INTRINSIC_INT128  | 
1136  | 56  |     return LowLevelHashImpl(data, len);  | 
1137  |  | #else  | 
1138  |  |     return hash_internal::CityHash64(reinterpret_cast<const char*>(data), len);  | 
1139  |  | #endif  | 
1140  | 56  |   }  | 
1141  |  |  | 
1142  |  |   // Seed()  | 
1143  |  |   //  | 
1144  |  |   // A non-deterministic seed.  | 
1145  |  |   //  | 
1146  |  |   // The current purpose of this seed is to generate non-deterministic results  | 
1147  |  |   // and prevent having users depend on the particular hash values.  | 
1148  |  |   // It is not meant as a security feature right now, but it leaves the door  | 
1149  |  |   // open to upgrade it to a true per-process random seed. A true random seed  | 
1150  |  |   // costs more and we don't need to pay for that right now.  | 
1151  |  |   //  | 
1152  |  |   // On platforms with ASLR, we take advantage of it to make a per-process  | 
1153  |  |   // random value.  | 
1154  |  |   // See https://en.wikipedia.org/wiki/Address_space_layout_randomization  | 
1155  |  |   //  | 
1156  |  |   // On other platforms this is still going to be non-deterministic but most  | 
1157  |  |   // probably per-build and not per-process.  | 
1158  | 118  |   ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() { | 
1159  | 118  | #if (!defined(__clang__) || __clang_major__ > 11) && \  | 
1160  | 118  |     (!defined(__apple_build_version__) ||            \  | 
1161  | 118  |      __apple_build_version__ >= 19558921)  // Xcode 12  | 
1162  | 118  |     return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));  | 
1163  |  | #else  | 
1164  |  |     // Workaround the absence of  | 
1165  |  |     // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.  | 
1166  |  |     return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSeed));  | 
1167  |  | #endif  | 
1168  | 118  |   }  | 
1169  |  |   static const void* const kSeed;  | 
1170  |  |  | 
1171  |  |   uint64_t state_;  | 
1172  |  | };  | 
1173  |  |  | 
1174  |  | // MixingHashState::CombineContiguousImpl()  | 
1175  |  | inline uint64_t MixingHashState::CombineContiguousImpl(  | 
1176  |  |     uint64_t state, const unsigned char* first, size_t len,  | 
1177  | 0  |     std::integral_constant<int, 4> /* sizeof_size_t */) { | 
1178  |  |   // For large values we use CityHash, for small ones we just use a  | 
1179  |  |   // multiplicative hash.  | 
1180  | 0  |   uint64_t v;  | 
1181  | 0  |   if (len > 8) { | 
1182  | 0  |     if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) { | 
1183  | 0  |       return CombineLargeContiguousImpl32(state, first, len);  | 
1184  | 0  |     }  | 
1185  | 0  |     v = hash_internal::CityHash32(reinterpret_cast<const char*>(first), len);  | 
1186  | 0  |   } else if (len >= 4) { | 
1187  | 0  |     v = Read4To8(first, len);  | 
1188  | 0  |   } else if (len > 0) { | 
1189  | 0  |     v = Read1To3(first, len);  | 
1190  | 0  |   } else { | 
1191  |  |     // Empty ranges have no effect.  | 
1192  | 0  |     return state;  | 
1193  | 0  |   }  | 
1194  | 0  |   return Mix(state, v);  | 
1195  | 0  | }  | 
1196  |  |  | 
1197  |  | // Overload of MixingHashState::CombineContiguousImpl()  | 
1198  |  | inline uint64_t MixingHashState::CombineContiguousImpl(  | 
1199  |  |     uint64_t state, const unsigned char* first, size_t len,  | 
1200  | 124  |     std::integral_constant<int, 8> /* sizeof_size_t */) { | 
1201  |  |   // For large values we use LowLevelHash or CityHash depending on the platform,  | 
1202  |  |   // for small ones we just use a multiplicative hash.  | 
1203  | 124  |   uint64_t v;  | 
1204  | 124  |   if (len > 16) { | 
1205  | 56  |     if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) { | 
1206  | 0  |       return CombineLargeContiguousImpl64(state, first, len);  | 
1207  | 0  |     }  | 
1208  | 56  |     v = Hash64(first, len);  | 
1209  | 68  |   } else if (len > 8) { | 
1210  |  |     // This hash function was constructed by the ML-driven algorithm discovery  | 
1211  |  |     // using reinforcement learning. We fed the agent lots of inputs from  | 
1212  |  |     // microbenchmarks, SMHasher, low hamming distance from generated inputs and  | 
1213  |  |     // picked up the one that was good on micro and macrobenchmarks.  | 
1214  | 0  |     auto p = Read9To16(first, len);  | 
1215  | 0  |     uint64_t lo = p.first;  | 
1216  | 0  |     uint64_t hi = p.second;  | 
1217  |  |     // Rotation by 53 was found to be most often useful when discovering these  | 
1218  |  |     // hashing algorithms with ML techniques.  | 
1219  | 0  |     lo = absl::rotr(lo, 53);  | 
1220  | 0  |     state += kMul;  | 
1221  | 0  |     lo += state;  | 
1222  | 0  |     state ^= hi;  | 
1223  | 0  |     uint128 m = state;  | 
1224  | 0  |     m *= lo;  | 
1225  | 0  |     return static_cast<uint64_t>(m ^ (m >> 64));  | 
1226  | 68  |   } else if (len >= 4) { | 
1227  | 68  |     v = Read4To8(first, len);  | 
1228  | 68  |   } else if (len > 0) { | 
1229  | 0  |     v = Read1To3(first, len);  | 
1230  | 0  |   } else { | 
1231  |  |     // Empty ranges have no effect.  | 
1232  | 0  |     return state;  | 
1233  | 0  |   }  | 
1234  | 124  |   return Mix(state, v);  | 
1235  | 124  | }  | 
1236  |  |  | 
1237  |  | struct AggregateBarrier {}; | 
1238  |  |  | 
1239  |  | // HashImpl  | 
1240  |  |  | 
1241  |  | // Add a private base class to make sure this type is not an aggregate.  | 
1242  |  | // Aggregates can be aggregate initialized even if the default constructor is  | 
1243  |  | // deleted.  | 
1244  |  | struct PoisonedHash : private AggregateBarrier { | 
1245  |  |   PoisonedHash() = delete;  | 
1246  |  |   PoisonedHash(const PoisonedHash&) = delete;  | 
1247  |  |   PoisonedHash& operator=(const PoisonedHash&) = delete;  | 
1248  |  | };  | 
1249  |  |  | 
1250  |  | template <typename T>  | 
1251  |  | struct HashImpl { | 
1252  | 62  |   size_t operator()(const T& value) const { | 
1253  | 62  |     return MixingHashState::hash(value);  | 
1254  | 62  |   } absl::hash_internal::HashImpl<absl::string_view>::operator()(absl::string_view const&) const Line  | Count  | Source  |  1252  | 62  |   size_t operator()(const T& value) const { |  1253  | 62  |     return MixingHashState::hash(value);  |  1254  | 62  |   }  |  
 Unexecuted instantiation: absl::hash_internal::HashImpl<absl::Cord>::operator()(absl::Cord const&) const Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::tuple<unsigned long const&> >::operator()(std::__1::tuple<unsigned long const&> const&) const Unexecuted instantiation: absl::hash_internal::HashImpl<std::__1::tuple<absl::string_view const&, int const&> >::operator()(std::__1::tuple<absl::string_view const&, int const&> const&) const  | 
1255  |  | };  | 
1256  |  |  | 
1257  |  | template <typename T>  | 
1258  |  | struct Hash  | 
1259  |  |     : absl::conditional_t<is_hashable<T>::value, HashImpl<T>, PoisonedHash> {}; | 
1260  |  |  | 
1261  |  | template <typename H>  | 
1262  |  | template <typename T, typename... Ts>  | 
1263  | 124  | H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) { | 
1264  | 124  |   return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(  | 
1265  | 124  |                         std::move(state), value),  | 
1266  | 124  |                     values...);  | 
1267  | 124  | } absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<absl::string_view>(absl::hash_internal::MixingHashState, absl::string_view const&) Line  | Count  | Source  |  1263  | 62  | H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) { |  1264  | 62  |   return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(  |  1265  | 62  |                         std::move(state), value),  |  1266  | 62  |                     values...);  |  1267  | 62  | }  |  
 absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<unsigned long>(absl::hash_internal::MixingHashState, unsigned long const&) Line  | Count  | Source  |  1263  | 62  | H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) { |  1264  | 62  |   return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(  |  1265  | 62  |                         std::move(state), value),  |  1266  | 62  |                     values...);  |  1267  | 62  | }  |  
 Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<absl::Cord>(absl::hash_internal::MixingHashState, absl::Cord const&) Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::tuple<unsigned long const&>>(absl::hash_internal::MixingHashState, std::__1::tuple<unsigned long const&> const&) Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<std::__1::tuple<absl::string_view const&, int const&>>(absl::hash_internal::MixingHashState, std::__1::tuple<absl::string_view const&, int const&> const&) Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<absl::string_view, int>(absl::hash_internal::MixingHashState, absl::string_view const&, int const&) Unexecuted instantiation: absl::hash_internal::MixingHashState absl::hash_internal::HashStateBase<absl::hash_internal::MixingHashState>::combine<int>(absl::hash_internal::MixingHashState, int const&)  | 
1268  |  |  | 
1269  |  | // HashStateBase::combine_contiguous()  | 
1270  |  | template <typename H>  | 
1271  |  | template <typename T>  | 
1272  | 62  | H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) { | 
1273  | 62  |   return hash_internal::hash_range_or_bytes(std::move(state), data, size);  | 
1274  | 62  | }  | 
1275  |  |  | 
1276  |  | // HashStateBase::combine_unordered()  | 
1277  |  | template <typename H>  | 
1278  |  | template <typename I>  | 
1279  |  | H HashStateBase<H>::combine_unordered(H state, I begin, I end) { | 
1280  |  |   return H::RunCombineUnordered(std::move(state),  | 
1281  |  |                                 CombineUnorderedCallback<I>{begin, end}); | 
1282  |  | }  | 
1283  |  |  | 
1284  |  | // HashStateBase::PiecewiseCombiner::add_buffer()  | 
1285  |  | template <typename H>  | 
1286  |  | H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,  | 
1287  | 0  |                                 size_t size) { | 
1288  | 0  |   if (position_ + size < PiecewiseChunkSize()) { | 
1289  | 0  |     // This partial chunk does not fill our existing buffer  | 
1290  | 0  |     memcpy(buf_ + position_, data, size);  | 
1291  | 0  |     position_ += size;  | 
1292  | 0  |     return state;  | 
1293  | 0  |   }  | 
1294  | 0  | 
  | 
1295  | 0  |   // If the buffer is partially filled we need to complete the buffer  | 
1296  | 0  |   // and hash it.  | 
1297  | 0  |   if (position_ != 0) { | 
1298  | 0  |     const size_t bytes_needed = PiecewiseChunkSize() - position_;  | 
1299  | 0  |     memcpy(buf_ + position_, data, bytes_needed);  | 
1300  | 0  |     state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize());  | 
1301  | 0  |     data += bytes_needed;  | 
1302  | 0  |     size -= bytes_needed;  | 
1303  | 0  |   }  | 
1304  | 0  | 
  | 
1305  | 0  |   // Hash whatever chunks we can without copying  | 
1306  | 0  |   while (size >= PiecewiseChunkSize()) { | 
1307  | 0  |     state = H::combine_contiguous(std::move(state), data, PiecewiseChunkSize());  | 
1308  | 0  |     data += PiecewiseChunkSize();  | 
1309  | 0  |     size -= PiecewiseChunkSize();  | 
1310  | 0  |   }  | 
1311  | 0  |   // Fill the buffer with the remainder  | 
1312  | 0  |   memcpy(buf_, data, size);  | 
1313  | 0  |   position_ = size;  | 
1314  | 0  |   return state;  | 
1315  | 0  | }  | 
1316  |  |  | 
1317  |  | // HashStateBase::PiecewiseCombiner::finalize()  | 
1318  |  | template <typename H>  | 
1319  | 0  | H PiecewiseCombiner::finalize(H state) { | 
1320  | 0  |   // Hash the remainder left in the buffer, which may be empty  | 
1321  | 0  |   return H::combine_contiguous(std::move(state), buf_, position_);  | 
1322  | 0  | }  | 
1323  |  |  | 
1324  |  | }  // namespace hash_internal  | 
1325  |  | ABSL_NAMESPACE_END  | 
1326  |  | }  // namespace absl  | 
1327  |  |  | 
1328  |  | #endif  // ABSL_HASH_INTERNAL_HASH_H_  |