/src/abseil-cpp/absl/container/internal/hashtablez_sampler.h
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright 2018 The Abseil Authors. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | // |
15 | | // ----------------------------------------------------------------------------- |
16 | | // File: hashtablez_sampler.h |
17 | | // ----------------------------------------------------------------------------- |
18 | | // |
19 | | // This header file defines the API for a low level library to sample hashtables |
20 | | // and collect runtime statistics about them. |
21 | | // |
22 | | // `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which |
23 | | // store information about a single sample. |
24 | | // |
25 | | // `Record*` methods store information into samples. |
26 | | // `Sample()` and `Unsample()` make use of a single global sampler with |
27 | | // properties controlled by the flags hashtablez_enabled, |
28 | | // hashtablez_sample_rate, and hashtablez_max_samples. |
29 | | // |
30 | | // WARNING |
31 | | // |
32 | | // Using this sampling API may cause sampled Swiss tables to use the global |
33 | | // allocator (operator `new`) in addition to any custom allocator. If you |
34 | | // are using a table in an unusual circumstance where allocation or calling a |
35 | | // linux syscall is unacceptable, this could interfere. |
36 | | // |
37 | | // This utility is internal-only. Use at your own risk. |
38 | | |
39 | | #ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ |
40 | | #define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ |
41 | | |
42 | | #include <atomic> |
43 | | #include <functional> |
44 | | #include <memory> |
45 | | #include <vector> |
46 | | |
47 | | #include "absl/base/config.h" |
48 | | #include "absl/base/internal/per_thread_tls.h" |
49 | | #include "absl/base/optimization.h" |
50 | | #include "absl/profiling/internal/sample_recorder.h" |
51 | | #include "absl/synchronization/mutex.h" |
52 | | #include "absl/utility/utility.h" |
53 | | |
54 | | namespace absl { |
55 | | ABSL_NAMESPACE_BEGIN |
56 | | namespace container_internal { |
57 | | |
58 | | // Stores information about a sampled hashtable. All mutations to this *must* |
59 | | // be made through `Record*` functions below. All reads from this *must* only |
60 | | // occur in the callback to `HashtablezSampler::Iterate`. |
61 | | struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> { |
62 | | // Constructs the object but does not fill in any fields. |
63 | | HashtablezInfo(); |
64 | | ~HashtablezInfo(); |
65 | | HashtablezInfo(const HashtablezInfo&) = delete; |
66 | | HashtablezInfo& operator=(const HashtablezInfo&) = delete; |
67 | | |
68 | | // Puts the object into a clean state, fills in the logically `const` members, |
69 | | // blocking for any readers that are currently sampling the object. |
70 | | void PrepareForSampling(int64_t stride, size_t inline_element_size_value) |
71 | | ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); |
72 | | |
73 | | // These fields are mutated by the various Record* APIs and need to be |
74 | | // thread-safe. |
75 | | std::atomic<size_t> capacity; |
76 | | std::atomic<size_t> size; |
77 | | std::atomic<size_t> num_erases; |
78 | | std::atomic<size_t> num_rehashes; |
79 | | std::atomic<size_t> max_probe_length; |
80 | | std::atomic<size_t> total_probe_length; |
81 | | std::atomic<size_t> hashes_bitwise_or; |
82 | | std::atomic<size_t> hashes_bitwise_and; |
83 | | std::atomic<size_t> hashes_bitwise_xor; |
84 | | std::atomic<size_t> max_reserve; |
85 | | |
86 | | // All of the fields below are set by `PrepareForSampling`, they must not be |
87 | | // mutated in `Record*` functions. They are logically `const` in that sense. |
88 | | // These are guarded by init_mu, but that is not externalized to clients, |
89 | | // which can read them only during `SampleRecorder::Iterate` which will hold |
90 | | // the lock. |
91 | | static constexpr int kMaxStackDepth = 64; |
92 | | absl::Time create_time; |
93 | | int32_t depth; |
94 | | void* stack[kMaxStackDepth]; |
95 | | size_t inline_element_size; // How big is the slot? |
96 | | }; |
97 | | |
98 | | void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length); |
99 | | |
100 | | void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity); |
101 | | |
102 | | void RecordClearedReservationSlow(HashtablezInfo* info); |
103 | | |
104 | | void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, |
105 | | size_t capacity); |
106 | | |
107 | | void RecordInsertSlow(HashtablezInfo* info, size_t hash, |
108 | | size_t distance_from_desired); |
109 | | |
110 | | void RecordEraseSlow(HashtablezInfo* info); |
111 | | |
112 | | struct SamplingState { |
113 | | int64_t next_sample; |
114 | | // When we make a sampling decision, we record that distance so we can weight |
115 | | // each sample. |
116 | | int64_t sample_stride; |
117 | | }; |
118 | | |
119 | | HashtablezInfo* SampleSlow(SamplingState& next_sample, |
120 | | size_t inline_element_size); |
121 | | void UnsampleSlow(HashtablezInfo* info); |
122 | | |
123 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
124 | | #error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set |
125 | | #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
126 | | |
127 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
128 | | class HashtablezInfoHandle { |
129 | | public: |
130 | | explicit HashtablezInfoHandle() : info_(nullptr) {} |
131 | | explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {} |
132 | | |
133 | | // We do not have a destructor. Caller is responsible for calling Unregister |
134 | | // before destroying the handle. |
135 | | void Unregister() { |
136 | | if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; |
137 | | UnsampleSlow(info_); |
138 | | } |
139 | | |
140 | | inline bool IsSampled() const { return ABSL_PREDICT_FALSE(info_ != nullptr); } |
141 | | |
142 | | inline void RecordStorageChanged(size_t size, size_t capacity) { |
143 | | if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; |
144 | | RecordStorageChangedSlow(info_, size, capacity); |
145 | | } |
146 | | |
147 | | inline void RecordRehash(size_t total_probe_length) { |
148 | | if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; |
149 | | RecordRehashSlow(info_, total_probe_length); |
150 | | } |
151 | | |
152 | | inline void RecordReservation(size_t target_capacity) { |
153 | | if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; |
154 | | RecordReservationSlow(info_, target_capacity); |
155 | | } |
156 | | |
157 | | inline void RecordClearedReservation() { |
158 | | if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; |
159 | | RecordClearedReservationSlow(info_); |
160 | | } |
161 | | |
162 | | inline void RecordInsert(size_t hash, size_t distance_from_desired) { |
163 | | if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; |
164 | | RecordInsertSlow(info_, hash, distance_from_desired); |
165 | | } |
166 | | |
167 | | inline void RecordErase() { |
168 | | if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; |
169 | | RecordEraseSlow(info_); |
170 | | } |
171 | | |
172 | | friend inline void swap(HashtablezInfoHandle& lhs, |
173 | | HashtablezInfoHandle& rhs) { |
174 | | std::swap(lhs.info_, rhs.info_); |
175 | | } |
176 | | |
177 | | private: |
178 | | friend class HashtablezInfoHandlePeer; |
179 | | HashtablezInfo* info_; |
180 | | }; |
181 | | #else |
182 | | // Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can |
183 | | // be removed by the linker, in order to reduce the binary size. |
184 | | class HashtablezInfoHandle { |
185 | | public: |
186 | | explicit HashtablezInfoHandle() = default; |
187 | 2 | explicit HashtablezInfoHandle(std::nullptr_t) {} |
188 | | |
189 | 0 | inline void Unregister() {} |
190 | 8 | inline bool IsSampled() const { return false; } |
191 | 0 | inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {} |
192 | 8 | inline void RecordRehash(size_t /*total_probe_length*/) {} |
193 | 0 | inline void RecordReservation(size_t /*target_capacity*/) {} |
194 | 0 | inline void RecordClearedReservation() {} |
195 | 20 | inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {} |
196 | 0 | inline void RecordErase() {} |
197 | | |
198 | | friend inline void swap(HashtablezInfoHandle& /*lhs*/, |
199 | 0 | HashtablezInfoHandle& /*rhs*/) {} |
200 | | }; |
201 | | #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
202 | | |
203 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
204 | | extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample; |
205 | | #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
206 | | |
207 | | // Returns an RAII sampling handle that manages registration and unregistration
208 | | // with the global sampler. |
209 | | inline HashtablezInfoHandle Sample( |
210 | 2 | size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) { |
211 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
212 | | if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) { |
213 | | return HashtablezInfoHandle(nullptr); |
214 | | } |
215 | | return HashtablezInfoHandle( |
216 | | SampleSlow(global_next_sample, inline_element_size)); |
217 | | #else |
218 | 2 | return HashtablezInfoHandle(nullptr); |
219 | 2 | #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
220 | 2 | } |
221 | | |
222 | | using HashtablezSampler = |
223 | | ::absl::profiling_internal::SampleRecorder<HashtablezInfo>; |
224 | | |
225 | | // Returns a global Sampler. |
226 | | HashtablezSampler& GlobalHashtablezSampler(); |
227 | | |
228 | | using HashtablezConfigListener = void (*)(); |
229 | | void SetHashtablezConfigListener(HashtablezConfigListener l); |
230 | | |
231 | | // Enables or disables sampling for Swiss tables. |
232 | | bool IsHashtablezEnabled(); |
233 | | void SetHashtablezEnabled(bool enabled); |
234 | | void SetHashtablezEnabledInternal(bool enabled); |
235 | | |
236 | | // Sets the rate at which Swiss tables will be sampled. |
237 | | int32_t GetHashtablezSampleParameter(); |
238 | | void SetHashtablezSampleParameter(int32_t rate); |
239 | | void SetHashtablezSampleParameterInternal(int32_t rate); |
240 | | |
241 | | // Sets a soft max for the number of samples that will be kept. |
242 | | size_t GetHashtablezMaxSamples(); |
243 | | void SetHashtablezMaxSamples(size_t max); |
244 | | void SetHashtablezMaxSamplesInternal(size_t max); |
245 | | |
246 | | // Configuration override. |
247 | | // This allows process-wide sampling without depending on order of |
248 | | // initialization of static storage duration objects. |
249 | | // The definition of this constant is weak, which allows us to inject a |
250 | | // different value for it at link time. |
251 | | extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); |
252 | | |
253 | | } // namespace container_internal |
254 | | ABSL_NAMESPACE_END |
255 | | } // namespace absl |
256 | | |
257 | | #endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ |