/src/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
Line | Count | Source |
1 | | // Copyright 2018 The Abseil Authors. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | #include "absl/container/internal/hashtablez_sampler.h" |
16 | | |
17 | | #include <algorithm> |
18 | | #include <atomic> |
19 | | #include <cassert> |
20 | | #include <cmath> |
21 | | #include <cstddef> |
22 | | #include <cstdint> |
23 | | #include <functional> |
24 | | #include <limits> |
25 | | |
26 | | #include "absl/base/attributes.h" |
27 | | #include "absl/base/config.h" |
28 | | #include "absl/base/internal/per_thread_tls.h" |
29 | | #include "absl/base/internal/raw_logging.h" |
30 | | #include "absl/base/macros.h" |
31 | | #include "absl/base/no_destructor.h" |
32 | | #include "absl/base/optimization.h" |
33 | | #include "absl/debugging/stacktrace.h" |
34 | | #include "absl/memory/memory.h" |
35 | | #include "absl/profiling/internal/exponential_biased.h" |
36 | | #include "absl/profiling/internal/sample_recorder.h" |
37 | | #include "absl/synchronization/mutex.h" |
38 | | #include "absl/time/clock.h" |
39 | | #include "absl/utility/utility.h" |
40 | | |
41 | | namespace absl { |
42 | | ABSL_NAMESPACE_BEGIN |
43 | | namespace container_internal { |
44 | | |
45 | | namespace { |
46 | | ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{ |
47 | | false |
48 | | }; |
49 | | ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10}; |
50 | | std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr}; |
51 | | |
52 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
53 | | ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased |
54 | | g_exponential_biased_generator; |
55 | | #endif |
56 | | |
57 | 0 | void TriggerHashtablezConfigListener() { |
58 | 0 | auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire); |
59 | 0 | if (listener != nullptr) listener(); |
60 | 0 | } |
61 | | |
62 | | } // namespace |
63 | | |
64 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
65 | | ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0}; |
66 | | #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
67 | | |
68 | 0 | HashtablezSampler& GlobalHashtablezSampler() { |
69 | 0 | static absl::NoDestructor<HashtablezSampler> sampler; |
70 | 0 | return *sampler; |
71 | 0 | } |
72 | | |
73 | 0 | HashtablezInfo::HashtablezInfo() = default; |
74 | 0 | HashtablezInfo::~HashtablezInfo() = default; |
75 | | |
76 | | void HashtablezInfo::PrepareForSampling(int64_t stride, |
77 | | size_t inline_element_size_value, |
78 | | size_t key_size_value, |
79 | | size_t value_size_value, |
80 | 0 | uint16_t soo_capacity_value) { |
81 | 0 | capacity.store(0, std::memory_order_relaxed); |
82 | 0 | size.store(0, std::memory_order_relaxed); |
83 | 0 | num_erases.store(0, std::memory_order_relaxed); |
84 | 0 | num_rehashes.store(0, std::memory_order_relaxed); |
85 | 0 | max_probe_length.store(0, std::memory_order_relaxed); |
86 | 0 | total_probe_length.store(0, std::memory_order_relaxed); |
87 | 0 | hashes_bitwise_or.store(0, std::memory_order_relaxed); |
88 | 0 | hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed); |
89 | 0 | hashes_bitwise_xor.store(0, std::memory_order_relaxed); |
90 | 0 | max_reserve.store(0, std::memory_order_relaxed); |
91 | | 
92 | 0 | create_time = absl::Now(); |
93 | 0 | weight = stride; |
94 | | // The inliner makes hardcoded skip_count difficult (especially when combined |
95 | | // with LTO). We use the ability to exclude stacks by regex when encoding |
96 | | // instead. |
97 | 0 | depth = static_cast<unsigned>( |
98 | 0 | absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth, |
99 | 0 | /* skip_count= */ 0)); |
100 | 0 | inline_element_size = inline_element_size_value; |
101 | 0 | key_size = key_size_value; |
102 | 0 | value_size = value_size_value; |
103 | 0 | soo_capacity = soo_capacity_value; |
104 | 0 | } |
105 | | |
106 | 0 | static bool ShouldForceSampling() { |
107 | 0 | enum ForceState { |
108 | 0 | kDontForce, |
109 | 0 | kForce, |
110 | 0 | kUninitialized |
111 | 0 | }; |
112 | 0 | ABSL_CONST_INIT static std::atomic<ForceState> global_state{ |
113 | 0 | kUninitialized}; |
114 | 0 | ForceState state = global_state.load(std::memory_order_relaxed); |
115 | 0 | if (ABSL_PREDICT_TRUE(state == kDontForce)) return false; |
116 | | |
117 | 0 | if (state == kUninitialized) { |
118 | 0 | state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)() |
119 | 0 | ? kForce |
120 | 0 | : kDontForce; |
121 | 0 | global_state.store(state, std::memory_order_relaxed); |
122 | 0 | } |
123 | 0 | return state == kForce; |
124 | 0 | } |
125 | | |
126 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
127 | | HashtablezInfoHandle ForcedTrySample(size_t inline_element_size, |
128 | | size_t key_size, size_t value_size, |
129 | | uint16_t soo_capacity) { |
130 | | return HashtablezInfoHandle(SampleSlow(global_next_sample, |
131 | | inline_element_size, key_size, |
132 | | value_size, soo_capacity)); |
133 | | } |
134 | | void TestOnlyRefreshSamplingStateForCurrentThread() { |
135 | | global_next_sample.next_sample = |
136 | | g_hashtablez_sample_parameter.load(std::memory_order_relaxed); |
137 | | global_next_sample.sample_stride = global_next_sample.next_sample; |
138 | | } |
139 | | #else |
140 | 0 | HashtablezInfoHandle ForcedTrySample(size_t, size_t, size_t, uint16_t) { |
141 | 0 | return HashtablezInfoHandle{nullptr}; |
142 | 0 | } |
143 | 0 | void TestOnlyRefreshSamplingStateForCurrentThread() {} |
144 | | #endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE |
145 | | |
146 | | HashtablezInfo* SampleSlow(SamplingState& next_sample, |
147 | | size_t inline_element_size, size_t key_size, |
148 | 0 | size_t value_size, uint16_t soo_capacity) { |
149 | 0 | if (ABSL_PREDICT_FALSE(ShouldForceSampling())) { |
150 | 0 | next_sample.next_sample = 1; |
151 | 0 | const int64_t old_stride = exchange(next_sample.sample_stride, 1); |
152 | 0 | HashtablezInfo* result = GlobalHashtablezSampler().Register( |
153 | 0 | old_stride, inline_element_size, key_size, value_size, soo_capacity); |
154 | 0 | return result; |
155 | 0 | } |
156 | | |
157 | 0 | #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
158 | 0 | next_sample = { |
159 | 0 | std::numeric_limits<int64_t>::max(), |
160 | 0 | std::numeric_limits<int64_t>::max(), |
161 | 0 | }; |
162 | 0 | return nullptr; |
163 | | #else |
164 | | bool first = next_sample.next_sample < 0; |
165 | | |
166 | | const int64_t next_stride = g_exponential_biased_generator.GetStride( |
167 | | g_hashtablez_sample_parameter.load(std::memory_order_relaxed)); |
168 | | |
169 | | next_sample.next_sample = next_stride; |
170 | | const int64_t old_stride = exchange(next_sample.sample_stride, next_stride); |
171 | | // Small values of interval are equivalent to just sampling next time. |
172 | | ABSL_ASSERT(next_stride >= 1); |
173 | | |
174 | | // g_hashtablez_enabled can be dynamically flipped, so we need to set a |
175 | | // threshold low enough that we will start sampling in a reasonable time; we |
176 | | // just use the default sampling rate. |
177 | | if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr; |
178 | | |
179 | | // We will only be negative on our first count, so we should just retry in |
180 | | // that case. |
181 | | if (first) { |
182 | | if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr; |
183 | | return SampleSlow(next_sample, inline_element_size, key_size, value_size, |
184 | | soo_capacity); |
185 | | } |
186 | | |
187 | | return GlobalHashtablezSampler().Register(old_stride, inline_element_size, |
188 | | key_size, value_size, soo_capacity); |
189 | | #endif |
190 | 0 | } |
191 | | |
192 | 0 | void UnsampleSlow(HashtablezInfo* info) { |
193 | 0 | GlobalHashtablezSampler().Unregister(info); |
194 | 0 | } |
195 | | |
196 | 0 | void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { |
197 | 0 | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
198 | 0 | total_probe_length /= 16; |
199 | | #else |
200 | | total_probe_length /= 8; |
201 | | #endif |
202 | 0 | info->total_probe_length.store(total_probe_length, std::memory_order_relaxed); |
203 | 0 | info->num_erases.store(0, std::memory_order_relaxed); |
204 | | // There is only one concurrent writer, so `load` then `store` is sufficient |
205 | | // instead of using `fetch_add`. |
206 | 0 | info->num_rehashes.store( |
207 | 0 | 1 + info->num_rehashes.load(std::memory_order_relaxed), |
208 | 0 | std::memory_order_relaxed); |
209 | 0 | } |
210 | | |
211 | 0 | void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity) { |
212 | 0 | info->max_reserve.store( |
213 | 0 | (std::max)(info->max_reserve.load(std::memory_order_relaxed), |
214 | 0 | target_capacity), |
215 | 0 | std::memory_order_relaxed); |
216 | 0 | } |
217 | | |
218 | 0 | void RecordClearedReservationSlow(HashtablezInfo* info) { |
219 | 0 | info->max_reserve.store(0, std::memory_order_relaxed); |
220 | 0 | } |
221 | | |
222 | | void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, |
223 | 0 | size_t capacity) { |
224 | 0 | info->size.store(size, std::memory_order_relaxed); |
225 | 0 | info->capacity.store(capacity, std::memory_order_relaxed); |
226 | 0 | if (size == 0) { |
227 | | // This is a clear; reset the total/num_erases too. |
228 | 0 | info->total_probe_length.store(0, std::memory_order_relaxed); |
229 | 0 | info->num_erases.store(0, std::memory_order_relaxed); |
230 | 0 | } |
231 | 0 | } |
232 | | |
233 | | void RecordInsertSlow(HashtablezInfo* info, size_t hash, |
234 | 0 | size_t distance_from_desired) { |
235 | | // SwissTables probe in groups of 16 (8 without SSE2), so scale this to count |
236 | | // group probes rather than the offset from the desired slot. |
237 | 0 | size_t probe_length = distance_from_desired; |
238 | 0 | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
239 | 0 | probe_length /= 16; |
240 | | #else |
241 | | probe_length /= 8; |
242 | | #endif |
243 | | 
244 | 0 | info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed); |
245 | 0 | info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed); |
246 | 0 | info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed); |
247 | 0 | info->max_probe_length.store( |
248 | 0 | std::max(info->max_probe_length.load(std::memory_order_relaxed), |
249 | 0 | probe_length), |
250 | 0 | std::memory_order_relaxed); |
251 | 0 | info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed); |
252 | 0 | info->size.fetch_add(1, std::memory_order_relaxed); |
253 | 0 | } |
254 | | |
255 | 0 | void RecordEraseSlow(HashtablezInfo* info) { |
256 | 0 | info->size.fetch_sub(1, std::memory_order_relaxed); |
257 | | // There is only one concurrent writer, so `load` then `store` is sufficient |
258 | | // instead of using `fetch_add`. |
259 | 0 | info->num_erases.store(1 + info->num_erases.load(std::memory_order_relaxed), |
260 | 0 | std::memory_order_relaxed); |
261 | 0 | } |
262 | | |
263 | 0 | void SetHashtablezConfigListener(HashtablezConfigListener l) { |
264 | 0 | g_hashtablez_config_listener.store(l, std::memory_order_release); |
265 | 0 | } |
266 | | |
267 | 0 | bool IsHashtablezEnabled() { |
268 | 0 | return g_hashtablez_enabled.load(std::memory_order_acquire); |
269 | 0 | } |
270 | | |
271 | 0 | void SetHashtablezEnabled(bool enabled) { |
272 | 0 | SetHashtablezEnabledInternal(enabled); |
273 | 0 | TriggerHashtablezConfigListener(); |
274 | 0 | } |
275 | | |
276 | 0 | void SetHashtablezEnabledInternal(bool enabled) { |
277 | 0 | g_hashtablez_enabled.store(enabled, std::memory_order_release); |
278 | 0 | } |
279 | | |
280 | 0 | int32_t GetHashtablezSampleParameter() { |
281 | 0 | return g_hashtablez_sample_parameter.load(std::memory_order_acquire); |
282 | 0 | } |
283 | | |
284 | 0 | void SetHashtablezSampleParameter(int32_t rate) { |
285 | 0 | SetHashtablezSampleParameterInternal(rate); |
286 | 0 | TriggerHashtablezConfigListener(); |
287 | 0 | } |
288 | | |
289 | 0 | void SetHashtablezSampleParameterInternal(int32_t rate) { |
290 | 0 | if (rate > 0) { |
291 | 0 | g_hashtablez_sample_parameter.store(rate, std::memory_order_release); |
292 | 0 | } else { |
293 | 0 | ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld", |
294 | 0 | static_cast<long long>(rate)); // NOLINT(runtime/int) |
295 | 0 | } |
296 | 0 | } |
297 | | |
298 | 0 | size_t GetHashtablezMaxSamples() { |
299 | 0 | return GlobalHashtablezSampler().GetMaxSamples(); |
300 | 0 | } |
301 | | |
302 | 0 | void SetHashtablezMaxSamples(size_t max) { |
303 | 0 | SetHashtablezMaxSamplesInternal(max); |
304 | 0 | TriggerHashtablezConfigListener(); |
305 | 0 | } |
306 | | |
307 | 0 | void SetHashtablezMaxSamplesInternal(size_t max) { |
308 | 0 | if (max > 0) { |
309 | 0 | GlobalHashtablezSampler().SetMaxSamples(max); |
310 | 0 | } else { |
311 | 0 | ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: 0"); |
312 | 0 | } |
313 | 0 | } |
314 | | |
315 | | } // namespace container_internal |
316 | | ABSL_NAMESPACE_END |
317 | | } // namespace absl |
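
A minimal usage sketch (an illustrative assumption, not part of this translation unit): a test harness or process-startup hook could drive the configuration entry points defined above. Only functions declared in hashtablez_sampler.h and implemented in this file are called; the harness function name is hypothetical.

#include <cstdint>
#include "absl/container/internal/hashtablez_sampler.h"

// Hypothetical startup hook: turn sampling on, set the mean sampling stride,
// and cap the number of retained samples. Each setter forwards to its
// *Internal counterpart and then fires the registered config listener, as
// shown in the implementations above.
void ConfigureHashtablezSampling() {
  using namespace absl::container_internal;
  SetHashtablezEnabled(true);             // stores into g_hashtablez_enabled
  SetHashtablezSampleParameter(1 << 11);  // mean sampling stride
  SetHashtablezMaxSamples(1024);          // cap on retained HashtablezInfo records
  // Read back the effective rate, e.g. for logging.
  const int32_t rate = GetHashtablezSampleParameter();
  (void)rate;
}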