/src/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
Line | Count | Source |
1 | | // Copyright 2018 The Abseil Authors. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | #include "absl/container/internal/hashtablez_sampler.h" |
16 | | |
17 | | #include <algorithm> |
18 | | #include <atomic> |
19 | | #include <cassert> |
20 | | #include <cmath> |
21 | | #include <cstddef> |
22 | | #include <cstdint> |
23 | | #include <functional> |
24 | | #include <limits> |
25 | | |
26 | | #include "absl/base/attributes.h" |
27 | | #include "absl/base/config.h" |
28 | | #include "absl/base/internal/per_thread_tls.h" |
29 | | #include "absl/base/internal/raw_logging.h" |
30 | | #include "absl/base/macros.h" |
31 | | #include "absl/base/no_destructor.h" |
32 | | #include "absl/base/optimization.h" |
33 | | #include "absl/debugging/stacktrace.h" |
34 | | #include "absl/memory/memory.h" |
35 | | #include "absl/profiling/internal/exponential_biased.h" |
36 | | #include "absl/profiling/internal/sample_recorder.h" |
37 | | #include "absl/synchronization/mutex.h" |
38 | | #include "absl/time/clock.h" |
39 | | #include "absl/utility/utility.h" |
40 | | |
41 | | namespace absl { |
42 | | ABSL_NAMESPACE_BEGIN |
43 | | namespace container_internal { |
44 | | |
45 | | namespace { |
46 | | ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{ |
47 | | false |
48 | | }; |
49 | | ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10}; |
50 | | std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr}; |
51 | | |
52 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
53 | | ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased |
54 | | g_exponential_biased_generator; |
55 | | #endif |
56 | | |
57 | 0 | void TriggerHashtablezConfigListener() { |
58 | 0 | auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire); |
59 | 0 | if (listener != nullptr) listener(); |
60 | 0 | } |
61 | | |
62 | | } // namespace |
63 | | |
64 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
65 | | ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0}; |
66 | | #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
67 | | |
68 | 0 | HashtablezSampler& GlobalHashtablezSampler() { |
69 | 0 | static absl::NoDestructor<HashtablezSampler> sampler; |
70 | 0 | return *sampler; |
71 | 0 | } |
72 | | |
73 | 0 | HashtablezInfo::HashtablezInfo() = default; |
74 | 0 | HashtablezInfo::~HashtablezInfo() = default; |
75 | | |
76 | | void HashtablezInfo::PrepareForSampling(int64_t stride, |
77 | | size_t inline_element_size_value, |
78 | | size_t key_size_value, |
79 | | size_t value_size_value, |
80 | 0 | uint16_t soo_capacity_value) { |
81 | 0 | capacity.store(0, std::memory_order_relaxed); |
82 | 0 | size.store(0, std::memory_order_relaxed); |
83 | 0 | num_erases.store(0, std::memory_order_relaxed); |
84 | 0 | num_rehashes.store(0, std::memory_order_relaxed); |
85 | 0 | max_probe_length.store(0, std::memory_order_relaxed); |
86 | 0 | total_probe_length.store(0, std::memory_order_relaxed); |
87 | 0 | hashes_bitwise_or.store(0, std::memory_order_relaxed); |
88 | 0 | hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed); |
89 | 0 | hashes_bitwise_xor.store(0, std::memory_order_relaxed); |
90 | 0 | max_reserve.store(0, std::memory_order_relaxed); |
91 | |
92 | 0 | create_time = absl::Now(); |
93 | 0 | weight = stride; |
94 | | // The inliner makes hardcoded skip_count difficult (especially when combined |
95 | | // with LTO). We use the ability to exclude stacks by regex when encoding |
96 | | // instead. |
97 | 0 | depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth, |
98 | 0 | /* skip_count= */ 0); |
99 | 0 | inline_element_size = inline_element_size_value; |
100 | 0 | key_size = key_size_value; |
101 | 0 | value_size = value_size_value; |
102 | 0 | soo_capacity = soo_capacity_value; |
103 | 0 | } |
104 | | |
105 | 0 | static bool ShouldForceSampling() { |
106 | 0 | enum ForceState { |
107 | 0 | kDontForce, |
108 | 0 | kForce, |
109 | 0 | kUninitialized |
110 | 0 | }; |
111 | 0 | ABSL_CONST_INIT static std::atomic<ForceState> global_state{ |
112 | 0 | kUninitialized}; |
113 | 0 | ForceState state = global_state.load(std::memory_order_relaxed); |
114 | 0 | if (ABSL_PREDICT_TRUE(state == kDontForce)) return false; |
115 | | |
116 | 0 | if (state == kUninitialized) { |
117 | 0 | state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)() |
118 | 0 | ? kForce |
119 | 0 | : kDontForce; |
120 | 0 | global_state.store(state, std::memory_order_relaxed); |
121 | 0 | } |
122 | 0 | return state == kForce; |
123 | 0 | } |
124 | | |
125 | | #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
126 | | HashtablezInfoHandle ForcedTrySample(size_t inline_element_size, |
127 | | size_t key_size, size_t value_size, |
128 | | uint16_t soo_capacity) { |
129 | | return HashtablezInfoHandle(SampleSlow(global_next_sample, |
130 | | inline_element_size, key_size, |
131 | | value_size, soo_capacity)); |
132 | | } |
133 | | void TestOnlyRefreshSamplingStateForCurrentThread() { |
134 | | global_next_sample.next_sample = |
135 | | g_hashtablez_sample_parameter.load(std::memory_order_relaxed); |
136 | | global_next_sample.sample_stride = global_next_sample.next_sample; |
137 | | } |
138 | | #else |
139 | 0 | HashtablezInfoHandle ForcedTrySample(size_t, size_t, size_t, uint16_t) { |
140 | 0 | return HashtablezInfoHandle{nullptr}; |
141 | 0 | } |
142 | 0 | void TestOnlyRefreshSamplingStateForCurrentThread() {} |
143 | | #endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE |
144 | | |
145 | | HashtablezInfo* SampleSlow(SamplingState& next_sample, |
146 | | size_t inline_element_size, size_t key_size, |
147 | 0 | size_t value_size, uint16_t soo_capacity) { |
148 | 0 | if (ABSL_PREDICT_FALSE(ShouldForceSampling())) { |
149 | 0 | next_sample.next_sample = 1; |
150 | 0 | const int64_t old_stride = exchange(next_sample.sample_stride, 1); |
151 | 0 | HashtablezInfo* result = GlobalHashtablezSampler().Register( |
152 | 0 | old_stride, inline_element_size, key_size, value_size, soo_capacity); |
153 | 0 | return result; |
154 | 0 | } |
155 | | |
156 | 0 | #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) |
157 | 0 | next_sample = { |
158 | 0 | std::numeric_limits<int64_t>::max(), |
159 | 0 | std::numeric_limits<int64_t>::max(), |
160 | 0 | }; |
161 | 0 | return nullptr; |
162 | | #else |
163 | | bool first = next_sample.next_sample < 0; |
164 | | |
165 | | const int64_t next_stride = g_exponential_biased_generator.GetStride( |
166 | | g_hashtablez_sample_parameter.load(std::memory_order_relaxed)); |
167 | | |
168 | | next_sample.next_sample = next_stride; |
169 | | const int64_t old_stride = exchange(next_sample.sample_stride, next_stride); |
171 | | // Small values of the interval are equivalent to just sampling next time.
171 | | ABSL_ASSERT(next_stride >= 1); |
172 | | |
173 | | // g_hashtablez_enabled can be dynamically flipped; we need to set a threshold
174 | | // low enough that we will start sampling in a reasonable time, so we just use |
175 | | // the default sampling rate. |
176 | | if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr; |
177 | | |
178 | | // We will only be negative on our first count, so we should just retry in |
179 | | // that case. |
180 | | if (first) { |
181 | | if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr; |
182 | | return SampleSlow(next_sample, inline_element_size, key_size, value_size, |
183 | | soo_capacity); |
184 | | } |
185 | | |
186 | | return GlobalHashtablezSampler().Register(old_stride, inline_element_size, |
187 | | key_size, value_size, soo_capacity); |
188 | | #endif |
189 | 0 | } |
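When sampling is not forced, SampleSlow draws each new stride from an exponentially biased generator so that, on average, one in every g_hashtablez_sample_parameter hashtable constructions is sampled while the exact choice stays randomized. Below is a minimal standalone sketch of that geometric countdown idea; the class name GeometricStrideSampler and the use of std::geometric_distribution are illustrative assumptions, not Abseil's ExponentialBiased implementation.

// Sketch only: a simplified geometric stride sampler illustrating the
// countdown scheme used by SampleSlow. Abseil's real generator is
// absl::profiling_internal::ExponentialBiased; this toy stand-in is an
// assumption for illustration.
#include <cstdint>
#include <random>

class GeometricStrideSampler {
 public:
  explicit GeometricStrideSampler(double mean_stride)
      : dist_(1.0 / mean_stride) {}

  // Returns true roughly once every `mean_stride` calls.
  bool ShouldSample() {
    if (--countdown_ > 0) return false;
    // Draw the next stride; a stride of N means "sample again after N calls".
    countdown_ = dist_(gen_) + 1;
    return true;
  }

 private:
  std::mt19937_64 gen_{std::random_device{}()};
  std::geometric_distribution<int64_t> dist_;
  int64_t countdown_ = 0;  // <= 0 forces a fresh draw on the next call.
};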
190 | | |
191 | 0 | void UnsampleSlow(HashtablezInfo* info) { |
192 | 0 | GlobalHashtablezSampler().Unregister(info); |
193 | 0 | } |
194 | | |
195 | 0 | void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { |
196 | 0 | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
197 | 0 | total_probe_length /= 16; |
198 | | #else |
199 | | total_probe_length /= 8; |
200 | | #endif |
201 | 0 | info->total_probe_length.store(total_probe_length, std::memory_order_relaxed); |
202 | 0 | info->num_erases.store(0, std::memory_order_relaxed); |
203 | | // There is only one concurrent writer, so `load` then `store` is sufficient |
204 | | // instead of using `fetch_add`. |
205 | 0 | info->num_rehashes.store( |
206 | 0 | 1 + info->num_rehashes.load(std::memory_order_relaxed), |
207 | 0 | std::memory_order_relaxed); |
208 | 0 | } |
209 | | |
210 | 0 | void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity) { |
211 | 0 | info->max_reserve.store( |
212 | 0 | (std::max)(info->max_reserve.load(std::memory_order_relaxed), |
213 | 0 | target_capacity), |
214 | 0 | std::memory_order_relaxed); |
215 | 0 | } |
216 | | |
217 | 0 | void RecordClearedReservationSlow(HashtablezInfo* info) { |
218 | 0 | info->max_reserve.store(0, std::memory_order_relaxed); |
219 | 0 | } |
220 | | |
221 | | void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, |
222 | 0 | size_t capacity) { |
223 | 0 | info->size.store(size, std::memory_order_relaxed); |
224 | 0 | info->capacity.store(capacity, std::memory_order_relaxed); |
225 | 0 | if (size == 0) { |
226 | | // This is a clear; reset the total/num_erases too.
227 | 0 | info->total_probe_length.store(0, std::memory_order_relaxed); |
228 | 0 | info->num_erases.store(0, std::memory_order_relaxed); |
229 | 0 | } |
230 | 0 | } |
231 | | |
232 | | void RecordInsertSlow(HashtablezInfo* info, size_t hash, |
233 | 0 | size_t distance_from_desired) { |
234 | | // SwissTables probe in groups of 16, so scale this to count item probes and
235 | | // not the offset from the desired slot.
236 | 0 | size_t probe_length = distance_from_desired; |
237 | 0 | #ifdef ABSL_INTERNAL_HAVE_SSE2 |
238 | 0 | probe_length /= 16; |
239 | | #else |
240 | | probe_length /= 8; |
241 | | #endif |
242 | |
243 | 0 | info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed); |
244 | 0 | info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed); |
245 | 0 | info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed); |
246 | 0 | info->max_probe_length.store( |
247 | 0 | std::max(info->max_probe_length.load(std::memory_order_relaxed), |
248 | 0 | probe_length), |
249 | 0 | std::memory_order_relaxed); |
250 | 0 | info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed); |
251 | 0 | info->size.fetch_add(1, std::memory_order_relaxed); |
252 | 0 | } |
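RecordInsertSlow folds each sampled hash into running bitwise AND, OR, and XOR accumulators; a profile consumer can compare the AND and OR values to see how many bits ever varied, which flags hash functions that concentrate their entropy in a few bits. A hedged sketch of such a check follows; the function name LooksLikeWeakHash and the half-the-bits threshold are illustrative assumptions, not part of Abseil.

// Sketch only: a heuristic quality check over the bitwise accumulators that
// RecordInsertSlow maintains. The threshold below is an illustrative
// assumption. Uses std::popcount from C++20 <bit>.
#include <bit>
#include <cstddef>

// Bits set in `hashes_bitwise_or` but clear in `hashes_bitwise_and` actually
// varied across the sampled hashes; a healthy hash should vary in most bits.
bool LooksLikeWeakHash(size_t hashes_bitwise_and, size_t hashes_bitwise_or) {
  const size_t varying_bits = hashes_bitwise_or & ~hashes_bitwise_and;
  const int varied = std::popcount(varying_bits);
  // Assumption: flag hashes where fewer than half of the bits ever changed.
  return varied < static_cast<int>(sizeof(size_t) * 8 / 2);
}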
253 | | |
254 | 0 | void RecordEraseSlow(HashtablezInfo* info) { |
255 | 0 | info->size.fetch_sub(1, std::memory_order_relaxed); |
256 | | // There is only one concurrent writer, so `load` then `store` is sufficient |
257 | | // instead of using `fetch_add`. |
258 | 0 | info->num_erases.store(1 + info->num_erases.load(std::memory_order_relaxed), |
259 | 0 | std::memory_order_relaxed); |
260 | 0 | } |
261 | | |
262 | 0 | void SetHashtablezConfigListener(HashtablezConfigListener l) { |
263 | 0 | g_hashtablez_config_listener.store(l, std::memory_order_release); |
264 | 0 | } |
265 | | |
266 | 0 | bool IsHashtablezEnabled() { |
267 | 0 | return g_hashtablez_enabled.load(std::memory_order_acquire); |
268 | 0 | } |
269 | | |
270 | 0 | void SetHashtablezEnabled(bool enabled) { |
271 | 0 | SetHashtablezEnabledInternal(enabled); |
272 | 0 | TriggerHashtablezConfigListener(); |
273 | 0 | } |
274 | | |
275 | 0 | void SetHashtablezEnabledInternal(bool enabled) { |
276 | 0 | g_hashtablez_enabled.store(enabled, std::memory_order_release); |
277 | 0 | } |
278 | | |
279 | 0 | int32_t GetHashtablezSampleParameter() { |
280 | 0 | return g_hashtablez_sample_parameter.load(std::memory_order_acquire); |
281 | 0 | } |
282 | | |
283 | 0 | void SetHashtablezSampleParameter(int32_t rate) { |
284 | 0 | SetHashtablezSampleParameterInternal(rate); |
285 | 0 | TriggerHashtablezConfigListener(); |
286 | 0 | } |
287 | | |
288 | 0 | void SetHashtablezSampleParameterInternal(int32_t rate) { |
289 | 0 | if (rate > 0) { |
290 | 0 | g_hashtablez_sample_parameter.store(rate, std::memory_order_release); |
291 | 0 | } else { |
292 | 0 | ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld", |
293 | 0 | static_cast<long long>(rate)); // NOLINT(runtime/int) |
294 | 0 | } |
295 | 0 | } |
296 | | |
297 | 0 | size_t GetHashtablezMaxSamples() { |
298 | 0 | return GlobalHashtablezSampler().GetMaxSamples(); |
299 | 0 | } |
300 | | |
301 | 0 | void SetHashtablezMaxSamples(size_t max) { |
302 | 0 | SetHashtablezMaxSamplesInternal(max); |
303 | 0 | TriggerHashtablezConfigListener(); |
304 | 0 | } |
305 | | |
306 | 0 | void SetHashtablezMaxSamplesInternal(size_t max) { |
307 | 0 | if (max > 0) { |
308 | 0 | GlobalHashtablezSampler().SetMaxSamples(max); |
309 | 0 | } else { |
310 | 0 | ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: 0"); |
311 | 0 | } |
312 | 0 | } |
313 | | |
314 | | } // namespace container_internal |
315 | | ABSL_NAMESPACE_END |
316 | | } // namespace absl |
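
For context on how the configuration entry points in this file are typically exercised, here is a minimal sketch of enabling sampling at process start using only the setters defined above; the specific values (1 << 10 and 4096) are illustrative assumptions, not recommended defaults.

// Sketch only: enabling hashtablez sampling via the setters defined in this
// file. The parameter values here are illustrative assumptions.
#include "absl/container/internal/hashtablez_sampler.h"

void EnableHashtableProfilingForThisProcess() {
  // Sample roughly one in every 1024 hashtable constructions.
  absl::container_internal::SetHashtablezSampleParameter(1 << 10);
  // Keep at most 4096 live samples in the global recorder.
  absl::container_internal::SetHashtablezMaxSamples(4096);
  // Flip the global enable bit last so the earlier settings are in place.
  absl::container_internal::SetHashtablezEnabled(true);
}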