/src/abseil-cpp/absl/flags/internal/flag.cc
Line | Count | Source |
1 | | // |
2 | | // Copyright 2019 The Abseil Authors. |
3 | | // |
4 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | // you may not use this file except in compliance with the License. |
6 | | // You may obtain a copy of the License at |
7 | | // |
8 | | // https://www.apache.org/licenses/LICENSE-2.0 |
9 | | // |
10 | | // Unless required by applicable law or agreed to in writing, software |
11 | | // distributed under the License is distributed on an "AS IS" BASIS, |
12 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | // See the License for the specific language governing permissions and |
14 | | // limitations under the License. |
15 | | |
16 | | #include "absl/flags/internal/flag.h" |
17 | | |
18 | | #include <assert.h> |
19 | | #include <stddef.h> |
20 | | #include <stdint.h> |
21 | | #include <string.h> |
22 | | |
23 | | #include <array> |
24 | | #include <atomic> |
25 | | #include <cstring> |
26 | | #include <memory> |
27 | | #include <string> |
28 | | #include <typeinfo> |
29 | | #include <utility> |
30 | | #include <vector> |
31 | | |
32 | | #include "absl/base/attributes.h" |
33 | | #include "absl/base/call_once.h" |
34 | | #include "absl/base/casts.h" |
35 | | #include "absl/base/config.h" |
36 | | #include "absl/base/const_init.h" |
37 | | #include "absl/base/dynamic_annotations.h" |
38 | | #include "absl/base/fast_type_id.h" |
39 | | #include "absl/base/no_destructor.h" |
40 | | #include "absl/base/optimization.h" |
41 | | #include "absl/base/thread_annotations.h" |
42 | | #include "absl/flags/config.h" |
43 | | #include "absl/flags/internal/commandlineflag.h" |
44 | | #include "absl/flags/usage_config.h" |
45 | | #include "absl/memory/memory.h" |
46 | | #include "absl/strings/str_cat.h" |
47 | | #include "absl/strings/string_view.h" |
48 | | #include "absl/synchronization/mutex.h" |
49 | | |
50 | | namespace absl { |
51 | | ABSL_NAMESPACE_BEGIN |
52 | | namespace flags_internal { |
53 | | |
54 | | // The help message indicating that the commandline flag has been stripped. It |
55 | | // will not show up when doing "-help" and its variants. The flag is stripped |
56 | | // if ABSL_FLAGS_STRIP_HELP is set to 1 before including absl/flags/flag.h |
57 | | const char kStrippedFlagHelp[] = "\001\002\003\004 (unknown) \004\003\002\001"; |
58 | | |
59 | | namespace { |
60 | | |
61 | | // Currently we only validate flag values for user-defined flag types. |
62 | 0 | bool ShouldValidateFlagValue(FlagFastTypeId flag_type_id) { |
63 | 0 | #define DONT_VALIDATE(T, _) \ |
64 | 0 | if (flag_type_id == absl::FastTypeId<T>()) return false; |
65 | 0 | ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES(DONT_VALIDATE) |
66 | 0 | #undef DONT_VALIDATE |
67 | | |
68 | 0 | return true; |
69 | 0 | } |
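
Note: the DONT_VALIDATE X-macro expands ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES into one early return per built-in flag type, so only user-defined types reach the round-trip validation in FlagImpl::Write(). A hand-expanded sketch of the same check for just two entries (hypothetical helper name; the real list covers every supported built-in):

    // Equivalent of the macro expansion, shown for bool and int only.
    bool ShouldValidateSketch(FlagFastTypeId id) {
      if (id == absl::FastTypeId<bool>()) return false;  // built-in: skip
      if (id == absl::FastTypeId<int>()) return false;   // built-in: skip
      // ... one such check per remaining supported type ...
      return true;  // user-defined type: validate on Write()
    }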
70 | | |
71 | | // RAII helper used to temporarily unlock and relock `absl::Mutex`. |
72 | | // This is used when we need to ensure that locks are released while |
73 | | // invoking user supplied callbacks and then reacquired, since callbacks may |
74 | | // need to acquire these locks themselves. |
75 | | class MutexRelock { |
76 | | public: |
77 | 0 | explicit MutexRelock(absl::Mutex& mu) : mu_(mu) { mu_.unlock(); } |
78 | 0 | ~MutexRelock() { mu_.lock(); } |
79 | | |
80 | | MutexRelock(const MutexRelock&) = delete; |
81 | | MutexRelock& operator=(const MutexRelock&) = delete; |
82 | | |
83 | | private: |
84 | | absl::Mutex& mu_; |
85 | | }; |
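
Note: a minimal usage sketch of the class above (the function name is hypothetical): the constructor releases the mutex, the callback runs unlocked and may acquire the same mutex itself, and the destructor reacquires it on every exit path.

    // Requires: `data_guard` is held by the caller on entry.
    void InvokeUnlockedSketch(absl::Mutex& data_guard, void (*cb)()) {
      MutexRelock relock(data_guard);  // unlock() runs in the constructor
      cb();                            // callback executes without the lock
    }                                  // lock() runs in ~MutexRelock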
86 | | |
87 | | // This is a freelist of leaked flag values and a guard for its access. |
88 | | // When we can't guarantee it is safe to reuse the memory for flag values, |
89 | | // we move the memory to the freelist where it lives indefinitely, so it can |
90 | | // still be safely accessed. This also prevents leak checkers from complaining |
91 | | // about the leaked memory that can no longer be accessed through any pointer. |
92 | 0 | absl::Mutex& FreelistMutex() { |
93 | 0 | static absl::NoDestructor<absl::Mutex> mutex; |
94 | 0 | return *mutex; |
95 | 0 | } |
96 | | ABSL_CONST_INIT std::vector<void*>* s_freelist ABSL_GUARDED_BY(FreelistMutex()) |
97 | | ABSL_PT_GUARDED_BY(FreelistMutex()) = nullptr; |
98 | | |
99 | 0 | void AddToFreelist(void* p) { |
100 | 0 | absl::MutexLock l(FreelistMutex()); |
101 | 0 | if (!s_freelist) { |
102 | 0 | s_freelist = new std::vector<void*>; |
103 | 0 | } |
104 | 0 | s_freelist->push_back(p); |
105 | 0 | } |
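
Note: why retire rather than delete — once a value has been published for unprotected reads, another thread may still hold the old pointer, so freeing it would risk a use-after-free. Keeping the pointer in an always-reachable list keeps the memory valid for the life of the process and keeps leak checkers quiet. A minimal sketch of the same pattern (hypothetical names; the mutex guarding shown above is elided for brevity):

    #include <vector>

    std::vector<void*>& RetiredValues() {
      // Intentionally leaked so the list, and everything in it, stays
      // reachable (and thus valid and unreported) for the process lifetime.
      static std::vector<void*>* retired = new std::vector<void*>;
      return *retired;
    }

    void Retire(void* old_value) { RetiredValues().push_back(old_value); }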
106 | | |
107 | | } // namespace |
108 | | |
109 | | /////////////////////////////////////////////////////////////////////////////// |
110 | | |
111 | 0 | uint64_t NumLeakedFlagValues() { |
112 | 0 | absl::MutexLock l(FreelistMutex()); |
113 | 0 | return s_freelist == nullptr ? 0u : s_freelist->size(); |
114 | 0 | } |
115 | | |
116 | | /////////////////////////////////////////////////////////////////////////////// |
117 | | // Persistent state of the flag data. |
118 | | |
119 | | class FlagImpl; |
120 | | |
121 | | class FlagState : public flags_internal::FlagStateInterface { |
122 | | public: |
123 | | template <typename V> |
124 | | FlagState(FlagImpl& flag_impl, const V& v, bool modified, |
125 | | bool on_command_line, int64_t counter) |
126 | 0 | : flag_impl_(flag_impl), |
127 | 0 | value_(v), |
128 | 0 | modified_(modified), |
129 | 0 | on_command_line_(on_command_line), |
130 | 0 | counter_(counter) {} |
Unexecuted instantiation: absl::flags_internal::FlagState::FlagState<long>(absl::flags_internal::FlagImpl&, long const&, bool, bool, long)
Unexecuted instantiation: absl::flags_internal::FlagState::FlagState<void*>(absl::flags_internal::FlagImpl&, void* const&, bool, bool, long)
131 | | |
132 | 0 | ~FlagState() override { |
133 | 0 | if (flag_impl_.ValueStorageKind() != FlagValueStorageKind::kHeapAllocated && |
134 | 0 | flag_impl_.ValueStorageKind() != FlagValueStorageKind::kSequenceLocked) |
135 | 0 | return; |
136 | 0 | flags_internal::Delete(flag_impl_.op_, value_.heap_allocated); |
137 | 0 | } |
138 | | |
139 | | private: |
140 | | friend class FlagImpl; |
141 | | |
142 | | // Restores the flag to the saved state. |
143 | 0 | void Restore() && override { |
144 | 0 | if (!std::move(flag_impl_).RestoreState(*this)) return; |
145 | | |
146 | 0 | ABSL_INTERNAL_LOG(INFO, |
147 | 0 | absl::StrCat("Restore saved value of ", flag_impl_.Name(), |
148 | 0 | " to: ", flag_impl_.CurrentValue())); |
149 | 0 | } |
150 | | |
151 | | // Flag and saved flag data. |
152 | | FlagImpl& flag_impl_; |
153 | | union SavedValue { |
154 | 0 | explicit SavedValue(void* v) : heap_allocated(v) {} |
155 | 0 | explicit SavedValue(int64_t v) : one_word(v) {} |
156 | | |
157 | | void* heap_allocated; |
158 | | int64_t one_word; |
159 | | } value_; |
160 | | bool modified_; |
161 | | bool on_command_line_; |
162 | | int64_t counter_; |
163 | | }; |
164 | | |
165 | | /////////////////////////////////////////////////////////////////////////////// |
166 | | // Flag implementation, which does not depend on flag value type. |
167 | | |
168 | 0 | DynValueDeleter::DynValueDeleter(FlagOpFn op_arg) : op(op_arg) {} |
169 | | |
170 | 0 | void DynValueDeleter::operator()(void* ptr) const { |
171 | 0 | if (op == nullptr) return; |
172 | | |
173 | 0 | Delete(op, ptr); |
174 | 0 | } |
175 | | |
176 | 0 | MaskedPointer::MaskedPointer(ptr_t rhs, bool is_candidate) : ptr_(rhs) { |
177 | 0 | if (is_candidate) { |
178 | 0 | ApplyMask(kUnprotectedReadCandidate); |
179 | 0 | } |
180 | 0 | } |
181 | | |
182 | 0 | bool MaskedPointer::IsUnprotectedReadCandidate() const { |
183 | 0 | return CheckMask(kUnprotectedReadCandidate); |
184 | 0 | } |
185 | | |
186 | 0 | bool MaskedPointer::HasBeenRead() const { return CheckMask(kHasBeenRead); } |
187 | | |
188 | 0 | void MaskedPointer::Set(FlagOpFn op, const void* src, bool is_candidate) { |
189 | 0 | flags_internal::Copy(op, src, Ptr()); |
190 | 0 | if (is_candidate) { |
191 | 0 | ApplyMask(kUnprotectedReadCandidate); |
192 | 0 | } |
193 | 0 | } |
194 | 0 | void MaskedPointer::MarkAsRead() { ApplyMask(kHasBeenRead); } |
195 | | |
196 | 0 | void MaskedPointer::ApplyMask(mask_t mask) { |
197 | 0 | ptr_ = reinterpret_cast<ptr_t>(reinterpret_cast<mask_t>(ptr_) | mask); |
198 | 0 | } |
199 | 0 | bool MaskedPointer::CheckMask(mask_t mask) const { |
200 | 0 | return (reinterpret_cast<mask_t>(ptr_) & mask) != 0; |
201 | 0 | } |
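
Note: MaskedPointer relies on standard low-bit pointer tagging — heap allocations are at least word-aligned, so the two least significant bits of the pointer are always zero and can carry the two marks tested above. A standalone sketch (the mask values are assumptions mirroring the names in this file):

    #include <cstdint>

    using mask_t = uintptr_t;
    constexpr mask_t kUnprotectedReadCandidate = 0x1;  // assumed bit value
    constexpr mask_t kHasBeenRead = 0x2;               // assumed bit value

    void* ApplyMask(void* p, mask_t mask) {
      return reinterpret_cast<void*>(reinterpret_cast<mask_t>(p) | mask);
    }
    bool CheckMask(void* p, mask_t mask) {
      return (reinterpret_cast<mask_t>(p) & mask) != 0;
    }
    void* StripMasks(void* p) {  // recover the real pointer before use
      return reinterpret_cast<void*>(
          reinterpret_cast<mask_t>(p) &
          ~(kUnprotectedReadCandidate | kHasBeenRead));
    }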
202 | | |
203 | 1 | void FlagImpl::Init() { |
204 | 1 | new (&data_guard_) absl::Mutex; |
205 | | |
206 | 1 | auto def_kind = static_cast<FlagDefaultKind>(def_kind_); |
207 | | |
208 | 1 | switch (ValueStorageKind()) { |
209 | 1 | case FlagValueStorageKind::kValueAndInitBit: |
210 | 1 | case FlagValueStorageKind::kOneWordAtomic: { |
211 | 1 | alignas(int64_t) std::array<char, sizeof(int64_t)> buf{}; |
212 | 1 | if (def_kind == FlagDefaultKind::kGenFunc) { |
213 | 0 | (*default_value_.gen_func)(buf.data()); |
214 | 1 | } else { |
215 | 1 | assert(def_kind != FlagDefaultKind::kDynamicValue); |
216 | 1 | std::memcpy(buf.data(), &default_value_, Sizeof(op_)); |
217 | 1 | } |
218 | 1 | if (ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit) { |
219 | | // We presume here the memory layout of the FlagValueAndInitBit struct. |
220 | 1 | uint8_t initialized = 1; |
221 | 1 | std::memcpy(buf.data() + Sizeof(op_), &initialized, |
222 | 1 | sizeof(initialized)); |
223 | 1 | } |
224 | | // Type can contain valid uninitialized bits, e.g. padding. |
225 | 1 | ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf.data(), buf.size()); |
226 | 1 | OneWordValue().store(absl::bit_cast<int64_t>(buf), |
227 | 1 | std::memory_order_release); |
228 | 1 | break; |
229 | 1 | } |
230 | 0 | case FlagValueStorageKind::kSequenceLocked: { |
231 | | // For this storage kind the default_value_ always points to gen_func |
232 | | // during initialization. |
233 | 0 | assert(def_kind == FlagDefaultKind::kGenFunc); |
234 | 0 | (*default_value_.gen_func)(AtomicBufferValue()); |
235 | 0 | break; |
236 | 0 | } |
237 | 0 | case FlagValueStorageKind::kHeapAllocated: |
238 | | // For this storage kind the default_value_ always points to gen_func |
239 | | // during initialization. |
240 | 0 | assert(def_kind == FlagDefaultKind::kGenFunc); |
241 | | // Flag value initially points to the internal buffer. |
242 | 0 | MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire); |
243 | 0 | (*default_value_.gen_func)(ptr_value.Ptr()); |
244 | | // Default value is a candidate for an unprotected read. |
245 | 0 | PtrStorage().store(MaskedPointer(ptr_value.Ptr(), true), |
246 | 0 | std::memory_order_release); |
247 | 0 | break; |
248 | 1 | } |
249 | 1 | seq_lock_.MarkInitialized(); |
250 | 1 | } |
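
Note: for the kValueAndInitBit case, Init() assembles the packed word with memcpy because it presumes the layout of FlagValueAndInitBit — the value first, then a one-byte init marker, both fitting in a single int64_t so that one atomic load observes the value and the initialization state together. A sketch of the assumed layout (field names hypothetical):

    #include <cstdint>

    template <typename T>
    struct ValueAndInitBitSketch {  // mirrors the presumed layout
      T value;       // the flag value itself (bool, small int, ...)
      uint8_t init;  // written as 1 by Init(); 0 means "not initialized"
    };

    // Both fields must pack into the single word held by OneWordValue().
    static_assert(sizeof(ValueAndInitBitSketch<bool>) <= sizeof(int64_t),
                  "value plus init bit must fit in one atomic word");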
251 | | |
252 | 1 | absl::Mutex& FlagImpl::DataGuard() const { |
253 | 1 | absl::call_once(const_cast<FlagImpl*>(this)->init_control_, &FlagImpl::Init, |
254 | 1 | const_cast<FlagImpl*>(this)); |
255 | | |
256 | | // data_guard_ is initialized inside Init. |
257 | 1 | return *reinterpret_cast<absl::Mutex*>(&data_guard_); |
258 | 1 | } |
259 | | |
260 | | void FlagImpl::AssertValidType(FlagFastTypeId rhs_type_id, |
261 | 5.93k | const std::type_info* (*gen_rtti)()) const { |
262 | 5.93k | FlagFastTypeId lhs_type_id = flags_internal::FastTypeId(op_); |
263 | | |
264 | | // `rhs_type_id` is the fast type id corresponding to the declaration |
265 | | // visible at the call site. `lhs_type_id` is the fast type id |
266 | | // corresponding to the type specified in flag definition. They must match |
267 | | // for this operation to be well-defined. |
268 | 5.93k | if (ABSL_PREDICT_TRUE(lhs_type_id == rhs_type_id)) return; |
269 | | |
270 | 0 | const std::type_info* lhs_runtime_type_id = |
271 | 0 | flags_internal::RuntimeTypeId(op_); |
272 | 0 | const std::type_info* rhs_runtime_type_id = (*gen_rtti)(); |
273 | |
274 | 0 | if (lhs_runtime_type_id == rhs_runtime_type_id) return; |
275 | | |
276 | 0 | #ifdef ABSL_INTERNAL_HAS_RTTI |
277 | 0 | if (*lhs_runtime_type_id == *rhs_runtime_type_id) return; |
278 | 0 | #endif |
279 | | |
280 | 0 | ABSL_INTERNAL_LOG( |
281 | 0 | FATAL, absl::StrCat("Flag '", Name(), |
282 | 0 | "' is defined as one type and declared as another")); |
283 | 0 | } |
284 | | |
285 | 0 | std::unique_ptr<void, DynValueDeleter> FlagImpl::MakeInitValue() const { |
286 | 0 | void* res = nullptr; |
287 | 0 | switch (DefaultKind()) { |
288 | 0 | case FlagDefaultKind::kDynamicValue: |
289 | 0 | res = flags_internal::Clone(op_, default_value_.dynamic_value); |
290 | 0 | break; |
291 | 0 | case FlagDefaultKind::kGenFunc: |
292 | 0 | res = flags_internal::Alloc(op_); |
293 | 0 | (*default_value_.gen_func)(res); |
294 | 0 | break; |
295 | 0 | default: |
296 | 0 | res = flags_internal::Clone(op_, &default_value_); |
297 | 0 | break; |
298 | 0 | } |
299 | 0 | return {res, DynValueDeleter{op_}}; |
300 | 0 | } |
301 | | |
302 | 0 | void FlagImpl::StoreValue(const void* src, ValueSource source) { |
303 | 0 | switch (ValueStorageKind()) { |
304 | 0 | case FlagValueStorageKind::kValueAndInitBit: |
305 | 0 | case FlagValueStorageKind::kOneWordAtomic: { |
306 | | // Load the current value to avoid setting the 'init' bit manually. |
307 | 0 | int64_t one_word_val = OneWordValue().load(std::memory_order_acquire); |
308 | 0 | std::memcpy(&one_word_val, src, Sizeof(op_)); |
309 | 0 | OneWordValue().store(one_word_val, std::memory_order_release); |
310 | 0 | seq_lock_.IncrementModificationCount(); |
311 | 0 | break; |
312 | 0 | } |
313 | 0 | case FlagValueStorageKind::kSequenceLocked: { |
314 | 0 | seq_lock_.Write(AtomicBufferValue(), src, Sizeof(op_)); |
315 | 0 | break; |
316 | 0 | } |
317 | 0 | case FlagValueStorageKind::kHeapAllocated: |
318 | 0 | MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire); |
319 | |
320 | 0 | if (ptr_value.IsUnprotectedReadCandidate() && ptr_value.HasBeenRead()) { |
321 | | // If the current value is a candidate for an unprotected read and was |
322 | | // already read at least once, follow-up reads (if any) are done without |
323 | | // mutex protection. We can't guarantee it is safe to reuse this memory, |
324 | | // since it may have been accessed by another thread concurrently, so |
325 | | // instead we move the memory to a freelist, where it can still be safely |
326 | | // accessed, and allocate new memory for the new value. |
327 | 0 | AddToFreelist(ptr_value.Ptr()); |
328 | 0 | ptr_value = MaskedPointer(Clone(op_, src), source == kCommandLine); |
329 | 0 | } else { |
330 | | // The current value either was set programmatically or was never read. |
331 | | // We can reuse the memory since all accesses to this value (if any) |
332 | | // were protected by the mutex. That said, if the new value comes from |
333 | | // the command line, it now becomes a candidate for an unprotected read. |
334 | 0 | ptr_value.Set(op_, src, source == kCommandLine); |
335 | 0 | } |
336 | |
337 | 0 | PtrStorage().store(ptr_value, std::memory_order_release); |
338 | 0 | seq_lock_.IncrementModificationCount(); |
339 | 0 | break; |
340 | 0 | } |
341 | 0 | modified_ = true; |
342 | 0 | InvokeCallback(); |
343 | 0 | } |
344 | | |
345 | 16 | absl::string_view FlagImpl::Name() const { return name_; } |
346 | | |
347 | 0 | absl::string_view FlagImpl::TypeName() const { return type_name_; } |
348 | | |
349 | 16 | std::string FlagImpl::Filename() const { |
350 | 16 | return flags_internal::GetUsageConfig().normalize_filename(filename_); |
351 | 16 | } |
352 | | |
353 | 0 | std::string FlagImpl::Help() const { |
354 | 0 | return HelpSourceKind() == FlagHelpKind::kLiteral ? help_.literal |
355 | 0 | : help_.gen_func(); |
356 | 0 | } |
357 | | |
358 | 0 | FlagFastTypeId FlagImpl::TypeId() const { |
359 | 0 | return flags_internal::FastTypeId(op_); |
360 | 0 | } |
361 | | |
362 | 0 | int64_t FlagImpl::ModificationCount() const { |
363 | 0 | return seq_lock_.ModificationCount(); |
364 | 0 | } |
365 | | |
366 | 0 | bool FlagImpl::IsSpecifiedOnCommandLine() const { |
367 | 0 | absl::MutexLock l(DataGuard()); |
368 | 0 | return on_command_line_; |
369 | 0 | } |
370 | | |
371 | 0 | std::string FlagImpl::DefaultValue() const { |
372 | 0 | absl::MutexLock l(DataGuard()); |
373 | |
374 | 0 | auto obj = MakeInitValue(); |
375 | 0 | return flags_internal::Unparse(op_, obj.get()); |
376 | 0 | } |
377 | | |
378 | 0 | std::string FlagImpl::CurrentValue() const { |
379 | 0 | auto& guard = DataGuard(); // Make sure flag initialized |
380 | 0 | switch (ValueStorageKind()) { |
381 | 0 | case FlagValueStorageKind::kValueAndInitBit: |
382 | 0 | case FlagValueStorageKind::kOneWordAtomic: { |
383 | 0 | const auto one_word_val = |
384 | 0 | absl::bit_cast<std::array<char, sizeof(int64_t)>>( |
385 | 0 | OneWordValue().load(std::memory_order_acquire)); |
386 | 0 | return flags_internal::Unparse(op_, one_word_val.data()); |
387 | 0 | } |
388 | 0 | case FlagValueStorageKind::kSequenceLocked: { |
389 | 0 | std::unique_ptr<void, DynValueDeleter> cloned(flags_internal::Alloc(op_), |
390 | 0 | DynValueDeleter{op_}); |
391 | 0 | ReadSequenceLockedData(cloned.get()); |
392 | 0 | return flags_internal::Unparse(op_, cloned.get()); |
393 | 0 | } |
394 | 0 | case FlagValueStorageKind::kHeapAllocated: { |
395 | 0 | absl::MutexLock l(guard); |
396 | 0 | return flags_internal::Unparse( |
397 | 0 | op_, PtrStorage().load(std::memory_order_acquire).Ptr()); |
398 | 0 | } |
399 | 0 | } |
400 | | |
401 | 0 | return ""; |
402 | 0 | } |
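
Note: CurrentValue(), Read(), and StoreValue() all dispatch on the same four storage kinds. The mapping from value type to storage kind is chosen in flag.h; the summary below is an inference from this file, not the authoritative selection logic:

    // Assumed mapping of value types to FlagValueStorageKind:
    //   kValueAndInitBit - types small enough to share one atomic word
    //                      with the init marker (e.g. bool)
    //   kOneWordAtomic   - trivially copyable types up to 8 bytes
    //   kSequenceLocked  - larger trivially copyable types (seqlock reads)
    //   kHeapAllocated   - everything else (e.g. std::string), read under
    //                      the data guard or, once published, unprotected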
403 | | |
404 | 0 | void FlagImpl::SetCallback(const FlagCallbackFunc mutation_callback) { |
405 | 0 | absl::MutexLock l(DataGuard()); |
406 | |
407 | 0 | if (callback_ == nullptr) { |
408 | 0 | callback_ = new FlagCallback; |
409 | 0 | } |
410 | 0 | callback_->func = mutation_callback; |
411 | |
412 | 0 | InvokeCallback(); |
413 | 0 | } |
414 | | |
415 | 0 | void FlagImpl::InvokeCallback() const { |
416 | 0 | if (!callback_) return; |
417 | | |
418 | | // Make a copy of the C-style function pointer that we are about to invoke |
419 | | // before we release the lock guarding it. |
420 | 0 | FlagCallbackFunc cb = callback_->func; |
421 | | |
422 | | // If the flag has a mutation callback, this function invokes it. While the |
423 | | // callback is being invoked, the flag's primary mutex is unlocked, and it |
424 | | // is re-locked after the call to the callback completes. Callback |
425 | | // invocation is instead guarded by the flag's secondary mutex, which |
426 | | // prevents concurrent callback invocations. Note that it is possible for |
427 | | // another thread to grab the primary lock and update the flag's value at |
428 | | // any time during the callback invocation. This is by design: the callback |
429 | | // can get the value of the flag if necessary, but that value might differ |
430 | | // from the one that initiated the callback, and it can differ again by the |
431 | | // time the callback invocation completes. Requires that the primary lock |
432 | | // be held in exclusive mode; it may be released and reacquired here. |
433 | 0 | MutexRelock relock(DataGuard()); |
434 | 0 | absl::MutexLock lock(callback_->guard); |
435 | 0 | cb(); |
436 | 0 | } |
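
Note: for illustration, Abseil's own code registers mutation callbacks through the flag registrar's OnUpdate extension, which ultimately lands in SetCallback() above. This is an internal interface, so the sketch below (with a hypothetical flag) is illustrative only:

    #include "absl/flags/flag.h"

    ABSL_FLAG(bool, verbose_sketch, false, "enable verbose output")
        .OnUpdate([] {
          // Invoked by InvokeCallback() after every successful store, with
          // the primary data guard released and the callback mutex held.
        });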
437 | | |
438 | 0 | std::unique_ptr<FlagStateInterface> FlagImpl::SaveState() { |
439 | 0 | absl::MutexLock l(DataGuard()); |
440 | |
441 | 0 | bool modified = modified_; |
442 | 0 | bool on_command_line = on_command_line_; |
443 | 0 | switch (ValueStorageKind()) { |
444 | 0 | case FlagValueStorageKind::kValueAndInitBit: |
445 | 0 | case FlagValueStorageKind::kOneWordAtomic: { |
446 | 0 | return absl::make_unique<FlagState>( |
447 | 0 | *this, OneWordValue().load(std::memory_order_acquire), modified, |
448 | 0 | on_command_line, ModificationCount()); |
449 | 0 | } |
450 | 0 | case FlagValueStorageKind::kSequenceLocked: { |
451 | 0 | void* cloned = flags_internal::Alloc(op_); |
452 | | // Read is guaranteed to be successful because we hold the lock. |
453 | 0 | bool success = |
454 | 0 | seq_lock_.TryRead(cloned, AtomicBufferValue(), Sizeof(op_)); |
455 | 0 | assert(success); |
456 | 0 | static_cast<void>(success); |
457 | 0 | return absl::make_unique<FlagState>(*this, cloned, modified, |
458 | 0 | on_command_line, ModificationCount()); |
459 | 0 | } |
460 | 0 | case FlagValueStorageKind::kHeapAllocated: { |
461 | 0 | return absl::make_unique<FlagState>( |
462 | 0 | *this, |
463 | 0 | flags_internal::Clone( |
464 | 0 | op_, PtrStorage().load(std::memory_order_acquire).Ptr()), |
465 | 0 | modified, on_command_line, ModificationCount()); |
466 | 0 | } |
467 | 0 | } |
468 | 0 | return nullptr; |
469 | 0 | } |
470 | | |
471 | 0 | bool FlagImpl::RestoreState(const FlagState& flag_state) { |
472 | 0 | absl::MutexLock l(DataGuard()); |
473 | 0 | if (flag_state.counter_ == ModificationCount()) { |
474 | 0 | return false; |
475 | 0 | } |
476 | | |
477 | 0 | switch (ValueStorageKind()) { |
478 | 0 | case FlagValueStorageKind::kValueAndInitBit: |
479 | 0 | case FlagValueStorageKind::kOneWordAtomic: |
480 | 0 | StoreValue(&flag_state.value_.one_word, kProgrammaticChange); |
481 | 0 | break; |
482 | 0 | case FlagValueStorageKind::kSequenceLocked: |
483 | 0 | case FlagValueStorageKind::kHeapAllocated: |
484 | 0 | StoreValue(flag_state.value_.heap_allocated, kProgrammaticChange); |
485 | 0 | break; |
486 | 0 | } |
487 | | |
488 | 0 | modified_ = flag_state.modified_; |
489 | 0 | on_command_line_ = flag_state.on_command_line_; |
490 | |
491 | 0 | return true; |
492 | 0 | } |
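
Note: this SaveState()/RestoreState() pair is what powers absl::FlagSaver (absl/flags/reflection.h) — its constructor snapshots every registered flag and its destructor restores them. A typical use, with a hypothetical flag:

    #include "absl/flags/flag.h"
    #include "absl/flags/reflection.h"

    ABSL_FLAG(int, port_sketch, 8080, "port to listen on");

    void RunTestWithOverride() {
      absl::FlagSaver saver;                    // snapshots all flags
      absl::SetFlag(&FLAGS_port_sketch, 9000);  // temporary override
      // ... code under test observes port_sketch == 9000 ...
    }                                           // ~FlagSaver restores 8080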
493 | | |
494 | | template <typename StorageT> |
495 | 2 | StorageT* FlagImpl::OffsetValue() const { |
496 | 2 | char* p = reinterpret_cast<char*>(const_cast<FlagImpl*>(this)); |
497 | | // The offset is deduced via the flag value type specific op_. |
498 | 2 | ptrdiff_t offset = flags_internal::ValueOffset(op_); |
499 | | |
500 | 2 | return reinterpret_cast<StorageT*>(p + offset); |
501 | 2 | } |
Unexecuted instantiation: std::__1::atomic<unsigned long>* absl::flags_internal::FlagImpl::OffsetValue<std::__1::atomic<unsigned long> >() const
Instantiation (per-line counts identical to lines 495-501 above, count 2): absl::flags_internal::FlagOneWordValue* absl::flags_internal::FlagImpl::OffsetValue<absl::flags_internal::FlagOneWordValue>() const
Unexecuted instantiation: absl::flags_internal::FlagMaskedPointerValue* absl::flags_internal::FlagImpl::OffsetValue<absl::flags_internal::FlagMaskedPointerValue>() const
502 | | |
503 | 0 | std::atomic<uint64_t>* FlagImpl::AtomicBufferValue() const { |
504 | 0 | assert(ValueStorageKind() == FlagValueStorageKind::kSequenceLocked); |
505 | 0 | return OffsetValue<std::atomic<uint64_t>>(); |
506 | 0 | } |
507 | | |
508 | 2 | std::atomic<int64_t>& FlagImpl::OneWordValue() const { |
509 | 2 | assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic || |
510 | 2 | ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit); |
511 | 2 | return OffsetValue<FlagOneWordValue>()->value; |
512 | 2 | } |
513 | | |
514 | 0 | std::atomic<MaskedPointer>& FlagImpl::PtrStorage() const { |
515 | 0 | assert(ValueStorageKind() == FlagValueStorageKind::kHeapAllocated); |
516 | 0 | return OffsetValue<FlagMaskedPointerValue>()->value; |
517 | 0 | } |
518 | | |
519 | | // Attempts to parse the supplied `value` string using the flag's parsing |
520 | | // routine. If parsing is successful, this function returns the newly |
521 | | // parsed value. If any error is encountered in either step, the error |
522 | | // message is stored in 'err' and nullptr is returned. |
523 | | std::unique_ptr<void, DynValueDeleter> FlagImpl::TryParse( |
524 | 0 | absl::string_view value, std::string& err) const { |
525 | 0 | std::unique_ptr<void, DynValueDeleter> tentative_value = MakeInitValue(); |
526 | |
527 | 0 | std::string parse_err; |
528 | 0 | if (!flags_internal::Parse(op_, value, tentative_value.get(), &parse_err)) { |
529 | 0 | absl::string_view err_sep = parse_err.empty() ? "" : "; "; |
530 | 0 | err = absl::StrCat("Illegal value '", value, "' specified for flag '", |
531 | 0 | Name(), "'", err_sep, parse_err); |
532 | 0 | return nullptr; |
533 | 0 | } |
534 | | |
535 | 0 | return tentative_value; |
536 | 0 | } |
537 | | |
538 | 0 | void FlagImpl::Read(void* dst) const { |
539 | 0 | auto& guard = DataGuard(); // Make sure flag initialized |
540 | 0 | switch (ValueStorageKind()) { |
541 | 0 | case FlagValueStorageKind::kValueAndInitBit: |
542 | 0 | case FlagValueStorageKind::kOneWordAtomic: { |
543 | 0 | const int64_t one_word_val = |
544 | 0 | OneWordValue().load(std::memory_order_acquire); |
545 | 0 | std::memcpy(dst, &one_word_val, Sizeof(op_)); |
546 | 0 | break; |
547 | 0 | } |
548 | 0 | case FlagValueStorageKind::kSequenceLocked: { |
549 | 0 | ReadSequenceLockedData(dst); |
550 | 0 | break; |
551 | 0 | } |
552 | 0 | case FlagValueStorageKind::kHeapAllocated: { |
553 | 0 | absl::MutexLock l(guard); |
554 | 0 | MaskedPointer ptr_value = PtrStorage().load(std::memory_order_acquire); |
555 | |
556 | 0 | flags_internal::CopyConstruct(op_, ptr_value.Ptr(), dst); |
557 | | |
558 | | // For unprotected read candidates, mark that the value has been read. |
559 | 0 | if (ptr_value.IsUnprotectedReadCandidate() && !ptr_value.HasBeenRead()) { |
560 | 0 | ptr_value.MarkAsRead(); |
561 | 0 | PtrStorage().store(ptr_value, std::memory_order_release); |
562 | 0 | } |
563 | 0 | break; |
564 | 0 | } |
565 | 0 | } |
566 | 0 | } |
567 | | |
568 | 1 | int64_t FlagImpl::ReadOneWord() const { |
569 | 1 | assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic || |
570 | 1 | ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit); |
571 | 1 | auto& guard = DataGuard(); // Make sure flag initialized |
572 | 1 | (void)guard; |
573 | 1 | return OneWordValue().load(std::memory_order_acquire); |
574 | 1 | } |
575 | | |
576 | 0 | bool FlagImpl::ReadOneBool() const { |
577 | 0 | assert(ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit); |
578 | 0 | auto& guard = DataGuard(); // Make sure flag initialized |
579 | 0 | (void)guard; |
580 | 0 | return absl::bit_cast<FlagValueAndInitBit<bool>>( |
581 | 0 | OneWordValue().load(std::memory_order_acquire)) |
582 | 0 | .value; |
583 | 0 | } |
584 | | |
585 | 0 | void FlagImpl::ReadSequenceLockedData(void* dst) const { |
586 | 0 | size_t size = Sizeof(op_); |
587 | | // Attempt to read using the sequence lock. |
588 | 0 | if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) { |
589 | 0 | return; |
590 | 0 | } |
591 | | // We failed due to contention. Acquire the lock to prevent contention |
592 | | // and try again. |
593 | 0 | absl::ReaderMutexLock l(DataGuard()); |
594 | 0 | bool success = seq_lock_.TryRead(dst, AtomicBufferValue(), size); |
595 | 0 | assert(success); |
596 | 0 | static_cast<void>(success); |
597 | 0 | } |
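
Note: the TryRead() contract assumed above is the classic seqlock protocol — the modification counter is odd while a writer is active, and a read is valid only if the counter is even and unchanged across the copy. A generic sketch of a seqlock reader (not Abseil's actual SequenceLock, though it uses the same word-wise atomic copy and fencing):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    bool SeqlockTryRead(std::atomic<uint64_t>& seq,
                        const std::atomic<uint64_t>* buf, uint64_t* dst,
                        size_t words) {
      uint64_t before = seq.load(std::memory_order_acquire);
      if (before & 1) return false;  // a writer is mid-update
      for (size_t i = 0; i < words; ++i) {
        dst[i] = buf[i].load(std::memory_order_relaxed);  // word-wise copy
      }
      std::atomic_thread_fence(std::memory_order_acquire);
      // Valid only if no write started or completed during the copy.
      return seq.load(std::memory_order_relaxed) == before;
    }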
598 | | |
599 | 0 | void FlagImpl::Write(const void* src) { |
600 | 0 | absl::MutexLock l(DataGuard()); |
601 | |
602 | 0 | if (ShouldValidateFlagValue(flags_internal::FastTypeId(op_))) { |
603 | 0 | std::unique_ptr<void, DynValueDeleter> obj{flags_internal::Clone(op_, src), |
604 | 0 | DynValueDeleter{op_}}; |
605 | 0 | std::string ignored_error; |
606 | 0 | std::string src_as_str = flags_internal::Unparse(op_, src); |
607 | 0 | if (!flags_internal::Parse(op_, src_as_str, obj.get(), &ignored_error)) { |
608 | 0 | ABSL_INTERNAL_LOG(ERROR, absl::StrCat("Attempt to set flag '", Name(), |
609 | 0 | "' to invalid value ", src_as_str)); |
610 | 0 | } |
611 | 0 | } |
612 | |
613 | 0 | StoreValue(src, kProgrammaticChange); |
614 | 0 | } |
615 | | |
616 | | // Sets the value of the flag based on the specified string `value`. If the |
617 | | // flag was successfully set to the new value, it returns true. Otherwise, |
618 | | // it sets `err` to indicate the error, leaves the flag unchanged, and |
619 | | // returns false. There are three ways to set the flag's value: |
620 | | // * Update the current flag value |
621 | | // * Update the flag's default value |
622 | | // * Update the current flag value if it was never set before |
623 | | // The mode is selected based on the 'set_mode' parameter. |
624 | | bool FlagImpl::ParseFrom(absl::string_view value, FlagSettingMode set_mode, |
625 | 0 | ValueSource source, std::string& err) { |
626 | 0 | absl::MutexLock l(DataGuard()); |
627 | |
628 | 0 | switch (set_mode) { |
629 | 0 | case SET_FLAGS_VALUE: { |
630 | | // set or modify the flag's value |
631 | 0 | auto tentative_value = TryParse(value, err); |
632 | 0 | if (!tentative_value) return false; |
633 | | |
634 | 0 | StoreValue(tentative_value.get(), source); |
635 | |
636 | 0 | if (source == kCommandLine) { |
637 | 0 | on_command_line_ = true; |
638 | 0 | } |
639 | 0 | break; |
640 | 0 | } |
641 | 0 | case SET_FLAG_IF_DEFAULT: { |
642 | | // set the flag's value, but only if it hasn't been set by someone else |
643 | 0 | if (modified_) { |
644 | | // TODO(rogeeff): review and fix this semantic. Currently we do not fail |
645 | | // in this case if flag is modified. This is misleading since the flag's |
646 | | // value is not updated even though we return true. |
647 | | // *err = absl::StrCat(Name(), " is already set to ", |
648 | | // CurrentValue(), "\n"); |
649 | | // return false; |
650 | 0 | return true; |
651 | 0 | } |
652 | 0 | auto tentative_value = TryParse(value, err); |
653 | 0 | if (!tentative_value) return false; |
654 | | |
655 | 0 | StoreValue(tentative_value.get(), source); |
656 | 0 | break; |
657 | 0 | } |
658 | 0 | case SET_FLAGS_DEFAULT: { |
659 | 0 | auto tentative_value = TryParse(value, err); |
660 | 0 | if (!tentative_value) return false; |
661 | | |
662 | 0 | if (DefaultKind() == FlagDefaultKind::kDynamicValue) { |
663 | 0 | void* old_value = default_value_.dynamic_value; |
664 | 0 | default_value_.dynamic_value = tentative_value.release(); |
665 | 0 | tentative_value.reset(old_value); |
666 | 0 | } else { |
667 | 0 | default_value_.dynamic_value = tentative_value.release(); |
668 | 0 | def_kind_ = static_cast<uint8_t>(FlagDefaultKind::kDynamicValue); |
669 | 0 | } |
670 | |
671 | 0 | if (!modified_) { |
672 | | // In this case, we need to set both the default value *and* the current. |
673 | 0 | StoreValue(default_value_.dynamic_value, source); |
674 | 0 | modified_ = false; |
675 | 0 | } |
676 | 0 | break; |
677 | 0 | } |
678 | 0 | } |
679 | | |
680 | 0 | return true; |
681 | 0 | } |
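
Note: ParseFrom() is reachable through the public reflection API — absl::FindCommandLineFlag() returns the flag's CommandLineFlag wrapper, whose ParseFrom() updates the current value (SET_FLAGS_VALUE semantics). A usage sketch with a hypothetical flag name:

    #include <string>

    #include "absl/flags/reflection.h"

    bool SetPortFromString() {
      absl::CommandLineFlag* flag =
          absl::FindCommandLineFlag("port_sketch");  // hypothetical flag
      if (flag == nullptr) return false;
      std::string err;
      // Parses "9090" with the flag's type-specific parser; on failure the
      // flag is left unchanged and `err` describes the problem.
      return flag->ParseFrom("9090", &err);
    }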
682 | | |
683 | 0 | void FlagImpl::CheckDefaultValueParsingRoundtrip() const { |
684 | 0 | std::string v = DefaultValue(); |
685 | |
686 | 0 | absl::MutexLock lock(DataGuard()); |
687 | |
688 | 0 | auto dst = MakeInitValue(); |
689 | 0 | std::string error; |
690 | 0 | if (!flags_internal::Parse(op_, v, dst.get(), &error)) { |
691 | 0 | ABSL_INTERNAL_LOG( |
692 | 0 | FATAL, |
693 | 0 | absl::StrCat("Flag ", Name(), " (from ", Filename(), |
694 | 0 | "): string form of default value '", v, |
695 | 0 | "' could not be parsed; error=", error)); |
696 | 0 | } |
697 | | |
698 | | // We do not compare dst to def since parsing/unparsing may make |
699 | | // small changes, e.g., precision loss for floating point types. |
700 | 0 | } |
701 | | |
702 | 0 | bool FlagImpl::ValidateInputValue(absl::string_view value) const { |
703 | 0 | absl::MutexLock l(DataGuard()); |
704 | |
705 | 0 | auto obj = MakeInitValue(); |
706 | 0 | std::string ignored_error; |
707 | 0 | return flags_internal::Parse(op_, value, obj.get(), &ignored_error); |
708 | 0 | } |
709 | | |
710 | | } // namespace flags_internal |
711 | | ABSL_NAMESPACE_END |
712 | | } // namespace absl |