/src/abseil-cpp/absl/log/internal/vlog_config.cc
Line | Count | Source |
1 | | // Copyright 2022 The Abseil Authors |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // https://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | #include "absl/log/internal/vlog_config.h" |
16 | | |
17 | | #include <stddef.h> |
18 | | |
19 | | #include <algorithm> |
20 | | #include <atomic> |
21 | | #include <functional> |
22 | | #include <memory> |
23 | | #include <string> |
24 | | #include <utility> |
25 | | #include <vector> |
26 | | |
27 | | #include "absl/base/attributes.h" |
28 | | #include "absl/base/config.h" |
29 | | #include "absl/base/const_init.h" |
30 | | #include "absl/base/internal/spinlock.h" |
31 | | #include "absl/base/no_destructor.h" |
32 | | #include "absl/base/optimization.h" |
33 | | #include "absl/base/thread_annotations.h" |
34 | | #include "absl/log/internal/fnmatch.h" |
35 | | #include "absl/memory/memory.h" |
36 | | #include "absl/strings/numbers.h" |
37 | | #include "absl/strings/str_split.h" |
38 | | #include "absl/strings/string_view.h" |
39 | | #include "absl/strings/strip.h" |
40 | | #include "absl/synchronization/mutex.h" |
41 | | #include "absl/types/optional.h" |
42 | | |
43 | | namespace absl { |
44 | | ABSL_NAMESPACE_BEGIN |
45 | | namespace log_internal { |
46 | | |
47 | | namespace { |
48 | 0 | bool ModuleIsPath(absl::string_view module_pattern) { |
49 | | #ifdef _WIN32 |
50 | | return module_pattern.find_first_of("/\\") != module_pattern.npos; |
51 | | #else |
52 | 0 | return module_pattern.find('/') != module_pattern.npos; |
53 | 0 | #endif |
54 | 0 | } |
55 | | } // namespace |
56 | | |
57 | 0 | bool VLogSite::SlowIsEnabled(int stale_v, int level) { |
58 | 0 | if (ABSL_PREDICT_TRUE(stale_v != kUninitialized)) { |
59 | | // Because of the prerequisites to this function, we know that stale_v is |
60 | | // either uninitialized or >= level. If it's not uninitialized, that means |
61 | | // it must be >= level, thus we should log. |
62 | 0 | return true; |
63 | 0 | } |
64 | 0 | stale_v = log_internal::RegisterAndInitialize(this); |
65 | 0 | return ABSL_PREDICT_FALSE(stale_v >= level); |
66 | 0 | } |
67 | | |
68 | 0 | bool VLogSite::SlowIsEnabled0(int stale_v) { return SlowIsEnabled(stale_v, 0); } |
69 | 0 | bool VLogSite::SlowIsEnabled1(int stale_v) { return SlowIsEnabled(stale_v, 1); } |
70 | 0 | bool VLogSite::SlowIsEnabled2(int stale_v) { return SlowIsEnabled(stale_v, 2); } |
71 | 0 | bool VLogSite::SlowIsEnabled3(int stale_v) { return SlowIsEnabled(stale_v, 3); } |
72 | 0 | bool VLogSite::SlowIsEnabled4(int stale_v) { return SlowIsEnabled(stale_v, 4); } |
73 | 0 | bool VLogSite::SlowIsEnabled5(int stale_v) { return SlowIsEnabled(stale_v, 5); } |
74 | | |
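| | // Context for the slow paths above: the inline fast path in vlog_config.h
| | // caches this site's level in `v_` and only falls back to `SlowIsEnabledN`
| | // when that cached value cannot rule logging out. A rough caller-side
| | // sketch of that relationship (hypothetical; the real fast path is not
| | // reproduced here):
| | //
| | //   int stale_v = v_.load(std::memory_order_relaxed);
| | //   if (ABSL_PREDICT_TRUE(stale_v != kUninitialized && stale_v < level))
| | //     return false;                  // definitely disabled; stay inline
| | //   return SlowIsEnabledN(stale_v);  // uninitialized or >= level here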
75 | | namespace { |
76 | | struct VModuleInfo final { |
77 | | std::string module_pattern; |
78 | | bool module_is_path; // i.e. it contains a path separator. |
79 | | int vlog_level; |
80 | | |
81 | | // Allocates memory. |
82 | | VModuleInfo(absl::string_view module_pattern_arg, bool module_is_path_arg, |
83 | | int vlog_level_arg) |
84 | | : module_pattern(std::string(module_pattern_arg)), |
85 | | module_is_path(module_is_path_arg), |
86 | 0 | vlog_level(vlog_level_arg) {} |
87 | | }; |
88 | | |
89 | | // `mutex` guards all of the data structures that aren't lock-free. |
90 | | // To avoid problems with the heap checker (which calls into `VLOG`), `mutex`
91 | | // must be a `SpinLock` that prevents fiber scheduling, rather than a `Mutex`.
92 | | ABSL_CONST_INIT absl::base_internal::SpinLock mutex( |
93 | | absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY); |
94 | | |
95 | | // `GetUpdateSitesMutex()` serializes updates to all of the sites (i.e. those in |
96 | | // `site_list_head`) themselves. |
97 | 0 | absl::Mutex* GetUpdateSitesMutex() { |
98 | | // Chromium requires no global destructors, so we can't use the |
99 | | // absl::kConstInit idiom since absl::Mutex has a non-trivial destructor.
100 | 0 | static absl::NoDestructor<absl::Mutex> update_sites_mutex ABSL_ACQUIRED_AFTER( |
101 | 0 | mutex); |
102 | 0 | return update_sites_mutex.get(); |
103 | 0 | } |
104 | | |
105 | | ABSL_CONST_INIT int global_v ABSL_GUARDED_BY(mutex) = 0; |
106 | | // `site_list_head` is the head of a singly-linked list. Traversal, insertion, |
107 | | // and reads are atomic, so no locks are required, but updates to existing |
108 | | // elements are guarded by `GetUpdateSitesMutex()`. |
109 | | ABSL_CONST_INIT std::atomic<VLogSite*> site_list_head{nullptr}; |
110 | | ABSL_CONST_INIT std::vector<VModuleInfo>* vmodule_info ABSL_GUARDED_BY(mutex) |
111 | | ABSL_PT_GUARDED_BY(mutex){nullptr}; |
112 | | |
113 | | // Only used for Lisp.
114 | | ABSL_CONST_INIT std::vector<std::function<void()>>* update_callbacks |
115 | | ABSL_GUARDED_BY(GetUpdateSitesMutex()) |
116 | | ABSL_PT_GUARDED_BY(GetUpdateSitesMutex()){nullptr}; |
117 | | |
118 | | // Allocates memory. |
119 | | std::vector<VModuleInfo>& get_vmodule_info() |
120 | 0 | ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { |
121 | 0 | if (!vmodule_info) vmodule_info = new std::vector<VModuleInfo>; |
122 | 0 | return *vmodule_info; |
123 | 0 | } |
124 | | |
125 | | // Does not allocate or take locks. |
126 | | int VLogLevel(absl::string_view file, const std::vector<VModuleInfo>* infos, |
127 | 0 | int current_global_v) { |
128 | | // `infos` is null during a call to `VLOG` prior to setting `vmodule` (e.g. by |
129 | | // parsing flags). We can't allocate in `VLOG`, so we treat null as empty |
130 | | // here and press on. |
131 | 0 | if (!infos || infos->empty()) return current_global_v; |
132 | | // Get basename for file |
133 | 0 | absl::string_view basename = file; |
134 | 0 | { |
135 | 0 | const size_t sep = basename.rfind('/'); |
136 | 0 | if (sep != basename.npos) { |
137 | 0 | basename.remove_prefix(sep + 1); |
138 | | #ifdef _WIN32 |
139 | | } else { |
140 | | const size_t sep = basename.rfind('\\'); |
141 | | if (sep != basename.npos) basename.remove_prefix(sep + 1); |
142 | | #endif |
143 | 0 | } |
144 | 0 | } |
145 | |
146 | 0 | absl::string_view stem = file, stem_basename = basename; |
147 | 0 | { |
148 | 0 | const size_t sep = stem_basename.find('.'); |
149 | 0 | if (sep != stem_basename.npos) { |
150 | 0 | stem.remove_suffix(stem_basename.size() - sep); |
151 | 0 | stem_basename.remove_suffix(stem_basename.size() - sep); |
152 | 0 | } |
153 | 0 | if (absl::ConsumeSuffix(&stem_basename, "-inl")) { |
154 | 0 | stem.remove_suffix(absl::string_view("-inl").size()); |
155 | 0 | } |
156 | 0 | } |
157 | 0 | for (const auto& info : *infos) { |
158 | 0 | if (info.module_is_path) { |
159 | | // If there are any slashes in the pattern, try to match the full |
160 | | // name. |
161 | 0 | if (FNMatch(info.module_pattern, stem)) { |
162 | 0 | return info.vlog_level == kUseFlag ? current_global_v : info.vlog_level; |
163 | 0 | } |
164 | 0 | } else if (FNMatch(info.module_pattern, stem_basename)) { |
165 | 0 | return info.vlog_level == kUseFlag ? current_global_v : info.vlog_level; |
166 | 0 | } |
167 | 0 | } |
168 | | |
169 | 0 | return current_global_v; |
170 | 0 | } |
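| | // Worked example of the matching rules above (illustrative values): with
| | // `vmodule_info` holding {"foo", 2} and {"bar/*", 3} and a global level of 0:
| | //   - "a/b/foo-inl.h": basename "foo-inl.h" reduces to stem_basename "foo"
| | //     (extension and "-inl" stripped); "foo" is not a path pattern and
| | //     matches, so the level is 2.
| | //   - "bar/baz.cc": "bar/*" contains a separator, so it is matched against
| | //     the stem "bar/baz" and yields level 3.
| | //   - "qux.cc": nothing matches, so the global level 0 is returned.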
171 | | |
172 | | // Allocates memory. |
173 | | int AppendVModuleLocked(absl::string_view module_pattern, int log_level) |
174 | 0 | ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { |
175 | 0 | for (const auto& info : get_vmodule_info()) { |
176 | 0 | if (FNMatch(info.module_pattern, module_pattern)) { |
177 | | // This is a memory optimization to avoid storing patterns that will never
178 | | // match due to early-exit semantics. Primarily an optimization for our own
179 | | // unit tests.
180 | 0 | return info.vlog_level; |
181 | 0 | } |
182 | 0 | } |
183 | 0 | bool module_is_path = ModuleIsPath(module_pattern); |
184 | 0 | get_vmodule_info().emplace_back(std::string(module_pattern), module_is_path, |
185 | 0 | log_level); |
186 | 0 | return global_v; |
187 | 0 | } |
188 | | |
189 | | // Allocates memory. |
190 | | int PrependVModuleLocked(absl::string_view module_pattern, int log_level) |
191 | 0 | ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex) { |
192 | 0 | absl::optional<int> old_log_level; |
193 | 0 | for (const auto& info : get_vmodule_info()) { |
194 | 0 | if (FNMatch(info.module_pattern, module_pattern)) { |
195 | 0 | old_log_level = info.vlog_level; |
196 | 0 | break; |
197 | 0 | } |
198 | 0 | } |
199 | 0 | bool module_is_path = ModuleIsPath(module_pattern); |
200 | 0 | auto iter = get_vmodule_info().emplace(get_vmodule_info().cbegin(), |
201 | 0 | std::string(module_pattern), |
202 | 0 | module_is_path, log_level); |
203 | | |
204 | | // This is a memory optimization to avoid storing patterns that will never
205 | | // match due to early-exit semantics. Primarily an optimization for our own
206 | | // unit tests.
207 | 0 | get_vmodule_info().erase( |
208 | 0 | std::remove_if(++iter, get_vmodule_info().end(), |
209 | 0 | [module_pattern](const VModuleInfo& info) { |
210 | | // Remove the previous pattern if it is less generic than |
211 | | // the new one. For example, if the new pattern |
212 | | // `module_pattern` is "foo*" and the previous pattern |
213 | | // `info.module_pattern` is "foo", we should remove the |
214 | | // previous pattern, because the new pattern "foo*" will
215 | | // match all the files that the previous pattern "foo"
216 | | // matches. |
217 | 0 | return FNMatch(module_pattern, info.module_pattern); |
218 | 0 | }), |
219 | 0 | get_vmodule_info().cend()); |
220 | 0 | return old_log_level.value_or(global_v); |
221 | 0 | } |
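| | // Illustrative sequences (hypothetical values, global_v assumed 0):
| | //   - List holds {"foo", 2}: PrependVModuleLocked("foo", 5) records the old
| | //     level 2, inserts {"foo", 5} at the front, erases the shadowed
| | //     {"foo", 2} entry, and returns 2.
| | //   - List holds {"foo", 2}: PrependVModuleLocked("foo*", 3) finds no
| | //     existing pattern matching the literal string "foo*", so it returns
| | //     global_v (0); the old {"foo", 2} entry is still erased because the
| | //     new pattern "foo*" matches it.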
222 | | } // namespace |
223 | | |
224 | 0 | int VLogLevel(absl::string_view file) ABSL_LOCKS_EXCLUDED(mutex) { |
225 | 0 | absl::base_internal::SpinLockHolder l(&mutex); |
226 | 0 | return VLogLevel(file, vmodule_info, global_v); |
227 | 0 | } |
228 | | |
229 | 0 | int RegisterAndInitialize(VLogSite* v) ABSL_LOCKS_EXCLUDED(mutex) { |
230 | | // std::memory_order_seq_cst is overkill in this function, but given that this |
231 | | // path is intended to be slow, it's not worth the brain power to relax that. |
232 | 0 | VLogSite* h = site_list_head.load(std::memory_order_seq_cst); |
233 | |
234 | 0 | VLogSite* old = nullptr; |
235 | 0 | if (v->next_.compare_exchange_strong(old, h, std::memory_order_seq_cst, |
236 | 0 | std::memory_order_seq_cst)) { |
237 | | // Multiple threads may attempt to register this site concurrently. |
238 | | // By successfully setting `v->next` this thread commits to being *the* |
239 | | // thread that installs `v` in the list. |
240 | 0 | while (!site_list_head.compare_exchange_weak( |
241 | 0 | h, v, std::memory_order_seq_cst, std::memory_order_seq_cst)) { |
242 | 0 | v->next_.store(h, std::memory_order_seq_cst); |
243 | 0 | } |
244 | 0 | } |
245 | |
246 | 0 | int old_v = VLogSite::kUninitialized; |
247 | 0 | int new_v = VLogLevel(v->file_); |
248 | | // No loop: if someone else set this, we should respect their evaluation of
249 | | // `VLogLevel`. This may mean we return a stale `v`, but `v` itself will |
250 | | // always arrive at the freshest value. Otherwise, we could be writing a |
251 | | // stale value and clobbering the fresher one. |
252 | 0 | if (v->v_.compare_exchange_strong(old_v, new_v, std::memory_order_seq_cst, |
253 | 0 | std::memory_order_seq_cst)) { |
254 | 0 | return new_v; |
255 | 0 | } |
256 | 0 | return old_v; |
257 | 0 | } |
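| | // Example interleaving (hypothetical sites): suppose the list is
| | // `head -> a` and two threads race to register the same new site `s`
| | // (with `s.next_ == nullptr`):
| | //   - Both load head == a and attempt to CAS `s.next_` from nullptr to a;
| | //     exactly one succeeds and pushes `s`, retrying the head CAS until
| | //     `head -> s -> a` holds. The other thread skips the push.
| | //   - Both then compute `VLogLevel(s.file_)` and CAS `s.v_` from
| | //     kUninitialized; only the first write sticks, and the later thread
| | //     returns the value it found instead of clobbering it.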
258 | | |
259 | | void UpdateVLogSites() ABSL_UNLOCK_FUNCTION(mutex) |
260 | 0 | ABSL_LOCKS_EXCLUDED(GetUpdateSitesMutex()) { |
261 | 0 | std::vector<VModuleInfo> infos = get_vmodule_info(); |
262 | 0 | int current_global_v = global_v; |
263 | | // We need to grab `GetUpdateSitesMutex()` before we release `mutex` to ensure |
264 | | // that updates are not interleaved (resulting in an inconsistent final state) |
265 | | // and to ensure that the final state in the sites matches the final state of |
266 | | // `vmodule_info`. We unlock `mutex` to ensure that uninitialized sites don't |
267 | | // have to wait on all updates in order to acquire `mutex` and initialize |
268 | | // themselves. |
269 | 0 | absl::MutexLock ul(GetUpdateSitesMutex()); |
270 | 0 | mutex.Unlock(); |
271 | 0 | VLogSite* n = site_list_head.load(std::memory_order_seq_cst); |
272 | | // Because sites are added to the list in the order they are executed, there |
273 | | // tend to be clusters of entries with the same file. |
274 | 0 | const char* last_file = nullptr; |
275 | 0 | int last_file_level = 0; |
276 | 0 | while (n != nullptr) { |
277 | 0 | if (n->file_ != last_file) { |
278 | 0 | last_file = n->file_; |
279 | 0 | last_file_level = VLogLevel(n->file_, &infos, current_global_v); |
280 | 0 | } |
281 | 0 | n->v_.store(last_file_level, std::memory_order_seq_cst); |
282 | 0 | n = n->next_.load(std::memory_order_seq_cst); |
283 | 0 | } |
284 | 0 | if (update_callbacks) { |
285 | 0 | for (auto& cb : *update_callbacks) { |
286 | 0 | cb(); |
287 | 0 | } |
288 | 0 | } |
289 | 0 | } |
290 | | |
291 | | void UpdateVModule(absl::string_view vmodule) |
292 | 0 | ABSL_LOCKS_EXCLUDED(mutex, GetUpdateSitesMutex()) { |
293 | 0 | std::vector<std::pair<absl::string_view, int>> glob_levels; |
294 | 0 | for (absl::string_view glob_level : absl::StrSplit(vmodule, ',')) { |
295 | 0 | const size_t eq = glob_level.rfind('='); |
296 | 0 | if (eq == glob_level.npos) continue; |
297 | 0 | const absl::string_view glob = glob_level.substr(0, eq); |
298 | 0 | int level; |
299 | 0 | if (!absl::SimpleAtoi(glob_level.substr(eq + 1), &level)) continue; |
300 | 0 | glob_levels.emplace_back(glob, level); |
301 | 0 | } |
302 | 0 | mutex.Lock(); // Unlocked by UpdateVLogSites(). |
303 | 0 | get_vmodule_info().clear(); |
304 | 0 | for (const auto& it : glob_levels) { |
305 | 0 | const absl::string_view glob = it.first; |
306 | 0 | const int level = it.second; |
307 | 0 | AppendVModuleLocked(glob, level); |
308 | 0 | } |
309 | 0 | UpdateVLogSites(); |
310 | 0 | } |
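| | // Example (hypothetical flag value): UpdateVModule("foo=2,bar/*=3,baz,qux=x")
| | // installs {"foo", 2} and {"bar/*", 3}; "baz" is skipped because it has no
| | // '=', and "qux=x" is skipped because "x" is not an integer. Any previous
| | // vmodule configuration is cleared first, and all registered sites are then
| | // refreshed via UpdateVLogSites().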
311 | | |
312 | | int UpdateGlobalVLogLevel(int v) |
313 | 0 | ABSL_LOCKS_EXCLUDED(mutex, GetUpdateSitesMutex()) { |
314 | 0 | mutex.Lock(); // Unlocked by UpdateVLogSites(). |
315 | 0 | const int old_global_v = global_v; |
316 | 0 | if (v == global_v) { |
317 | 0 | mutex.Unlock(); |
318 | 0 | return old_global_v; |
319 | 0 | } |
320 | 0 | global_v = v; |
321 | 0 | UpdateVLogSites(); |
322 | 0 | return old_global_v; |
323 | 0 | } |
324 | | |
325 | | int PrependVModule(absl::string_view module_pattern, int log_level) |
326 | 0 | ABSL_LOCKS_EXCLUDED(mutex, GetUpdateSitesMutex()) { |
327 | 0 | mutex.Lock(); // Unlocked by UpdateVLogSites(). |
328 | 0 | int old_v = PrependVModuleLocked(module_pattern, log_level); |
329 | 0 | UpdateVLogSites(); |
330 | 0 | return old_v; |
331 | 0 | } |
332 | | |
333 | | void OnVLogVerbosityUpdate(std::function<void()> cb) |
334 | 0 | ABSL_LOCKS_EXCLUDED(GetUpdateSitesMutex()) { |
335 | 0 | absl::MutexLock ul(GetUpdateSitesMutex()); |
336 | 0 | if (!update_callbacks) |
337 | 0 | update_callbacks = new std::vector<std::function<void()>>; |
338 | 0 | update_callbacks->push_back(std::move(cb)); |
339 | 0 | } |
340 | | |
341 | 0 | VLogSite* SetVModuleListHeadForTestOnly(VLogSite* v) { |
342 | 0 | return site_list_head.exchange(v, std::memory_order_seq_cst); |
343 | 0 | } |
344 | | |
345 | | } // namespace log_internal |
346 | | ABSL_NAMESPACE_END |
347 | | } // namespace absl |