#pragma once

#include <atomic>
#include <chrono>
#include <cstdint>
#include <list>
#include <memory>
#include <string>

#include "envoy/stats/tag.h"
#include "envoy/thread_local/thread_local.h"

#include "source/common/common/hash.h"
#include "source/common/common/thread_synchronizer.h"
#include "source/common/stats/allocator_impl.h"
#include "source/common/stats/histogram_impl.h"
#include "source/common/stats/null_counter.h"
#include "source/common/stats/null_gauge.h"
#include "source/common/stats/null_text_readout.h"
#include "source/common/stats/symbol_table.h"
#include "source/common/stats/utility.h"

#include "absl/container/flat_hash_map.h"
#include "circllhist.h"

namespace Envoy {
namespace Stats {

/**
 * A histogram that is stored in TLS and used to record values per thread. It holds two
 * histograms: one to collect new values, and the other as a backup used during the merge
 * process. The two are swapped at the start of each merge.
 */
class ThreadLocalHistogramImpl : public HistogramImplHelper {
public:
  ThreadLocalHistogramImpl(StatName name, Histogram::Unit unit, StatName tag_extracted_name,
                           const StatNameTagVector& stat_name_tags, SymbolTable& symbol_table);
  ~ThreadLocalHistogramImpl() override;

  void merge(histogram_t* target);

  /**
   * Called at the beginning of the merge process. Swaps the histogram used for collection so
   * that we do not have to lock the histogram during high-throughput TLS writes.
   */
  void beginMerge() {
    // This switches the current_active_ between 1 and 0.
    ASSERT(std::this_thread::get_id() == created_thread_id_);
    current_active_ = otherHistogramIndex();
  }
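
  // Illustrative merge sequence (a sketch only; the actual sequencing is driven
  // by ThreadLocalStoreImpl::mergeHistograms(), which runs beginMerge() on the
  // owning worker thread and later folds the swapped-out buffer on the main
  // thread):
  //
  //   tls_hist->beginMerge();   // worker thread: swap the active/backup buffers
  //   ...                       // recordValue() now writes to the new active buffer
  //   tls_hist->merge(target);  // fold the now-inactive buffer into `target`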

  // Stats::Histogram
  Histogram::Unit unit() const override {
    // If at some point ThreadLocalHistogramImpl holds a pointer to its parent, we can simply
    // return the parent's unit here rather than storing it separately.
    return unit_;
  }
  void recordValue(uint64_t value) override;

  // Stats::Metric
  SymbolTable& symbolTable() final { return symbol_table_; }
  bool used() const override { return used_; }
  bool hidden() const override { return false; }

private:
  Histogram::Unit unit_;
  uint64_t otherHistogramIndex() const { return 1 - current_active_; }
  uint64_t current_active_{0};
  histogram_t* histograms_[2];
  std::atomic<bool> used_;
  std::thread::id created_thread_id_;
  SymbolTable& symbol_table_;
};

using TlsHistogramSharedPtr = RefcountPtr<ThreadLocalHistogramImpl>;

class ThreadLocalStoreImpl;

/**
 * Log Linear Histogram implementation that is stored in the main thread.
 */
class ParentHistogramImpl : public MetricImpl<ParentHistogram> {
public:
  ParentHistogramImpl(StatName name, Histogram::Unit unit, ThreadLocalStoreImpl& parent,
                      StatName tag_extracted_name, const StatNameTagVector& stat_name_tags,
                      ConstSupportedBuckets& supported_buckets, uint64_t id);
  ~ParentHistogramImpl() override;

  void addTlsHistogram(const TlsHistogramSharedPtr& hist_ptr);

  // Stats::Histogram
  Histogram::Unit unit() const override;
  void recordValue(uint64_t value) override;

  /**
   * This method is called during the main stats flush process for each histogram. It
   * iterates through the TLS histograms and collects the histogram data from all of them
   * into "interval_histogram". The collected "interval_histogram" is then merged into
   * "cumulative_histogram".
   */
  void merge() override;
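
  // Sketch of how a flush might consume the two views after a merge
  // (illustrative only):
  //
  //   parent->merge();
  //   const HistogramStatistics& interval = parent->intervalStatistics();
  //   const HistogramStatistics& cumulative = parent->cumulativeStatistics();
  //   // `interval` reflects values recorded since the previous merge(), while
  //   // `cumulative` reflects all values recorded over the process lifetime.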

  const HistogramStatistics& intervalStatistics() const override { return interval_statistics_; }
  const HistogramStatistics& cumulativeStatistics() const override {
    return cumulative_statistics_;
  }
  std::string quantileSummary() const override;
  std::string bucketSummary() const override;
  std::vector<Bucket> detailedTotalBuckets() const override {
    return detailedlBucketsHelper(*cumulative_histogram_);
  }
  std::vector<Bucket> detailedIntervalBuckets() const override {
    return detailedlBucketsHelper(*interval_histogram_);
  }

  // Stats::Metric
  SymbolTable& symbolTable() override;
  bool used() const override;
  bool hidden() const override;

  // RefcountInterface
  void incRefCount() override;
  bool decRefCount() override;
  uint32_t use_count() const override { return ref_count_; }

  // Indicates that the ThreadLocalStore is shutting down, so no need to clear its histogram_set_.
  void setShuttingDown(bool shutting_down) { shutting_down_ = shutting_down; }
  bool shuttingDown() const { return shutting_down_; }

private:
  bool usedLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(merge_lock_);
  static std::vector<Stats::ParentHistogram::Bucket>
  detailedlBucketsHelper(const histogram_t& histogram);

  Histogram::Unit unit_;
  ThreadLocalStoreImpl& thread_local_store_;
  histogram_t* interval_histogram_;
  histogram_t* cumulative_histogram_;
  HistogramStatisticsImpl interval_statistics_;
  HistogramStatisticsImpl cumulative_statistics_;
  mutable Thread::MutexBasicLockable merge_lock_;
  std::list<TlsHistogramSharedPtr> tls_histograms_ ABSL_GUARDED_BY(merge_lock_);
  bool merged_{false};
  std::atomic<bool> shutting_down_{false};
  std::atomic<uint32_t> ref_count_{0};
  const uint64_t id_; // Index into TlsCache::histogram_cache_.
};

using ParentHistogramImplSharedPtr = RefcountPtr<ParentHistogramImpl>;

/**
 * Store implementation with thread local caching. For design details see
 * https://github.com/envoyproxy/envoy/blob/main/source/docs/stats.md
 */
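// Typical usage (a minimal sketch; assumes an existing Allocator `alloc` and
// elides threading initialization and shutdown):
//
//   ThreadLocalStoreImpl store(alloc);
//   ScopeSharedPtr scope = store.rootScope()->createScope("http.");
//   Counter& counter = scope->counterFromString("requests_total");
//   counter.inc();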
class ThreadLocalStoreImpl : Logger::Loggable<Logger::Id::stats>, public StoreRoot {
public:
  static const char DeleteScopeSync[];
  static const char IterateScopeSync[];
  static const char MainDispatcherCleanupSync[];

  ThreadLocalStoreImpl(Allocator& alloc);
  ~ThreadLocalStoreImpl() override;

  // Stats::Store
  NullCounterImpl& nullCounter() override { return null_counter_; }
  NullGaugeImpl& nullGauge() override { return null_gauge_; }
  ScopeSharedPtr rootScope() override { return default_scope_; }
  ConstScopeSharedPtr constRootScope() const override { return default_scope_; }
  const SymbolTable& constSymbolTable() const override { return alloc_.constSymbolTable(); }
  SymbolTable& symbolTable() override { return alloc_.symbolTable(); }

  bool iterate(const IterateFn<Counter>& fn) const override { return iterHelper(fn); }
  bool iterate(const IterateFn<Gauge>& fn) const override { return iterHelper(fn); }
  bool iterate(const IterateFn<Histogram>& fn) const override { return iterHelper(fn); }
  bool iterate(const IterateFn<TextReadout>& fn) const override { return iterHelper(fn); }

  std::vector<CounterSharedPtr> counters() const override;
  std::vector<GaugeSharedPtr> gauges() const override;
  std::vector<TextReadoutSharedPtr> textReadouts() const override;
  std::vector<ParentHistogramSharedPtr> histograms() const override;

  void forEachCounter(SizeFn f_size, StatFn<Counter> f_stat) const override;
  void forEachGauge(SizeFn f_size, StatFn<Gauge> f_stat) const override;
  void forEachTextReadout(SizeFn f_size, StatFn<TextReadout> f_stat) const override;
  void forEachHistogram(SizeFn f_size, StatFn<ParentHistogram> f_stat) const override;
  void forEachScope(SizeFn f_size, StatFn<const Scope> f_stat) const override;

  // Stats::StoreRoot
  void addSink(Sink& sink) override { timer_sinks_.push_back(sink); }
  void setTagProducer(TagProducerPtr&& tag_producer) override {
    tag_producer_ = std::move(tag_producer);
  }
  void setStatsMatcher(StatsMatcherPtr&& stats_matcher) override;
  void setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) override;
  void initializeThreading(Event::Dispatcher& main_thread_dispatcher,
                           ThreadLocal::Instance& tls) override;
  void shutdownThreading() override;
  void mergeHistograms(PostMergeCb merge_cb) override;
  void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override;

  Histogram& tlsHistogram(ParentHistogramImpl& parent, uint64_t id);

  void forEachSinkedCounter(SizeFn f_size, StatFn<Counter> f_stat) const override;
  void forEachSinkedGauge(SizeFn f_size, StatFn<Gauge> f_stat) const override;
  void forEachSinkedTextReadout(SizeFn f_size, StatFn<TextReadout> f_stat) const override;
  void forEachSinkedHistogram(SizeFn f_size, StatFn<ParentHistogram> f_stat) const override;

  void setSinkPredicates(std::unique_ptr<SinkPredicates>&& sink_predicates) override;
  OptRef<SinkPredicates> sinkPredicates() override { return sink_predicates_; }

  /**
   * @return a thread synchronizer object used for controlling thread behavior in tests.
   */
  Thread::ThreadSynchronizer& sync() { return sync_; }

  /**
   * @return a set of well known tag names; used to reduce symbol table churn.
   */
  const StatNameSet& wellKnownTags() const { return *well_known_tags_; }

  bool decHistogramRefCount(ParentHistogramImpl& histogram, std::atomic<uint32_t>& ref_count);
  void releaseHistogramCrossThread(uint64_t histogram_id);

  const TagProducer& tagProducer() const { return *tag_producer_; }
  void extractAndAppendTags(StatName name, StatNamePool& pool, StatNameTagVector& tags) override;
  void extractAndAppendTags(absl::string_view name, StatNamePool& pool,
                            StatNameTagVector& tags) override;
  const TagVector& fixedTags() override { return tag_producer_->fixedTags(); }

private:
  friend class ThreadLocalStoreTestingPeer;

  template <class Stat> using StatRefMap = StatNameHashMap<std::reference_wrapper<Stat>>;

  struct TlsCacheEntry {
    // The counters, gauges and text readouts in the TLS cache are stored by reference,
    // depending on the CentralCache for backing store. This avoids a potential
    // contention storm when destructing a scope, as the counter/gauge ref-count
    // decrement in allocator_impl.cc needs to hold the single allocator mutex.
    StatRefMap<Counter> counters_;
    StatRefMap<Gauge> gauges_;
    StatRefMap<TextReadout> text_readouts_;

    // Histograms also require holding a mutex while decrementing reference
    // counts. The only difference from other stats is that the histogram_set_
    // lives in the ThreadLocalStore object, rather than in
    // AllocatorImpl. Histograms are removed from that set when all scopes
    // referencing the histogram are dropped. Each ParentHistogram has a unique
    // index, which is not re-used during the process lifetime.
    //
    // There is also a tls_histogram_cache_ in the TlsCache object, which is
    // not tied to a scope. It maps from a parent histogram's unique index to
    // a TlsHistogram. This enables continuity between same-named histograms
    // in same-named scopes. That scenario is common when re-creating scopes in
    // response to xDS.
    StatNameHashMap<ParentHistogramSharedPtr> parent_histograms_;

    // We keep a TLS cache of rejected stat names. This costs memory, but
    // reduces the runtime overhead of running the matcher. Moreover, once symbol
    // tables are integrated, rejection will need the fully elaborated string,
    // and we would need to take a global symbol-table lock to run it. We keep this
    // StatName set here in the TLS cache to avoid taking a lock to compute
    // rejection.
    StatNameHashSet rejected_stats_;
  };

  struct CentralCacheEntry : public RefcountHelper {
    explicit CentralCacheEntry(SymbolTable& symbol_table) : symbol_table_(symbol_table) {}
    ~CentralCacheEntry();

    StatNameHashMap<CounterSharedPtr> counters_;
    StatNameHashMap<GaugeSharedPtr> gauges_;
    StatNameHashMap<ParentHistogramImplSharedPtr> histograms_;
    StatNameHashMap<TextReadoutSharedPtr> text_readouts_;
    StatNameStorageSet rejected_stats_;
    SymbolTable& symbol_table_;
  };
  using CentralCacheEntrySharedPtr = RefcountPtr<CentralCacheEntry>;

  struct ScopeImpl : public Scope {
    ScopeImpl(ThreadLocalStoreImpl& parent, StatName prefix);
    ~ScopeImpl() override;

    // Stats::Scope
    Counter& counterFromStatNameWithTags(const StatName& name,
                                         StatNameTagVectorOptConstRef tags) override;
    Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,
                                     Gauge::ImportMode import_mode) override;
    Histogram& histogramFromStatNameWithTags(const StatName& name,
                                             StatNameTagVectorOptConstRef tags,
                                             Histogram::Unit unit) override;
    TextReadout& textReadoutFromStatNameWithTags(const StatName& name,
                                                 StatNameTagVectorOptConstRef tags) override;
    ScopeSharedPtr createScope(const std::string& name) override;
    ScopeSharedPtr scopeFromStatName(StatName name) override;
    const SymbolTable& constSymbolTable() const final { return parent_.constSymbolTable(); }
    SymbolTable& symbolTable() final { return parent_.symbolTable(); }

    Counter& counterFromString(const std::string& name) override {
      StatNameManagedStorage storage(name, symbolTable());
      return counterFromStatName(storage.statName());
    }

    Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override {
      StatNameManagedStorage storage(name, symbolTable());
      return gaugeFromStatName(storage.statName(), import_mode);
    }
    Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override {
      StatNameManagedStorage storage(name, symbolTable());
      return histogramFromStatName(storage.statName(), unit);
    }
    TextReadout& textReadoutFromString(const std::string& name) override {
      StatNameManagedStorage storage(name, symbolTable());
      return textReadoutFromStatName(storage.statName());
    }

    template <class StatMap, class StatFn> bool iterHelper(StatFn fn, const StatMap& map) const {
      for (auto& iter : map) {
        if (!fn(iter.second)) {
          return false;
        }
      }
      return true;
    }

    bool iterate(const IterateFn<Counter>& fn) const override {
      Thread::LockGuard lock(parent_.lock_);
      return iterateLockHeld(fn);
    }
    bool iterate(const IterateFn<Gauge>& fn) const override {
      Thread::LockGuard lock(parent_.lock_);
      return iterateLockHeld(fn);
    }
    bool iterate(const IterateFn<Histogram>& fn) const override {
      Thread::LockGuard lock(parent_.lock_);
      return iterateLockHeld(fn);
    }
    bool iterate(const IterateFn<TextReadout>& fn) const override {
      Thread::LockGuard lock(parent_.lock_);
      return iterateLockHeld(fn);
    }

    bool iterateLockHeld(const IterateFn<Counter>& fn) const {
      return iterHelper(fn, centralCacheLockHeld()->counters_);
    }
    bool iterateLockHeld(const IterateFn<Gauge>& fn) const {
      return iterHelper(fn, centralCacheLockHeld()->gauges_);
    }
    bool iterateLockHeld(const IterateFn<Histogram>& fn) const {
      return iterHelper(fn, centralCacheLockHeld()->histograms_);
    }
    bool iterateLockHeld(const IterateFn<TextReadout>& fn) const {
      return iterHelper(fn, centralCacheLockHeld()->text_readouts_);
    }
    ThreadLocalStoreImpl& store() override { return parent_; }
    const ThreadLocalStoreImpl& constStore() const override { return parent_; }

    // NOTE: The find methods assume that `name` is fully-qualified.
    // Implementations will not add the scope prefix.
    CounterOptConstRef findCounter(StatName name) const override;
    GaugeOptConstRef findGauge(StatName name) const override;
    HistogramOptConstRef findHistogram(StatName name) const override;
    TextReadoutOptConstRef findTextReadout(StatName name) const override;

    HistogramOptConstRef findHistogramLockHeld(StatName name) const;

    template <class StatType>
    using MakeStatFn = std::function<RefcountPtr<StatType>(
        Allocator&, StatName name, StatName tag_extracted_name, const StatNameTagVector& tags)>;

    /**
     * Makes a stat either by looking it up in the central cache,
     * generating it from the parent allocator, or, as a last
     * resort, creating it with the heap allocator.
     *
     * @param full_stat_name the full name of the stat with appended tags.
     * @param name_no_tags the full name of the stat (not tag-extracted) without appended tags.
     * @param stat_name_tags the tags provided at creation time. If empty, tag extraction occurs.
     * @param central_cache_map a map from name to the desired object in the central cache.
     * @param make_stat a function to generate the stat object, called if it's not in the cache.
     * @param tls_ref possibly null reference to a cache entry for this stat, which will be
     *     used if non-empty, or filled in if empty (and non-null).
     */
    template <class StatType>
    StatType& safeMakeStat(StatName full_stat_name, StatName name_no_tags,
                           const absl::optional<StatNameTagVector>& stat_name_tags,
                           StatNameHashMap<RefcountPtr<StatType>>& central_cache_map,
                           StatsMatcher::FastResult fast_reject_result,
                           StatNameStorageSet& central_rejected_stats,
                           MakeStatFn<StatType> make_stat, StatRefMap<StatType>* tls_cache,
                           StatNameHashSet* tls_rejected_stats, StatType& null_stat);
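
    // The lookup order, sketched as pseudocode (illustrative only, not the
    // exact implementation):
    //
    //   if (tls_rejected_stats != nullptr && contains(name))  return null_stat;
    //   if (tls_cache != nullptr && cache hit)                 return cached reference;
    //   with parent_.lock_ held:
    //     if rejected by the matcher:  remember the rejection; return null_stat;
    //     if central_cache_map hit:    populate tls_cache; return the stat;
    //     otherwise:                   make_stat(...); insert into both caches.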

    template <class StatType>
    using StatTypeOptConstRef = absl::optional<std::reference_wrapper<const StatType>>;

    /**
     * Looks up an existing stat, populating the local cache if necessary. Does
     * not check the TLS cache or rejected stats, and does not create a stat if
     * it does not exist.
     *
     * @param name the full name of the stat (not tag-extracted).
     * @param central_cache_map a map from name to the desired object in the central cache.
     * @return a reference to the stat, if it exists.
     */
    template <class StatType>
    StatTypeOptConstRef<StatType>
    findStatLockHeld(StatName name,
                     StatNameHashMap<RefcountPtr<StatType>>& central_cache_map) const {
      auto iter = central_cache_map.find(name);
      if (iter == central_cache_map.end()) {
        return absl::nullopt;
      }

      return std::cref(*iter->second);
    }

    StatName prefix() const override { return prefix_.statName(); }

    // Returns the central cache, asserting that the parent lock is held.
    //
    // When a ThreadLocalStore method takes lock_ and then accesses
    // scope->central_cache_, the analysis system cannot understand that the
    // scope's parent_.lock_ is held, so we assert that here.
    const CentralCacheEntrySharedPtr& centralCacheLockHeld() const
        ABSL_ASSERT_EXCLUSIVE_LOCK(parent_.lock_) {
      return central_cache_;
    }

    // Returns the central cache, bypassing thread analysis.
    //
    // This is used only when passing references to maps held in the central
    // cache to safeMakeStat, which takes the lock only if those maps are
    // actually referenced, due to the lookup missing the TLS cache.
    const CentralCacheEntrySharedPtr&
    centralCacheNoThreadAnalysis() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
      return central_cache_;
    }

    const uint64_t scope_id_;
    ThreadLocalStoreImpl& parent_;

  private:
    StatNameStorage prefix_;
    mutable CentralCacheEntrySharedPtr central_cache_ ABSL_GUARDED_BY(parent_.lock_);
  };

  struct TlsCache : public ThreadLocal::ThreadLocalObject {
    TlsCacheEntry& insertScope(uint64_t scope_id);
    void eraseScopes(const std::vector<uint64_t>& scope_ids);
    void eraseHistograms(const std::vector<uint64_t>& histograms);

    // The TLS scope cache is keyed by scope ID. This is used to avoid complex circular references
    // during scope destruction. An ID is required vs. using the address of the scope pointer
    // because it's possible that the memory allocator will recycle the scope pointer immediately
    // upon destruction, leading to a situation in which a new scope with the same address is used
    // to reference the cache, and the cache is then subsequently flushed, leaving nothing in the
    // central store. See the overview for more information. This complexity is required for
    // lockless operation in the fast path.
    absl::flat_hash_map<uint64_t, TlsCacheEntry> scope_cache_;

    // Maps from a histogram ID (monotonically increasing) to a TLS histogram.
    absl::flat_hash_map<uint64_t, TlsHistogramSharedPtr> tls_histogram_cache_;
  };

  using ScopeImplSharedPtr = std::shared_ptr<ScopeImpl>;

  /**
   * Calls fn_lock_held for every scope, with lock_ held. This avoids iterate/destruct
   * races for scopes.
   *
   * @param fn_lock_held function to be called, with lock_ held, on every scope, until
   *     fn_lock_held() returns false.
   * @return true if the iteration completed with fn_lock_held never returning false.
   */
  bool iterateScopes(const std::function<bool(const ScopeImplSharedPtr&)> fn_lock_held) const {
    Thread::LockGuard lock(lock_);
    return iterateScopesLockHeld(fn_lock_held);
  }

  bool iterateScopesLockHeld(const std::function<bool(const ScopeImplSharedPtr&)> fn) const
      ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // The Store versions of iterate cover all the scopes in the store.
  template <class StatFn> bool iterHelper(StatFn fn) const {
    return iterateScopes(
        [fn](const ScopeImplSharedPtr& scope) -> bool { return scope->iterateLockHeld(fn); });
  }

  std::string getTagsForName(const std::string& name, TagVector& tags) const;
  void clearScopesFromCaches();
  void clearHistogramsFromCaches();
  void releaseScopeCrossThread(ScopeImpl* scope);
  void mergeInternal(PostMergeCb merge_cb);
  bool slowRejects(StatsMatcher::FastResult fast_reject_result, StatName name) const;
  bool rejects(StatName name) const { return stats_matcher_->rejects(name); }
  StatsMatcher::FastResult fastRejects(StatName name) const;
  bool rejectsAll() const { return stats_matcher_->rejectsAll(); }
  template <class StatMapClass, class StatListClass>
  void removeRejectedStats(StatMapClass& map, StatListClass& list);
  template <class StatSharedPtr>
  void removeRejectedStats(StatNameHashMap<StatSharedPtr>& map,
                           std::function<void(const StatSharedPtr&)> f_deletion);
  bool checkAndRememberRejection(StatName name, StatsMatcher::FastResult fast_reject_result,
                                 StatNameStorageSet& central_rejected_stats,
                                 StatNameHashSet* tls_rejected_stats);
  TlsCache& tlsCache() { return **tls_cache_; }
  void addScope(std::shared_ptr<ScopeImpl>& new_scope);

  OptRef<SinkPredicates> sink_predicates_;
  Allocator& alloc_;
  Event::Dispatcher* main_thread_dispatcher_{};
  using TlsCacheSlot = ThreadLocal::TypedSlotPtr<TlsCache>;
  ThreadLocal::TypedSlotPtr<TlsCache> tls_cache_;
  mutable Thread::MutexBasicLockable lock_;
  absl::flat_hash_map<ScopeImpl*, std::weak_ptr<ScopeImpl>> scopes_ ABSL_GUARDED_BY(lock_);
  ScopeSharedPtr default_scope_;
  std::list<std::reference_wrapper<Sink>> timer_sinks_;
  TagProducerPtr tag_producer_;
  StatsMatcherPtr stats_matcher_;
  HistogramSettingsConstPtr histogram_settings_;
  std::atomic<bool> threading_ever_initialized_{};
  std::atomic<bool> shutting_down_{};
  std::atomic<bool> merge_in_progress_{};
  OptRef<ThreadLocal::Instance> tls_;

  NullCounterImpl null_counter_;
  NullGaugeImpl null_gauge_;
  NullHistogramImpl null_histogram_;
  NullTextReadoutImpl null_text_readout_;

  mutable Thread::ThreadSynchronizer sync_;
  std::atomic<uint64_t> next_scope_id_{};
  uint64_t next_histogram_id_ ABSL_GUARDED_BY(hist_mutex_) = 0;

  StatNameSetPtr well_known_tags_;

  mutable Thread::MutexBasicLockable hist_mutex_;
  StatSet<ParentHistogramImpl> histogram_set_ ABSL_GUARDED_BY(hist_mutex_);
  StatSet<ParentHistogramImpl> sinked_histograms_ ABSL_GUARDED_BY(hist_mutex_);

  // Retain storage for deleted stats; these are no longer in maps because the
  // matcher-pattern was established after they were created. Since the stats
  // are held by reference in code that expects them to be there, we can't
  // actually delete the stats.
  //
  // It seems like it would be better to have each client that expects a stat
  // to exist to hold it as (e.g.) a CounterSharedPtr rather than a Counter&
  // but that would be fairly complex to change.
  std::vector<HistogramSharedPtr> deleted_histograms_ ABSL_GUARDED_BY(lock_);

  // Scope IDs and central cache entries that are queued for cross-thread release.
  // Because there can be a large number of scopes, all of which are released at once
  // (e.g. when a scope is deleted), it is more efficient to batch their cleanup,
  // which would otherwise entail a post() per scope per thread.
  std::vector<uint64_t> scopes_to_cleanup_ ABSL_GUARDED_BY(lock_);
  std::vector<CentralCacheEntrySharedPtr> central_cache_entries_to_cleanup_ ABSL_GUARDED_BY(lock_);

  // Histogram IDs that are queued for cross-thread release. Because there
  // can be a large number of histograms, all of which are released at once
  // (e.g. when a scope is deleted), it is likely more efficient to batch their
  // cleanup, which would otherwise entail a post() per histogram per thread.
  std::vector<uint64_t> histograms_to_cleanup_ ABSL_GUARDED_BY(hist_mutex_);
};

using ThreadLocalStoreImplPtr = std::unique_ptr<ThreadLocalStoreImpl>;

} // namespace Stats
} // namespace Envoy