1
#pragma once
2

            
3
#include <atomic>
4
#include <chrono>
5
#include <cstdint>
6
#include <list>
7
#include <memory>
8
#include <string>
9

            
10
#include "envoy/stats/stats_matcher.h"
11
#include "envoy/stats/tag.h"
12
#include "envoy/thread_local/thread_local.h"
13

            
14
#include "source/common/common/hash.h"
15
#include "source/common/common/thread_synchronizer.h"
16
#include "source/common/stats/allocator.h"
17
#include "source/common/stats/histogram_impl.h"
18
#include "source/common/stats/null_counter.h"
19
#include "source/common/stats/null_gauge.h"
20
#include "source/common/stats/null_text_readout.h"
21
#include "source/common/stats/symbol_table.h"
22
#include "source/common/stats/utility.h"
23

            
24
#include "absl/container/flat_hash_map.h"
25
#include "circllhist.h"
26

            
27
namespace Envoy {
28
namespace Stats {
29

            
30
/**
31
 * A histogram that is stored in TLS and used to record values per thread. This holds two
32
 * histograms, one to collect the values and other as backup that is used for merge process. The
33
 * swap happens during the merge process.
34
 */
35
class ThreadLocalHistogramImpl : public HistogramImplHelper {
36
public:
37
  ThreadLocalHistogramImpl(StatName name, Histogram::Unit unit, StatName tag_extracted_name,
38
                           const StatNameTagVector& stat_name_tags, SymbolTable& symbol_table,
39
                           absl::optional<uint32_t> bins);
40
  ~ThreadLocalHistogramImpl() override;
41

            
42
  void merge(histogram_t* target);
43

            
44
  /**
45
   * Called in the beginning of merge process. Swaps the histogram used for collection so that we do
46
   * not have to lock the histogram in high throughput TLS writes.
47
   */
48
6283
  void beginMerge() {
49
    // This switches the current_active_ between 1 and 0.
50
6283
    ASSERT(std::this_thread::get_id() == created_thread_id_);
51
6283
    current_active_ = otherHistogramIndex();
52
6283
  }
53

            
54
  // Stats::Histogram
55
  Histogram::Unit unit() const override {
56
    // If at some point ThreadLocalHistogramImpl will hold a pointer to its parent we can just
57
    // return parent's unit here and not store it separately.
58
    return unit_;
59
  }
60
  void recordValue(uint64_t value) override;
61

            
62
  // Stats::Metric
63
  SymbolTable& symbolTable() final { return symbol_table_; }
64
3308
  bool used() const override { return used_; }
65
2
  void markUnused() override { used_ = false; }
66
  bool hidden() const override { return false; }
67

            
68
private:
69
  const Histogram::Unit unit_;
70
12555
  uint64_t otherHistogramIndex() const { return 1 - current_active_; }
71
  uint64_t current_active_{0};
72
  histogram_t* histograms_[2];
73
  std::atomic<bool> used_;
74
  const std::thread::id created_thread_id_;
75
  SymbolTable& symbol_table_;
76
};
77

            
78
// Ref-counted handle to a per-thread histogram; references are held both by the
// TlsCache::tls_histogram_cache_ map and by ParentHistogramImpl::tls_histograms_.
using TlsHistogramSharedPtr = RefcountPtr<ThreadLocalHistogramImpl>;

// Forward declaration: ParentHistogramImpl below holds a reference to its owning store.
class ThreadLocalStoreImpl;
81

            
82
/**
 * Log Linear Histogram implementation that is stored in the main thread.
 */
class ParentHistogramImpl : public MetricImpl<ParentHistogram> {
public:
  ParentHistogramImpl(StatName name, Histogram::Unit unit, ThreadLocalStoreImpl& parent,
                      StatName tag_extracted_name, const StatNameTagVector& stat_name_tags,
                      ConstSupportedBuckets& supported_buckets, absl::optional<uint32_t> bins,
                      uint64_t id);
  ~ParentHistogramImpl() override;

  // Registers a per-thread histogram; registered histograms are visited by merge().
  void addTlsHistogram(const TlsHistogramSharedPtr& hist_ptr);

  // Stats::Histogram
  Histogram::Unit unit() const override;
  void recordValue(uint64_t value) override;

  /**
   * This method is called during the main stats flush process for each of the histograms. It
   * iterates through the TLS histograms and collects the histogram data of all of them
   * into "interval_histogram". Then the collected "interval_histogram" is merged to a
   * "cumulative_histogram".
   */
  void merge() override;

  // Statistics computed over the last flush interval and over the process lifetime,
  // respectively.
  const HistogramStatistics& intervalStatistics() const override { return interval_statistics_; }
  const HistogramStatistics& cumulativeStatistics() const override {
    return cumulative_statistics_;
  }
  std::string quantileSummary() const override;
  std::string bucketSummary() const override;

  // Detailed bucket views of the cumulative and interval histograms.
  // NOTE(review): "detailedl" in the helper name is a typo; renaming it would also require
  // changing its out-of-line definition, so it is left as-is here.
  std::vector<Bucket> detailedTotalBuckets() const override {
    return detailedlBucketsHelper(*cumulative_histogram_);
  }
  std::vector<Bucket> detailedIntervalBuckets() const override {
    return detailedlBucketsHelper(*interval_histogram_);
  }
  uint64_t cumulativeCountLessThanOrEqualToValue(double value) const override;

  // Stats::Metric
  SymbolTable& symbolTable() override;
  bool used() const override;
  void markUnused() override;
  bool hidden() const override;

  // RefcountInterface
  void incRefCount() override;
  bool decRefCount() override;
  uint32_t use_count() const override { return ref_count_; }

  // Indicates that the ThreadLocalStore is shutting down, so no need to clear its histogram_set_.
  void setShuttingDown(bool shutting_down) { shutting_down_ = shutting_down; }
  bool shuttingDown() const { return shutting_down_; }

  // The number of bins requested at construction, if any.
  absl::optional<uint32_t> bins() const { return bins_; }

private:
  bool usedLockHeld() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(merge_lock_);
  std::vector<Stats::ParentHistogram::Bucket>
  detailedlBucketsHelper(const histogram_t& histogram) const;

  const Histogram::Unit unit_;
  const absl::optional<uint32_t> bins_;
  ThreadLocalStoreImpl& thread_local_store_;

  // Raw circllhist handles. NOTE(review): not initialized here; presumably allocated in the
  // constructor and freed in the destructor — confirm in the .cc file.
  histogram_t* interval_histogram_;
  histogram_t* cumulative_histogram_;

  HistogramStatisticsImpl interval_statistics_;
  HistogramStatisticsImpl cumulative_statistics_;

  // Protects tls_histograms_ (see ABSL_GUARDED_BY below).
  mutable Thread::MutexBasicLockable merge_lock_;
  std::list<TlsHistogramSharedPtr> tls_histograms_ ABSL_GUARDED_BY(merge_lock_);

  // NOTE(review): appears to track whether a merge has ever occurred — confirm in the .cc file.
  bool merged_{false};
  std::atomic<bool> shutting_down_{false};
  std::atomic<uint32_t> ref_count_{0};
  const uint64_t id_; // Index into TlsCache::tls_histogram_cache_.
};
156

            
157
// Ref-counted handle to a parent histogram, as stored in CentralCacheEntry::histograms_.
using ParentHistogramImplSharedPtr = RefcountPtr<ParentHistogramImpl>;
158

            
159
/**
 * Store implementation with thread local caching. For design details see
 * https://github.com/envoyproxy/envoy/blob/main/source/docs/stats.md
 */
class ThreadLocalStoreImpl : Logger::Loggable<Logger::Id::stats>, public StoreRoot {
public:
  // Names of sync points used with sync() to control thread interleaving in tests.
  static const char DeleteScopeSync[];
  static const char IterateScopeSync[];
  static const char MainDispatcherCleanupSync[];

  ThreadLocalStoreImpl(Allocator& alloc);
  ~ThreadLocalStoreImpl() override;

  // Stats::Store
  NullCounterImpl& nullCounter() override { return null_counter_; }
  NullGaugeImpl& nullGauge() override { return null_gauge_; }
  ScopeSharedPtr rootScope() override { return default_scope_; }
  ConstScopeSharedPtr constRootScope() const override { return default_scope_; }
  const SymbolTable& constSymbolTable() const override { return alloc_.constSymbolTable(); }
  SymbolTable& symbolTable() override { return alloc_.symbolTable(); }

  // Store-wide iteration; delegates to iterHelper, which visits every scope with lock_ held.
  bool iterate(const IterateFn<Counter>& fn) const override { return iterHelper(fn); }
  bool iterate(const IterateFn<Gauge>& fn) const override { return iterHelper(fn); }
  bool iterate(const IterateFn<Histogram>& fn) const override { return iterHelper(fn); }
  bool iterate(const IterateFn<TextReadout>& fn) const override { return iterHelper(fn); }

  std::vector<CounterSharedPtr> counters() const override;
  std::vector<GaugeSharedPtr> gauges() const override;
  std::vector<TextReadoutSharedPtr> textReadouts() const override;
  std::vector<ParentHistogramSharedPtr> histograms() const override;

  void forEachCounter(SizeFn f_size, StatFn<Counter> f_stat) const override;
  void forEachGauge(SizeFn f_size, StatFn<Gauge> f_stat) const override;
  void forEachTextReadout(SizeFn f_size, StatFn<TextReadout> f_stat) const override;
  void forEachHistogram(SizeFn f_size, StatFn<ParentHistogram> f_stat) const override;
  void forEachScope(SizeFn f_size, StatFn<const Scope> f_stat) const override;

  void evictUnused() override;

  // Stats::StoreRoot
  void addSink(Sink& sink) override { timer_sinks_.push_back(sink); }
  void setTagProducer(TagProducerPtr&& tag_producer) override {
    tag_producer_ = std::move(tag_producer);
  }
  void setStatsMatcher(StatsMatcherPtr&& stats_matcher) override;
  void setHistogramSettings(HistogramSettingsConstPtr&& histogram_settings) override;
  void initializeThreading(Event::Dispatcher& main_thread_dispatcher,
                           ThreadLocal::Instance& tls) override;
  void shutdownThreading() override;
  void mergeHistograms(PostMergeCb merge_cb) override;
  void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override;

  // Obtains a TLS histogram for `parent`; `id` is the parent's unique index, which keys
  // TlsCache::tls_histogram_cache_.
  Histogram& tlsHistogram(ParentHistogramImpl& parent, uint64_t id);

  void forEachSinkedCounter(SizeFn f_size, StatFn<Counter> f_stat) const override;
  void forEachSinkedGauge(SizeFn f_size, StatFn<Gauge> f_stat) const override;
  void forEachSinkedTextReadout(SizeFn f_size, StatFn<TextReadout> f_stat) const override;
  void forEachSinkedHistogram(SizeFn f_size, StatFn<ParentHistogram> f_stat) const override;

  void setSinkPredicates(std::unique_ptr<SinkPredicates>&& sink_predicates) override;
  OptRef<SinkPredicates> sinkPredicates() override { return sink_predicates_; }

  /**
   * @return a thread synchronizer object used for controlling thread behavior in tests.
   */
  Thread::ThreadSynchronizer& sync() { return sync_; }

  /**
   * @return a set of well known tag names; used to reduce symbol table churn.
   */
  const StatNameSet& wellKnownTags() const { return *well_known_tags_; }

  // Histogram reference-counting and cross-thread release helpers; see the .cc file.
  bool decHistogramRefCount(ParentHistogramImpl& histogram, std::atomic<uint32_t>& ref_count);
  void releaseHistogramCrossThread(uint64_t histogram_id);

  const TagProducer& tagProducer() const { return *tag_producer_; }
  void extractAndAppendTags(StatName name, StatNamePool& pool, StatNameTagVector& tags) override;
  void extractAndAppendTags(absl::string_view name, StatNamePool& pool,
                            StatNameTagVector& tags) override;
  const TagVector& fixedTags() override { return tag_producer_->fixedTags(); };

  // NOTE(review): presumably creates the *_overflow_ counters declared at the bottom of this
  // class when scope stat limits are configured — confirm against the .cc file.
  void ensureOverflowStats(const ScopeStatsLimitSettings& limits);

private:
  friend class ThreadLocalStoreTestingPeer;

  // Map from stat name to a non-owning reference; used for the TLS caches below.
  template <class Stat> using StatRefMap = StatNameHashMap<std::reference_wrapper<Stat>>;

  // Per-scope, per-thread cache entry.
  struct TlsCacheEntry {
    // The counters, gauges and text readouts in the TLS cache are stored by reference,
    // depending on the CentralCache for backing store. This avoids a potential
    // contention-storm when destructing a scope, as the counter/gauge ref-count
    // decrement in allocator.cc needs to hold the single allocator mutex.
    StatRefMap<Counter> counters_;
    StatRefMap<Gauge> gauges_;
    StatRefMap<TextReadout> text_readouts_;

    // Histograms also require holding a mutex while decrementing reference
    // counts. The only difference from other stats is that the histogram_set_
    // lives in the ThreadLocalStore object, rather than in
    // Allocator. Histograms are removed from that set when all scopes
    // referencing the histogram are dropped. Each ParentHistogram has a unique
    // index, which is not re-used during the process lifetime.
    //
    // There is also a tls_histogram_cache_ in the TlsCache object, which is
    // not tied to a scope. It maps from parent histogram's unique index to
    // a TlsHistogram. This enables continuity between same-named histograms
    // in same-named scopes. That scenario is common when re-creating scopes in
    // response to xDS.
    StatNameHashMap<ParentHistogramSharedPtr> parent_histograms_;

    // We keep a TLS cache of rejected stat names. This costs memory, but
    // reduces runtime overhead running the matcher. Moreover, once symbol
    // tables are integrated, rejection will need the fully elaborated string,
    // and we need to take a global symbol-table lock to run. We keep this
    // StatName set here in the TLS cache to avoid taking a lock to compute
    // rejection.
    StatNameHashSet rejected_stats_;
  };

  // Backing store for all stats created under one scope. Ref-counted so it can outlive the
  // scope while TLS caches still reference its maps.
  struct CentralCacheEntry : public RefcountHelper {
    explicit CentralCacheEntry(SymbolTable& symbol_table) : symbol_table_(symbol_table) {}
    ~CentralCacheEntry();

    StatNameHashMap<CounterSharedPtr> counters_;
    StatNameHashMap<GaugeSharedPtr> gauges_;
    StatNameHashMap<ParentHistogramImplSharedPtr> histograms_;
    StatNameHashMap<TextReadoutSharedPtr> text_readouts_;
    StatNameStorageSet rejected_stats_;
    SymbolTable& symbol_table_;
  };
  using CentralCacheEntrySharedPtr = RefcountPtr<CentralCacheEntry>;

  struct ScopeImpl : public Scope {
    ScopeImpl(ThreadLocalStoreImpl& parent, StatName prefix, bool evictable,
              const ScopeStatsLimitSettings& limits = {},
              StatsMatcherSharedPtr scope_matcher = nullptr);
    ~ScopeImpl() override;

    // Stats::Scope
    Counter& counterFromStatNameWithTags(const StatName& name,
                                         StatNameTagVectorOptConstRef tags) override;
    Gauge& gaugeFromStatNameWithTags(const StatName& name, StatNameTagVectorOptConstRef tags,
                                     Gauge::ImportMode import_mode) override;
    Histogram& histogramFromStatNameWithTags(const StatName& name,
                                             StatNameTagVectorOptConstRef tags,
                                             Histogram::Unit unit) override;
    TextReadout& textReadoutFromStatNameWithTags(const StatName& name,
                                                 StatNameTagVectorOptConstRef tags) override;
    ScopeSharedPtr createScope(const std::string& name, bool evictable = false,
                               const ScopeStatsLimitSettings& limits = {},
                               StatsMatcherSharedPtr matcher = nullptr) override;
    ScopeSharedPtr scopeFromStatName(StatName name, bool evictable = false,
                                     const ScopeStatsLimitSettings& limits = {},
                                     StatsMatcherSharedPtr matcher = nullptr) override;
    const SymbolTable& constSymbolTable() const final { return parent_.constSymbolTable(); }
    SymbolTable& symbolTable() final { return parent_.symbolTable(); }

    // String-based lookups: encode the name via the symbol table, then delegate to the
    // StatName-based overloads.
    Counter& counterFromString(const std::string& name) override {
      StatNameManagedStorage storage(name, symbolTable());
      return counterFromStatName(storage.statName());
    }

    Gauge& gaugeFromString(const std::string& name, Gauge::ImportMode import_mode) override {
      StatNameManagedStorage storage(name, symbolTable());
      return gaugeFromStatName(storage.statName(), import_mode);
    }
    Histogram& histogramFromString(const std::string& name, Histogram::Unit unit) override {
      StatNameManagedStorage storage(name, symbolTable());
      return histogramFromStatName(storage.statName(), unit);
    }
    TextReadout& textReadoutFromString(const std::string& name) override {
      StatNameManagedStorage storage(name, symbolTable());
      return textReadoutFromStatName(storage.statName());
    }

    // Applies fn to every stat in map, stopping early if fn returns false.
    // @return true if the iteration completed without fn ever returning false.
    template <class StatMap, class StatFn> bool iterHelper(StatFn fn, const StatMap& map) const {
      for (auto& iter : map) {
        if (!fn(iter.second)) {
          return false;
        }
      }
      return true;
    }

    // Public iterate overloads: acquire the store lock, then defer to the lock-held variants.
    bool iterate(const IterateFn<Counter>& fn) const override {
      Thread::LockGuard lock(parent_.lock_);
      return iterateLockHeld(fn);
    }
    bool iterate(const IterateFn<Gauge>& fn) const override {
      Thread::LockGuard lock(parent_.lock_);
      return iterateLockHeld(fn);
    }
    bool iterate(const IterateFn<Histogram>& fn) const override {
      Thread::LockGuard lock(parent_.lock_);
      return iterateLockHeld(fn);
    }
    bool iterate(const IterateFn<TextReadout>& fn) const override {
      Thread::LockGuard lock(parent_.lock_);
      return iterateLockHeld(fn);
    }

    // Lock-held variants, used when the caller (e.g. the store-level iterate) already holds
    // parent_.lock_; they iterate the central-cache maps directly.
    bool iterateLockHeld(const IterateFn<Counter>& fn) const
        ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_.lock_) {
      return iterHelper(fn, centralCacheLockHeld()->counters_);
    }
    bool iterateLockHeld(const IterateFn<Gauge>& fn) const
        ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_.lock_) {
      return iterHelper(fn, centralCacheLockHeld()->gauges_);
    }
    bool iterateLockHeld(const IterateFn<Histogram>& fn) const
        ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_.lock_) {
      return iterHelper(fn, centralCacheLockHeld()->histograms_);
    }
    bool iterateLockHeld(const IterateFn<TextReadout>& fn) const
        ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_.lock_) {
      return iterHelper(fn, centralCacheLockHeld()->text_readouts_);
    }
    ThreadLocalStoreImpl& store() override { return parent_; }
    const ThreadLocalStoreImpl& constStore() const override { return parent_; }

    // NOTE: The find methods assume that `name` is fully-qualified.
    // Implementations will not add the scope prefix.
    CounterOptConstRef findCounter(StatName name) const override;
    GaugeOptConstRef findGauge(StatName name) const override;
    HistogramOptConstRef findHistogram(StatName name) const override;
    TextReadoutOptConstRef findTextReadout(StatName name) const override;

    HistogramOptConstRef findHistogramLockHeld(StatName name) const;

    // Factory signature used by safeMakeStat to create a stat that is not in any cache.
    template <class StatType>
    using MakeStatFn = std::function<RefcountPtr<StatType>(
        Allocator&, StatName name, StatName tag_extracted_name, const StatNameTagVector& tags)>;

    /**
     * Makes a stat either by looking it up in the central cache,
     * generating it from the parent allocator, or as a last
     * resort, creating it with the heap allocator.
     *
     * @param full_stat_name the full name of the stat with appended tags.
     * @param name_no_tags the full name of the stat (not tag extracted) without appended tags.
     * @param stat_name_tags the tags provided at creation time. If empty, tag extraction occurs.
     * @param central_cache_map a map from name to the desired object in the central cache.
     * @param fast_reject_result the result of the fast-path matcher check for this name.
     * @param central_rejected_stats the central set of names already rejected by the matcher.
     * @param make_stat a function to generate the stat object, called if it's not in cache.
     * @param tls_cache possibly null reference to a cache entry for this stat, which will be
     *     used if non-empty, or filled in if empty (and non-null).
     * @param tls_rejected_stats possibly null TLS-cached set of rejected stat names.
     * @param null_stat the null stat implementation returned for rejected names.
     */
    template <class StatType>
    StatType& safeMakeStat(StatName full_stat_name, StatName name_no_tags,
                           const absl::optional<StatNameTagVector>& stat_name_tags,
                           StatNameHashMap<RefcountPtr<StatType>>& central_cache_map,
                           StatsMatcher::FastResult fast_reject_result,
                           StatNameStorageSet& central_rejected_stats,
                           MakeStatFn<StatType> make_stat, StatRefMap<StatType>* tls_cache,
                           StatNameHashSet* tls_rejected_stats, StatType& null_stat);

    template <class StatType>
    using StatTypeOptConstRef = absl::optional<std::reference_wrapper<const StatType>>;

    /**
     * Looks up an existing stat, populating the local cache if necessary. Does
     * not check the TLS or rejects, and does not create a stat if it does not
     * exist.
     *
     * @param name the full name of the stat (not tag extracted).
     * @param central_cache_map a map from name to the desired object in the central cache.
     * @return a reference to the stat, if it exists.
     */
    template <class StatType>
    StatTypeOptConstRef<StatType>
    findStatLockHeld(StatName name,
                     StatNameHashMap<RefcountPtr<StatType>>& central_cache_map) const {
      auto iter = central_cache_map.find(name);
      if (iter == central_cache_map.end()) {
        return absl::nullopt;
      }

      return std::cref(*iter->second);
    }

    StatName prefix() const override { return prefix_.statName(); }

    // Returns the central cache, asserting that the parent lock is held.
    //
    // When a ThreadLocalStore method takes lock_ and then accesses
    // scope->central_cache_, the analysis system cannot understand that the
    // scope's parent_.lock_ is held, so we assert that here.
    const CentralCacheEntrySharedPtr& centralCacheLockHeld() const
        ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_.lock_) {
      return central_cache_;
    }

    // Returns a mutable reference to the central cache, bypassing thread analysis.
    CentralCacheEntrySharedPtr&
    centralCacheMutableNoThreadAnalysis() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
      return central_cache_;
    }

    // Returns the central cache, bypassing thread analysis.
    //
    // This is used only when passing references to maps held in the central
    // cache to safeMakeStat, which takes the lock only if those maps are
    // actually referenced, due to the lookup missing the TLS cache.
    const CentralCacheEntrySharedPtr&
    centralCacheNoThreadAnalysis() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
      return central_cache_;
    }

    // Returns the effective matcher for this scope: scope-level if set, else store-level.
    const StatsMatcher& effectiveMatcher() const {
      return scope_matcher_ ? *scope_matcher_ : *parent_.stats_matcher_;
    }
    bool scopeRejectsAll() const { return effectiveMatcher().rejectsAll(); }
    StatsMatcher::FastResult scopeFastRejects(StatName name) const {
      return effectiveMatcher().fastRejects(name);
    }

    const uint64_t scope_id_;
    ThreadLocalStoreImpl& parent_;
    const bool evictable_{};

    const ScopeStatsLimitSettings limits_;
    StatsMatcherSharedPtr scope_matcher_;

  private:
    StatNameStorage prefix_;
    mutable CentralCacheEntrySharedPtr central_cache_ ABSL_GUARDED_BY(parent_.lock_);
  };

  // Per-worker-thread state, held in a ThreadLocal slot.
  struct TlsCache : public ThreadLocal::ThreadLocalObject {
    TlsCacheEntry& insertScope(uint64_t scope_id);
    void eraseScopes(const std::vector<uint64_t>& scope_ids);
    void eraseHistograms(const std::vector<uint64_t>& histograms);

    // The TLS scope cache is keyed by scope ID. This is used to avoid complex circular references
    // during scope destruction. An ID is required vs. using the address of the scope pointer
    // because it's possible that the memory allocator will recycle the scope pointer immediately
    // upon destruction, leading to a situation in which a new scope with the same address is used
    // to reference the cache, and then subsequently cache flushed, leaving nothing in the central
    // store. See the overview for more information. This complexity is required for lockless
    // operation in the fast path.
    absl::flat_hash_map<uint64_t, TlsCacheEntry> scope_cache_;

    // Maps from histogram ID (monotonically increasing) to a TLS histogram.
    absl::flat_hash_map<uint64_t, TlsHistogramSharedPtr> tls_histogram_cache_;
  };

  using ScopeImplSharedPtr = std::shared_ptr<ScopeImpl>;

  /**
   * assertLocked exists to help the compiler figure out that lock_ and scope->parent_.lock_ is
   * actually the same lock known under two different names. This function requires lock_ to
   * be held when it's called and at the same time it is annotated as if it checks in runtime
   * that scope->parent_.lock_ is held. It does not actually perform any runtime checks, because
   * those aren't needed since we know that scope->parent_ refers to ThreadLocalStoreImpl and
   * therefore scope->parent_.lock_ is the same as lock_.
   */
  void assertLocked(const ScopeImpl& scope) const ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_)
      ABSL_ASSERT_EXCLUSIVE_LOCK(scope.parent_.lock_) {
    UNREFERENCED_PARAMETER(scope);
  }

  /**
   * Calls fn_lock_held for every scope, with lock_ held. This avoids iterate/destruct
   * races for scopes.
   *
   * @param fn_lock_held function to be called, with lock_ held, on every scope, until
   *   fn_lock_held() returns false.
   * @return true if the iteration completed with fn_lock_held never returning false.
   */
  bool iterateScopes(const std::function<bool(const ScopeImplSharedPtr&)> fn_lock_held) const {
    Thread::LockGuard lock(lock_);
    return iterateScopesLockHeld(fn_lock_held);
  }

  bool iterateScopesLockHeld(const std::function<bool(const ScopeImplSharedPtr&)> fn) const
      ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // The Store versions of iterate cover all the scopes in the store.
  template <class StatFn> bool iterHelper(StatFn fn) const {
    return iterateScopes([this, fn](const ScopeImplSharedPtr& scope)
                             ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) -> bool {
                               assertLocked(*scope);
                               return scope->iterateLockHeld(fn);
                             });
  }

  std::string getTagsForName(const std::string& name, TagVector& tags) const;
  void clearScopesFromCaches();
  void clearHistogramsFromCaches();
  void releaseScopeCrossThread(ScopeImpl* scope);
  void mergeInternal(PostMergeCb merge_cb);
  bool slowRejects(StatsMatcher::FastResult fast_reject_result, StatName name) const;
  bool rejects(StatName name) const { return stats_matcher_->rejects(name); }
  StatsMatcher::FastResult fastRejects(StatName name) const;
  bool rejectsAll() const { return stats_matcher_->rejectsAll(); }
  template <class StatMapClass, class StatListClass>
  void removeRejectedStats(StatMapClass& map, StatListClass& list);
  template <class StatSharedPtr>
  void removeRejectedStats(StatNameHashMap<StatSharedPtr>& map,
                           std::function<void(const StatSharedPtr&)> f_deletion);
  bool checkAndRememberRejection(StatName name, StatsMatcher::FastResult fast_reject_result,
                                 StatNameStorageSet& central_rejected_stats,
                                 StatNameHashSet* tls_rejected_stats, const StatsMatcher& matcher);
  // Dereferences the calling thread's TLS cache; the slot must be populated.
  TlsCache& tlsCache() { return **tls_cache_; }
  void addScope(std::shared_ptr<ScopeImpl>& new_scope);

  OptRef<SinkPredicates> sink_predicates_;
  Allocator& alloc_;
  Event::Dispatcher* main_thread_dispatcher_{};
  using TlsCacheSlot = ThreadLocal::TypedSlotPtr<TlsCache>;
  ThreadLocal::TypedSlotPtr<TlsCache> tls_cache_;
  mutable Thread::MutexBasicLockable lock_;
  // Weak references so scope destruction elsewhere is observable here; guarded by lock_.
  absl::flat_hash_map<ScopeImpl*, std::weak_ptr<ScopeImpl>> scopes_ ABSL_GUARDED_BY(lock_);
  ScopeSharedPtr default_scope_;
  std::vector<std::reference_wrapper<Sink>> timer_sinks_;
  TagProducerPtr tag_producer_;
  StatsMatcherPtr stats_matcher_;
  HistogramSettingsConstPtr histogram_settings_;
  std::atomic<bool> threading_ever_initialized_{false};
  std::atomic<bool> shutting_down_{false};
  std::atomic<bool> merge_in_progress_{false};
  OptRef<ThreadLocal::Instance> tls_;

  // Null sinks returned for rejected stats.
  NullCounterImpl null_counter_;
  NullGaugeImpl null_gauge_;
  NullHistogramImpl null_histogram_;
  NullTextReadoutImpl null_text_readout_;

  mutable Thread::ThreadSynchronizer sync_;
  std::atomic<uint64_t> next_scope_id_{0};
  uint64_t next_histogram_id_ ABSL_GUARDED_BY(hist_mutex_) = 0;

  StatNameSetPtr well_known_tags_;

  mutable Thread::MutexBasicLockable hist_mutex_;
  StatSet<ParentHistogramImpl> histogram_set_ ABSL_GUARDED_BY(hist_mutex_);
  StatSet<ParentHistogramImpl> sinked_histograms_ ABSL_GUARDED_BY(hist_mutex_);

  // Retain storage for deleted stats; these are no longer in maps because the
  // matcher-pattern was established after they were created. Since the stats
  // are held by reference in code that expects them to be there, we can't
  // actually delete the stats.
  //
  // It seems like it would be better to have each client that expects a stat
  // to exist to hold it as (e.g.) a CounterSharedPtr rather than a Counter&
  // but that would be fairly complex to change.
  std::vector<HistogramSharedPtr> deleted_histograms_ ABSL_GUARDED_BY(lock_);

  // Scope IDs and central cache entries that are queued for cross-scope release.
  // Because there can be a large number of scopes, all of which are released at once,
  // (e.g. when a scope is deleted), it is more efficient to batch their cleanup,
  // which would otherwise entail a post() per scope per thread.
  std::vector<uint64_t> scopes_to_cleanup_ ABSL_GUARDED_BY(lock_);
  std::vector<CentralCacheEntrySharedPtr> central_cache_entries_to_cleanup_ ABSL_GUARDED_BY(lock_);

  // Histogram IDs that are queued for cross-scope release. Because there
  // can be a large number of histograms, all of which are released at once,
  // (e.g. when a scope is deleted), it is likely more efficient to batch their
  // cleanup, which would otherwise entail a post() per histogram per thread.
  std::vector<uint64_t> histograms_to_cleanup_ ABSL_GUARDED_BY(hist_mutex_);

  // NOTE(review): presumably created by ensureOverflowStats() when scope stat limits are in
  // effect — confirm against the .cc file.
  CounterSharedPtr counters_overflow_;
  CounterSharedPtr gauges_overflow_;
  CounterSharedPtr histograms_overflow_;
};
623

            
624
// Owning pointer alias for the store; used by code that constructs the root store.
using ThreadLocalStoreImplPtr = std::unique_ptr<ThreadLocalStoreImpl>;
625

            
626
} // namespace Stats
627
} // namespace Envoy