Stats::Counter& makeCounter(Stats::Scope& scope, absl::string_view a, absl::string_view b) {
Stats::StatNameManagedStorage stat_name(absl::StrCat("overload.", name), scope.symbolTable());
new OverloadManagerImpl(dispatcher, stats_scope, slot_allocator, config, validation_visitor,
OverloadManagerImpl::OverloadManagerImpl(Event::Dispatcher& dispatcher, Stats::Scope& stats_scope,
// proactive and regular resource monitors in configuration API. But internally we will maintain
const auto result = loadshed_points_.try_emplace(point.name(), *std::move(load_shed_or_error));
// Start a new flush epoch. If all resource updates complete before this callback runs, the last
std::chrono::duration_cast<std::chrono::milliseconds>(now - time_resources_last_measured_);
// Accessor for the per-worker overload state stored in the thread-local slot.
// Dereferences tls_, so it must only be called after the slot has been set.
ThreadLocalOverloadState& OverloadManagerImpl::getThreadLocalOverloadState() {
  return *tls_;
}
std::for_each(callbacks_start, callbacks_end, [&](ActionToCallbackMap::value_type& cb_entry) {
// previous update callback is late, the logic in OverloadManager::Resource::update() will prevent
[updates = std::move(shared_updates)](OptRef<ThreadLocalOverloadStateImpl> overload_state) {