Line data Source code
1 : #include "source/extensions/load_balancing_policies/subset/subset_lb.h"
2 :
3 : #include <memory>
4 :
5 : #include "envoy/common/optref.h"
6 : #include "envoy/config/cluster/v3/cluster.pb.h"
7 : #include "envoy/config/core/v3/base.pb.h"
8 : #include "envoy/runtime/runtime.h"
9 :
10 : #include "source/common/common/assert.h"
11 : #include "source/common/config/metadata.h"
12 : #include "source/common/config/well_known_names.h"
13 : #include "source/common/protobuf/utility.h"
14 : #include "source/common/upstream/load_balancer_impl.h"
15 : #include "source/extensions/load_balancing_policies/maglev/maglev_lb.h"
16 : #include "source/extensions/load_balancing_policies/ring_hash/ring_hash_lb.h"
17 :
18 : #include "absl/container/node_hash_set.h"
19 :
20 : namespace Envoy {
21 : namespace Upstream {
22 :
23 : namespace {
24 :
25 : /**
26 : * Iterates all the selectors and finds the first one that match_criteria contains all the keys
27 : * of the selector. Returns nullptr if no selector matches, otherwise returns sub match criteria
28 : * that contains only the keys of the selector.
29 : */
30 : Router::MetadataMatchCriteriaConstPtr
31 : filterCriteriaBySelectors(const std::vector<SubsetSelectorPtr>& subset_selectors,
32 0 : const Router::MetadataMatchCriteria* match_criteria) {
33 0 : if (match_criteria != nullptr) {
34 0 : for (const auto& selector : subset_selectors) {
35 0 : const auto& selector_keys = selector->selectorKeys();
36 0 : auto sub_match_criteria = match_criteria->filterMatchCriteria(selector_keys);
37 0 : if (sub_match_criteria != nullptr &&
38 0 : sub_match_criteria->metadataMatchCriteria().size() == selector_keys.size()) {
39 0 : return sub_match_criteria;
40 0 : }
41 0 : }
42 0 : }
43 0 : return nullptr;
44 0 : }
45 :
46 : } // namespace
47 :
48 : using HostPredicate = std::function<bool(const Host&)>;
49 :
50 : LegacyChildLoadBalancerCreatorImpl::LegacyChildLoadBalancerCreatorImpl(
51 : LoadBalancerType lb_type,
52 : OptRef<const envoy::config::cluster::v3::Cluster::RingHashLbConfig> lb_ring_hash_config,
53 : OptRef<const envoy::config::cluster::v3::Cluster::MaglevLbConfig> lb_maglev_config,
54 : OptRef<const envoy::config::cluster::v3::Cluster::RoundRobinLbConfig> round_robin_config,
55 : OptRef<const envoy::config::cluster::v3::Cluster::LeastRequestLbConfig> least_request_config,
56 : const envoy::config::cluster::v3::Cluster::CommonLbConfig& common_config)
57 : : lb_type_(lb_type),
58 : lb_ring_hash_config_(
59 : lb_ring_hash_config.has_value()
60 : ? std::make_unique<const envoy::config::cluster::v3::Cluster::RingHashLbConfig>(
61 : lb_ring_hash_config.ref())
62 : : nullptr),
63 : lb_maglev_config_(
64 : lb_maglev_config.has_value()
65 : ? std::make_unique<const envoy::config::cluster::v3::Cluster::MaglevLbConfig>(
66 : lb_maglev_config.ref())
67 : : nullptr),
68 : round_robin_config_(
69 : round_robin_config.has_value()
70 : ? std::make_unique<const envoy::config::cluster::v3::Cluster::RoundRobinLbConfig>(
71 : round_robin_config.ref())
72 : : nullptr),
73 : least_request_config_(
74 : least_request_config.has_value()
75 : ? std::make_unique<const envoy::config::cluster::v3::Cluster::LeastRequestLbConfig>(
76 : least_request_config.ref())
77 : : nullptr),
78 0 : common_config_(common_config) {}
79 :
// Instantiates the child load balancer matching the configured legacy lb_type_. Exactly one
// element of the returned pair is set: thread-aware LBs (ring hash, Maglev) are returned in
// .first, worker-local LBs (least request, random, round robin) in .second.
std::pair<Upstream::ThreadAwareLoadBalancerPtr, Upstream::LoadBalancerPtr>
LegacyChildLoadBalancerCreatorImpl::createLoadBalancer(
    const Upstream::PrioritySet& child_priority_set,
    const Upstream::PrioritySet* local_priority_set, ClusterLbStats& stats, Stats::Scope& scope,
    Runtime::Loader& runtime, Random::RandomGenerator& random, TimeSource& time_source) {
  switch (lb_type_) {
  case Upstream::LoadBalancerType::LeastRequest: {
    Upstream::LoadBalancerPtr lb = std::make_unique<Upstream::LeastRequestLoadBalancer>(
        child_priority_set, local_priority_set, stats, runtime, random, common_config_,
        lbLeastRequestConfig(), time_source);
    return {nullptr, std::move(lb)};
  }
  case Upstream::LoadBalancerType::Random: {
    Upstream::LoadBalancerPtr lb = std::make_unique<Upstream::RandomLoadBalancer>(
        child_priority_set, local_priority_set, stats, runtime, random, common_config_);
    return {nullptr, std::move(lb)};
  }
  case Upstream::LoadBalancerType::RoundRobin: {
    Upstream::LoadBalancerPtr lb = std::make_unique<Upstream::RoundRobinLoadBalancer>(
        child_priority_set, local_priority_set, stats, runtime, random, common_config_,
        lbRoundRobinConfig(), time_source);
    return {nullptr, std::move(lb)};
  }
  case Upstream::LoadBalancerType::RingHash: {
    // TODO(mattklein123): The ring hash LB is thread aware, but currently the subset LB is not.
    // We should make the subset LB thread aware since the calculations are costly, and then we
    // can also use a thread aware sub-LB properly. The following works fine but is not optimal.
    Upstream::ThreadAwareLoadBalancerPtr lb = std::make_unique<Upstream::RingHashLoadBalancer>(
        child_priority_set, stats, scope, runtime, random, lbRingHashConfig(), common_config_);
    return {std::move(lb), nullptr};
  }
  case Upstream::LoadBalancerType::Maglev: {
    // TODO(mattklein123): The Maglev LB is thread aware, but currently the subset LB is not.
    // We should make the subset LB thread aware since the calculations are costly, and then we
    // can also use a thread aware sub-LB properly. The following works fine but is not optimal.

    Upstream::ThreadAwareLoadBalancerPtr lb = std::make_unique<Upstream::MaglevLoadBalancer>(
        child_priority_set, stats, scope, runtime, random, lbMaglevConfig(), common_config_);
    return {std::move(lb), nullptr};
  }
  case Upstream::LoadBalancerType::OriginalDst:
  case Upstream::LoadBalancerType::ClusterProvided:
  case Upstream::LoadBalancerType::LoadBalancingPolicyConfig:
    // These load balancer types can only be created when there is no subset configuration.
    PANIC("not implemented");
  }
  // Unreachable: the switch above covers every LoadBalancerType value.
  return {nullptr, nullptr};
}
128 :
// Resolves the child LB policy from the subset config's embedded policy list. Policies are
// tried in list order; the first one with a registered TypedLoadBalancerFactory wins and its
// typed config is translated and validated. Throws EnvoyException if no listed policy has a
// registered factory.
SubsetLoadBalancerConfig::SubsetLoadBalancerConfig(
    const SubsetLoadbalancingPolicyProto& subset_config,
    ProtobufMessage::ValidationVisitor& visitor)
    : subset_info_(subset_config) {

  // Names of policies we had to skip, kept only for the error message below.
  absl::InlinedVector<absl::string_view, 4> missing_policies;

  for (const auto& policy : subset_config.subset_lb_policy().policies()) {
    // is_optional=true: an unknown extension yields nullptr instead of throwing, so we can
    // fall through to the next policy in the list.
    auto* factory = Config::Utility::getAndCheckFactory<Upstream::TypedLoadBalancerFactory>(
        policy.typed_extension_config(), /*is_optional=*/true);

    if (factory != nullptr) {
      // Load and validate the configuration.
      auto sub_lb_proto_message = factory->createEmptyConfigProto();
      Config::Utility::translateOpaqueConfig(policy.typed_extension_config().typed_config(),
                                             visitor, *sub_lb_proto_message);

      sub_load_balancer_config_ = factory->loadConfig(*sub_lb_proto_message, visitor);
      sub_load_balancer_factory_ = factory;
      break;
    }

    missing_policies.push_back(policy.typed_extension_config().name());
  }

  if (sub_load_balancer_factory_ == nullptr) {
    throw EnvoyException(fmt::format("cluster: didn't find a registered load balancer factory "
                                     "implementation for subset lb with names from [{}]",
                                     absl::StrJoin(missing_policies, ", ")));
  }
}
160 :
// Builds the subset LB: snapshots the subset configuration, eagerly creates the fallback and
// panic-mode subsets the policy requires, builds the per-selector fallback lookup structure,
// populates all subsets from the current hosts, and registers for future priority updates.
SubsetLoadBalancer::SubsetLoadBalancer(const LoadBalancerSubsetInfo& subsets,
                                       ChildLoadBalancerCreatorPtr child_lb,
                                       const PrioritySet& priority_set,
                                       const PrioritySet* local_priority_set, ClusterLbStats& stats,
                                       Stats::Scope& scope, Runtime::Loader& runtime,
                                       Random::RandomGenerator& random, TimeSource& time_source)
    : stats_(stats), scope_(scope), runtime_(runtime), random_(random), time_source_(time_source),
      fallback_policy_(subsets.fallbackPolicy()),
      metadata_fallback_policy_(subsets.metadataFallbackPolicy()),
      default_subset_metadata_(subsets.defaultSubset().fields().begin(),
                               subsets.defaultSubset().fields().end()),
      subset_selectors_(subsets.subsetSelectors()), original_priority_set_(priority_set),
      original_local_priority_set_(local_priority_set), child_lb_creator_(std::move(child_lb)),
      locality_weight_aware_(subsets.localityWeightAware()),
      scale_locality_weight_(subsets.scaleLocalityWeight()), list_as_any_(subsets.listAsAny()),
      allow_redundant_keys_(subsets.allowRedundantKeys()) {
  ASSERT(subsets.isEnabled());

  // Pre-create the subset backing the configured cluster-wide fallback policy so fallback
  // picks never hit an uninitialized subset.
  if (fallback_policy_ != envoy::config::cluster::v3::Cluster::LbSubsetConfig::NO_FALLBACK) {
    if (fallback_policy_ == envoy::config::cluster::v3::Cluster::LbSubsetConfig::ANY_ENDPOINT) {
      ENVOY_LOG(debug, "subset lb: creating any-endpoint fallback load balancer");
      initSubsetAnyOnce();
      fallback_subset_ = subset_any_;
    } else {
      ENVOY_LOG(debug, "subset lb: creating fallback load balancer for {}",
                describeMetadata(default_subset_metadata_));
      initSubsetDefaultOnce();
      fallback_subset_ = subset_default_;
    }
  }

  // Panic mode reuses (aliases) the "any endpoint" subset.
  if (subsets.panicModeAny()) {
    initSubsetAnyOnce();
    panic_mode_subset_ = subset_any_;
  }

  initSubsetSelectorMap();

  // Create filtered default subset (if necessary) and other subsets based on current hosts.
  refreshSubsets();

  // Configure future updates.
  original_priority_set_callback_handle_ = priority_set.addPriorityUpdateCb(
      [this](uint32_t priority, const HostVector&, const HostVector&) {
        refreshSubsets(priority);
        purgeEmptySubsets(subsets_);
      });
}
209 :
210 0 : SubsetLoadBalancer::~SubsetLoadBalancer() {
211 : // Ensure gauges reflect correct values.
212 0 : forEachSubset(subsets_, [&](LbSubsetEntryPtr entry) {
213 0 : if (entry->active()) {
214 0 : stats_.lb_subsets_removed_.inc();
215 0 : stats_.lb_subsets_active_.dec();
216 0 : }
217 0 : });
218 0 : }
219 :
220 0 : void SubsetLoadBalancer::refreshSubsets() {
221 0 : for (auto& host_set : original_priority_set_.hostSetsPerPriority()) {
222 0 : update(host_set->priority(), host_set->hosts());
223 0 : }
224 0 : }
225 :
226 0 : void SubsetLoadBalancer::refreshSubsets(uint32_t priority) {
227 0 : const auto& host_sets = original_priority_set_.hostSetsPerPriority();
228 0 : ASSERT(priority < host_sets.size());
229 0 : update(priority, host_sets[priority]->hosts());
230 0 : }
231 :
232 0 : void SubsetLoadBalancer::initSubsetAnyOnce() {
233 0 : if (!subset_any_) {
234 0 : subset_any_ = std::make_shared<LbSubsetEntry>();
235 0 : subset_any_->lb_subset_ =
236 0 : std::make_unique<PriorityLbSubset>(*this, locality_weight_aware_, scale_locality_weight_);
237 0 : }
238 0 : }
239 :
240 0 : void SubsetLoadBalancer::initSubsetDefaultOnce() {
241 0 : if (!subset_default_) {
242 0 : subset_default_ = std::make_shared<LbSubsetEntry>();
243 0 : subset_default_->lb_subset_ =
244 0 : std::make_unique<PriorityLbSubset>(*this, locality_weight_aware_, scale_locality_weight_);
245 0 : }
246 0 : }
247 :
248 0 : void SubsetLoadBalancer::initSubsetSelectorMap() {
249 0 : selectors_ = std::make_shared<SubsetSelectorMap>();
250 0 : SubsetSelectorMapPtr selectors;
251 0 : for (const auto& subset_selector : subset_selectors_) {
252 0 : const auto& selector_keys = subset_selector->selectorKeys();
253 0 : const auto& selector_fallback_policy = subset_selector->fallbackPolicy();
254 0 : const auto& selector_fallback_keys_subset = subset_selector->fallbackKeysSubset();
255 :
256 0 : uint32_t pos = 0;
257 0 : selectors = selectors_;
258 0 : for (const auto& key : selector_keys) {
259 0 : const auto& selector_it = selectors->subset_keys_.find(key);
260 0 : pos++;
261 0 : if (selector_it == selectors->subset_keys_.end()) {
262 0 : selectors->subset_keys_.emplace(std::make_pair(key, std::make_shared<SubsetSelectorMap>()));
263 0 : const auto& child_selector = selectors->subset_keys_.find(key);
264 : // if this is last key for given selector, check if it has fallback specified
265 0 : if (pos == selector_keys.size()) {
266 0 : child_selector->second->fallback_params_.fallback_policy_ = selector_fallback_policy;
267 0 : child_selector->second->fallback_params_.fallback_keys_subset_ =
268 0 : &selector_fallback_keys_subset;
269 0 : initSelectorFallbackSubset(selector_fallback_policy);
270 0 : }
271 0 : selectors = child_selector->second;
272 0 : } else {
273 0 : selectors = selector_it->second;
274 0 : }
275 0 : }
276 0 : selectors = selectors_;
277 0 : }
278 0 : }
279 :
280 : void SubsetLoadBalancer::initSelectorFallbackSubset(
281 : const envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::
282 0 : LbSubsetSelectorFallbackPolicy& fallback_policy) {
283 0 : if (fallback_policy ==
284 0 : envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT &&
285 0 : subset_any_ == nullptr) {
286 0 : ENVOY_LOG(debug, "subset lb: creating any-endpoint fallback load balancer for selector");
287 0 : initSubsetAnyOnce();
288 0 : } else if (fallback_policy == envoy::config::cluster::v3::Cluster::LbSubsetConfig::
289 0 : LbSubsetSelector::DEFAULT_SUBSET &&
290 0 : subset_default_ == nullptr) {
291 0 : ENVOY_LOG(debug, "subset lb: creating default subset fallback load balancer for selector");
292 0 : initSubsetDefaultOnce();
293 0 : }
294 0 : }
295 :
296 0 : HostConstSharedPtr SubsetLoadBalancer::chooseHost(LoadBalancerContext* context) {
297 0 : if (metadata_fallback_policy_ !=
298 0 : envoy::config::cluster::v3::
299 0 : Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_FALLBACK_LIST) {
300 0 : return chooseHostIteration(context);
301 0 : }
302 0 : const ProtobufWkt::Value* metadata_fallbacks = getMetadataFallbackList(context);
303 0 : if (metadata_fallbacks == nullptr) {
304 0 : return chooseHostIteration(context);
305 0 : }
306 :
307 0 : LoadBalancerContextWrapper context_no_metadata_fallback = removeMetadataFallbackList(context);
308 0 : return chooseHostWithMetadataFallbacks(&context_no_metadata_fallback,
309 0 : metadata_fallbacks->list_value().values());
310 0 : }
311 :
312 : HostConstSharedPtr
313 : SubsetLoadBalancer::chooseHostWithMetadataFallbacks(LoadBalancerContext* context,
314 0 : const MetadataFallbacks& metadata_fallbacks) {
315 :
316 0 : if (metadata_fallbacks.empty()) {
317 0 : return chooseHostIteration(context);
318 0 : }
319 :
320 0 : for (const auto& metadata_override : metadata_fallbacks) {
321 0 : LoadBalancerContextWrapper context_wrapper(context, metadata_override.struct_value());
322 0 : const auto host = chooseHostIteration(&context_wrapper);
323 0 : if (host) {
324 0 : return host;
325 0 : }
326 0 : }
327 0 : return nullptr;
328 0 : }
329 :
330 : // assumes context->metadataMatchCriteria() is not null and there is 'fallback_list' criterion
331 : SubsetLoadBalancer::LoadBalancerContextWrapper
332 0 : SubsetLoadBalancer::removeMetadataFallbackList(LoadBalancerContext* context) {
333 0 : ASSERT(context->metadataMatchCriteria());
334 0 : const auto& match_criteria = context->metadataMatchCriteria()->metadataMatchCriteria();
335 :
336 0 : std::set<std::string> to_preserve;
337 0 : for (const auto& criterion : match_criteria) {
338 0 : if (criterion->name() != Config::MetadataEnvoyLbKeys::get().FALLBACK_LIST) {
339 0 : to_preserve.emplace(criterion->name());
340 0 : }
341 0 : }
342 0 : return {context, to_preserve};
343 0 : }
344 :
345 : const ProtobufWkt::Value*
346 0 : SubsetLoadBalancer::getMetadataFallbackList(LoadBalancerContext* context) const {
347 0 : if (context == nullptr) {
348 0 : return nullptr;
349 0 : }
350 0 : const auto& match_criteria = context->metadataMatchCriteria();
351 0 : if (match_criteria == nullptr) {
352 0 : return nullptr;
353 0 : }
354 :
355 0 : for (const auto& criterion : match_criteria->metadataMatchCriteria()) {
356 0 : if (criterion->name() == Config::MetadataEnvoyLbKeys::get().FALLBACK_LIST) {
357 0 : return &criterion->value().value();
358 0 : } // TODO(MarcinFalkowski): optimization: stop iteration when lexically after 'fallback_list'
359 0 : }
360 0 : return nullptr;
361 0 : }
362 :
// One full subset-selection pass for a single set of match criteria. Order of attempts:
// (1) the subset matching the criteria, (2) the matching selector's configured fallback
// policy, (3) the cluster-wide fallback subset, (4) the panic-mode subset. Returns nullptr
// if every stage fails.
HostConstSharedPtr SubsetLoadBalancer::chooseHostIteration(LoadBalancerContext* context) {
  if (context) {
    LoadBalancerContext* actual_used_context = context;
    // Owns the rewritten-criteria wrapper (if any) for the remainder of this scope.
    std::unique_ptr<LoadBalancerContextWrapper> actual_used_context_wrapper;

    if (allow_redundant_keys_) {
      // If redundant keys are allowed then we can filter the metadata match criteria by the
      // selectors first to reduce the redundant keys.
      auto actual_used_criteria =
          filterCriteriaBySelectors(subset_selectors_, context->metadataMatchCriteria());
      if (actual_used_criteria != nullptr) {
        actual_used_context_wrapper =
            std::make_unique<LoadBalancerContextWrapper>(context, std::move(actual_used_criteria));
        actual_used_context = actual_used_context_wrapper.get();
      }
    }

    bool host_chosen;
    HostConstSharedPtr host = tryChooseHostFromContext(actual_used_context, host_chosen);
    if (host_chosen) {
      // Subset lookup succeeded, return this result even if it's nullptr.
      return host;
    }
    // otherwise check if there is fallback policy configured for given route metadata
    absl::optional<SubsetSelectorFallbackParamsRef> selector_fallback_params =
        tryFindSelectorFallbackParams(actual_used_context);
    if (selector_fallback_params &&
        selector_fallback_params->get().fallback_policy_ !=
            envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::NOT_DEFINED) {
      // return result according to configured fallback policy
      return chooseHostForSelectorFallbackPolicy(*selector_fallback_params, actual_used_context);
    }
  }

  // No subset matched (or no context): try the cluster-wide fallback subset.
  if (fallback_subset_ == nullptr) {
    return nullptr;
  }

  HostConstSharedPtr host = fallback_subset_->lb_subset_->chooseHost(context);
  if (host != nullptr) {
    stats_.lb_subsets_fallback_.inc();
    return host;
  }

  // Last resort: the panic-mode subset, if configured.
  if (panic_mode_subset_ != nullptr) {
    HostConstSharedPtr host = panic_mode_subset_->lb_subset_->chooseHost(context);
    if (host != nullptr) {
      stats_.lb_subsets_fallback_panic_.inc();
      return host;
    }
  }

  return nullptr;
}
417 :
418 : absl::optional<SubsetLoadBalancer::SubsetSelectorFallbackParamsRef>
419 0 : SubsetLoadBalancer::tryFindSelectorFallbackParams(LoadBalancerContext* context) {
420 0 : const Router::MetadataMatchCriteria* match_criteria = context->metadataMatchCriteria();
421 0 : if (!match_criteria) {
422 0 : return absl::nullopt;
423 0 : }
424 0 : const auto match_criteria_vec = match_criteria->metadataMatchCriteria();
425 0 : SubsetSelectorMapPtr selectors = selectors_;
426 0 : if (selectors == nullptr) {
427 0 : return absl::nullopt;
428 0 : }
429 0 : for (uint32_t i = 0; i < match_criteria_vec.size(); i++) {
430 0 : const Router::MetadataMatchCriterion& match_criterion = *match_criteria_vec[i];
431 0 : const auto& subset_it = selectors->subset_keys_.find(match_criterion.name());
432 0 : if (subset_it == selectors->subset_keys_.end()) {
433 : // No subsets with this key (at this level in the hierarchy).
434 0 : break;
435 0 : }
436 :
437 0 : if (i + 1 == match_criteria_vec.size()) {
438 : // We've reached the end of the criteria, and they all matched.
439 0 : return subset_it->second->fallback_params_;
440 0 : }
441 0 : selectors = subset_it->second;
442 0 : }
443 :
444 0 : return absl::nullopt;
445 0 : }
446 :
447 : HostConstSharedPtr SubsetLoadBalancer::chooseHostForSelectorFallbackPolicy(
448 0 : const SubsetSelectorFallbackParams& fallback_params, LoadBalancerContext* context) {
449 0 : const auto& fallback_policy = fallback_params.fallback_policy_;
450 0 : if (fallback_policy ==
451 0 : envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::ANY_ENDPOINT &&
452 0 : subset_any_ != nullptr) {
453 0 : return subset_any_->lb_subset_->chooseHost(context);
454 0 : } else if (fallback_policy == envoy::config::cluster::v3::Cluster::LbSubsetConfig::
455 0 : LbSubsetSelector::DEFAULT_SUBSET &&
456 0 : subset_default_ != nullptr) {
457 0 : return subset_default_->lb_subset_->chooseHost(context);
458 0 : } else if (fallback_policy ==
459 0 : envoy::config::cluster::v3::Cluster::LbSubsetConfig::LbSubsetSelector::KEYS_SUBSET) {
460 0 : ASSERT(fallback_params.fallback_keys_subset_);
461 0 : auto filtered_context = std::make_unique<LoadBalancerContextWrapper>(
462 0 : context, *fallback_params.fallback_keys_subset_);
463 : // Perform whole subset load balancing again with reduced metadata match criteria
464 0 : return chooseHostIteration(filtered_context.get());
465 0 : } else {
466 0 : return nullptr;
467 0 : }
468 0 : }
469 :
470 : // Find a host from the subsets. Sets host_chosen to false and returns nullptr if the context has
471 : // no metadata match criteria, if there is no matching subset, or if the matching subset contains
472 : // no hosts (ignoring health). Otherwise, host_chosen is true and the returns HostConstSharedPtr
473 : // is from the subset's load balancer (technically, it may still be nullptr).
474 : HostConstSharedPtr SubsetLoadBalancer::tryChooseHostFromContext(LoadBalancerContext* context,
475 0 : bool& host_chosen) {
476 0 : host_chosen = false;
477 0 : const Router::MetadataMatchCriteria* match_criteria = context->metadataMatchCriteria();
478 0 : if (!match_criteria) {
479 0 : return nullptr;
480 0 : }
481 :
482 : // Route has metadata match criteria defined, see if we have a matching subset.
483 0 : LbSubsetEntryPtr entry = findSubset(match_criteria->metadataMatchCriteria());
484 0 : if (entry == nullptr || !entry->active()) {
485 : // No matching subset or subset not active: use fallback policy.
486 0 : return nullptr;
487 0 : }
488 :
489 0 : host_chosen = true;
490 0 : stats_.lb_subsets_selected_.inc();
491 0 : return entry->lb_subset_->chooseHost(context);
492 0 : }
493 :
494 : // Iterates over the given metadata match criteria (which must be lexically sorted by key) and
495 : // find a matching LbSubsetEntryPtr, if any.
496 : SubsetLoadBalancer::LbSubsetEntryPtr SubsetLoadBalancer::findSubset(
497 0 : const std::vector<Router::MetadataMatchCriterionConstSharedPtr>& match_criteria) {
498 0 : const LbSubsetMap* subsets = &subsets_;
499 :
500 : // Because the match_criteria and the host metadata used to populate subsets_ are sorted in the
501 : // same order, we can iterate over the criteria and perform a lookup for each key and value,
502 : // starting with the root LbSubsetMap and using the previous iteration's LbSubsetMap thereafter
503 : // (tracked in subsets). If ever a criterion's key or value is not found, there is no subset for
504 : // this criteria. If we reach the last criterion, we've found the LbSubsetEntry for the
505 : // criteria, which may or may not have a subset attached to it.
506 0 : for (uint32_t i = 0; i < match_criteria.size(); i++) {
507 0 : const Router::MetadataMatchCriterion& match_criterion = *match_criteria[i];
508 0 : const auto& subset_it = subsets->find(match_criterion.name());
509 0 : if (subset_it == subsets->end()) {
510 : // No subsets with this key (at this level in the hierarchy).
511 0 : break;
512 0 : }
513 :
514 0 : const ValueSubsetMap& vs_map = subset_it->second;
515 0 : const auto& vs_it = vs_map.find(match_criterion.value());
516 0 : if (vs_it == vs_map.end()) {
517 : // No subsets with this value.
518 0 : break;
519 0 : }
520 :
521 0 : const LbSubsetEntryPtr& entry = vs_it->second;
522 0 : if (i + 1 == match_criteria.size()) {
523 : // We've reached the end of the criteria, and they all matched.
524 0 : return entry;
525 0 : }
526 :
527 0 : subsets = &entry->children_;
528 0 : }
529 :
530 0 : return nullptr;
531 0 : }
532 :
// Rebuilds the "any" and "default" subsets for one priority from the full host list. These
// subsets back the ANY_ENDPOINT/DEFAULT_SUBSET fallback policies and the panic-mode subset.
void SubsetLoadBalancer::updateFallbackSubset(uint32_t priority, const HostVector& all_hosts) {
  // Re-adds every host passing the predicate for this priority, then finalizes the subset.
  auto update_func = [priority, &all_hosts](LbSubsetPtr& subset, const HostPredicate& predicate) {
    for (const auto& host : all_hosts) {
      if (predicate(*host)) {
        subset->pushHost(priority, host);
      }
    }
    subset->finalize(priority);
  };

  // The "any" subset accepts every host unconditionally.
  if (subset_any_ != nullptr) {
    update_func(subset_any_->lb_subset_, [](const Host&) { return true; });
  }

  // The "default" subset accepts hosts whose ENVOY_LB metadata matches the configured
  // default subset metadata.
  if (subset_default_ != nullptr) {
    HostPredicate predicate = std::bind(&SubsetLoadBalancer::hostMatches, this,
                                        default_subset_metadata_, std::placeholders::_1);
    update_func(subset_default_->lb_subset_, predicate);
  }

  if (fallback_subset_ == nullptr) {
    ENVOY_LOG(debug, "subset lb: fallback load balancer disabled");
    return;
  }

  // Same thing for the panic mode subset.
  // (The panic subset is either absent or aliases subset_any_, which was already refreshed
  // above, so no further work is required here.)
  ASSERT(panic_mode_subset_ == nullptr || panic_mode_subset_ == subset_any_);
}
561 :
562 0 : void SubsetLoadBalancer::initLbSubsetEntryOnce(LbSubsetEntryPtr& entry, bool single_host_subset) {
563 0 : ASSERT(entry != nullptr);
564 0 : if (entry->initialized()) {
565 0 : return;
566 0 : }
567 :
568 0 : if (single_host_subset) {
569 0 : entry->lb_subset_ = std::make_unique<SingleHostLbSubset>();
570 0 : entry->single_host_subset_ = true;
571 0 : } else {
572 0 : entry->lb_subset_ =
573 0 : std::make_unique<PriorityLbSubset>(*this, locality_weight_aware_, scale_locality_weight_);
574 0 : entry->single_host_subset_ = false;
575 0 : }
576 :
577 0 : stats_.lb_subsets_active_.inc();
578 0 : stats_.lb_subsets_created_.inc();
579 0 : }
580 :
581 : // Iterates all the hosts of specified priority, looking up an LbSubsetEntryPtr for each and add
582 : // hosts to related entry. Because the metadata of host can be updated inlined, we must evaluate
583 : // every hosts for every update.
void SubsetLoadBalancer::processSubsets(uint32_t priority, const HostVector& all_hosts) {
  // Tracks which single-host entries already received a host in this pass so that
  // additional hosts mapping to the same entry are counted as collisions, not pushed.
  absl::flat_hash_set<const LbSubsetEntry*> single_host_entries;
  uint64_t collision_count_of_single_host_entries{};

  for (const auto& host : all_hosts) {
    for (const auto& subset_selector : subset_selectors_) {
      const auto& keys = subset_selector->selectorKeys();
      // For each host, for each subset key, attempt to extract the metadata corresponding to the
      // key from the host.
      std::vector<SubsetMetadata> all_kvs = extractSubsetMetadata(keys, *host);
      for (const auto& kvs : all_kvs) {
        // The host has metadata for each key, find or create its subset.
        auto entry = findOrCreateLbSubsetEntry(subsets_, kvs, 0);
        initLbSubsetEntryOnce(entry, subset_selector->singleHostPerSubset());

        if (entry->single_host_subset_) {
          if (single_host_entries.contains(entry.get())) {
            // A second host mapped to a single-host subset: count the collision and
            // keep only the first host.
            collision_count_of_single_host_entries++;
            continue;
          }
          single_host_entries.emplace(entry.get());
        }

        entry->lb_subset_->pushHost(priority, host);
      }
    }
  }

  // This stat isn't added to `ClusterTrafficStats` because it wouldn't be used for nearly all
  // clusters, and is only set during configuration updates, not in the data path, so performance
  // of looking up the stat isn't critical.
  if (single_duplicate_stat_ == nullptr) {
    Stats::StatNameManagedStorage name_storage("lb_subsets_single_host_per_subset_duplicate",
                                               scope_.symbolTable());

    single_duplicate_stat_ = &Stats::Utility::gaugeFromElements(
        scope_, {name_storage.statName()}, Stats::Gauge::ImportMode::Accumulate);
  }
  single_duplicate_stat_->set(collision_count_of_single_host_entries);

  // Finalize updates after all the hosts are evaluated.
  forEachSubset(subsets_, [priority](LbSubsetEntryPtr entry) {
    if (entry->initialized()) {
      entry->lb_subset_->finalize(priority);
    }
  });
}
631 :
// Given the latest all hosts, update all subsets for this priority level, creating new subsets as
// necessary.
void SubsetLoadBalancer::update(uint32_t priority, const HostVector& all_hosts) {
  // Refresh the fallback ("any"/"default") subsets first, then the selector-derived subsets.
  updateFallbackSubset(priority, all_hosts);
  processSubsets(priority, all_hosts);
}
638 :
// Returns true if the host's ENVOY_LB filter metadata matches every key/value pair in kvs,
// honoring the cluster's list_as_any_ option.
bool SubsetLoadBalancer::hostMatches(const SubsetMetadata& kvs, const Host& host) {
  return Config::Metadata::metadataLabelMatch(
      kvs, host.metadata().get(), Config::MetadataFilters::get().ENVOY_LB, list_as_any_);
}
643 :
644 : // Iterates over subset_keys looking up values from the given host's metadata. Each key-value pair
645 : // is appended to kvs. Returns a non-empty value if the host has a value for each key.
646 : std::vector<SubsetLoadBalancer::SubsetMetadata>
647 : SubsetLoadBalancer::extractSubsetMetadata(const std::set<std::string>& subset_keys,
648 0 : const Host& host) {
649 0 : std::vector<SubsetMetadata> all_kvs;
650 0 : if (!host.metadata()) {
651 0 : return all_kvs;
652 0 : }
653 0 : const envoy::config::core::v3::Metadata& metadata = *host.metadata();
654 0 : const auto& filter_it = metadata.filter_metadata().find(Config::MetadataFilters::get().ENVOY_LB);
655 0 : if (filter_it == metadata.filter_metadata().end()) {
656 0 : return all_kvs;
657 0 : }
658 :
659 0 : const auto& fields = filter_it->second.fields();
660 0 : for (const auto& key : subset_keys) {
661 0 : const auto it = fields.find(key);
662 0 : if (it == fields.end()) {
663 0 : all_kvs.clear();
664 0 : break;
665 0 : }
666 :
667 0 : if (list_as_any_ && it->second.kind_case() == ProtobufWkt::Value::kListValue) {
668 : // If the list of kvs is empty, we initialize one kvs for each value in the list.
669 : // Otherwise, we branch the list of kvs by generating one new kvs per old kvs per
670 : // new value.
671 : //
672 : // For example, two kvs (<a=1>, <a=2>) joined with the kv foo=[bar,baz] results in four kvs:
673 : // <a=1,foo=bar>
674 : // <a=1,foo=baz>
675 : // <a=2,foo=bar>
676 : // <a=2,foo=baz>
677 0 : if (all_kvs.empty()) {
678 0 : for (const auto& v : it->second.list_value().values()) {
679 0 : all_kvs.emplace_back(SubsetMetadata({make_pair(key, v)}));
680 0 : }
681 0 : } else {
682 0 : std::vector<SubsetMetadata> new_kvs;
683 0 : for (const auto& kvs : all_kvs) {
684 0 : for (const auto& v : it->second.list_value().values()) {
685 0 : auto kv_copy = kvs;
686 0 : kv_copy.emplace_back(make_pair(key, v));
687 0 : new_kvs.emplace_back(kv_copy);
688 0 : }
689 0 : }
690 0 : all_kvs = new_kvs;
691 0 : }
692 :
693 0 : } else {
694 0 : if (all_kvs.empty()) {
695 0 : all_kvs.emplace_back(SubsetMetadata({std::make_pair(key, it->second)}));
696 0 : } else {
697 0 : for (auto& kvs : all_kvs) {
698 0 : kvs.emplace_back(std::make_pair(key, it->second));
699 0 : }
700 0 : }
701 0 : }
702 0 : }
703 :
704 0 : return all_kvs;
705 0 : }
706 :
707 0 : std::string SubsetLoadBalancer::describeMetadata(const SubsetLoadBalancer::SubsetMetadata& kvs) {
708 0 : if (kvs.empty()) {
709 0 : return "<no metadata>";
710 0 : }
711 :
712 0 : std::ostringstream buf;
713 0 : bool first = true;
714 0 : for (const auto& it : kvs) {
715 0 : if (!first) {
716 0 : buf << ", ";
717 0 : } else {
718 0 : first = false;
719 0 : }
720 :
721 0 : const ProtobufWkt::Value& value = it.second;
722 0 : buf << it.first << "=" << MessageUtil::getJsonStringFromMessageOrError(value);
723 0 : }
724 0 : return buf.str();
725 0 : }
726 :
727 : // Given a vector of key-values (from extractSubsetMetadata), recursively finds the matching
728 : // LbSubsetEntryPtr.
SubsetLoadBalancer::LbSubsetEntryPtr
SubsetLoadBalancer::findOrCreateLbSubsetEntry(LbSubsetMap& subsets, const SubsetMetadata& kvs,
                                              uint32_t idx) {
  ASSERT(idx < kvs.size());

  // Resolve the idx-th key/value pair at this level of the nested subset map, then
  // recurse into the entry's children for the remaining pairs.
  const std::string& name = kvs[idx].first;
  const ProtobufWkt::Value& pb_value = kvs[idx].second;
  const HashedValue value(pb_value);

  LbSubsetEntryPtr entry;

  const auto kv_it = subsets.find(name);

  // Try to find an existing entry for this key/value.
  if (kv_it != subsets.end()) {
    ValueSubsetMap& value_subset_map = kv_it->second;
    const auto vs_it = value_subset_map.find(value);
    if (vs_it != value_subset_map.end()) {
      entry = vs_it->second;
    }
  }

  if (!entry) {
    // Not found. Create an uninitialized entry.
    entry = std::make_shared<LbSubsetEntry>();
    if (kv_it != subsets.end()) {
      // The key level exists; just add the new value under it.
      ValueSubsetMap& value_subset_map = kv_it->second;
      value_subset_map.emplace(value, entry);
    } else {
      // Neither key nor value exists yet; create both levels at once.
      ValueSubsetMap value_subset_map = {{value, entry}};
      subsets.emplace(name, value_subset_map);
    }
  }

  idx++;
  if (idx == kvs.size()) {
    // We've matched all the key-values, return the entry.
    return entry;
  }

  return findOrCreateLbSubsetEntry(entry->children_, kvs, idx);
}
770 :
771 : // Invokes cb for each LbSubsetEntryPtr in subsets.
772 : void SubsetLoadBalancer::forEachSubset(LbSubsetMap& subsets,
773 0 : std::function<void(LbSubsetEntryPtr&)> cb) {
774 0 : for (auto& vsm : subsets) {
775 0 : for (auto& em : vsm.second) {
776 0 : LbSubsetEntryPtr entry = em.second;
777 0 : cb(entry);
778 0 : forEachSubset(entry->children_, cb);
779 0 : }
780 0 : }
781 0 : }
782 :
// Recursively removes subset entries that are neither active nor have
// children, decrementing the active-subsets gauge and bumping the removed
// counter for entries that had been initialized (uninitialized entries were
// never counted).
void SubsetLoadBalancer::purgeEmptySubsets(LbSubsetMap& subsets) {
  for (auto subset_it = subsets.begin(); subset_it != subsets.end();) {
    for (auto it = subset_it->second.begin(); it != subset_it->second.end();) {
      LbSubsetEntryPtr entry = it->second;

      // Purge descendants first so a parent whose children all empty out can
      // itself be removed in this same pass.
      purgeEmptySubsets(entry->children_);

      if (entry->active() || entry->hasChildren()) {
        it++;
        continue;
      }

      // If it wasn't initialized, it wasn't accounted for.
      if (entry->initialized()) {
        stats_.lb_subsets_active_.dec();
        stats_.lb_subsets_removed_.inc();
      }

      // Capture the successor before erasing; NOTE(review): this pattern
      // suggests the map's erase(iterator) does not return the next iterator
      // (e.g. an absl hash map) — confirm against the LbSubsetMap typedef
      // before simplifying to `it = erase(it)`.
      auto next_it = std::next(it);
      subset_it->second.erase(it);
      it = next_it;
    }

    // Drop the key entirely once its value map has no entries left.
    if (subset_it->second.empty()) {
      auto next_subset_it = std::next(subset_it);
      subsets.erase(subset_it);
      subset_it = next_subset_it;
    } else {
      subset_it++;
    }
  }
}
815 :
// Initialize a new HostSubsetImpl and LoadBalancer from the SubsetLoadBalancer, filtering hosts
// with the given predicate.
SubsetLoadBalancer::PrioritySubsetImpl::PrioritySubsetImpl(const SubsetLoadBalancer& subset_lb,
                                                           bool locality_weight_aware,
                                                           bool scale_locality_weight)
    : original_priority_set_(subset_lb.original_priority_set_),
      original_local_priority_set_(subset_lb.original_local_priority_set_),
      locality_weight_aware_(locality_weight_aware), scale_locality_weight_(scale_locality_weight) {
  // Create at least one host set (priority 0) so the child LB always has a
  // non-empty priority set to work with.
  getOrCreateHostSet(0);

  // Delegate construction of the child LB to the configured creator; it
  // returns either a thread-aware LB (first) or a plain LB (second).
  auto lb_pair = subset_lb.child_lb_creator_->createLoadBalancer(
      *this, original_local_priority_set_, subset_lb.stats_, subset_lb.scope_, subset_lb.runtime_,
      subset_lb.random_, subset_lb.time_source_);

  if (lb_pair.first != nullptr) {
    // Thread-aware path: initialize first, then obtain the worker-local LB
    // from the thread-aware LB's factory.
    thread_aware_lb_ = std::move(lb_pair.first);
    thread_aware_lb_->initialize();
    lb_ = thread_aware_lb_->factory()->create({*this, original_local_priority_set_});
  } else {
    lb_ = std::move(lb_pair.second);
  }

  // Notify any registered member/priority update callbacks of the initial state.
  triggerCallbacks();
}
841 :
842 : // Given all hosts that that belong in this subset, hosts_added and hosts_removed, update the
843 : // underlying HostSet. The hosts_added Hosts and hosts_removed Hosts have been filtered to match
844 : // hosts that belong in this subset.
845 : void SubsetLoadBalancer::HostSubsetImpl::update(const HostHashSet& matching_hosts,
846 : const HostVector& hosts_added,
847 0 : const HostVector& hosts_removed) {
848 0 : auto cached_predicate = [&matching_hosts](const auto& host) {
849 0 : return matching_hosts.count(&host) == 1;
850 0 : };
851 :
852 : // TODO(snowp): If we had a unhealthyHosts() function we could avoid potentially traversing
853 : // the list of hosts twice.
854 0 : auto hosts = std::make_shared<HostVector>();
855 0 : hosts->reserve(original_host_set_.hosts().size());
856 0 : for (const auto& host : original_host_set_.hosts()) {
857 0 : if (cached_predicate(*host)) {
858 0 : hosts->emplace_back(host);
859 0 : }
860 0 : }
861 :
862 0 : auto healthy_hosts = std::make_shared<HealthyHostVector>();
863 0 : healthy_hosts->get().reserve(original_host_set_.healthyHosts().size());
864 0 : for (const auto& host : original_host_set_.healthyHosts()) {
865 0 : if (cached_predicate(*host)) {
866 0 : healthy_hosts->get().emplace_back(host);
867 0 : }
868 0 : }
869 :
870 0 : auto degraded_hosts = std::make_shared<DegradedHostVector>();
871 0 : degraded_hosts->get().reserve(original_host_set_.degradedHosts().size());
872 0 : for (const auto& host : original_host_set_.degradedHosts()) {
873 0 : if (cached_predicate(*host)) {
874 0 : degraded_hosts->get().emplace_back(host);
875 0 : }
876 0 : }
877 :
878 0 : auto excluded_hosts = std::make_shared<ExcludedHostVector>();
879 0 : excluded_hosts->get().reserve(original_host_set_.excludedHosts().size());
880 0 : for (const auto& host : original_host_set_.excludedHosts()) {
881 0 : if (cached_predicate(*host)) {
882 0 : excluded_hosts->get().emplace_back(host);
883 0 : }
884 0 : }
885 :
886 : // If we only have one locality we can avoid the first call to filter() by
887 : // just creating a new HostsPerLocality from the list of all hosts.
888 0 : HostsPerLocalityConstSharedPtr hosts_per_locality;
889 :
890 0 : if (original_host_set_.hostsPerLocality().get().size() == 1) {
891 0 : hosts_per_locality = std::make_shared<HostsPerLocalityImpl>(
892 0 : *hosts, original_host_set_.hostsPerLocality().hasLocalLocality());
893 0 : } else {
894 0 : hosts_per_locality = original_host_set_.hostsPerLocality().filter({cached_predicate})[0];
895 0 : }
896 :
897 0 : auto healthy_hosts_per_locality =
898 0 : original_host_set_.healthyHostsPerLocality().filter({cached_predicate})[0];
899 0 : auto degraded_hosts_per_locality =
900 0 : original_host_set_.degradedHostsPerLocality().filter({cached_predicate})[0];
901 0 : auto excluded_hosts_per_locality =
902 0 : original_host_set_.excludedHostsPerLocality().filter({cached_predicate})[0];
903 :
904 0 : HostSetImpl::updateHosts(
905 0 : HostSetImpl::updateHostsParams(
906 0 : hosts, hosts_per_locality, healthy_hosts, healthy_hosts_per_locality, degraded_hosts,
907 0 : degraded_hosts_per_locality, excluded_hosts, excluded_hosts_per_locality),
908 0 : determineLocalityWeights(*hosts_per_locality), hosts_added, hosts_removed,
909 0 : original_host_set_.weightedPriorityHealth(), original_host_set_.overprovisioningFactor());
910 0 : }
911 :
912 : LocalityWeightsConstSharedPtr SubsetLoadBalancer::HostSubsetImpl::determineLocalityWeights(
913 0 : const HostsPerLocality& hosts_per_locality) const {
914 0 : if (locality_weight_aware_) {
915 0 : if (scale_locality_weight_) {
916 0 : const auto& original_hosts_per_locality = original_host_set_.hostsPerLocality().get();
917 : // E.g. we can be here in static clusters with actual locality weighting before pre-init
918 : // completes.
919 0 : if (!original_host_set_.localityWeights()) {
920 0 : return {};
921 0 : }
922 0 : const auto& original_weights = *original_host_set_.localityWeights();
923 :
924 0 : auto scaled_locality_weights = std::make_shared<LocalityWeights>(original_weights.size());
925 0 : for (uint32_t i = 0; i < original_weights.size(); ++i) {
926 : // If the original locality has zero hosts, skip it. This leaves the weight at zero.
927 0 : if (original_hosts_per_locality[i].empty()) {
928 0 : continue;
929 0 : }
930 :
931 : // Otherwise, scale it proportionally to the number of hosts removed by the subset
932 : // predicate.
933 0 : (*scaled_locality_weights)[i] =
934 0 : std::round(float((original_weights[i] * hosts_per_locality.get()[i].size())) /
935 0 : original_hosts_per_locality[i].size());
936 0 : }
937 :
938 0 : return scaled_locality_weights;
939 0 : } else {
940 0 : return original_host_set_.localityWeights();
941 0 : }
942 0 : }
943 0 : return {};
944 0 : }
945 :
946 : HostSetImplPtr SubsetLoadBalancer::PrioritySubsetImpl::createHostSet(
947 : uint32_t priority, absl::optional<bool> weighted_priority_health,
948 0 : absl::optional<uint32_t> overprovisioning_factor) {
949 : // Use original hostset's overprovisioning_factor.
950 0 : RELEASE_ASSERT(priority < original_priority_set_.hostSetsPerPriority().size(), "");
951 :
952 0 : const HostSetPtr& host_set = original_priority_set_.hostSetsPerPriority()[priority];
953 :
954 0 : ASSERT(!overprovisioning_factor.has_value() ||
955 0 : overprovisioning_factor.value() == host_set->overprovisioningFactor());
956 0 : ASSERT(!weighted_priority_health.has_value() ||
957 0 : weighted_priority_health.value() == host_set->weightedPriorityHealth());
958 0 : return HostSetImplPtr{
959 0 : new HostSubsetImpl(*host_set, locality_weight_aware_, scale_locality_weight_)};
960 0 : }
961 :
962 : void SubsetLoadBalancer::PrioritySubsetImpl::update(uint32_t priority,
963 : const HostHashSet& matching_hosts,
964 : const HostVector& hosts_added,
965 0 : const HostVector& hosts_removed) {
966 0 : const auto& host_subset = getOrCreateHostSet(priority);
967 0 : updateSubset(priority, matching_hosts, hosts_added, hosts_removed);
968 :
969 0 : if (host_subset.hosts().empty() != empty_) {
970 0 : empty_ = true;
971 0 : for (auto& host_set : hostSetsPerPriority()) {
972 0 : empty_ &= host_set->hosts().empty();
973 0 : }
974 0 : }
975 :
976 : // Create a new worker local LB if needed.
977 : // TODO(mattklein123): See the PrioritySubsetImpl constructor for additional comments on how
978 : // we can do better here.
979 0 : if (thread_aware_lb_ != nullptr && thread_aware_lb_->factory()->recreateOnHostChange()) {
980 0 : lb_ = thread_aware_lb_->factory()->create({*this, original_local_priority_set_});
981 0 : }
982 0 : }
983 :
// Wraps an existing LoadBalancerContext, restricting its metadata match
// criteria to the given key names; other context calls forward to `wrapped`.
SubsetLoadBalancer::LoadBalancerContextWrapper::LoadBalancerContextWrapper(
    LoadBalancerContext* wrapped,
    const std::set<std::string>& filtered_metadata_match_criteria_names)
    : wrapped_(wrapped) {
  // Callers must only construct the wrapper when criteria exist to filter.
  ASSERT(wrapped->metadataMatchCriteria());

  metadata_match_ =
      wrapped->metadataMatchCriteria()->filterMatchCriteria(filtered_metadata_match_criteria_names);
}
993 :
// Wraps an existing LoadBalancerContext, merging the given override struct
// into its metadata match criteria; other context calls forward to `wrapped`.
SubsetLoadBalancer::LoadBalancerContextWrapper::LoadBalancerContextWrapper(
    LoadBalancerContext* wrapped, const ProtobufWkt::Struct& metadata_match_criteria_override)
    : wrapped_(wrapped) {
  // Callers must only construct the wrapper when criteria exist to merge into.
  ASSERT(wrapped->metadataMatchCriteria());
  metadata_match_ =
      wrapped->metadataMatchCriteria()->mergeMatchCriteria(metadata_match_criteria_override);
}
1001 : } // namespace Upstream
1002 : } // namespace Envoy
|