for (size_t priority = 0; priority < healthy_per_priority_load.get().size(); ++priority) {
recalculatePerPriorityState(priority, priority_set_, per_priority_load_, per_priority_health_,
// recalculatePerPriorityState and recalculatePerPriorityPanic methods (normalized total health is
// - normalized total health is 100%. It means there are enough healthy hosts to handle the load.
// Continue distributing the load among priority sets, but turn on panic mode for a given priority
per_priority_load.healthy_priority_load_.get().resize(priority_set.hostSetsPerPriority().size());
per_priority_load.degraded_priority_load_.get().resize(priority_set.hostSetsPerPriority().size());
// Health ranges from 0-100, and is the ratio of healthy/degraded hosts to total hosts, modified
// Now that we've updated health for the changed priority level, we need to calculate percentage
// First, determine if the load needs to be scaled relative to availability (healthy + degraded).
// they will get 16% / 28% / 14% load to healthy hosts and 28% / 14% / 0% load to degraded hosts
// to ensure total load adds up to 100. Note the first healthy priority is receiving 2% additional
// Sum of priority levels' health and degraded values may exceed 100, so it is capped at 100 and
// availability. We first attempt to distribute this load to healthy priorities based on healthy
// Anything that remains should just be rounding errors, so allocate that to the first available
100, runtime_.snapshot().getInteger(RuntimePanicThreshold, default_healthy_panic_percent_));
const auto priority_and_source = choosePriority(hash, priority_loads.healthy_priority_load_,
const auto priority_and_source = choosePriority(hash, per_priority_load_.healthy_priority_load_,
for (uint32_t priority = 0; priority < priority_set_.hostSetsPerPriority().size(); ++priority) {
// If we cannot route all requests to the same locality, calculate what percentage can be routed.
// locality we should route. Percentage of requests routed cross locality to a specific locality
// Do not perform locality routing if there are too few local localities for zone routing to have
// routing correctly. This will not cause a traffic imbalance because other envoys will not know
HostSelectionResponse ZoneAwareLoadBalancerBase::chooseHost(LoadBalancerContext* context) {
100, runtime_.snapshot().getInteger(RuntimePanicThreshold, default_healthy_panic_percent_));
absl::flat_hash_map<envoy::config::core::v3::Locality, uint64_t, LocalityHash, LocalityEqualTo>
absl::flat_hash_map<envoy::config::core::v3::Locality, uint64_t, LocalityHash, LocalityEqualTo>
// If locality_basis_ is set to HEALTHY_HOSTS_WEIGHT, it uses the host's weight to calculate the
// If locality_basis_ is set to HEALTHY_HOSTS_WEIGHT, it uses the host's weight to calculate the
uint32_t ZoneAwareLoadBalancerBase::tryChooseLocalLocalityHosts(const HostSet& host_set) const {
ZoneAwareLoadBalancerBase::hostSourceToUse(LoadBalancerContext* context, uint64_t hash) const {
const HostVector& ZoneAwareLoadBalancerBase::hostSourceToHosts(HostsSource hosts_source) const {
for (uint32_t priority = 0; priority < priority_set_.hostSetsPerPriority().size(); ++priority) {
// TODO(nezdolik): linear scan can be improved by using a flat hash set for hosts in slow start.
add_hosts_source(HostsSource(priority, HostsSource::SourceType::AllHosts), host_set->hosts());
const absl::optional<HostsSource> hosts_source = hostSourceToUse(context, random(false));
auto host = scheduler.edf_->pickAndAdd([this](const Host& host) { return hostWeight(host); });
double EdfLoadBalancerBase::applySlowStartFactor(double host_weight, const Host& host) const {
// We can reliably apply slow start weight only if `last_hc_pass_time` in host has been populated