always_log_health_check_success_(config.always_log_health_check_success()), cluster_(cluster),
event_logger_(std::move(event_logger)), interval_(PROTOBUF_GET_MS_REQUIRED(config, interval)),
// the host info relatively up to date in case we suddenly start sending traffic to this cluster.
// In general host updates are rare and this should greatly smooth out needless health checking.
// If a connection has been established, we choose an interval based on the host's health. Please
// delayed. In this situation Envoy should use the edge interval settings between health checks.
// - check fails, host is still healthy and next check happens after unhealthy_edge_interval;
// - check succeeds, host is still unhealthy and next check happens after healthy_edge_interval;
const uint64_t min_interval = runtime_.snapshot().getInteger("health_check.min_interval", 0);
// 2) On the main thread, we make sure it is still valid (as the cluster may have been destroyed).
// Run callbacks in case something is waiting for health checks to run which will now never run.
if (changed_state != HealthTransition::Changed && parent_.always_log_health_check_success_ &&
return type == envoy::data::core::v3::NETWORK || type == envoy::data::core::v3::NETWORK_TIMEOUT;
// Otherwise clear it. This allows a host to toggle between timeout and failure if it's continuing
HealthCheckerImplBase::ActiveHealthCheckSession::clearPendingFlag(HealthTransition changed_state) {