Line data Source code
1 : #include "source/server/admin/config_dump_handler.h"
2 :
3 : #include "envoy/config/core/v3/health_check.pb.h"
4 : #include "envoy/config/endpoint/v3/endpoint.pb.h"
5 :
6 : #include "source/common/common/matchers.h"
7 : #include "source/common/common/regex.h"
8 : #include "source/common/common/statusor.h"
9 : #include "source/common/http/headers.h"
10 : #include "source/common/http/utility.h"
11 : #include "source/common/network/utility.h"
12 : #include "source/server/admin/utils.h"
13 :
14 : namespace Envoy {
15 : namespace Server {
16 :
17 : namespace {
18 :
19 : // Validates that `field_mask` is valid for `message` and applies `TrimMessage`.
20 : // Necessary because TrimMessage crashes if `field_mask` is invalid.
21 : // Returns `true` on success.
22 : bool checkFieldMaskAndTrimMessage(const Protobuf::FieldMask& field_mask,
23 0 : Protobuf::Message& message) {
24 0 : for (const auto& path : field_mask.paths()) {
25 0 : if (!ProtobufUtil::FieldMaskUtil::GetFieldDescriptors(message.GetDescriptor(), path, nullptr)) {
26 0 : return false;
27 0 : }
28 0 : }
29 0 : ProtobufUtil::FieldMaskUtil::TrimMessage(field_mask, &message);
30 0 : return true;
31 0 : }
32 :
33 : // Apply a field mask to a resource message. A simple field mask might look
34 : // like "cluster.name,cluster.alt_stat_name,last_updated" for a StaticCluster
35 : // resource. Unfortunately, since the "cluster" field is Any and the in-built
36 : // FieldMask utils can't mask inside an Any field, we need to do additional work
37 : // below.
38 : //
39 : // We take advantage of the fact that for the most part (with the exception of
40 : // DynamicListener) that ConfigDump resources have a single Any field where the
41 : // embedded resources lives. This allows us to construct an inner field mask for
42 : // the Any resource and an outer field mask for the enclosing message. In the
43 : // above example, the inner field mask would be "name,alt_stat_name" and the
44 : // outer field mask "cluster,last_updated". The masks are applied to their
45 : // respective messages, with the Any resource requiring an unpack/mask/pack
46 : // series of operations.
47 : //
48 : // TODO(htuch): we could make field masks more powerful in future and generalize
49 : // this to allow arbitrary indexing through Any fields. This is pretty
50 : // complicated, we would need to build a FieldMask tree similar to how the C++
51 : // Protobuf library does this internally.
52 : /**
53 : * @return true on success, false if `field_mask` is invalid.
54 : */
55 0 : bool trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Message& message) {
56 0 : const Protobuf::Descriptor* descriptor = message.GetDescriptor();
57 0 : const Protobuf::Reflection* reflection = message.GetReflection();
58 : // Figure out which paths cover Any fields. For each field, gather the paths to
59 : // an inner mask, switch the outer mask to cover only the original field.
60 0 : Protobuf::FieldMask outer_field_mask;
61 0 : Protobuf::FieldMask inner_field_mask;
62 0 : std::string any_field_name;
63 0 : for (int i = 0; i < field_mask.paths().size(); ++i) {
64 0 : const std::string& path = field_mask.paths(i);
65 0 : std::vector<std::string> frags = absl::StrSplit(path, '.');
66 0 : if (frags.empty()) {
67 0 : continue;
68 0 : }
69 0 : const Protobuf::FieldDescriptor* field = descriptor->FindFieldByName(frags[0]);
70 : // Only a single Any field supported, repeated fields don't support further
71 : // indexing.
72 : // TODO(htuch): should add support for DynamicListener for multiple Any
73 : // fields in the future, see
74 : // https://github.com/envoyproxy/envoy/issues/9669.
75 0 : if (field != nullptr && field->message_type() != nullptr && !field->is_repeated() &&
76 0 : field->message_type()->full_name() == "google.protobuf.Any") {
77 0 : if (any_field_name.empty()) {
78 0 : any_field_name = frags[0];
79 0 : } else {
80 : // This should be structurally true due to the ConfigDump proto
81 : // definition (but not for DynamicListener today).
82 0 : ASSERT(any_field_name == frags[0],
83 0 : "Only a single Any field in a config dump resource is supported.");
84 0 : }
85 0 : outer_field_mask.add_paths(frags[0]);
86 0 : frags.erase(frags.begin());
87 0 : inner_field_mask.add_paths(absl::StrJoin(frags, "."));
88 0 : } else {
89 0 : outer_field_mask.add_paths(path);
90 0 : }
91 0 : }
92 :
93 0 : if (!any_field_name.empty()) {
94 0 : const Protobuf::FieldDescriptor* any_field = descriptor->FindFieldByName(any_field_name);
95 0 : if (reflection->HasField(message, any_field)) {
96 0 : ASSERT(any_field != nullptr);
97 : // Unpack to a DynamicMessage.
98 0 : ProtobufWkt::Any any_message;
99 0 : any_message.MergeFrom(reflection->GetMessage(message, any_field));
100 0 : Protobuf::DynamicMessageFactory dmf;
101 0 : const absl::string_view inner_type_name =
102 0 : TypeUtil::typeUrlToDescriptorFullName(any_message.type_url());
103 0 : const Protobuf::Descriptor* inner_descriptor =
104 0 : Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
105 0 : static_cast<std::string>(inner_type_name));
106 0 : ASSERT(inner_descriptor != nullptr);
107 0 : std::unique_ptr<Protobuf::Message> inner_message;
108 0 : inner_message.reset(dmf.GetPrototype(inner_descriptor)->New());
109 0 : MessageUtil::unpackTo(any_message, *inner_message);
110 : // Trim message.
111 0 : if (!checkFieldMaskAndTrimMessage(inner_field_mask, *inner_message)) {
112 0 : return false;
113 0 : }
114 : // Pack it back into the Any resource.
115 0 : any_message.PackFrom(*inner_message);
116 0 : reflection->MutableMessage(&message, any_field)->CopyFrom(any_message);
117 0 : }
118 0 : }
119 0 : return checkFieldMaskAndTrimMessage(outer_field_mask, message);
120 0 : }
121 :
122 : // Helper method to get the eds parameter.
123 98 : bool shouldIncludeEdsInDump(const Http::Utility::QueryParamsMulti& params) {
124 98 : return params.getFirstValue("include_eds").has_value();
125 98 : }
126 :
// Builds the matcher used to filter dumped resources by name, from the
// optional "name_regex" query parameter. An absent or empty parameter yields
// a universal matcher (everything matches); an unparseable regex yields
// InvalidArgumentError so the caller can report HTTP 400.
absl::StatusOr<Matchers::StringMatcherPtr>
buildNameMatcher(const Http::Utility::QueryParamsMulti& params) {
  const auto name_regex = params.getFirstValue("name_regex");
  if (!name_regex.has_value() || name_regex->empty()) {
    return std::make_unique<Matchers::UniversalStringMatcher>();
  }
  envoy::type::matcher::v3::RegexMatcher matcher;
  *matcher.mutable_google_re2() = envoy::type::matcher::v3::RegexMatcher::GoogleRE2();
  matcher.set_regex(*name_regex);
  // parseRegex may throw EnvoyException on a bad pattern; convert the
  // exception into a status instead of letting it escape the admin handler.
  TRY_ASSERT_MAIN_THREAD
  return Regex::Utility::parseRegex(matcher);
  END_TRY
  catch (EnvoyException& e) {
    return absl::InvalidArgumentError(
        absl::StrCat("Error while parsing name_regex from ", *name_regex, ": ", e.what()));
  }
}
144 :
145 : } // namespace
146 :
// Admin handler for config dumps; keeps a reference to the ConfigTracker
// whose per-key callbacks (getCallbacksMap) produce the dump contents.
ConfigDumpHandler::ConfigDumpHandler(ConfigTracker& config_tracker, Server::Instance& server)
    : HandlerContextBase(server), config_tracker_(config_tracker) {}
149 :
150 : Http::Code ConfigDumpHandler::handlerConfigDump(Http::ResponseHeaderMap& response_headers,
151 : Buffer::Instance& response,
152 98 : AdminStream& admin_stream) const {
153 98 : Http::Utility::QueryParamsMulti query_params = admin_stream.queryParams();
154 98 : const auto resource = query_params.getFirstValue("resource");
155 98 : const auto mask = query_params.getFirstValue("mask");
156 98 : const bool include_eds = shouldIncludeEdsInDump(query_params);
157 98 : const absl::StatusOr<Matchers::StringMatcherPtr> name_matcher = buildNameMatcher(query_params);
158 98 : if (!name_matcher.ok()) {
159 0 : response.add(name_matcher.status().ToString());
160 0 : response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Text);
161 0 : return Http::Code::BadRequest;
162 0 : }
163 :
164 98 : envoy::admin::v3::ConfigDump dump;
165 :
166 98 : absl::optional<std::pair<Http::Code, std::string>> err;
167 98 : if (resource.has_value()) {
168 0 : err = addResourceToDump(dump, mask, resource.value(), **name_matcher, include_eds);
169 98 : } else {
170 98 : err = addAllConfigToDump(dump, mask, **name_matcher, include_eds);
171 98 : }
172 98 : if (err.has_value()) {
173 0 : response_headers.addReference(Http::Headers::get().XContentTypeOptions,
174 0 : Http::Headers::get().XContentTypeOptionValues.Nosniff);
175 0 : response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Text);
176 0 : response.add(err.value().second);
177 0 : return err.value().first;
178 0 : }
179 98 : MessageUtil::redact(dump);
180 :
181 98 : response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);
182 98 : response.add(MessageUtil::getJsonStringFromMessageOrError(dump, true)); // pretty-print
183 98 : return Http::Code::OK;
184 98 : }
185 :
186 : absl::optional<std::pair<Http::Code, std::string>> ConfigDumpHandler::addResourceToDump(
187 : envoy::admin::v3::ConfigDump& dump, const absl::optional<std::string>& mask,
188 : const std::string& resource, const Matchers::StringMatcher& name_matcher,
189 0 : bool include_eds) const {
190 0 : Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap();
191 0 : if (include_eds) {
192 : // TODO(mattklein123): Add ability to see warming clusters in admin output.
193 0 : auto all_clusters = server_.clusterManager().clusters();
194 0 : if (!all_clusters.active_clusters_.empty()) {
195 0 : callbacks_map.emplace("endpoint", [this](const Matchers::StringMatcher& name_matcher) {
196 0 : return dumpEndpointConfigs(name_matcher);
197 0 : });
198 0 : }
199 0 : }
200 :
201 0 : for (const auto& [name, callback] : callbacks_map) {
202 0 : UNREFERENCED_PARAMETER(name);
203 0 : ProtobufTypes::MessagePtr message = callback(name_matcher);
204 0 : ASSERT(message);
205 :
206 0 : auto field_descriptor = message->GetDescriptor()->FindFieldByName(resource);
207 0 : const Protobuf::Reflection* reflection = message->GetReflection();
208 0 : if (!field_descriptor) {
209 0 : continue;
210 0 : } else if (!field_descriptor->is_repeated()) {
211 0 : return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(
212 0 : Http::Code::BadRequest,
213 0 : fmt::format("{} is not a repeated field. Use ?mask={} to get only this field",
214 0 : field_descriptor->name(), field_descriptor->name()))};
215 0 : }
216 :
217 0 : auto repeated = reflection->GetRepeatedPtrField<Protobuf::Message>(*message, field_descriptor);
218 0 : for (Protobuf::Message& msg : repeated) {
219 0 : if (mask.has_value()) {
220 0 : Protobuf::FieldMask field_mask;
221 0 : ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask);
222 0 : if (!trimResourceMessage(field_mask, msg)) {
223 0 : return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(
224 0 : Http::Code::BadRequest, absl::StrCat("FieldMask ", field_mask.DebugString(),
225 0 : " could not be successfully used."))};
226 0 : }
227 0 : }
228 0 : auto* config = dump.add_configs();
229 0 : config->PackFrom(msg);
230 0 : }
231 :
232 : // We found the desired resource so there is no need to continue iterating over
233 : // the other keys.
234 0 : return absl::nullopt;
235 0 : }
236 :
237 0 : return absl::optional<std::pair<Http::Code, std::string>>{
238 0 : std::make_pair(Http::Code::NotFound, fmt::format("{} not found in config dump", resource))};
239 0 : }
240 :
241 : absl::optional<std::pair<Http::Code, std::string>> ConfigDumpHandler::addAllConfigToDump(
242 : envoy::admin::v3::ConfigDump& dump, const absl::optional<std::string>& mask,
243 98 : const Matchers::StringMatcher& name_matcher, bool include_eds) const {
244 98 : Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap();
245 98 : if (include_eds) {
246 : // TODO(mattklein123): Add ability to see warming clusters in admin output.
247 0 : auto all_clusters = server_.clusterManager().clusters();
248 0 : if (!all_clusters.active_clusters_.empty()) {
249 0 : callbacks_map.emplace("endpoint", [this](const Matchers::StringMatcher& name_matcher) {
250 0 : return dumpEndpointConfigs(name_matcher);
251 0 : });
252 0 : }
253 0 : }
254 :
255 532 : for (const auto& [name, callback] : callbacks_map) {
256 532 : UNREFERENCED_PARAMETER(name);
257 532 : ProtobufTypes::MessagePtr message = callback(name_matcher);
258 532 : ASSERT(message);
259 :
260 532 : if (mask.has_value()) {
261 0 : Protobuf::FieldMask field_mask;
262 0 : ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask);
263 : // We don't use trimMessage() above here since masks don't support
264 : // indexing through repeated fields. We don't return error on failure
265 : // because different callback return types will have different valid
266 : // field masks.
267 0 : if (!checkFieldMaskAndTrimMessage(field_mask, *message)) {
268 0 : continue;
269 0 : }
270 0 : }
271 :
272 532 : auto* config = dump.add_configs();
273 532 : config->PackFrom(*message);
274 532 : }
275 98 : if (dump.configs().empty() && mask.has_value()) {
276 0 : return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(
277 0 : Http::Code::BadRequest,
278 0 : absl::StrCat("FieldMask ", *mask, " could not be successfully applied to any configs."))};
279 0 : }
280 98 : return absl::nullopt;
281 98 : }
282 :
283 : ProtobufTypes::MessagePtr
284 0 : ConfigDumpHandler::dumpEndpointConfigs(const Matchers::StringMatcher& name_matcher) const {
285 0 : auto endpoint_config_dump = std::make_unique<envoy::admin::v3::EndpointsConfigDump>();
286 : // TODO(mattklein123): Add ability to see warming clusters in admin output.
287 0 : auto all_clusters = server_.clusterManager().clusters();
288 0 : for (const auto& [name, cluster_ref] : all_clusters.active_clusters_) {
289 0 : UNREFERENCED_PARAMETER(name);
290 0 : const Upstream::Cluster& cluster = cluster_ref.get();
291 0 : Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info();
292 0 : envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;
293 :
294 0 : if (!cluster_info->edsServiceName().empty()) {
295 0 : cluster_load_assignment.set_cluster_name(cluster_info->edsServiceName());
296 0 : } else {
297 0 : cluster_load_assignment.set_cluster_name(cluster_info->name());
298 0 : }
299 0 : if (!name_matcher.match(cluster_load_assignment.cluster_name())) {
300 0 : continue;
301 0 : }
302 0 : auto& policy = *cluster_load_assignment.mutable_policy();
303 :
304 : // Using MILLION as denominator in config dump.
305 0 : float value = cluster.dropOverload().value() * 1000000;
306 0 : if (value > 0) {
307 0 : auto* drop_overload = policy.add_drop_overloads();
308 0 : drop_overload->set_category("drop_overload");
309 0 : auto* percent = drop_overload->mutable_drop_percentage();
310 0 : percent->set_denominator(envoy::type::v3::FractionalPercent::MILLION);
311 0 : percent->set_numerator(uint32_t(value));
312 0 : }
313 :
314 0 : for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {
315 0 : policy.mutable_overprovisioning_factor()->set_value(host_set->overprovisioningFactor());
316 :
317 0 : if (!host_set->hostsPerLocality().get().empty()) {
318 0 : for (int index = 0; index < static_cast<int>(host_set->hostsPerLocality().get().size());
319 0 : index++) {
320 0 : auto locality_host_set = host_set->hostsPerLocality().get()[index];
321 :
322 0 : if (!locality_host_set.empty()) {
323 0 : auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add();
324 0 : locality_lb_endpoint.mutable_locality()->MergeFrom(locality_host_set[0]->locality());
325 0 : locality_lb_endpoint.set_priority(locality_host_set[0]->priority());
326 0 : if (host_set->localityWeights() != nullptr && !host_set->localityWeights()->empty()) {
327 0 : locality_lb_endpoint.mutable_load_balancing_weight()->set_value(
328 0 : (*host_set->localityWeights())[index]);
329 0 : }
330 :
331 0 : for (auto& host : locality_host_set) {
332 0 : addLbEndpoint(host, locality_lb_endpoint);
333 0 : }
334 0 : }
335 0 : }
336 0 : } else {
337 0 : for (auto& host : host_set->hosts()) {
338 0 : auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add();
339 0 : locality_lb_endpoint.mutable_locality()->MergeFrom(host->locality());
340 0 : locality_lb_endpoint.set_priority(host->priority());
341 0 : addLbEndpoint(host, locality_lb_endpoint);
342 0 : }
343 0 : }
344 0 : }
345 0 : if (cluster_info->addedViaApi()) {
346 0 : auto& dynamic_endpoint = *endpoint_config_dump->mutable_dynamic_endpoint_configs()->Add();
347 0 : dynamic_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment);
348 0 : } else {
349 0 : auto& static_endpoint = *endpoint_config_dump->mutable_static_endpoint_configs()->Add();
350 0 : static_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment);
351 0 : }
352 0 : }
353 0 : return endpoint_config_dump;
354 0 : }
355 :
356 : void ConfigDumpHandler::addLbEndpoint(
357 : const Upstream::HostSharedPtr& host,
358 0 : envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const {
359 0 : auto& lb_endpoint = *locality_lb_endpoint.mutable_lb_endpoints()->Add();
360 0 : if (host->metadata() != nullptr) {
361 0 : lb_endpoint.mutable_metadata()->MergeFrom(*host->metadata());
362 0 : }
363 0 : lb_endpoint.mutable_load_balancing_weight()->set_value(host->weight());
364 :
365 0 : switch (host->coarseHealth()) {
366 0 : case Upstream::Host::Health::Healthy:
367 0 : lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::HEALTHY);
368 0 : break;
369 0 : case Upstream::Host::Health::Unhealthy:
370 0 : lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNHEALTHY);
371 0 : break;
372 0 : case Upstream::Host::Health::Degraded:
373 0 : lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::DEGRADED);
374 0 : break;
375 0 : default:
376 0 : lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNKNOWN);
377 0 : }
378 :
379 0 : auto& endpoint = *lb_endpoint.mutable_endpoint();
380 0 : endpoint.set_hostname(host->hostname());
381 0 : Network::Utility::addressToProtobufAddress(*host->address(), *endpoint.mutable_address());
382 0 : auto& health_check_config = *endpoint.mutable_health_check_config();
383 0 : health_check_config.set_hostname(host->hostnameForHealthChecks());
384 0 : if (host->healthCheckAddress()->asString() != host->address()->asString()) {
385 0 : health_check_config.set_port_value(host->healthCheckAddress()->ip()->port());
386 0 : }
387 0 : }
388 :
389 : } // namespace Server
390 : } // namespace Envoy
|