/proc/self/cwd/source/server/admin/config_dump_handler.cc
Line | Count | Source (jump to first uncovered line) |
1 | | #include "source/server/admin/config_dump_handler.h" |
2 | | |
3 | | #include "envoy/config/core/v3/health_check.pb.h" |
4 | | #include "envoy/config/endpoint/v3/endpoint.pb.h" |
5 | | |
6 | | #include "source/common/common/matchers.h" |
7 | | #include "source/common/common/regex.h" |
8 | | #include "source/common/common/statusor.h" |
9 | | #include "source/common/http/headers.h" |
10 | | #include "source/common/http/utility.h" |
11 | | #include "source/common/network/utility.h" |
12 | | #include "source/server/admin/utils.h" |
13 | | |
14 | | namespace Envoy { |
15 | | namespace Server { |
16 | | |
17 | | namespace { |
18 | | |
19 | | // Validates that `field_mask` is valid for `message` and applies `TrimMessage`. |
20 | | // Necessary because TrimMessage crashes if `field_mask` is invalid. |
21 | | // Returns `true` on success. |
22 | | bool checkFieldMaskAndTrimMessage(const Protobuf::FieldMask& field_mask, |
23 | 0 | Protobuf::Message& message) { |
24 | 0 | for (const auto& path : field_mask.paths()) { |
25 | 0 | if (!ProtobufUtil::FieldMaskUtil::GetFieldDescriptors(message.GetDescriptor(), path, nullptr)) { |
26 | 0 | return false; |
27 | 0 | } |
28 | 0 | } |
29 | 0 | ProtobufUtil::FieldMaskUtil::TrimMessage(field_mask, &message); |
30 | 0 | return true; |
31 | 0 | } |
32 | | |
33 | | // Apply a field mask to a resource message. A simple field mask might look |
34 | | // like "cluster.name,cluster.alt_stat_name,last_updated" for a StaticCluster |
35 | | // resource. Unfortunately, since the "cluster" field is Any and the in-built |
36 | | // FieldMask utils can't mask inside an Any field, we need to do additional work |
37 | | // below. |
38 | | // |
39 | | // We take advantage of the fact that for the most part (with the exception of |
40 | | // DynamicListener) that ConfigDump resources have a single Any field where the |
41 | | // embedded resources lives. This allows us to construct an inner field mask for |
42 | | // the Any resource and an outer field mask for the enclosing message. In the |
43 | | // above example, the inner field mask would be "name,alt_stat_name" and the |
44 | | // outer field mask "cluster,last_updated". The masks are applied to their |
45 | | // respective messages, with the Any resource requiring an unpack/mask/pack |
46 | | // series of operations. |
47 | | // |
48 | | // TODO(htuch): we could make field masks more powerful in future and generalize |
49 | | // this to allow arbitrary indexing through Any fields. This is pretty |
50 | | // complicated, we would need to build a FieldMask tree similar to how the C++ |
51 | | // Protobuf library does this internally. |
52 | | /** |
53 | | * @return true on success, false if `field_mask` is invalid. |
54 | | */ |
55 | 0 | bool trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Message& message) { |
56 | 0 | const Protobuf::Descriptor* descriptor = message.GetDescriptor(); |
57 | 0 | const Protobuf::Reflection* reflection = message.GetReflection(); |
58 | | // Figure out which paths cover Any fields. For each field, gather the paths to |
59 | | // an inner mask, switch the outer mask to cover only the original field. |
60 | 0 | Protobuf::FieldMask outer_field_mask; |
61 | 0 | Protobuf::FieldMask inner_field_mask; |
62 | 0 | std::string any_field_name; |
63 | 0 | for (int i = 0; i < field_mask.paths().size(); ++i) { |
64 | 0 | const std::string& path = field_mask.paths(i); |
65 | 0 | std::vector<std::string> frags = absl::StrSplit(path, '.'); |
66 | 0 | if (frags.empty()) { |
67 | 0 | continue; |
68 | 0 | } |
69 | 0 | const Protobuf::FieldDescriptor* field = descriptor->FindFieldByName(frags[0]); |
70 | | // Only a single Any field supported, repeated fields don't support further |
71 | | // indexing. |
72 | | // TODO(htuch): should add support for DynamicListener for multiple Any |
73 | | // fields in the future, see |
74 | | // https://github.com/envoyproxy/envoy/issues/9669. |
75 | 0 | if (field != nullptr && field->message_type() != nullptr && !field->is_repeated() && |
76 | 0 | field->message_type()->full_name() == "google.protobuf.Any") { |
77 | 0 | if (any_field_name.empty()) { |
78 | 0 | any_field_name = frags[0]; |
79 | 0 | } else { |
80 | | // This should be structurally true due to the ConfigDump proto |
81 | | // definition (but not for DynamicListener today). |
82 | 0 | ASSERT(any_field_name == frags[0], |
83 | 0 | "Only a single Any field in a config dump resource is supported."); |
84 | 0 | } |
85 | 0 | outer_field_mask.add_paths(frags[0]); |
86 | 0 | frags.erase(frags.begin()); |
87 | 0 | inner_field_mask.add_paths(absl::StrJoin(frags, ".")); |
88 | 0 | } else { |
89 | 0 | outer_field_mask.add_paths(path); |
90 | 0 | } |
91 | 0 | } |
92 | | |
93 | 0 | if (!any_field_name.empty()) { |
94 | 0 | const Protobuf::FieldDescriptor* any_field = descriptor->FindFieldByName(any_field_name); |
95 | 0 | if (reflection->HasField(message, any_field)) { |
96 | 0 | ASSERT(any_field != nullptr); |
97 | | // Unpack to a DynamicMessage. |
98 | 0 | ProtobufWkt::Any any_message; |
99 | 0 | any_message.MergeFrom(reflection->GetMessage(message, any_field)); |
100 | 0 | Protobuf::DynamicMessageFactory dmf; |
101 | 0 | const absl::string_view inner_type_name = |
102 | 0 | TypeUtil::typeUrlToDescriptorFullName(any_message.type_url()); |
103 | 0 | const Protobuf::Descriptor* inner_descriptor = |
104 | 0 | Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName( |
105 | 0 | static_cast<std::string>(inner_type_name)); |
106 | 0 | ASSERT(inner_descriptor != nullptr); |
107 | 0 | std::unique_ptr<Protobuf::Message> inner_message; |
108 | 0 | inner_message.reset(dmf.GetPrototype(inner_descriptor)->New()); |
109 | 0 | MessageUtil::unpackTo(any_message, *inner_message); |
110 | | // Trim message. |
111 | 0 | if (!checkFieldMaskAndTrimMessage(inner_field_mask, *inner_message)) { |
112 | 0 | return false; |
113 | 0 | } |
114 | | // Pack it back into the Any resource. |
115 | 0 | any_message.PackFrom(*inner_message); |
116 | 0 | reflection->MutableMessage(&message, any_field)->CopyFrom(any_message); |
117 | 0 | } |
118 | 0 | } |
119 | 0 | return checkFieldMaskAndTrimMessage(outer_field_mask, message); |
120 | 0 | } |
121 | | |
// Helper method to get the "resource" query parameter, if the client supplied
// one; absl::nullopt otherwise.
absl::optional<std::string> resourceParam(const Http::Utility::QueryParams& params) {
  return Utility::queryParam(params, "resource");
}
126 | | |
// Helper method to get the "mask" query parameter (a protobuf FieldMask
// string), if the client supplied one; absl::nullopt otherwise.
absl::optional<std::string> maskParam(const Http::Utility::QueryParams& params) {
  return Utility::queryParam(params, "mask");
}
131 | | |
132 | | // Helper method to get the eds parameter. |
133 | 2.64k | bool shouldIncludeEdsInDump(const Http::Utility::QueryParams& params) { |
134 | 2.64k | return params.find("include_eds") != params.end(); |
135 | 2.64k | } |
136 | | |
137 | | absl::StatusOr<Matchers::StringMatcherPtr> |
138 | 2.64k | buildNameMatcher(const Http::Utility::QueryParams& params) { |
139 | 2.64k | const auto name_regex = Utility::queryParam(params, "name_regex"); |
140 | 2.64k | if (!name_regex.has_value() || name_regex->empty()) { |
141 | 2.64k | return std::make_unique<Matchers::UniversalStringMatcher>(); |
142 | 2.64k | } |
143 | 0 | envoy::type::matcher::v3::RegexMatcher matcher; |
144 | 0 | *matcher.mutable_google_re2() = envoy::type::matcher::v3::RegexMatcher::GoogleRE2(); |
145 | 0 | matcher.set_regex(*name_regex); |
146 | 0 | TRY_ASSERT_MAIN_THREAD |
147 | 0 | return Regex::Utility::parseRegex(matcher); |
148 | 0 | END_TRY |
149 | 0 | catch (EnvoyException& e) { |
150 | 0 | return absl::InvalidArgumentError( |
151 | 0 | absl::StrCat("Error while parsing name_regex from ", *name_regex, ": ", e.what())); |
152 | 0 | } |
153 | 0 | } |
154 | | |
155 | | } // namespace |
156 | | |
// Stores a reference to the config tracker (the registry of per-subsystem dump
// callbacks) and, via HandlerContextBase, to the server (used for cluster
// manager access when dumping EDS config).
ConfigDumpHandler::ConfigDumpHandler(ConfigTracker& config_tracker, Server::Instance& server)
    : HandlerContextBase(server), config_tracker_(config_tracker) {}
159 | | |
160 | | Http::Code ConfigDumpHandler::handlerConfigDump(Http::ResponseHeaderMap& response_headers, |
161 | | Buffer::Instance& response, |
162 | 2.64k | AdminStream& admin_stream) const { |
163 | 2.64k | Http::Utility::QueryParams query_params = admin_stream.queryParams(); |
164 | 2.64k | const auto resource = resourceParam(query_params); |
165 | 2.64k | const auto mask = maskParam(query_params); |
166 | 2.64k | const bool include_eds = shouldIncludeEdsInDump(query_params); |
167 | 2.64k | const absl::StatusOr<Matchers::StringMatcherPtr> name_matcher = buildNameMatcher(query_params); |
168 | 2.64k | if (!name_matcher.ok()) { |
169 | 0 | response.add(name_matcher.status().ToString()); |
170 | 0 | response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Text); |
171 | 0 | return Http::Code::BadRequest; |
172 | 0 | } |
173 | | |
174 | 2.64k | envoy::admin::v3::ConfigDump dump; |
175 | | |
176 | 2.64k | absl::optional<std::pair<Http::Code, std::string>> err; |
177 | 2.64k | if (resource.has_value()) { |
178 | 0 | err = addResourceToDump(dump, mask, resource.value(), **name_matcher, include_eds); |
179 | 2.64k | } else { |
180 | 2.64k | err = addAllConfigToDump(dump, mask, **name_matcher, include_eds); |
181 | 2.64k | } |
182 | 2.64k | if (err.has_value()) { |
183 | 0 | response_headers.addReference(Http::Headers::get().XContentTypeOptions, |
184 | 0 | Http::Headers::get().XContentTypeOptionValues.Nosniff); |
185 | 0 | response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Text); |
186 | 0 | response.add(err.value().second); |
187 | 0 | return err.value().first; |
188 | 0 | } |
189 | 2.64k | MessageUtil::redact(dump); |
190 | | |
191 | 2.64k | response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json); |
192 | 2.64k | response.add(MessageUtil::getJsonStringFromMessageOrError(dump, true)); // pretty-print |
193 | 2.64k | return Http::Code::OK; |
194 | 2.64k | } |
195 | | |
// Dumps only the repeated field named `resource` from the first registered
// callback whose dump message has such a field. Each element is optionally
// trimmed by `mask` (which may index through Any fields, see
// trimResourceMessage) before being packed into `dump`.
// Returns an (HTTP code, message) pair on failure, absl::nullopt on success.
absl::optional<std::pair<Http::Code, std::string>> ConfigDumpHandler::addResourceToDump(
    envoy::admin::v3::ConfigDump& dump, const absl::optional<std::string>& mask,
    const std::string& resource, const Matchers::StringMatcher& name_matcher,
    bool include_eds) const {
  Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap();
  if (include_eds) {
    // TODO(mattklein123): Add ability to see warming clusters in admin output.
    auto all_clusters = server_.clusterManager().clusters();
    if (!all_clusters.active_clusters_.empty()) {
      // Endpoints are not registered with the config tracker, so synthesize a
      // callback that builds the EDS dump on demand.
      callbacks_map.emplace("endpoint", [this](const Matchers::StringMatcher& name_matcher) {
        return dumpEndpointConfigs(name_matcher);
      });
    }
  }

  for (const auto& [name, callback] : callbacks_map) {
    UNREFERENCED_PARAMETER(name);
    ProtobufTypes::MessagePtr message = callback(name_matcher);
    ASSERT(message);

    // Look up the requested field by name via reflection.
    auto field_descriptor = message->GetDescriptor()->FindFieldByName(resource);
    const Protobuf::Reflection* reflection = message->GetReflection();
    if (!field_descriptor) {
      // This callback's dump type has no such field; try the next callback.
      continue;
    } else if (!field_descriptor->is_repeated()) {
      // Singular fields are served via ?mask=, not ?resource=.
      return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(
          Http::Code::BadRequest,
          fmt::format("{} is not a repeated field. Use ?mask={} to get only this field",
                      field_descriptor->name(), field_descriptor->name()))};
    }

    auto repeated = reflection->GetRepeatedPtrField<Protobuf::Message>(*message, field_descriptor);
    for (Protobuf::Message& msg : repeated) {
      if (mask.has_value()) {
        Protobuf::FieldMask field_mask;
        ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask);
        if (!trimResourceMessage(field_mask, msg)) {
          return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(
              Http::Code::BadRequest, absl::StrCat("FieldMask ", field_mask.DebugString(),
                                                   " could not be successfully used."))};
        }
      }
      auto* config = dump.add_configs();
      config->PackFrom(msg);
    }

    // We found the desired resource so there is no need to continue iterating over
    // the other keys.
    return absl::nullopt;
  }

  return absl::optional<std::pair<Http::Code, std::string>>{
      std::make_pair(Http::Code::NotFound, fmt::format("{} not found in config dump", resource))};
}
250 | | |
// Runs every registered config-dump callback (plus a synthesized EDS callback
// when `include_eds` is set) and packs each resulting message into `dump`,
// optionally trimmed by `mask`.
// Returns an (HTTP code, message) pair on failure, absl::nullopt on success.
absl::optional<std::pair<Http::Code, std::string>> ConfigDumpHandler::addAllConfigToDump(
    envoy::admin::v3::ConfigDump& dump, const absl::optional<std::string>& mask,
    const Matchers::StringMatcher& name_matcher, bool include_eds) const {
  Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap();
  if (include_eds) {
    // TODO(mattklein123): Add ability to see warming clusters in admin output.
    auto all_clusters = server_.clusterManager().clusters();
    if (!all_clusters.active_clusters_.empty()) {
      // Endpoints are not registered with the config tracker, so synthesize a
      // callback that builds the EDS dump on demand.
      callbacks_map.emplace("endpoint", [this](const Matchers::StringMatcher& name_matcher) {
        return dumpEndpointConfigs(name_matcher);
      });
    }
  }

  for (const auto& [name, callback] : callbacks_map) {
    UNREFERENCED_PARAMETER(name);
    ProtobufTypes::MessagePtr message = callback(name_matcher);
    ASSERT(message);

    if (mask.has_value()) {
      Protobuf::FieldMask field_mask;
      ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask);
      // We don't use trimMessage() above here since masks don't support
      // indexing through repeated fields. We don't return error on failure
      // because different callback return types will have different valid
      // field masks.
      if (!checkFieldMaskAndTrimMessage(field_mask, *message)) {
        continue;
      }
    }

    auto* config = dump.add_configs();
    config->PackFrom(*message);
  }
  // If a mask was given and it matched nothing at all, treat it as a bad request.
  if (dump.configs().empty() && mask.has_value()) {
    return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(
        Http::Code::BadRequest,
        absl::StrCat("FieldMask ", *mask, " could not be successfully applied to any configs."))};
  }
  return absl::nullopt;
}
292 | | |
293 | | ProtobufTypes::MessagePtr |
294 | 0 | ConfigDumpHandler::dumpEndpointConfigs(const Matchers::StringMatcher& name_matcher) const { |
295 | 0 | auto endpoint_config_dump = std::make_unique<envoy::admin::v3::EndpointsConfigDump>(); |
296 | | // TODO(mattklein123): Add ability to see warming clusters in admin output. |
297 | 0 | auto all_clusters = server_.clusterManager().clusters(); |
298 | 0 | for (const auto& [name, cluster_ref] : all_clusters.active_clusters_) { |
299 | 0 | UNREFERENCED_PARAMETER(name); |
300 | 0 | const Upstream::Cluster& cluster = cluster_ref.get(); |
301 | 0 | Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info(); |
302 | 0 | envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment; |
303 | |
|
304 | 0 | if (!cluster_info->edsServiceName().empty()) { |
305 | 0 | cluster_load_assignment.set_cluster_name(cluster_info->edsServiceName()); |
306 | 0 | } else { |
307 | 0 | cluster_load_assignment.set_cluster_name(cluster_info->name()); |
308 | 0 | } |
309 | 0 | if (!name_matcher.match(cluster_load_assignment.cluster_name())) { |
310 | 0 | continue; |
311 | 0 | } |
312 | 0 | auto& policy = *cluster_load_assignment.mutable_policy(); |
313 | |
|
314 | 0 | for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) { |
315 | 0 | policy.mutable_overprovisioning_factor()->set_value(host_set->overprovisioningFactor()); |
316 | |
|
317 | 0 | if (!host_set->hostsPerLocality().get().empty()) { |
318 | 0 | for (int index = 0; index < static_cast<int>(host_set->hostsPerLocality().get().size()); |
319 | 0 | index++) { |
320 | 0 | auto locality_host_set = host_set->hostsPerLocality().get()[index]; |
321 | |
|
322 | 0 | if (!locality_host_set.empty()) { |
323 | 0 | auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add(); |
324 | 0 | locality_lb_endpoint.mutable_locality()->MergeFrom(locality_host_set[0]->locality()); |
325 | 0 | locality_lb_endpoint.set_priority(locality_host_set[0]->priority()); |
326 | 0 | if (host_set->localityWeights() != nullptr && !host_set->localityWeights()->empty()) { |
327 | 0 | locality_lb_endpoint.mutable_load_balancing_weight()->set_value( |
328 | 0 | (*host_set->localityWeights())[index]); |
329 | 0 | } |
330 | |
|
331 | 0 | for (auto& host : locality_host_set) { |
332 | 0 | addLbEndpoint(host, locality_lb_endpoint); |
333 | 0 | } |
334 | 0 | } |
335 | 0 | } |
336 | 0 | } else { |
337 | 0 | for (auto& host : host_set->hosts()) { |
338 | 0 | auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add(); |
339 | 0 | locality_lb_endpoint.mutable_locality()->MergeFrom(host->locality()); |
340 | 0 | locality_lb_endpoint.set_priority(host->priority()); |
341 | 0 | addLbEndpoint(host, locality_lb_endpoint); |
342 | 0 | } |
343 | 0 | } |
344 | 0 | } |
345 | 0 | if (cluster_info->addedViaApi()) { |
346 | 0 | auto& dynamic_endpoint = *endpoint_config_dump->mutable_dynamic_endpoint_configs()->Add(); |
347 | 0 | dynamic_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment); |
348 | 0 | } else { |
349 | 0 | auto& static_endpoint = *endpoint_config_dump->mutable_static_endpoint_configs()->Add(); |
350 | 0 | static_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment); |
351 | 0 | } |
352 | 0 | } |
353 | 0 | return endpoint_config_dump; |
354 | 0 | } |
355 | | |
356 | | void ConfigDumpHandler::addLbEndpoint( |
357 | | const Upstream::HostSharedPtr& host, |
358 | 0 | envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const { |
359 | 0 | auto& lb_endpoint = *locality_lb_endpoint.mutable_lb_endpoints()->Add(); |
360 | 0 | if (host->metadata() != nullptr) { |
361 | 0 | lb_endpoint.mutable_metadata()->MergeFrom(*host->metadata()); |
362 | 0 | } |
363 | 0 | lb_endpoint.mutable_load_balancing_weight()->set_value(host->weight()); |
364 | |
|
365 | 0 | switch (host->coarseHealth()) { |
366 | 0 | case Upstream::Host::Health::Healthy: |
367 | 0 | lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::HEALTHY); |
368 | 0 | break; |
369 | 0 | case Upstream::Host::Health::Unhealthy: |
370 | 0 | lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNHEALTHY); |
371 | 0 | break; |
372 | 0 | case Upstream::Host::Health::Degraded: |
373 | 0 | lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::DEGRADED); |
374 | 0 | break; |
375 | 0 | default: |
376 | 0 | lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNKNOWN); |
377 | 0 | } |
378 | | |
379 | 0 | auto& endpoint = *lb_endpoint.mutable_endpoint(); |
380 | 0 | endpoint.set_hostname(host->hostname()); |
381 | 0 | Network::Utility::addressToProtobufAddress(*host->address(), *endpoint.mutable_address()); |
382 | 0 | auto& health_check_config = *endpoint.mutable_health_check_config(); |
383 | 0 | health_check_config.set_hostname(host->hostnameForHealthChecks()); |
384 | 0 | if (host->healthCheckAddress()->asString() != host->address()->asString()) { |
385 | 0 | health_check_config.set_port_value(host->healthCheckAddress()->ip()->port()); |
386 | 0 | } |
387 | 0 | } |
388 | | |
389 | | } // namespace Server |
390 | | } // namespace Envoy |