1
#include "source/server/admin/config_dump_handler.h"
2

            
3
#include "envoy/config/core/v3/health_check.pb.h"
4
#include "envoy/config/endpoint/v3/endpoint.pb.h"
5

            
6
#include "source/common/common/matchers.h"
7
#include "source/common/common/regex.h"
8
#include "source/common/common/statusor.h"
9
#include "source/common/http/headers.h"
10
#include "source/common/http/utility.h"
11
#include "source/common/network/utility.h"
12
#include "source/server/admin/utils.h"
13

            
14
namespace Envoy {
15
namespace Server {
16

            
17
namespace {
18

            
19
// Validates that `field_mask` is valid for `message` and applies `TrimMessage`.
20
// Necessary because TrimMessage crashes if `field_mask` is invalid.
21
// Returns `true` on success.
22
bool checkFieldMaskAndTrimMessage(const Protobuf::FieldMask& field_mask,
23
12
                                  Protobuf::Message& message) {
24
17
  for (const auto& path : field_mask.paths()) {
25
17
    if (!ProtobufUtil::FieldMaskUtil::GetFieldDescriptors(message.GetDescriptor(), path, nullptr)) {
26
4
      return false;
27
4
    }
28
17
  }
29
8
  ProtobufUtil::FieldMaskUtil::TrimMessage(field_mask, &message);
30
8
  return true;
31
12
}
32

            
33
// Apply a field mask to a resource message. A simple field mask might look
34
// like "cluster.name,cluster.alt_stat_name,last_updated" for a StaticCluster
35
// resource. Unfortunately, since the "cluster" field is Any and the in-built
36
// FieldMask utils can't mask inside an Any field, we need to do additional work
37
// below.
38
//
39
// We take advantage of the fact that for the most part (with the exception of
40
// DynamicListener) that ConfigDump resources have a single Any field where the
41
// embedded resources lives. This allows us to construct an inner field mask for
42
// the Any resource and an outer field mask for the enclosing message. In the
43
// above example, the inner field mask would be "name,alt_stat_name" and the
44
// outer field mask "cluster,last_updated". The masks are applied to their
45
// respective messages, with the Any resource requiring an unpack/mask/pack
46
// series of operations.
47
//
48
// TODO(htuch): we could make field masks more powerful in future and generalize
49
// this to allow arbitrary indexing through Any fields. This is pretty
50
// complicated, we would need to build a FieldMask tree similar to how the C++
51
// Protobuf library does this internally.
52
/**
53
 * @return true on success, false if `field_mask` is invalid.
54
 */
55
5
bool trimResourceMessage(const Protobuf::FieldMask& field_mask, Protobuf::Message& message) {
56
5
  const Protobuf::Descriptor* descriptor = message.GetDescriptor();
57
5
  const Protobuf::Reflection* reflection = message.GetReflection();
58
  // Figure out which paths cover Any fields. For each field, gather the paths to
59
  // an inner mask, switch the outer mask to cover only the original field.
60
5
  Protobuf::FieldMask outer_field_mask;
61
5
  Protobuf::FieldMask inner_field_mask;
62
5
  std::string any_field_name;
63
14
  for (int i = 0; i < field_mask.paths().size(); ++i) {
64
9
    const std::string& path = field_mask.paths(i);
65
9
    std::vector<std::string> frags = absl::StrSplit(path, '.');
66
9
    if (frags.empty()) {
67
      continue;
68
    }
69
9
    const Protobuf::FieldDescriptor* field = descriptor->FindFieldByName(frags[0]);
70
    // Only a single Any field supported, repeated fields don't support further
71
    // indexing.
72
    // TODO(htuch): should add support for DynamicListener for multiple Any
73
    // fields in the future, see
74
    // https://github.com/envoyproxy/envoy/issues/9669.
75
9
    if (field != nullptr && field->message_type() != nullptr && !field->is_repeated() &&
76
9
        field->message_type()->full_name() == "google.protobuf.Any") {
77
5
      if (any_field_name.empty()) {
78
4
        any_field_name = frags[0];
79
4
      } else {
80
        // This should be structurally true due to the ConfigDump proto
81
        // definition (but not for DynamicListener today).
82
1
        ASSERT(any_field_name == frags[0],
83
1
               "Only a single Any field in a config dump resource is supported.");
84
1
      }
85
5
      outer_field_mask.add_paths(frags[0]);
86
5
      frags.erase(frags.begin());
87
5
      inner_field_mask.add_paths(absl::StrJoin(frags, "."));
88
5
    } else {
89
4
      outer_field_mask.add_paths(path);
90
4
    }
91
9
  }
92

            
93
5
  if (!any_field_name.empty()) {
94
4
    const Protobuf::FieldDescriptor* any_field = descriptor->FindFieldByName(any_field_name);
95
4
    if (reflection->HasField(message, any_field)) {
96
4
      ASSERT(any_field != nullptr);
97
      // Unpack to a DynamicMessage.
98
4
      Protobuf::Any any_message;
99
4
      any_message.MergeFrom(reflection->GetMessage(message, any_field));
100
4
      Protobuf::DynamicMessageFactory dmf;
101
4
      const absl::string_view inner_type_name =
102
4
          TypeUtil::typeUrlToDescriptorFullName(any_message.type_url());
103
4
      const Protobuf::Descriptor* inner_descriptor =
104
4
          Protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
105
4
              static_cast<std::string>(inner_type_name));
106
4
      ASSERT(inner_descriptor != nullptr);
107
4
      std::unique_ptr<Protobuf::Message> inner_message;
108
4
      inner_message.reset(dmf.GetPrototype(inner_descriptor)->New());
109
4
      if (!MessageUtil::unpackTo(any_message, *inner_message).ok()) {
110
        return false;
111
      }
112
      // Trim message.
113
4
      if (!checkFieldMaskAndTrimMessage(inner_field_mask, *inner_message)) {
114
1
        return false;
115
1
      }
116
      // Pack it back into the Any resource.
117
3
      any_message.PackFrom(*inner_message);
118
3
      reflection->MutableMessage(&message, any_field)->CopyFrom(any_message);
119
3
    }
120
4
  }
121
4
  return checkFieldMaskAndTrimMessage(outer_field_mask, message);
122
5
}
123

            
124
// Helper method to get the eds parameter.
125
79
bool shouldIncludeEdsInDump(const Http::Utility::QueryParamsMulti& params) {
126
79
  return params.getFirstValue("include_eds").has_value();
127
79
}
128

            
129
// Builds a name matcher from the optional `name_regex` query parameter.
// Matches every name when the parameter is absent or empty; returns
// InvalidArgumentError when the supplied regex fails to parse.
absl::StatusOr<Matchers::StringMatcherPtr>
buildNameMatcher(const Http::Utility::QueryParamsMulti& params, Regex::Engine& engine) {
  const auto name_regex = params.getFirstValue("name_regex");
  const bool have_regex = name_regex.has_value() && !name_regex->empty();
  if (!have_regex) {
    return std::make_unique<Matchers::UniversalStringMatcher>();
  }
  envoy::type::matcher::v3::RegexMatcher matcher;
  *matcher.mutable_google_re2() = envoy::type::matcher::v3::RegexMatcher::GoogleRE2();
  matcher.set_regex(*name_regex);
  auto regex_or_error = Regex::Utility::parseRegex(matcher, engine);
  if (!regex_or_error.status().ok()) {
    return absl::InvalidArgumentError(absl::StrCat("Error while parsing name_regex from ",
                                                   *name_regex, ": ",
                                                   regex_or_error.status().message()));
  }
  return std::move(*regex_or_error);
}
146

            
147
} // namespace
148

            
149
// Captures the server context (via HandlerContextBase) and the ConfigTracker
// whose registered callbacks supply each subsystem's dump in the handlers below.
ConfigDumpHandler::ConfigDumpHandler(ConfigTracker& config_tracker, Server::Instance& server)
    : HandlerContextBase(server), config_tracker_(config_tracker) {}
151

            
152
// Renders the /config_dump admin endpoint. Honors the `resource`, `mask`,
// `name_regex` and `include_eds` query parameters and emits pretty-printed
// JSON on success, or a plain-text error with an appropriate status code.
Http::Code ConfigDumpHandler::handlerConfigDump(Http::ResponseHeaderMap& response_headers,
                                                Buffer::Instance& response,
                                                AdminStream& admin_stream) const {
  Http::Utility::QueryParamsMulti params = admin_stream.queryParams();
  const absl::optional<std::string> resource = Utility::nonEmptyQueryParam(params, "resource");
  const absl::optional<std::string> mask = Utility::nonEmptyQueryParam(params, "mask");
  const bool include_eds = shouldIncludeEdsInDump(params);
  const absl::StatusOr<Matchers::StringMatcherPtr> name_matcher =
      buildNameMatcher(params, server_.regexEngine());
  if (!name_matcher.ok()) {
    // Unparseable name_regex: surface the parse failure as plain text.
    response.add(name_matcher.status().ToString());
    response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Text);
    return Http::Code::BadRequest;
  }

  envoy::admin::v3::ConfigDump dump;

  // A specific `resource` narrows the dump to one repeated field; otherwise
  // every tracked subsystem is dumped in full.
  const absl::optional<std::pair<Http::Code, std::string>> failure =
      resource.has_value()
          ? addResourceToDump(dump, mask, resource.value(), **name_matcher, include_eds)
          : addAllConfigToDump(dump, mask, **name_matcher, include_eds);
  if (failure.has_value()) {
    response_headers.addReference(Http::Headers::get().XContentTypeOptions,
                                  Http::Headers::get().XContentTypeOptionValues.Nosniff);
    response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Text);
    response.add(failure->second);
    return failure->first;
  }
  // Scrub fields marked sensitive before the dump leaves the process.
  MessageUtil::redact(dump);

  response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);
  response.add(MessageUtil::getJsonStringFromMessageOrError(dump, true)); // pretty-print
  return Http::Code::OK;
}
189

            
190
// Dumps only the repeated field named `resource` from the first config tracker
// callback whose message has such a field, optionally trimming each entry with
// `mask`. Returns nullopt on success, or (status, message) on failure.
absl::optional<std::pair<Http::Code, std::string>> ConfigDumpHandler::addResourceToDump(
    envoy::admin::v3::ConfigDump& dump, const absl::optional<std::string>& mask,
    const std::string& resource, const Matchers::StringMatcher& name_matcher,
    bool include_eds) const {
  Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap();
  if (include_eds) {
    // TODO(mattklein123): Add ability to see warming clusters in admin output.
    if (server_.clusterManager().hasActiveClusters()) {
      callbacks_map.emplace("endpoint", [this](const Matchers::StringMatcher& name_matcher) {
        return dumpEndpointConfigs(name_matcher);
      });
    }
  }

  for (const auto& [name, callback] : callbacks_map) {
    UNREFERENCED_PARAMETER(name);
    ProtobufTypes::MessagePtr message = callback(name_matcher);
    ASSERT(message);

    auto field_descriptor = message->GetDescriptor()->FindFieldByName(resource);
    const Protobuf::Reflection* reflection = message->GetReflection();
    if (!field_descriptor) {
      // This callback's message has no field by that name; try the next one.
      continue;
    } else if (!field_descriptor->is_repeated()) {
      return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(
          Http::Code::BadRequest,
          fmt::format("{} is not a repeated field. Use ?mask={} to get only this field",
                      field_descriptor->name(), field_descriptor->name()))};
    }

    // `auto` (not `auto&`) deliberately copies the repeated field here, so the
    // entries can be bound mutably and trimmed below without touching the
    // message the callback returned.
    auto repeated = reflection->GetRepeatedPtrField<Protobuf::Message>(*message, field_descriptor);
    for (Protobuf::Message& msg : repeated) {
      if (mask.has_value()) {
        Protobuf::FieldMask field_mask;
        ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask);
        // trimResourceMessage (not plain trimming) so masks can reach inside
        // the resource's embedded Any field.
        if (!trimResourceMessage(field_mask, msg)) {
          return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(
              Http::Code::BadRequest, absl::StrCat("FieldMask ", field_mask.DebugString(),
                                                   " could not be successfully used."))};
        }
      }
      auto* config = dump.add_configs();
      config->PackFrom(msg);
    }

    // We found the desired resource so there is no need to continue iterating over
    // the other keys.
    return absl::nullopt;
  }

  return absl::optional<std::pair<Http::Code, std::string>>{
      std::make_pair(Http::Code::NotFound, fmt::format("{} not found in config dump", resource))};
}
243

            
244
// Dumps the complete message from every config tracker callback into `dump`,
// optionally trimming each with `mask`. Callbacks whose message the mask does
// not fit are skipped; an error is returned only if the mask fit none of them.
absl::optional<std::pair<Http::Code, std::string>> ConfigDumpHandler::addAllConfigToDump(
    envoy::admin::v3::ConfigDump& dump, const absl::optional<std::string>& mask,
    const Matchers::StringMatcher& name_matcher, bool include_eds) const {
  Envoy::Server::ConfigTracker::CbsMap callbacks_map = config_tracker_.getCallbacksMap();
  // TODO(mattklein123): Add ability to see warming clusters in admin output.
  if (include_eds && server_.clusterManager().hasActiveClusters()) {
    callbacks_map.emplace("endpoint", [this](const Matchers::StringMatcher& name_matcher) {
      return dumpEndpointConfigs(name_matcher);
    });
  }

  for (const auto& [name, callback] : callbacks_map) {
    UNREFERENCED_PARAMETER(name);
    ProtobufTypes::MessagePtr message = callback(name_matcher);
    ASSERT(message);

    if (mask.has_value()) {
      Protobuf::FieldMask field_mask;
      ProtobufUtil::FieldMaskUtil::FromString(mask.value(), &field_mask);
      // trimResourceMessage() is not used here since masks don't support
      // indexing through repeated fields. A failure is not an error: different
      // callback return types accept different field masks, so a config the
      // mask does not apply to is simply skipped.
      const bool mask_applied = checkFieldMaskAndTrimMessage(field_mask, *message);
      if (!mask_applied) {
        continue;
      }
    }

    dump.add_configs()->PackFrom(*message);
  }
  // The mask matched nothing at all: surface an error instead of an empty dump.
  if (mask.has_value() && dump.configs().empty()) {
    return absl::optional<std::pair<Http::Code, std::string>>{std::make_pair(
        Http::Code::BadRequest,
        absl::StrCat("FieldMask ", *mask, " could not be successfully applied to any configs."))};
  }
  return absl::nullopt;
}
284

            
285
// Synthesizes an EndpointsConfigDump from the cluster manager's active
// clusters, building a ClusterLoadAssignment per cluster whose (EDS service or
// cluster) name passes `name_matcher`.
ProtobufTypes::MessagePtr
ConfigDumpHandler::dumpEndpointConfigs(const Matchers::StringMatcher& name_matcher) const {
  auto endpoint_config_dump = std::make_unique<envoy::admin::v3::EndpointsConfigDump>();
  // TODO(mattklein123): Add ability to see warming clusters in admin output.
  auto all_clusters = server_.clusterManager().clusters();
  for (const auto& [name, cluster_ref] : all_clusters.active_clusters_) {
    UNREFERENCED_PARAMETER(name);
    const Upstream::Cluster& cluster = cluster_ref.get();
    Upstream::ClusterInfoConstSharedPtr cluster_info = cluster.info();
    envoy::config::endpoint::v3::ClusterLoadAssignment cluster_load_assignment;

    // Prefer the EDS service name when configured; fall back to cluster name.
    if (!cluster_info->edsServiceName().empty()) {
      cluster_load_assignment.set_cluster_name(cluster_info->edsServiceName());
    } else {
      cluster_load_assignment.set_cluster_name(cluster_info->name());
    }
    if (!name_matcher.match(cluster_load_assignment.cluster_name())) {
      continue;
    }
    auto& policy = *cluster_load_assignment.mutable_policy();

    // Using MILLION as denominator in config dump.
    // Assumes dropOverload() is a fraction in [0, 1] — TODO confirm at caller.
    float value = cluster.dropOverload().value() * 1000000;
    if (value > 0) {
      auto* drop_overload = policy.add_drop_overloads();
      drop_overload->set_category(cluster.dropCategory());
      auto* percent = drop_overload->mutable_drop_percentage();
      percent->set_denominator(envoy::type::v3::FractionalPercent::MILLION);
      percent->set_numerator(uint32_t(value));
    }

    for (auto& host_set : cluster.prioritySet().hostSetsPerPriority()) {
      // NOTE(review): the shared policy's overprovisioning factor is rewritten
      // on every priority iteration, so the last priority's value wins.
      policy.mutable_overprovisioning_factor()->set_value(host_set->overprovisioningFactor());

      if (!host_set->hostsPerLocality().get().empty()) {
        // Locality-grouped hosts: one LocalityLbEndpoints per non-empty group,
        // indexed so the matching locality weight (if any) can be attached.
        for (int index = 0; index < static_cast<int>(host_set->hostsPerLocality().get().size());
             index++) {
          auto locality_host_set = host_set->hostsPerLocality().get()[index];

          if (!locality_host_set.empty()) {
            auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add();
            // Locality/priority taken from the group's first host.
            locality_lb_endpoint.mutable_locality()->MergeFrom(locality_host_set[0]->locality());
            locality_lb_endpoint.set_priority(locality_host_set[0]->priority());
            if (host_set->localityWeights() != nullptr && !host_set->localityWeights()->empty()) {
              locality_lb_endpoint.mutable_load_balancing_weight()->set_value(
                  (*host_set->localityWeights())[index]);
            }

            for (auto& host : locality_host_set) {
              addLbEndpoint(host, locality_lb_endpoint);
            }
          }
        }
      } else {
        // No locality grouping: emit one single-host LocalityLbEndpoints each.
        for (auto& host : host_set->hosts()) {
          auto& locality_lb_endpoint = *cluster_load_assignment.mutable_endpoints()->Add();
          locality_lb_endpoint.mutable_locality()->MergeFrom(host->locality());
          locality_lb_endpoint.set_priority(host->priority());
          addLbEndpoint(host, locality_lb_endpoint);
        }
      }
    }
    // API-sourced clusters go in the dynamic section, the rest in static.
    if (cluster_info->addedViaApi()) {
      auto& dynamic_endpoint = *endpoint_config_dump->mutable_dynamic_endpoint_configs()->Add();
      dynamic_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment);
    } else {
      auto& static_endpoint = *endpoint_config_dump->mutable_static_endpoint_configs()->Add();
      static_endpoint.mutable_endpoint_config()->PackFrom(cluster_load_assignment);
    }
  }
  return endpoint_config_dump;
}
357

            
358
void ConfigDumpHandler::addLbEndpoint(
359
    const Upstream::HostSharedPtr& host,
360
11
    envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint) const {
361
11
  auto& lb_endpoint = *locality_lb_endpoint.mutable_lb_endpoints()->Add();
362
11
  if (host->metadata() != nullptr) {
363
8
    lb_endpoint.mutable_metadata()->MergeFrom(*host->metadata());
364
8
  }
365
11
  lb_endpoint.mutable_load_balancing_weight()->set_value(host->weight());
366

            
367
11
  switch (host->coarseHealth()) {
368
11
  case Upstream::Host::Health::Healthy:
369
11
    lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::HEALTHY);
370
11
    break;
371
  case Upstream::Host::Health::Unhealthy:
372
    lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNHEALTHY);
373
    break;
374
  case Upstream::Host::Health::Degraded:
375
    lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::DEGRADED);
376
    break;
377
  default:
378
    lb_endpoint.set_health_status(envoy::config::core::v3::HealthStatus::UNKNOWN);
379
11
  }
380

            
381
11
  auto& endpoint = *lb_endpoint.mutable_endpoint();
382
11
  endpoint.set_hostname(host->hostname());
383
11
  Network::Utility::addressToProtobufAddress(*host->address(), *endpoint.mutable_address());
384
11
  if (host->addressListOrNull() != nullptr) {
385
8
    const auto& address_list = *host->addressListOrNull();
386
8
    if (address_list.size() > 1) {
387
      // skip first address of the list as the default address is not an additional one.
388
2
      for (auto it = std::next(address_list.begin()); it != address_list.end(); ++it) {
389
1
        auto& new_address = *endpoint.mutable_additional_addresses()->Add();
390
1
        Network::Utility::addressToProtobufAddress(**it, *new_address.mutable_address());
391
1
      }
392
1
    }
393
8
  }
394
11
  auto& health_check_config = *endpoint.mutable_health_check_config();
395
11
  health_check_config.set_hostname(host->hostnameForHealthChecks());
396
11
  if (host->healthCheckAddress()->asString() != host->address()->asString()) {
397
5
    health_check_config.set_port_value(host->healthCheckAddress()->ip()->port());
398
5
  }
399
11
}
400

            
401
} // namespace Server
402
} // namespace Envoy