/proc/self/cwd/source/server/admin/stats_render.cc
Line | Count | Source (jump to first uncovered line) |
1 | | #include "source/server/admin/stats_render.h" |
2 | | |
3 | | #include "source/common/stats/histogram_impl.h" |
4 | | |
5 | | #include "absl/strings/str_format.h" |
6 | | |
7 | | namespace Envoy { |
8 | | namespace Server { |
9 | | |
// Renderer for the flat-text admin /stats format. The only per-request state
// captured is the histogram bucket display mode from the query parameters;
// scalar stats render identically in every mode.
StatsTextRender::StatsTextRender(const StatsParams& params)
    : histogram_buckets_mode_(params.histogram_buckets_mode_) {}
12 | | |
13 | | void StatsTextRender::generate(Buffer::Instance& response, const std::string& name, |
14 | 0 | uint64_t value) { |
15 | 0 | response.addFragments({name, ": ", absl::StrCat(value), "\n"}); |
16 | 0 | } |
17 | | |
18 | | void StatsTextRender::generate(Buffer::Instance& response, const std::string& name, |
19 | 0 | const std::string& value) { |
20 | 0 | response.addFragments({name, ": \"", value, "\"\n"}); |
21 | 0 | } |
22 | | |
23 | | void StatsTextRender::generate(Buffer::Instance& response, const std::string& name, |
24 | 0 | const Stats::ParentHistogram& histogram) { |
25 | 0 | if (!histogram.used()) { |
26 | 0 | response.addFragments({name, ": No recorded values\n"}); |
27 | 0 | return; |
28 | 0 | } |
29 | | |
30 | 0 | switch (histogram_buckets_mode_) { |
31 | 0 | case Utility::HistogramBucketsMode::Unset: |
32 | 0 | case Utility::HistogramBucketsMode::Summary: |
33 | 0 | response.addFragments({name, ": ", histogram.quantileSummary(), "\n"}); |
34 | 0 | break; |
35 | 0 | case Utility::HistogramBucketsMode::Cumulative: |
36 | 0 | response.addFragments({name, ": ", histogram.bucketSummary(), "\n"}); |
37 | 0 | break; |
38 | 0 | case Utility::HistogramBucketsMode::Disjoint: |
39 | 0 | addDisjointBuckets(name, histogram, response); |
40 | 0 | break; |
41 | 0 | case Utility::HistogramBucketsMode::Detailed: |
42 | 0 | response.addFragments({name, ":\n totals="}); |
43 | 0 | addDetail(histogram.detailedTotalBuckets(), response); |
44 | 0 | response.add("\n intervals="); |
45 | 0 | addDetail(histogram.detailedIntervalBuckets(), response); |
46 | 0 | response.addFragments({"\n summary=", histogram.quantileSummary(), "\n"}); |
47 | 0 | break; |
48 | 0 | } |
49 | 0 | } |
50 | | |
51 | 0 | void StatsTextRender::finalize(Buffer::Instance&) {} |
52 | | |
53 | | void StatsTextRender::addDetail(const std::vector<Stats::ParentHistogram::Bucket>& buckets, |
54 | 0 | Buffer::Instance& response) { |
55 | 0 | absl::string_view delim = ""; |
56 | 0 | for (const Stats::ParentHistogram::Bucket& bucket : buckets) { |
57 | 0 | response.addFragments({delim, absl::StrFormat("%.15g,%.15g:%lu", bucket.lower_bound_, |
58 | 0 | bucket.width_, bucket.count_)}); |
59 | 0 | delim = ", "; |
60 | 0 | } |
61 | 0 | } |
62 | | |
63 | | // Computes disjoint buckets as text and adds them to the response buffer. |
64 | | void StatsTextRender::addDisjointBuckets(const std::string& name, |
65 | | const Stats::ParentHistogram& histogram, |
66 | 0 | Buffer::Instance& response) { |
67 | 0 | if (!histogram.used()) { |
68 | 0 | response.addFragments({name, ": No recorded values\n"}); |
69 | 0 | return; |
70 | 0 | } |
71 | 0 | response.addFragments({name, ": "}); |
72 | 0 | std::vector<absl::string_view> bucket_summary; |
73 | |
|
74 | 0 | const Stats::HistogramStatistics& interval_statistics = histogram.intervalStatistics(); |
75 | 0 | Stats::ConstSupportedBuckets& supported_buckets = interval_statistics.supportedBuckets(); |
76 | 0 | const std::vector<uint64_t> disjoint_interval_buckets = |
77 | 0 | interval_statistics.computeDisjointBuckets(); |
78 | 0 | const std::vector<uint64_t> disjoint_cumulative_buckets = |
79 | 0 | histogram.cumulativeStatistics().computeDisjointBuckets(); |
80 | | // Make sure all vectors are the same size. |
81 | 0 | ASSERT(disjoint_interval_buckets.size() == disjoint_cumulative_buckets.size()); |
82 | 0 | ASSERT(disjoint_cumulative_buckets.size() == supported_buckets.size()); |
83 | 0 | const size_t min_size = std::min({disjoint_interval_buckets.size(), |
84 | 0 | disjoint_cumulative_buckets.size(), supported_buckets.size()}); |
85 | 0 | std::vector<std::string> bucket_strings; |
86 | 0 | bucket_strings.reserve(min_size); |
87 | 0 | for (size_t i = 0; i < min_size; ++i) { |
88 | 0 | if (i != 0) { |
89 | 0 | bucket_summary.push_back(" "); |
90 | 0 | } |
91 | 0 | bucket_strings.push_back(fmt::format("B{:g}({},{})", supported_buckets[i], |
92 | 0 | disjoint_interval_buckets[i], |
93 | 0 | disjoint_cumulative_buckets[i])); |
94 | 0 | bucket_summary.push_back(bucket_strings.back()); |
95 | 0 | } |
96 | 0 | bucket_summary.push_back("\n"); |
97 | 0 | response.addFragments(bucket_summary); |
98 | 0 | } |
99 | | |
// JSON renderer: sets the JSON content-type and opens the streaming JSON
// context against `response`. Note: `response` is retained by reference in
// both json_ and response_, so it must outlive this renderer.
StatsJsonRender::StatsJsonRender(Http::ResponseHeaderMap& response_headers,
                                 Buffer::Instance& response, const StatsParams& params)
    : histogram_buckets_mode_(params.histogram_buckets_mode_),
      json_(std::make_unique<JsonContext>(response)), response_(response) {
  response_headers.setReferenceContentType(Http::Headers::get().ContentTypeValues.Json);
}
106 | | |
// Opens the root JSON object and the "stats" array that every stat entry is
// appended into.
StatsJsonRender::JsonContext::JsonContext(Buffer::Instance& response)
    : streamer_(response), stats_map_(streamer_.makeRootMap()) {
  // We don't create a JSON data model for the stats output, as that makes
  // streaming difficult. Instead we emit the preamble in the constructor here,
  // and create json models for each stats entry.
  stats_map_->addKey("stats");
  stats_array_ = stats_map_->addArray();
}
115 | | |
// Moves accumulated output from the streamer's buffer (response_) into the
// buffer the caller passed, when they differ. NOTE(review): callers
// presumably pass a distinct buffer when draining output in chunks — confirm
// against the admin handler call sites.
void StatsJsonRender::drainIfNeeded(Buffer::Instance& response) {
  if (&response_ != &response) {
    response.move(response_);
  }
}
121 | | |
122 | | // Buffers a JSON fragment for a numeric stats, flushing to the response |
123 | | // buffer once we exceed JsonStatsFlushCount stats. |
124 | | void StatsJsonRender::generate(Buffer::Instance& response, const std::string& name, |
125 | 0 | uint64_t value) { |
126 | 0 | ASSERT(!histograms_initialized_); |
127 | 0 | json_->stats_array_->addMap()->addEntries({{"name", name}, {"value", value}}); |
128 | 0 | drainIfNeeded(response); |
129 | 0 | } |
130 | | |
131 | | // Buffers a JSON fragment for a text-readout stat, flushing to the response |
132 | | // buffer once we exceed JsonStatsFlushCount stats. |
133 | | void StatsJsonRender::generate(Buffer::Instance& response, const std::string& name, |
134 | 0 | const std::string& value) { |
135 | 0 | ASSERT(!histograms_initialized_); |
136 | 0 | json_->stats_array_->addMap()->addEntries({{"name", name}, {"value", value}}); |
137 | 0 | drainIfNeeded(response); |
138 | 0 | } |
139 | | |
// In JSON we buffer all histograms and don't write them immediately, so we
// can, in one JSON structure, emit shared attributes of all histograms and
// each individual histogram.
//
// This is counter to the goals of streaming and chunked interfaces, but
// usually there are far fewer histograms than counters or gauges.
//
// We can further optimize this by streaming out the histograms object, one
// histogram at a time, in case buffering all the histograms in Envoy
// buffers up too much memory.
void StatsJsonRender::generate(Buffer::Instance& response, const std::string& name,
                               const Stats::ParentHistogram& histogram) {
  // The first histogram opens the "histograms" structure, whose shape
  // depends on the bucket mode.
  if (!histograms_initialized_) {
    renderHistogramStart();
  }

  switch (histogram_buckets_mode_) {
  case Utility::HistogramBucketsMode::Unset:
  case Utility::HistogramBucketsMode::Summary: {
    // {"name": ..., "values": [{"cumulative": ..., "interval": ...}, ...]}
    Json::Streamer::MapPtr map = json_->histogram_array_->addMap();
    map->addEntries({{"name", name}});
    map->addKey("values");
    populatePercentiles(histogram, *map);
    break;
  }
  case Utility::HistogramBucketsMode::Cumulative: {
    // Cumulative view: buckets as computed by the statistics objects.
    const Stats::HistogramStatistics& interval_statistics = histogram.intervalStatistics();
    const std::vector<uint64_t>& interval_buckets = interval_statistics.computedBuckets();
    const std::vector<uint64_t>& cumulative_buckets =
        histogram.cumulativeStatistics().computedBuckets();
    collectBuckets(name, histogram, interval_buckets, cumulative_buckets);
    break;
  }
  case Utility::HistogramBucketsMode::Disjoint: {
    // Disjoint view: note these are freshly computed vectors (by value),
    // unlike the cumulative case which borrows references.
    const Stats::HistogramStatistics& interval_statistics = histogram.intervalStatistics();
    const std::vector<uint64_t> interval_buckets = interval_statistics.computeDisjointBuckets();
    const std::vector<uint64_t> cumulative_buckets =
        histogram.cumulativeStatistics().computeDisjointBuckets();
    collectBuckets(name, histogram, interval_buckets, cumulative_buckets);
    break;
  }
  case Utility::HistogramBucketsMode::Detailed: {
    generateHistogramDetail(name, histogram, *json_->histogram_array_->addMap());
    break;
  }
  }
  drainIfNeeded(response);
}
188 | | |
189 | 0 | void StatsJsonRender::populateSupportedPercentiles(Json::Streamer::Array& array) { |
190 | 0 | Stats::HistogramStatisticsImpl empty_statistics; |
191 | 0 | std::vector<double> supported = empty_statistics.supportedQuantiles(); |
192 | 0 | std::vector<Json::Streamer::Value> views(supported.size()); |
193 | 0 | for (uint32_t i = 0, n = supported.size(); i < n; ++i) { |
194 | 0 | views[i] = supported[i] * 100; |
195 | 0 | } |
196 | 0 | array.addEntries(views); |
197 | 0 | } |
198 | | |
199 | | void StatsJsonRender::populatePercentiles(const Stats::ParentHistogram& histogram, |
200 | 0 | Json::Streamer::Map& map) { |
201 | 0 | Json::Streamer::ArrayPtr array = map.addArray(); |
202 | 0 | std::vector<double> totals = histogram.cumulativeStatistics().computedQuantiles(), |
203 | 0 | intervals = histogram.intervalStatistics().computedQuantiles(); |
204 | 0 | uint32_t min_size = std::min(totals.size(), intervals.size()); |
205 | 0 | ASSERT(totals.size() == min_size); |
206 | 0 | ASSERT(intervals.size() == min_size); |
207 | 0 | for (uint32_t i = 0; i < min_size; ++i) { |
208 | 0 | array->addMap()->addEntries({{"cumulative", totals[i]}, {"interval", intervals[i]}}); |
209 | 0 | } |
210 | 0 | }; |
211 | | |
212 | 0 | void StatsJsonRender::renderHistogramStart() { |
213 | 0 | histograms_initialized_ = true; |
214 | 0 | json_->histogram_map1_ = json_->stats_array_->addMap(); |
215 | 0 | json_->histogram_map1_->addKey("histograms"); |
216 | 0 | switch (histogram_buckets_mode_) { |
217 | 0 | case Utility::HistogramBucketsMode::Detailed: |
218 | 0 | json_->histogram_map2_ = json_->histogram_map1_->addMap(); |
219 | 0 | json_->histogram_map2_->addKey("supported_percentiles"); |
220 | 0 | { populateSupportedPercentiles(*json_->histogram_map2_->addArray()); } |
221 | 0 | json_->histogram_map2_->addKey("details"); |
222 | 0 | json_->histogram_array_ = json_->histogram_map2_->addArray(); |
223 | 0 | break; |
224 | 0 | case Utility::HistogramBucketsMode::Unset: |
225 | 0 | case Utility::HistogramBucketsMode::Summary: |
226 | 0 | json_->histogram_map2_ = json_->histogram_map1_->addMap(); |
227 | 0 | json_->histogram_map2_->addKey("supported_quantiles"); |
228 | 0 | { populateSupportedPercentiles(*json_->histogram_map2_->addArray()); } |
229 | 0 | json_->histogram_map2_->addKey("computed_quantiles"); |
230 | 0 | json_->histogram_array_ = json_->histogram_map2_->addArray(); |
231 | 0 | break; |
232 | 0 | case Utility::HistogramBucketsMode::Cumulative: |
233 | 0 | case Utility::HistogramBucketsMode::Disjoint: |
234 | 0 | json_->histogram_array_ = json_->histogram_map1_->addArray(); |
235 | 0 | break; |
236 | 0 | } |
237 | 0 | } |
238 | | |
// Emits one Detailed-mode histogram record: name, total buckets, interval
// buckets, then the computed percentiles. The key order here determines the
// order of fields in the emitted JSON.
void StatsJsonRender::generateHistogramDetail(const std::string& name,
                                              const Stats::ParentHistogram& histogram,
                                              Json::Streamer::Map& map) {
  // Now we produce the stream-able histogram records, without using the json intermediate
  // representation or serializer.
  map.addEntries({{"name", name}});
  map.addKey("totals");
  populateBucketsVerbose(histogram.detailedTotalBuckets(), map);
  map.addKey("intervals");
  populateBucketsVerbose(histogram.detailedIntervalBuckets(), map);
  map.addKey("percentiles");
  populatePercentiles(histogram, map);
}
252 | | |
253 | | void StatsJsonRender::populateBucketsVerbose( |
254 | 0 | const std::vector<Stats::ParentHistogram::Bucket>& buckets, Json::Streamer::Map& map) { |
255 | 0 | Json::Streamer::ArrayPtr buckets_array = map.addArray(); |
256 | 0 | for (const Stats::ParentHistogram::Bucket& bucket : buckets) { |
257 | 0 | buckets_array->addMap()->addEntries( |
258 | 0 | {{"lower_bound", bucket.lower_bound_}, {"width", bucket.width_}, {"count", bucket.count_}}); |
259 | 0 | } |
260 | 0 | } |
261 | | |
// Since histograms are buffered (see above), the finalize() method generates
// all of them.
void StatsJsonRender::finalize(Buffer::Instance& response) {
  // Destroying the JsonContext closes every still-open JSON map/array,
  // writing the trailing brackets into the streamer's buffer; the final
  // drain then moves any remaining bytes into the caller's buffer.
  json_.reset();
  drainIfNeeded(response);
}
268 | | |
269 | | // Collects the buckets from the specified histogram, using either the |
270 | | // cumulative or disjoint views, as controlled by buckets_fn. |
271 | | void StatsJsonRender::collectBuckets(const std::string& name, |
272 | | const Stats::ParentHistogram& histogram, |
273 | | const std::vector<uint64_t>& interval_buckets, |
274 | 0 | const std::vector<uint64_t>& cumulative_buckets) { |
275 | 0 | const Stats::HistogramStatistics& interval_statistics = histogram.intervalStatistics(); |
276 | 0 | Stats::ConstSupportedBuckets& supported_buckets = interval_statistics.supportedBuckets(); |
277 | | |
278 | | // Make sure all vectors are the same size. |
279 | 0 | ASSERT(interval_buckets.size() == cumulative_buckets.size()); |
280 | 0 | ASSERT(cumulative_buckets.size() == supported_buckets.size()); |
281 | 0 | size_t min_size = |
282 | 0 | std::min({interval_buckets.size(), cumulative_buckets.size(), supported_buckets.size()}); |
283 | |
|
284 | 0 | Json::Streamer::MapPtr map = json_->histogram_array_->addMap(); |
285 | 0 | map->addEntries({{"name", name}}); |
286 | 0 | map->addKey("buckets"); |
287 | 0 | Json::Streamer::ArrayPtr buckets = map->addArray(); |
288 | 0 | for (uint32_t i = 0; i < min_size; ++i) { |
289 | 0 | Json::Streamer::MapPtr bucket_map = buckets->addMap(); |
290 | 0 | bucket_map->addEntries({{"upper_bound", supported_buckets[i]}, |
291 | 0 | {"interval", interval_buckets[i]}, |
292 | 0 | {"cumulative", cumulative_buckets[i]}}); |
293 | 0 | } |
294 | 0 | } |
295 | | |
296 | | } // namespace Server |
297 | | } // namespace Envoy |