/src/openvswitch/lib/coverage.c
Line | Count | Source
1 | | /* |
2 | | * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | #include "coverage.h" |
19 | | #include <inttypes.h> |
20 | | #include <stdlib.h> |
21 | | #include "openvswitch/dynamic-string.h" |
22 | | #include "hash.h" |
23 | | #include "svec.h" |
24 | | #include "timeval.h" |
25 | | #include "unixctl.h" |
26 | | #include "util.h" |
27 | | #include "openvswitch/vlog.h" |
28 | | |
29 | | VLOG_DEFINE_THIS_MODULE(coverage); |
30 | | |
31 | | /* The coverage counters. */ |
32 | | static struct coverage_counter **coverage_counters = NULL; |
33 | | static size_t n_coverage_counters = 0; |
34 | | static size_t allocated_coverage_counters = 0; |
35 | | |
36 | | static struct ovs_mutex coverage_mutex = OVS_MUTEX_INITIALIZER; |
37 | | |
38 | | DEFINE_STATIC_PER_THREAD_DATA(long long int, coverage_clear_time, LLONG_MIN); |
39 | | static long long int coverage_run_time = LLONG_MIN; |
40 | | |
41 | | /* Running index used to compute the current slot in the moving-average arrays. */ |
42 | | static unsigned int idx_count = 0; |
43 | | |
44 | | static void coverage_read(struct svec *); |
45 | | static unsigned int coverage_array_sum(const unsigned int *arr, |
46 | | const unsigned int len); |
47 | | static bool coverage_read_counter(const char *name, |
48 | | unsigned long long int *count); |
49 | | |
50 | | /* Registers a coverage counter with the coverage core */ |
51 | | void |
52 | | coverage_counter_register(struct coverage_counter* counter) |
53 | 254 | { |
54 | 254 | if (n_coverage_counters >= allocated_coverage_counters) { |
55 | 16 | coverage_counters = x2nrealloc(coverage_counters, |
56 | 16 | &allocated_coverage_counters, |
57 | 16 | sizeof(struct coverage_counter*)); |
58 | 16 | } |
59 | 254 | coverage_counters[n_coverage_counters++] = counter; |
60 | 254 | } |
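/*
 * Illustrative sketch, not part of coverage.c: client modules normally do not
 * call coverage_counter_register() directly.  The COVERAGE_DEFINE() macro in
 * coverage.h defines the counter (and, in current trees, arranges for it to be
 * registered at startup), while COVERAGE_INC() bumps the calling thread's
 * private count.  The module and counter names below are hypothetical.
 */
#include <config.h>
#include "coverage.h"

COVERAGE_DEFINE(example_upcall_drop);

static void
example_handle_upcall_overflow(void)
{
    /* Cheap per-thread increment; folded into the counter's global 'total'
     * later by coverage_clear() or coverage_try_clear(). */
    COVERAGE_INC(example_upcall_drop);
}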
61 | | |
62 | | static void |
63 | | coverage_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED, |
64 | | const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED) |
65 | 0 | { |
66 | 0 | struct svec lines; |
67 | 0 | char *reply; |
68 | |
69 | 0 | svec_init(&lines); |
70 | 0 | coverage_read(&lines); |
71 | 0 | reply = svec_join(&lines, "\n", "\n"); |
72 | 0 | unixctl_command_reply(conn, reply); |
73 | 0 | free(reply); |
74 | 0 | svec_destroy(&lines); |
75 | 0 | } |
76 | | |
77 | | static void |
78 | | coverage_unixctl_read_counter(struct unixctl_conn *conn, int argc OVS_UNUSED, |
79 | | const char *argv[], void *aux OVS_UNUSED) |
80 | 0 | { |
81 | 0 | unsigned long long count; |
82 | 0 | char *reply; |
83 | 0 | bool ok; |
84 | |
85 | 0 | ok = coverage_read_counter(argv[1], &count); |
86 | 0 | if (!ok) { |
87 | 0 | unixctl_command_reply_error(conn, "No such counter"); |
88 | 0 | return; |
89 | 0 | } |
90 | | |
91 | 0 | reply = xasprintf("%llu\n", count); |
92 | 0 | unixctl_command_reply(conn, reply); |
93 | 0 | free(reply); |
94 | 0 | } |
95 | | |
96 | | void |
97 | | coverage_init(void) |
98 | 0 | { |
99 | 0 | unixctl_command_register("coverage/show", "", 0, 0, |
100 | 0 | coverage_unixctl_show, NULL); |
101 | 0 | unixctl_command_register("coverage/read-counter", "COUNTER", 1, 1, |
102 | 0 | coverage_unixctl_read_counter, NULL); |
103 | 0 | } |
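/*
 * Usage note (illustrative, not part of coverage.c): once a daemon has called
 * coverage_init(), the commands registered above can be invoked through
 * ovs-appctl against that daemon's control socket, for example:
 *
 *     ovs-appctl coverage/show
 *     ovs-appctl coverage/read-counter COUNTER
 *
 * where COUNTER is the name of any counter created with COVERAGE_DEFINE().
 */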
104 | | |
105 | | /* Sorts coverage counters in descending order by total, breaking ties |
106 | | * alphabetically by name. */ |
107 | | static int |
108 | | compare_coverage_counters(const void *a_, const void *b_) |
109 | 0 | { |
110 | 0 | const struct coverage_counter *const *ap = a_; |
111 | 0 | const struct coverage_counter *const *bp = b_; |
112 | 0 | const struct coverage_counter *a = *ap; |
113 | 0 | const struct coverage_counter *b = *bp; |
114 | 0 | if (a->total != b->total) { |
115 | 0 | return a->total < b->total ? 1 : -1; |
116 | 0 | } else { |
117 | 0 | return strcmp(a->name, b->name); |
118 | 0 | } |
119 | 0 | } |
120 | | |
121 | | static uint32_t |
122 | | coverage_hash(void) |
123 | 0 | { |
124 | 0 | struct coverage_counter **c; |
125 | 0 | uint32_t hash = 0; |
126 | 0 | int n_groups, i; |
127 | | |
128 | | /* Sort coverage counters into groups with equal totals. */ |
129 | 0 | c = xmalloc(n_coverage_counters * sizeof *c); |
130 | 0 | ovs_mutex_lock(&coverage_mutex); |
131 | 0 | for (i = 0; i < n_coverage_counters; i++) { |
132 | 0 | c[i] = coverage_counters[i]; |
133 | 0 | } |
134 | 0 | ovs_mutex_unlock(&coverage_mutex); |
135 | 0 | qsort(c, n_coverage_counters, sizeof *c, compare_coverage_counters); |
136 | | |
137 | | /* Hash the names in each group along with the rank. */ |
138 | 0 | n_groups = 0; |
139 | 0 | for (i = 0; i < n_coverage_counters; ) { |
140 | 0 | int j; |
141 | |
142 | 0 | if (!c[i]->total) { |
143 | 0 | break; |
144 | 0 | } |
145 | 0 | n_groups++; |
146 | 0 | hash = hash_int(i, hash); |
147 | 0 | for (j = i; j < n_coverage_counters; j++) { |
148 | 0 | if (c[j]->total != c[i]->total) { |
149 | 0 | break; |
150 | 0 | } |
151 | 0 | hash = hash_string(c[j]->name, hash); |
152 | 0 | } |
153 | 0 | i = j; |
154 | 0 | } |
155 | |
156 | 0 | free(c); |
157 | |
158 | 0 | return hash_int(n_groups, hash); |
159 | 0 | } |
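/*
 * Worked example with hypothetical totals: suppose four counters currently
 * have totals A=10, B=10, C=3, D=0.  After sorting, the nonzero groups are
 * {A, B} starting at rank 0 and {C} starting at rank 2, so the hash mixes in
 * hash_int(0), then the names "A" and "B", then hash_int(2) and "C", and
 * finally the group count 2.  D is skipped because its total is zero, so a
 * run in which D had also fired would hash differently even if every other
 * total were identical.
 */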
160 | | |
161 | | static bool |
162 | | coverage_hit(uint32_t hash) |
163 | 0 | { |
164 | 0 | enum { HIT_BITS = 1024, BITS_PER_WORD = 32 }; |
165 | 0 | static uint32_t hit[HIT_BITS / BITS_PER_WORD]; |
166 | 0 | BUILD_ASSERT_DECL(IS_POW2(HIT_BITS)); |
167 | |
168 | 0 | static long long int next_clear = LLONG_MIN; |
169 | |
170 | 0 | unsigned int bit_index = hash & (HIT_BITS - 1); |
171 | 0 | unsigned int word_index = bit_index / BITS_PER_WORD; |
172 | 0 | unsigned int word_mask = 1u << (bit_index % BITS_PER_WORD); |
173 | | |
174 | | /* Expire coverage hash suppression once a day. */ |
175 | 0 | if (time_msec() >= next_clear) { |
176 | 0 | memset(hit, 0, sizeof hit); |
177 | 0 | next_clear = time_msec() + 60 * 60 * 24 * 1000LL; |
178 | 0 | } |
179 | |
180 | 0 | if (hit[word_index] & word_mask) { |
181 | 0 | return true; |
182 | 0 | } else { |
183 | 0 | hit[word_index] |= word_mask; |
184 | 0 | return false; |
185 | 0 | } |
186 | 0 | } |
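/*
 * Worked example of the indexing above, with an arbitrarily chosen hash:
 * for hash = 0x1234abcd, HIT_BITS = 1024 and BITS_PER_WORD = 32 give
 *     bit_index  = 0x1234abcd & 0x3ff = 973
 *     word_index = 973 / 32           = 30
 *     word_mask  = 1u << (973 % 32)   = 1u << 13
 * The first call with this hash sets the bit and returns false; subsequent
 * calls within the same day find the bit set and return true, which is what
 * lets coverage_log() suppress duplicate detail dumps.
 */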
187 | | |
188 | | /* Logs the coverage counters, unless a similar set of events has already been |
189 | | * logged. |
190 | | * |
191 | | * This function logs at log level VLL_INFO. Use care before adjusting this |
192 | | * level, because depending on its configuration, syslogd can write changes |
193 | | * synchronously, which can cause the coverage messages to take several seconds |
194 | | * to write. */ |
195 | | void |
196 | | coverage_log(void) |
197 | 0 | { |
198 | 0 | static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 3); |
199 | |
200 | 0 | if (!VLOG_DROP_INFO(&rl)) { |
201 | 0 | uint32_t hash = coverage_hash(); |
202 | 0 | if (coverage_hit(hash)) { |
203 | 0 | VLOG_INFO("Skipping details of duplicate event coverage for " |
204 | 0 | "hash=%08"PRIx32, hash); |
205 | 0 | } else { |
206 | 0 | struct svec lines; |
207 | 0 | const char *line; |
208 | 0 | size_t i; |
209 | |
210 | 0 | svec_init(&lines); |
211 | 0 | coverage_read(&lines); |
212 | 0 | SVEC_FOR_EACH (i, line, &lines) { |
213 | 0 | VLOG_INFO("%s", line); |
214 | 0 | } |
215 | 0 | svec_destroy(&lines); |
216 | 0 | } |
217 | 0 | } |
218 | 0 | } |
219 | | |
220 | | /* Adds coverage counter information to 'lines'. */ |
221 | | static void |
222 | | coverage_read(struct svec *lines) |
223 | 0 | { |
224 | 0 | struct coverage_counter **c = coverage_counters; |
225 | 0 | unsigned long long int *totals; |
226 | 0 | size_t n_never_hit; |
227 | 0 | uint32_t hash; |
228 | 0 | size_t i; |
229 | |
230 | 0 | hash = coverage_hash(); |
231 | |
232 | 0 | n_never_hit = 0; |
233 | 0 | svec_add_nocopy(lines, |
234 | 0 | xasprintf("Event coverage, avg rate over last: %d " |
235 | 0 | "seconds, last minute, last hour, " |
236 | 0 | "hash=%08"PRIx32":", |
237 | 0 | COVERAGE_RUN_INTERVAL/1000, hash)); |
238 | |
239 | 0 | totals = xmalloc(n_coverage_counters * sizeof *totals); |
240 | 0 | ovs_mutex_lock(&coverage_mutex); |
241 | 0 | for (i = 0; i < n_coverage_counters; i++) { |
242 | 0 | totals[i] = c[i]->total; |
243 | 0 | } |
244 | 0 | ovs_mutex_unlock(&coverage_mutex); |
245 | |
246 | 0 | for (i = 0; i < n_coverage_counters; i++) { |
247 | 0 | if (totals[i]) { |
248 | | /* Shows the averaged per-second rates for the last |
249 | | * COVERAGE_RUN_INTERVAL, the last minute, and |
250 | | * the last hour. */ |
251 | 0 | svec_add_nocopy(lines, |
252 | 0 | xasprintf("%-24s %5.1f/sec %9.3f/sec " |
253 | 0 | "%13.4f/sec total: %llu", |
254 | 0 | c[i]->name, |
255 | 0 | (c[i]->min[(idx_count - 1) % MIN_AVG_LEN] |
256 | 0 | * 1000.0 / COVERAGE_RUN_INTERVAL), |
257 | 0 | coverage_array_sum(c[i]->min, MIN_AVG_LEN) / 60.0, |
258 | 0 | coverage_array_sum(c[i]->hr, HR_AVG_LEN) / 3600.0, |
259 | 0 | totals[i])); |
260 | 0 | } else { |
261 | 0 | n_never_hit++; |
262 | 0 | } |
263 | 0 | } |
264 | |
265 | 0 | svec_add_nocopy(lines, xasprintf("%"PRIuSIZE" events never hit", n_never_hit)); |
266 | 0 | free(totals); |
267 | 0 | } |
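/*
 * Example of the resulting output (counter names and all numbers are made up;
 * "5 seconds" assumes COVERAGE_RUN_INTERVAL is 5000 ms):
 *
 *     Event coverage, avg rate over last: 5 seconds, last minute, last hour, hash=0089ab12:
 *     example_counter_a        12.0/sec     9.600/sec        3.1000/sec total: 123456
 *     example_counter_b         0.2/sec     0.017/sec        0.0003/sec total: 8
 *     252 events never hit
 */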
268 | | |
269 | | /* Runs approximately once every COVERAGE_CLEAR_INTERVAL milliseconds to |
270 | | * synchronize the per-thread counters with the global counters. Every thread |
271 | | * maintains a separate timer to ensure all counters are periodically aggregated. |
272 | | * |
273 | | * Uses 'ovs_mutex_trylock()' if 'trylock' is true. This prevents multiple |
274 | | * performance-critical threads from contending over the 'coverage_mutex'. |
275 | | * |
276 | | */ |
277 | | static void |
278 | | coverage_clear__(bool trylock) |
279 | 0 | { |
280 | 0 | long long int now, *thread_time; |
281 | |
282 | 0 | now = time_msec(); |
283 | 0 | thread_time = coverage_clear_time_get(); |
284 | | |
285 | | /* Initialize the coverage_clear_time. */ |
286 | 0 | if (*thread_time == LLONG_MIN) { |
287 | 0 | *thread_time = now + COVERAGE_CLEAR_INTERVAL; |
288 | 0 | } |
289 | |
290 | 0 | if (now >= *thread_time) { |
291 | 0 | size_t i; |
292 | |
293 | 0 | if (trylock) { |
294 | | /* Returns without aggregating if the lock cannot be acquired. */ |
295 | 0 | if (ovs_mutex_trylock(&coverage_mutex)) { |
296 | 0 | return; |
297 | 0 | } |
298 | 0 | } else { |
299 | 0 | ovs_mutex_lock(&coverage_mutex); |
300 | 0 | } |
301 | | |
302 | 0 | for (i = 0; i < n_coverage_counters; i++) { |
303 | 0 | struct coverage_counter *c = coverage_counters[i]; |
304 | 0 | c->total += c->count(); |
305 | 0 | } |
306 | 0 | ovs_mutex_unlock(&coverage_mutex); |
307 | 0 | *thread_time = now + COVERAGE_CLEAR_INTERVAL; |
308 | 0 | } |
309 | 0 | } |
310 | | |
311 | | void |
312 | | coverage_clear(void) |
313 | 0 | { |
314 | 0 | coverage_clear__(false); |
315 | 0 | } |
316 | | |
317 | | void |
318 | | coverage_try_clear(void) |
319 | 0 | { |
320 | 0 | coverage_clear__(true); |
321 | 0 | } |
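/*
 * Minimal sketch of a hypothetical caller, not part of coverage.c: a
 * long-running thread can keep its counters flowing into the global state by
 * placing these calls directly in its main loop; both do little work until
 * their respective intervals expire.
 */
static void
example_main_loop_iteration(void)
{
    coverage_try_clear();   /* Fold this thread's counts into the totals,
                             * skipping the work if the mutex is contended. */
    coverage_run();         /* Refresh the per-minute and per-hour history. */
}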
322 | | |
323 | | /* Runs approximately once every COVERAGE_RUN_INTERVAL milliseconds to update |
324 | | * each coverage counter's 'min' and 'hr' arrays. The 'min' array accumulates |
325 | | * the per-interval counts for the last minute; the 'hr' array accumulates |
326 | | * per-minute counts for the last hour. Every thread may call this function. */ |
327 | | void |
328 | | coverage_run(void) |
329 | 0 | { |
330 | 0 | struct coverage_counter **c = coverage_counters; |
331 | 0 | long long int now; |
332 | |
333 | 0 | ovs_mutex_lock(&coverage_mutex); |
334 | 0 | now = time_msec(); |
335 | | /* Initialize the coverage_run_time. */ |
336 | 0 | if (coverage_run_time == LLONG_MIN) { |
337 | 0 | coverage_run_time = now + COVERAGE_RUN_INTERVAL; |
338 | 0 | } |
339 | |
340 | 0 | if (now >= coverage_run_time) { |
341 | 0 | size_t i, j; |
342 | | /* Computes the number of COVERAGE_RUN_INTERVAL slots, since |
343 | | * it is possible that the actual run interval is a multiple of |
344 | | * COVERAGE_RUN_INTERVAL. */ |
345 | 0 | int slots = (now - coverage_run_time) / COVERAGE_RUN_INTERVAL + 1; |
346 | |
347 | 0 | for (i = 0; i < n_coverage_counters; i++) { |
348 | 0 | unsigned int count, portion; |
349 | 0 | unsigned int idx = idx_count; |
350 | | |
351 | | /* Computes the difference between the current total and the one |
352 | | * recorded in the last invocation of coverage_run(). */ |
353 | 0 | count = c[i]->total - c[i]->last_total; |
354 | 0 | c[i]->last_total = c[i]->total; |
355 | | /* The count over the time interval is distributed evenly |
356 | | * among the slots by computing a per-slot portion. */ |
357 | 0 | portion = count / slots; |
358 | |
359 | 0 | for (j = 0; j < slots; j++) { |
360 | | /* Updates the index variables. */ |
361 | | /* The m_idx cycles from 0 to MIN_AVG_LEN - 1. Every time |
362 | | * m_idx completes a cycle (one cycle is one minute), |
363 | | * h_idx is incremented by 1. */ |
364 | 0 | unsigned int m_idx = idx % MIN_AVG_LEN; |
365 | 0 | unsigned int h_idx = idx / MIN_AVG_LEN; |
366 | |
367 | 0 | c[i]->min[m_idx] = portion + (j == (slots - 1) |
368 | 0 | ? count % slots : 0); |
369 | 0 | c[i]->hr[h_idx] = m_idx == 0 |
370 | 0 | ? c[i]->min[m_idx] |
371 | 0 | : (c[i]->hr[h_idx] + c[i]->min[m_idx]); |
372 | | /* This is to guarantee that h_idx ranges from 0 to 59. */ |
373 | 0 | idx = (idx + 1) % (MIN_AVG_LEN * HR_AVG_LEN); |
374 | 0 | } |
375 | 0 | } |
376 | | |
377 | | /* Updates the global index variables. */ |
378 | 0 | idx_count = (idx_count + slots) % (MIN_AVG_LEN * HR_AVG_LEN); |
379 | | /* Updates the run time. */ |
380 | 0 | coverage_run_time = now + COVERAGE_RUN_INTERVAL; |
381 | 0 | } |
382 | 0 | ovs_mutex_unlock(&coverage_mutex); |
383 | 0 | } |
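/*
 * Worked example of the slot arithmetic above (assuming COVERAGE_RUN_INTERVAL
 * is 5000 ms): if this function runs 12 seconds after coverage_run_time, then
 * slots = 12000 / 5000 + 1 = 3.  A counter whose total grew by 10 in that
 * window gets portion = 10 / 3 = 3 written into the first two slots, and the
 * last slot receives portion + 10 % 3 = 4, so all 10 events are accounted for
 * (3 + 3 + 4 = 10).
 */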
384 | | |
385 | | static unsigned int |
386 | | coverage_array_sum(const unsigned int *arr, const unsigned int len) |
387 | 0 | { |
388 | 0 | unsigned int sum = 0; |
389 | 0 | size_t i; |
390 | |
391 | 0 | ovs_mutex_lock(&coverage_mutex); |
392 | 0 | for (i = 0; i < len; i++) { |
393 | 0 | sum += arr[i]; |
394 | 0 | } |
395 | 0 | ovs_mutex_unlock(&coverage_mutex); |
396 | 0 | return sum; |
397 | 0 | } |
398 | | |
399 | | static bool |
400 | | coverage_read_counter(const char *name, unsigned long long int *count) |
401 | 0 | { |
402 | 0 | for (size_t i = 0; i < n_coverage_counters; i++) { |
403 | 0 | struct coverage_counter *c = coverage_counters[i]; |
404 | |
405 | 0 | if (!strcmp(c->name, name)) { |
406 | 0 | ovs_mutex_lock(&coverage_mutex); |
407 | 0 | c->total += c->count(); |
408 | 0 | *count = c->total; |
409 | 0 | ovs_mutex_unlock(&coverage_mutex); |
410 | |
411 | 0 | return true; |
412 | 0 | } |
413 | 0 | } |
414 | | |
415 | 0 | return false; |
416 | 0 | } |