/src/openvswitch/lib/timeval.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | #include "timeval.h" |
19 | | #include <errno.h> |
20 | | #include <poll.h> |
21 | | #include <pthread.h> |
22 | | #include <signal.h> |
23 | | #include <stdlib.h> |
24 | | #include <string.h> |
25 | | #include <sys/time.h> |
26 | | #include <sys/resource.h> |
27 | | #include <unistd.h> |
28 | | #include "coverage.h" |
29 | | #include "dummy.h" |
30 | | #include "openvswitch/dynamic-string.h" |
31 | | #include "fatal-signal.h" |
32 | | #include "hash.h" |
33 | | #include "openvswitch/hmap.h" |
34 | | #include "ovs-rcu.h" |
35 | | #include "ovs-thread.h" |
36 | | #include "signals.h" |
37 | | #include "seq.h" |
38 | | #include "unixctl.h" |
39 | | #include "util.h" |
40 | | #include "openvswitch/vlog.h" |
41 | | |
42 | | VLOG_DEFINE_THIS_MODULE(timeval); |
43 | | |
44 | | COVERAGE_DEFINE(long_poll_interval); |
45 | | |
46 | | #if !defined(HAVE_CLOCK_GETTIME) |
47 | | typedef unsigned int clockid_t; |
48 | | static int clock_gettime(clock_t id, struct timespec *ts); |
49 | | |
50 | | #ifndef CLOCK_MONOTONIC |
51 | | #define CLOCK_MONOTONIC 1 |
52 | | #endif |
53 | | |
54 | | #ifndef CLOCK_REALTIME |
55 | | #define CLOCK_REALTIME 2 |
56 | | #endif |
57 | | #endif /* !defined(HAVE_CLOCK_GETTIME) */ |
58 | | |
59 | | #ifdef _WIN32 |
60 | | /* Number of 100 ns intervals from January 1, 1601 till January 1, 1970. */ |
61 | | const static unsigned long long unix_epoch = 116444736000000000; |
62 | | #endif /* _WIN32 */ |
63 | | |
64 | | /* Structure set by unixctl time/warp command. */ |
65 | | struct large_warp { |
66 | | struct unixctl_conn *conn; /* Connection waiting for warp response. */ |
67 | | long long int total_warp; /* Total offset to be added to monotonic time. */ |
68 | | long long int warp; /* 'total_warp' offset done in steps of 'warp'. */ |
69 | | unsigned int main_thread_id; /* Identification for the main thread. */ |
70 | | }; |
71 | | |
72 | | struct clock { |
73 | | clockid_t id; /* CLOCK_MONOTONIC or CLOCK_REALTIME. */ |
74 | | |
75 | | /* Features for use by unit tests. Protected by 'mutex'. */ |
76 | | atomic_bool slow_path; /* True if warped or stopped. */ |
77 | | bool stopped OVS_GUARDED; /* Disable real-time updates if true. */ |
78 | | struct ovs_mutex mutex; |
79 | | struct timespec warp OVS_GUARDED; /* Offset added for unit tests. */ |
80 | | struct timespec cache OVS_GUARDED; /* Last time read from kernel. */ |
81 | | struct large_warp large_warp OVS_GUARDED; /* Connection information waiting |
82 | | for warp response. */ |
83 | | }; |
84 | | |
85 | | /* Our clocks. */ |
86 | | static struct clock monotonic_clock; /* CLOCK_MONOTONIC, if available. */ |
87 | | static struct clock wall_clock; /* CLOCK_REALTIME. */ |
88 | | |
89 | | /* The monotonic time at which the time module was initialized. */ |
90 | | static long long int boot_time; |
91 | | |
92 | | /* True only when timeval_dummy_register() is called. */ |
93 | | static bool timewarp_enabled; |
94 | | /* Reference to the seq struct. Threads other than the main thread can |
95 | | * wait on timewarp_seq and be woken up when time is warped. */ |
96 | | static struct seq *timewarp_seq; |
97 | | /* Last value of 'timewarp_seq'. */ |
98 | | DEFINE_STATIC_PER_THREAD_DATA(uint64_t, last_seq, 0); |
99 | | |
100 | | /* Monotonic time in milliseconds at which to die with SIGALRM (if not |
101 | | * LLONG_MAX). */ |
102 | | static long long int deadline = LLONG_MAX; |
103 | | |
104 | | /* Monotonic time, in milliseconds, at which the last call to time_poll() woke |
105 | | * up. */ |
106 | | DEFINE_STATIC_PER_THREAD_DATA(long long int, last_wakeup, 0); |
107 | | |
108 | | static void log_poll_interval(long long int last_wakeup); |
109 | | static struct rusage *get_recent_rusage(void); |
110 | | static int getrusage_thread(struct rusage *); |
111 | | static void refresh_rusage(void); |
112 | | static void timespec_add(struct timespec *sum, |
113 | | const struct timespec *a, const struct timespec *b); |
114 | | |
115 | | static void |
116 | | init_clock(struct clock *c, clockid_t id) |
117 | 0 | { |
118 | 0 | memset(c, 0, sizeof *c); |
119 | 0 | c->id = id; |
120 | 0 | ovs_mutex_init(&c->mutex); |
121 | 0 | atomic_init(&c->slow_path, false); |
122 | 0 | xclock_gettime(c->id, &c->cache); |
123 | 0 | } |
124 | | |
125 | | static void |
126 | | do_init_time(void) |
127 | 0 | { |
128 | 0 | struct timespec ts; |
129 | |
130 | 0 | coverage_init(); |
131 | |
132 | 0 | timewarp_seq = seq_create(); |
133 | 0 | init_clock(&monotonic_clock, (!clock_gettime(CLOCK_MONOTONIC, &ts) |
134 | 0 | ? CLOCK_MONOTONIC |
135 | 0 | : CLOCK_REALTIME)); |
136 | 0 | init_clock(&wall_clock, CLOCK_REALTIME); |
137 | 0 | boot_time = timespec_to_msec(&monotonic_clock.cache); |
138 | 0 | } |
139 | | |
140 | | /* Initializes the timetracking module, if not already initialized. */ |
141 | | static void |
142 | | time_init(void) |
143 | 0 | { |
144 | 0 | static pthread_once_t once = PTHREAD_ONCE_INIT; |
145 | 0 | pthread_once(&once, do_init_time); |
146 | 0 | } |
147 | | |
148 | | static void |
149 | | time_timespec__(struct clock *c, struct timespec *ts) |
150 | 0 | { |
151 | 0 | bool slow_path; |
152 | |
153 | 0 | time_init(); |
154 | |
155 | 0 | atomic_read_relaxed(&c->slow_path, &slow_path); |
156 | 0 | if (!slow_path) { |
157 | 0 | xclock_gettime(c->id, ts); |
158 | 0 | } else { |
159 | 0 | struct timespec warp; |
160 | 0 | struct timespec cache; |
161 | 0 | bool stopped; |
162 | |
163 | 0 | ovs_mutex_lock(&c->mutex); |
164 | 0 | stopped = c->stopped; |
165 | 0 | warp = c->warp; |
166 | 0 | cache = c->cache; |
167 | 0 | ovs_mutex_unlock(&c->mutex); |
168 | |
169 | 0 | if (!stopped) { |
170 | 0 | xclock_gettime(c->id, &cache); |
171 | 0 | } |
172 | 0 | timespec_add(ts, &cache, &warp); |
173 | 0 | } |
174 | 0 | } |
175 | | |
176 | | /* Stores the monotonic time into '*ts'. */ |
177 | | void |
178 | | time_timespec(struct timespec *ts) |
179 | 0 | { |
180 | 0 | time_timespec__(&monotonic_clock, ts); |
181 | 0 | } |
182 | | |
183 | | /* Stores the current time into '*ts'. */ |
184 | | void |
185 | | time_wall_timespec(struct timespec *ts) |
186 | 0 | { |
187 | 0 | time_timespec__(&wall_clock, ts); |
188 | 0 | } |
189 | | |
190 | | static time_t |
191 | | time_sec__(struct clock *c) |
192 | 0 | { |
193 | 0 | struct timespec ts; |
194 | |
195 | 0 | time_timespec__(c, &ts); |
196 | 0 | return ts.tv_sec; |
197 | 0 | } |
198 | | |
199 | | /* Returns the monotonic time, in seconds. */ |
200 | | time_t |
201 | | time_now(void) |
202 | 0 | { |
203 | 0 | return time_sec__(&monotonic_clock); |
204 | 0 | } |
205 | | |
206 | | /* Returns the current time, in seconds. */ |
207 | | time_t |
208 | | time_wall(void) |
209 | 0 | { |
210 | 0 | return time_sec__(&wall_clock); |
211 | 0 | } |
212 | | |
213 | | static long long int |
214 | | time_msec__(struct clock *c) |
215 | 0 | { |
216 | 0 | struct timespec ts; |
217 | |
218 | 0 | time_timespec__(c, &ts); |
219 | 0 | return timespec_to_msec(&ts); |
220 | 0 | } |
221 | | |
222 | | /* Returns the monotonic time, in ms. */ |
223 | | long long int |
224 | | time_msec(void) |
225 | 0 | { |
226 | 0 | return time_msec__(&monotonic_clock); |
227 | 0 | } |
228 | | |
229 | | /* Returns the current time, in ms. */ |
230 | | long long int |
231 | | time_wall_msec(void) |
232 | 0 | { |
233 | 0 | return time_msec__(&wall_clock); |
234 | 0 | } |
235 | | |
236 | | static long long int |
237 | | time_usec__(struct clock *c) |
238 | 0 | { |
239 | 0 | struct timespec ts; |
240 | |
241 | 0 | time_timespec__(c, &ts); |
242 | 0 | return timespec_to_usec(&ts); |
243 | 0 | } |
244 | | |
245 | | /* Returns the monotonic time, in microseconds. */ |
246 | | long long int |
247 | | time_usec(void) |
248 | 0 | { |
249 | 0 | return time_usec__(&monotonic_clock); |
250 | 0 | } |
251 | | |
252 | | /* Returns the current time, in microseconds. */ |
253 | | long long int |
254 | | time_wall_usec(void) |
255 | 0 | { |
256 | 0 | return time_usec__(&wall_clock); |
257 | 0 | } |
258 | | |
259 | | /* Configures the program to die with SIGALRM 'secs' seconds from now, if |
260 | | * 'secs' is nonzero, or disables the feature if 'secs' is zero. */ |
261 | | void |
262 | | time_alarm(unsigned int secs) |
263 | 0 | { |
264 | 0 | long long int now; |
265 | 0 | long long int msecs; |
266 | |
267 | 0 | assert_single_threaded(); |
268 | 0 | time_init(); |
269 | |
270 | 0 | now = time_msec(); |
271 | 0 | msecs = secs * 1000LL; |
272 | 0 | deadline = now < LLONG_MAX - msecs ? now + msecs : LLONG_MAX; |
273 | 0 | } |
274 | | |
275 | | /* Like poll(), except: |
276 | | * |
277 | | * - The timeout is specified as an absolute time, as defined by |
278 | | * time_msec(), instead of a duration. |
279 | | * |
280 | | * - On error, returns a negative error code (instead of setting errno). |
281 | | * |
282 | | * - If interrupted by a signal, retries automatically until the original |
283 | | * timeout is reached. (Because of this property, this function will |
284 | | * never return -EINTR.) |
285 | | * |
286 | | * Stores the number of milliseconds elapsed during poll in '*elapsed'. */ |
287 | | int |
288 | | time_poll(struct pollfd *pollfds, int n_pollfds, HANDLE *handles OVS_UNUSED, |
289 | | long long int timeout_when, int *elapsed) |
290 | 0 | { |
291 | 0 | long long int *last_wakeup = last_wakeup_get(); |
292 | 0 | long long int start; |
293 | 0 | bool quiescent; |
294 | 0 | int retval = 0; |
295 | |
296 | 0 | time_init(); |
297 | 0 | coverage_clear(); |
298 | 0 | coverage_run(); |
299 | 0 | if (*last_wakeup && !thread_is_pmd()) { |
300 | 0 | log_poll_interval(*last_wakeup); |
301 | 0 | } |
302 | 0 | start = time_msec(); |
303 | |
304 | 0 | timeout_when = MIN(timeout_when, deadline); |
305 | 0 | quiescent = ovsrcu_is_quiescent(); |
306 | |
307 | 0 | for (;;) { |
308 | 0 | long long int now = time_msec(); |
309 | 0 | int time_left; |
310 | |
311 | 0 | if (now >= timeout_when) { |
312 | 0 | time_left = 0; |
313 | 0 | } else if ((unsigned long long int) timeout_when - now > INT_MAX) { |
314 | 0 | time_left = INT_MAX; |
315 | 0 | } else { |
316 | 0 | time_left = timeout_when - now; |
317 | 0 | } |
318 | |
319 | 0 | if (!quiescent) { |
320 | 0 | if (!time_left) { |
321 | 0 | ovsrcu_quiesce(); |
322 | 0 | } else { |
323 | 0 | ovsrcu_quiesce_start(); |
324 | 0 | } |
325 | 0 | } |
326 | |
327 | 0 | #ifndef _WIN32 |
328 | 0 | retval = poll(pollfds, n_pollfds, time_left); |
329 | 0 | if (retval < 0) { |
330 | 0 | retval = -errno; |
331 | 0 | } |
332 | | #else |
333 | | if (n_pollfds > MAXIMUM_WAIT_OBJECTS) { |
334 | | VLOG_ERR("Cannot handle more than maximum wait objects\n"); |
335 | | } else if (n_pollfds != 0) { |
336 | | retval = WaitForMultipleObjects(n_pollfds, handles, FALSE, |
337 | | time_left); |
338 | | } |
339 | | if (retval < 0) { |
340 | | /* XXX This will be replaced by a Windows error to errno |
341 | | conversion function. */ |
342 | | retval = -WSAGetLastError(); |
343 | | retval = -EINVAL; |
344 | | } |
345 | | #endif |
346 | |
347 | 0 | if (!quiescent && time_left) { |
348 | 0 | ovsrcu_quiesce_end(); |
349 | 0 | } |
350 | |
351 | 0 | if (deadline <= time_msec()) { |
352 | 0 | #ifndef _WIN32 |
353 | 0 | fatal_signal_handler(SIGALRM); |
354 | | #else |
355 | | VLOG_ERR("wake up from WaitForMultipleObjects after deadline"); |
356 | | fatal_signal_handler(SIGTERM); |
357 | | #endif |
358 | 0 | if (retval < 0) { |
359 | 0 | retval = 0; |
360 | 0 | } |
361 | 0 | break; |
362 | 0 | } |
363 | | |
364 | 0 | if (retval != -EINTR) { |
365 | 0 | break; |
366 | 0 | } |
367 | 0 | } |
368 | 0 | *last_wakeup = time_msec(); |
369 | 0 | refresh_rusage(); |
370 | 0 | *elapsed = *last_wakeup - start; |
371 | 0 | return retval; |
372 | 0 | } |
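/* Illustrative sketch (not part of the original file): because the timeout is
 * an absolute time_msec() value rather than a duration, a caller that wants to
 * wait at most 500 ms for input on a hypothetical descriptor 'sock_fd' might
 * do something like:
 *
 *     struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *     int elapsed;
 *     int error = time_poll(&pfd, 1, NULL, time_msec() + 500, &elapsed);
 *     if (error < 0) {
 *         // 'error' is a negative errno value and is never -EINTR.
 *     }
 *
 * A 'timeout_when' that is already in the past makes the call poll without
 * blocking. */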
373 | | |
374 | | long long int |
375 | | timespec_to_msec(const struct timespec *ts) |
376 | 0 | { |
377 | 0 | return (long long int) ts->tv_sec * 1000 + ts->tv_nsec / (1000 * 1000); |
378 | 0 | } |
379 | | |
380 | | long long int |
381 | | timeval_to_msec(const struct timeval *tv) |
382 | 0 | { |
383 | 0 | return (long long int) tv->tv_sec * 1000 + tv->tv_usec / 1000; |
384 | 0 | } |
385 | | |
386 | | long long int |
387 | | timespec_to_usec(const struct timespec *ts) |
388 | 0 | { |
389 | 0 | return (long long int) ts->tv_sec * 1000 * 1000 + ts->tv_nsec / 1000; |
390 | 0 | } |
391 | | |
392 | | long long int |
393 | | timeval_to_usec(const struct timeval *tv) |
394 | 0 | { |
395 | 0 | return (long long int) tv->tv_sec * 1000 * 1000 + tv->tv_usec; |
396 | 0 | } |
397 | | |
398 | | /* Returns the monotonic time at which the "time" module was initialized, in |
399 | | * milliseconds. */ |
400 | | long long int |
401 | | time_boot_msec(void) |
402 | 0 | { |
403 | 0 | time_init(); |
404 | 0 | return boot_time; |
405 | 0 | } |
406 | | |
407 | | #ifdef _WIN32 |
408 | | static ULARGE_INTEGER |
409 | | xgetfiletime(void) |
410 | | { |
411 | | ULARGE_INTEGER current_time; |
412 | | FILETIME current_time_ft; |
413 | | |
414 | | /* Returns current time in UTC as a 64-bit value representing the number |
415 | | * of 100-nanosecond intervals since January 1, 1601. */ |
416 | | GetSystemTimePreciseAsFileTime(&current_time_ft); |
417 | | current_time.LowPart = current_time_ft.dwLowDateTime; |
418 | | current_time.HighPart = current_time_ft.dwHighDateTime; |
419 | | |
420 | | return current_time; |
421 | | } |
422 | | |
423 | | static int |
424 | | clock_gettime(clock_t id, struct timespec *ts) |
425 | | { |
426 | | if (id == CLOCK_MONOTONIC) { |
427 | | static LARGE_INTEGER freq; |
428 | | LARGE_INTEGER count; |
429 | | long long int ns; |
430 | | |
431 | | if (!freq.QuadPart) { |
432 | | /* Number of counts per second. */ |
433 | | QueryPerformanceFrequency(&freq); |
434 | | } |
435 | | /* Total number of counts from a starting point. */ |
436 | | QueryPerformanceCounter(&count); |
437 | | |
438 | | /* Total nanoseconds from a starting point. */ |
439 | | ns = (double) count.QuadPart / freq.QuadPart * 1000000000; |
440 | | |
441 | | ts->tv_sec = count.QuadPart / freq.QuadPart; |
442 | | ts->tv_nsec = ns % 1000000000; |
443 | | } else if (id == CLOCK_REALTIME) { |
444 | | ULARGE_INTEGER current_time = xgetfiletime(); |
445 | | |
446 | | /* Time from Epoch to now. */ |
447 | | ts->tv_sec = (current_time.QuadPart - unix_epoch) / 10000000; |
448 | | ts->tv_nsec = ((current_time.QuadPart - unix_epoch) % |
449 | | 10000000) * 100; |
450 | | } else { |
451 | | return -1; |
452 | | } |
453 | | |
454 | | return 0; |
455 | | } |
456 | | #endif /* _WIN32 */ |
457 | | |
458 | | #if defined(__MACH__) && !defined(HAVE_CLOCK_GETTIME) |
459 | | #include <mach/clock.h> |
460 | | #include <mach/mach.h> |
461 | | static int |
462 | | clock_gettime(clock_t id, struct timespec *ts) |
463 | | { |
464 | | mach_timespec_t mts; |
465 | | clock_serv_t clk; |
466 | | clock_id_t cid; |
467 | | |
468 | | if (id == CLOCK_MONOTONIC) { |
469 | | cid = SYSTEM_CLOCK; |
470 | | } else if (id == CLOCK_REALTIME) { |
471 | | cid = CALENDAR_CLOCK; |
472 | | } else { |
473 | | return -1; |
474 | | } |
475 | | |
476 | | host_get_clock_service(mach_host_self(), cid, &clk); |
477 | | clock_get_time(clk, &mts); |
478 | | mach_port_deallocate(mach_task_self(), clk); |
479 | | ts->tv_sec = mts.tv_sec; |
480 | | ts->tv_nsec = mts.tv_nsec; |
481 | | |
482 | | return 0; |
483 | | } |
484 | | #endif |
485 | | |
486 | | void |
487 | | xgettimeofday(struct timeval *tv) |
488 | 0 | { |
489 | 0 | #ifndef _WIN32 |
490 | 0 | if (gettimeofday(tv, NULL) == -1) { |
491 | 0 | VLOG_FATAL("gettimeofday failed (%s)", ovs_strerror(errno)); |
492 | 0 | } |
493 | | #else |
494 | | ULARGE_INTEGER current_time = xgetfiletime(); |
495 | | |
496 | | tv->tv_sec = (current_time.QuadPart - unix_epoch) / 10000000; |
497 | | tv->tv_usec = ((current_time.QuadPart - unix_epoch) % |
498 | | 10000000) / 10; |
499 | | #endif |
500 | 0 | } |
501 | | |
502 | | void |
503 | | xclock_gettime(clock_t id, struct timespec *ts) |
504 | 0 | { |
505 | 0 | if (clock_gettime(id, ts) == -1) { |
506 | | /* It seems like a bad idea to try to use vlog here because it is |
507 | | * likely to try to check the current time. */ |
508 | 0 | ovs_abort(errno, "xclock_gettime() failed"); |
509 | 0 | } |
510 | 0 | } |
511 | | |
512 | | static void |
513 | | msec_to_timespec(long long int ms, struct timespec *ts) |
514 | 0 | { |
515 | 0 | ts->tv_sec = ms / 1000; |
516 | 0 | ts->tv_nsec = (ms % 1000) * 1000 * 1000; |
517 | 0 | } |
518 | | |
519 | | void |
520 | | nsec_to_timespec(long long int nsec, struct timespec *ts) |
521 | 0 | { |
522 | 0 | if (!nsec) { |
523 | 0 | ts->tv_sec = ts->tv_nsec = 0; |
524 | 0 | return; |
525 | 0 | } |
526 | 0 | ts->tv_sec = nsec / (1000 * 1000 * 1000); |
527 | |
528 | 0 | nsec = nsec % (1000 * 1000 * 1000); |
529 | | /* This is to handle dates before the epoch. */ |
530 | 0 | if (OVS_UNLIKELY(nsec < 0)) { |
531 | 0 | nsec += 1000 * 1000 * 1000; |
532 | 0 | ts->tv_sec--; |
533 | 0 | } |
534 | |
535 | 0 | ts->tv_nsec = nsec; |
536 | 0 | } |
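/* A worked example of the pre-epoch handling above: for nsec = -1 (one
 * nanosecond before the epoch), the division yields tv_sec = 0 with a
 * remainder of -1; the remainder is normalized to 999999999 and tv_sec is
 * decremented, giving { tv_sec = -1, tv_nsec = 999999999 }, i.e.
 * -1 s + 0.999999999 s = -1 ns. */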
537 | | |
538 | | static void |
539 | | timewarp_work(void) |
540 | 0 | { |
541 | 0 | struct clock *c = &monotonic_clock; |
542 | 0 | struct timespec warp; |
543 | |
544 | 0 | ovs_mutex_lock(&c->mutex); |
545 | 0 | if (!c->large_warp.conn) { |
546 | 0 | ovs_mutex_unlock(&c->mutex); |
547 | 0 | return; |
548 | 0 | } |
549 | | |
550 | 0 | if (c->large_warp.total_warp >= c->large_warp.warp) { |
551 | 0 | msec_to_timespec(c->large_warp.warp, &warp); |
552 | 0 | timespec_add(&c->warp, &c->warp, &warp); |
553 | 0 | c->large_warp.total_warp -= c->large_warp.warp; |
554 | 0 | } else if (c->large_warp.total_warp) { |
555 | 0 | msec_to_timespec(c->large_warp.total_warp, &warp); |
556 | 0 | timespec_add(&c->warp, &c->warp, &warp); |
557 | 0 | c->large_warp.total_warp = 0; |
558 | 0 | } else { |
559 | | /* c->large_warp.total_warp is 0. */ |
560 | 0 | msec_to_timespec(c->large_warp.warp, &warp); |
561 | 0 | timespec_add(&c->warp, &c->warp, &warp); |
562 | 0 | } |
563 | |
564 | 0 | if (!c->large_warp.total_warp) { |
565 | 0 | unixctl_command_reply(c->large_warp.conn, "warped"); |
566 | 0 | c->large_warp.conn = NULL; |
567 | 0 | } |
568 | |
569 | 0 | ovs_mutex_unlock(&c->mutex); |
570 | 0 | seq_change(timewarp_seq); |
571 | | |
572 | | /* Give threads (e.g. the monitor thread) a chance to run. */ |
573 | 0 | #ifndef _WIN32 |
574 | 0 | poll(NULL, 0, 10); |
575 | | #else |
576 | | Sleep(10); |
577 | | #endif |
578 | 0 | } |
579 | | |
580 | | /* Perform work needed for "timewarp_seq"'s producer and consumers. */ |
581 | | void |
582 | | timewarp_run(void) |
583 | 0 | { |
584 | | /* The function is a no-op unless timeval_dummy_register() is called. */ |
585 | 0 | if (timewarp_enabled) { |
586 | 0 | unsigned int thread_id; |
587 | 0 | ovs_mutex_lock(&monotonic_clock.mutex); |
588 | 0 | thread_id = monotonic_clock.large_warp.main_thread_id; |
589 | 0 | ovs_mutex_unlock(&monotonic_clock.mutex); |
590 | |
591 | 0 | if (thread_id != ovsthread_id_self()) { |
592 | | /* For threads other than the thread that changes the sequence, |
593 | | * wait on it. */ |
594 | 0 | uint64_t *last_seq = last_seq_get(); |
595 | |
596 | 0 | *last_seq = seq_read(timewarp_seq); |
597 | 0 | seq_wait(timewarp_seq, *last_seq); |
598 | 0 | } else { |
599 | | /* Work on adding the remaining warps. */ |
600 | 0 | timewarp_work(); |
601 | 0 | } |
602 | 0 | } |
603 | 0 | } |
604 | | |
605 | | static long long int |
606 | | timeval_diff_msec(const struct timeval *a, const struct timeval *b) |
607 | 0 | { |
608 | 0 | return timeval_to_msec(a) - timeval_to_msec(b); |
609 | 0 | } |
610 | | |
611 | | static void |
612 | | timespec_add(struct timespec *sum, |
613 | | const struct timespec *a, |
614 | | const struct timespec *b) |
615 | 0 | { |
616 | 0 | struct timespec tmp; |
617 | |
618 | 0 | tmp.tv_sec = a->tv_sec + b->tv_sec; |
619 | 0 | tmp.tv_nsec = a->tv_nsec + b->tv_nsec; |
620 | 0 | if (tmp.tv_nsec >= 1000 * 1000 * 1000) { |
621 | 0 | tmp.tv_nsec -= 1000 * 1000 * 1000; |
622 | 0 | tmp.tv_sec++; |
623 | 0 | } |
624 | |
625 | 0 | *sum = tmp; |
626 | 0 | } |
627 | | |
628 | | static bool |
629 | | is_warped(const struct clock *c) |
630 | 0 | { |
631 | 0 | bool warped; |
632 | |
633 | 0 | ovs_mutex_lock(&c->mutex); |
634 | 0 | warped = monotonic_clock.warp.tv_sec || monotonic_clock.warp.tv_nsec; |
635 | 0 | ovs_mutex_unlock(&c->mutex); |
636 | |
637 | 0 | return warped; |
638 | 0 | } |
639 | | |
640 | | static void |
641 | | log_poll_interval(long long int last_wakeup) |
642 | 0 | { |
643 | 0 | long long int interval = time_msec() - last_wakeup; |
644 | |
645 | 0 | if (interval >= 1000 && !is_warped(&monotonic_clock)) { |
646 | 0 | const struct rusage *last_rusage = get_recent_rusage(); |
647 | 0 | struct rusage rusage; |
648 | |
649 | 0 | COVERAGE_INC(long_poll_interval); |
650 | |
651 | 0 | if (!getrusage_thread(&rusage)) { |
652 | 0 | VLOG_WARN("Unreasonably long %lldms poll interval" |
653 | 0 | " (%lldms user, %lldms system)", |
654 | 0 | interval, |
655 | 0 | timeval_diff_msec(&rusage.ru_utime, |
656 | 0 | &last_rusage->ru_utime), |
657 | 0 | timeval_diff_msec(&rusage.ru_stime, |
658 | 0 | &last_rusage->ru_stime)); |
659 | |
660 | 0 | if (rusage.ru_minflt > last_rusage->ru_minflt |
661 | 0 | || rusage.ru_majflt > last_rusage->ru_majflt) { |
662 | 0 | VLOG_WARN("faults: %ld minor, %ld major", |
663 | 0 | rusage.ru_minflt - last_rusage->ru_minflt, |
664 | 0 | rusage.ru_majflt - last_rusage->ru_majflt); |
665 | 0 | } |
666 | 0 | if (rusage.ru_inblock > last_rusage->ru_inblock |
667 | 0 | || rusage.ru_oublock > last_rusage->ru_oublock) { |
668 | 0 | VLOG_WARN("disk: %ld reads, %ld writes", |
669 | 0 | rusage.ru_inblock - last_rusage->ru_inblock, |
670 | 0 | rusage.ru_oublock - last_rusage->ru_oublock); |
671 | 0 | } |
672 | 0 | if (rusage.ru_nvcsw > last_rusage->ru_nvcsw |
673 | 0 | || rusage.ru_nivcsw > last_rusage->ru_nivcsw) { |
674 | 0 | VLOG_WARN("context switches: %ld voluntary, %ld involuntary", |
675 | 0 | rusage.ru_nvcsw - last_rusage->ru_nvcsw, |
676 | 0 | rusage.ru_nivcsw - last_rusage->ru_nivcsw); |
677 | 0 | } |
678 | 0 | } else { |
679 | 0 | VLOG_WARN("Unreasonably long %lldms poll interval", interval); |
680 | 0 | } |
681 | 0 | coverage_log(); |
682 | 0 | } |
683 | 0 | } |
684 | | |
685 | | /* CPU usage tracking. */ |
686 | | |
687 | | struct cpu_usage { |
688 | | long long int when; /* Time that this sample was taken. */ |
689 | | unsigned long long int cpu; /* Total user+system CPU usage when sampled. */ |
690 | | }; |
691 | | |
692 | | struct cpu_tracker { |
693 | | struct cpu_usage older; |
694 | | struct cpu_usage newer; |
695 | | int cpu_usage; |
696 | | |
697 | | struct rusage recent_rusage; |
698 | | }; |
699 | | DEFINE_PER_THREAD_MALLOCED_DATA(struct cpu_tracker *, cpu_tracker_var); |
700 | | |
701 | | static struct cpu_tracker * |
702 | | get_cpu_tracker(void) |
703 | 0 | { |
704 | 0 | struct cpu_tracker *t = cpu_tracker_var_get(); |
705 | 0 | if (!t) { |
706 | 0 | t = xzalloc(sizeof *t); |
707 | 0 | t->older.when = LLONG_MIN; |
708 | 0 | t->newer.when = LLONG_MIN; |
709 | 0 | cpu_tracker_var_set_unsafe(t); |
710 | 0 | } |
711 | 0 | return t; |
712 | 0 | } |
713 | | |
714 | | static struct rusage * |
715 | | get_recent_rusage(void) |
716 | 0 | { |
717 | 0 | return &get_cpu_tracker()->recent_rusage; |
718 | 0 | } |
719 | | |
720 | | static int |
721 | | getrusage_thread(struct rusage *rusage OVS_UNUSED) |
722 | 0 | { |
723 | 0 | #ifdef RUSAGE_THREAD |
724 | 0 | return getrusage(RUSAGE_THREAD, rusage); |
725 | | #else |
726 | | errno = EINVAL; |
727 | | return -1; |
728 | | #endif |
729 | 0 | } |
730 | | |
731 | | static void |
732 | | refresh_rusage(void) |
733 | 0 | { |
734 | 0 | struct cpu_tracker *t = get_cpu_tracker(); |
735 | 0 | struct rusage *recent_rusage = &t->recent_rusage; |
736 | |
737 | 0 | if (!getrusage_thread(recent_rusage)) { |
738 | 0 | long long int now = time_msec(); |
739 | 0 | if (now >= t->newer.when + 3 * 1000) { |
740 | 0 | t->older = t->newer; |
741 | 0 | t->newer.when = now; |
742 | 0 | t->newer.cpu = (timeval_to_msec(&recent_rusage->ru_utime) + |
743 | 0 | timeval_to_msec(&recent_rusage->ru_stime)); |
744 | |
745 | 0 | if (t->older.when != LLONG_MIN && t->newer.cpu > t->older.cpu) { |
746 | 0 | unsigned int dividend = t->newer.cpu - t->older.cpu; |
747 | 0 | unsigned int divisor = (t->newer.when - t->older.when) / 100; |
748 | 0 | t->cpu_usage = divisor > 0 ? dividend / divisor : -1; |
749 | 0 | } else { |
750 | 0 | t->cpu_usage = -1; |
751 | 0 | } |
752 | 0 | } |
753 | 0 | } |
754 | 0 | } |
755 | | |
756 | | /* Returns an estimate of this process's CPU usage, as a percentage, over the |
757 | | * past few seconds of wall-clock time. Returns -1 if no estimate is available |
758 | | * (which will happen if the process has not been running long enough to have |
759 | | * an estimate, and can happen for other reasons as well). */ |
760 | | int |
761 | | get_cpu_usage(void) |
762 | 0 | { |
763 | 0 | return get_cpu_tracker()->cpu_usage; |
764 | 0 | } |
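/* A worked example of the arithmetic in refresh_rusage(): samples are taken at
 * least 3 s apart, so if the newer sample shows 1500 ms more user+system CPU
 * time than the older one and was taken 3000 ms of wall-clock time later, then
 * dividend = 1500, divisor = 3000 / 100 = 30, and cpu_usage = 1500 / 30 = 50,
 * i.e. roughly half of one CPU. */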
765 | | |
766 | | /* Unixctl interface. */ |
767 | | |
768 | | /* "time/stop" stops the monotonic time returned by e.g. time_msec() from |
769 | | * advancing, except due to later calls to "time/warp". */ |
770 | | void |
771 | | timeval_stop(void) |
772 | 0 | { |
773 | 0 | ovs_mutex_lock(&monotonic_clock.mutex); |
774 | 0 | atomic_store_relaxed(&monotonic_clock.slow_path, true); |
775 | 0 | monotonic_clock.stopped = true; |
776 | 0 | xclock_gettime(monotonic_clock.id, &monotonic_clock.cache); |
777 | 0 | ovs_mutex_unlock(&monotonic_clock.mutex); |
778 | 0 | } |
779 | | |
780 | | static void |
781 | | timeval_stop_cb(struct unixctl_conn *conn, |
782 | | int argc OVS_UNUSED, const char *argv[] OVS_UNUSED, |
783 | | void *aux OVS_UNUSED) |
784 | 0 | { |
785 | 0 | timeval_stop(); |
786 | 0 | unixctl_command_reply(conn, NULL); |
787 | 0 | } |
788 | | |
789 | | /* "time/warp MSECS" advances the current monotonic time by the specified |
790 | | * number of milliseconds. Unless "time/stop" has also been executed, the |
791 | | * monotonic clock continues to tick forward at the normal rate afterward. |
792 | | * |
793 | | * "time/warp LARGE_MSECS MSECS" is a variation of the above command. It |
794 | | * advances the current monotonic time by LARGE_MSECS. This is done MSECS |
795 | | * at a time in each run of the main thread. This gives other threads |
796 | | * time to run after the clock has been advanced by MSECS. |
797 | | * |
798 | | * Does not affect wall clock readings. */ |
799 | | static void |
800 | | timeval_warp_cb(struct unixctl_conn *conn, |
801 | | int argc OVS_UNUSED, const char *argv[], void *aux OVS_UNUSED) |
802 | 0 | { |
803 | 0 | long long int total_warp = argc > 2 ? atoll(argv[1]) : 0; |
804 | 0 | long long int msecs = argc > 2 ? atoll(argv[2]) : atoll(argv[1]); |
805 | 0 | if (msecs <= 0 || total_warp < 0) { |
806 | 0 | unixctl_command_reply_error(conn, "invalid MSECS"); |
807 | 0 | return; |
808 | 0 | } |
809 | | |
810 | 0 | ovs_mutex_lock(&monotonic_clock.mutex); |
811 | 0 | if (monotonic_clock.large_warp.conn) { |
812 | 0 | ovs_mutex_unlock(&monotonic_clock.mutex); |
813 | 0 | unixctl_command_reply_error(conn, "A previous warp in progress"); |
814 | 0 | return; |
815 | 0 | } |
816 | 0 | atomic_store_relaxed(&monotonic_clock.slow_path, true); |
817 | 0 | monotonic_clock.large_warp.conn = conn; |
818 | 0 | monotonic_clock.large_warp.total_warp = total_warp; |
819 | 0 | monotonic_clock.large_warp.warp = msecs; |
820 | 0 | monotonic_clock.large_warp.main_thread_id = ovsthread_id_self(); |
821 | 0 | ovs_mutex_unlock(&monotonic_clock.mutex); |
822 | |
823 | 0 | timewarp_work(); |
824 | 0 | } |
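/* Illustrative use of the commands above (a sketch, assuming a daemon that has
 * called timeval_dummy_register(), as the OVS test suite does).  Something
 * along the lines of:
 *
 *     ovs-appctl -t ovs-vswitchd time/stop
 *     ovs-appctl -t ovs-vswitchd time/warp 10000 1000
 *
 * first freezes the monotonic clock and then advances it by 10 s in 1 s steps,
 * letting other threads run between steps; the warp command replies "warped"
 * once the whole offset has been applied. */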
825 | | |
826 | | /* Directs the monotonic clock into the slow path and advances the current |
827 | | * monotonic time by 'msecs' milliseconds. This is for use in unit tests. */ |
828 | | void |
829 | | timeval_warp(long long int msecs) |
830 | 0 | { |
831 | 0 | struct clock *c = &monotonic_clock; |
832 | 0 | struct timespec warp; |
833 | |
834 | 0 | ovs_mutex_lock(&monotonic_clock.mutex); |
835 | 0 | atomic_store_relaxed(&monotonic_clock.slow_path, true); |
836 | 0 | msec_to_timespec(msecs, &warp); |
837 | 0 | timespec_add(&c->warp, &c->warp, &warp); |
838 | 0 | ovs_mutex_unlock(&monotonic_clock.mutex); |
839 | 0 | } |
840 | | |
841 | | void |
842 | | timeval_dummy_register(void) |
843 | 0 | { |
844 | 0 | timewarp_enabled = true; |
845 | 0 | unixctl_command_register("time/stop", "", 0, 0, timeval_stop_cb, NULL); |
846 | 0 | unixctl_command_register("time/warp", "[large_msecs] msecs", 1, 2, |
847 | 0 | timeval_warp_cb, NULL); |
848 | 0 | } |
849 | | |
850 | | |
851 | | |
852 | | /* strftime() with an extension for high-resolution timestamps. Any '#'s in |
853 | | * 'format' will be replaced by subseconds, e.g. use "%S.###" to obtain results |
854 | | * like "01.123". */ |
855 | | size_t |
856 | | strftime_msec(char *s, size_t max, const char *format, |
857 | | const struct tm_msec *tm) |
858 | 0 | { |
859 | 0 | size_t n; |
860 | | |
861 | | /* Visual Studio 2013's behavior is to crash when 0 is passed as the |
862 | | * second argument to strftime. */ |
863 | 0 | n = max ? strftime(s, max, format, &tm->tm) : 0; |
864 | 0 | if (n) { |
865 | 0 | char decimals[4]; |
866 | 0 | char *p; |
867 | |
868 | 0 | sprintf(decimals, "%03d", tm->msec); |
869 | 0 | for (p = strchr(s, '#'); p; p = strchr(p, '#')) { |
870 | 0 | char *d = decimals; |
871 | 0 | while (*p == '#') { |
872 | 0 | *p++ = *d ? *d++ : '0'; |
873 | 0 | } |
874 | 0 | } |
875 | 0 | } |
876 | |
877 | 0 | return n; |
878 | 0 | } |
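/* Illustrative sketch (not part of the original file) of the '#' extension
 * documented above, printing the current wall-clock time with millisecond
 * resolution:
 *
 *     char buf[64];
 *     struct tm_msec tm;
 *     strftime_msec(buf, sizeof buf, "%H:%M:%S.###",
 *                   localtime_msec(time_wall_msec(), &tm));
 *
 * At 9.123 s past the minute, the "%S.###" portion comes out as "09.123". */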
879 | | |
880 | | struct tm_msec * |
881 | | localtime_msec(long long int now, struct tm_msec *result) |
882 | 0 | { |
883 | 0 | time_t now_sec = now / 1000; |
884 | 0 | localtime_r(&now_sec, &result->tm); |
885 | 0 | result->msec = now % 1000; |
886 | 0 | return result; |
887 | 0 | } |
888 | | |
889 | | struct tm_msec * |
890 | | gmtime_msec(long long int now, struct tm_msec *result) |
891 | 0 | { |
892 | 0 | time_t now_sec = now / 1000; |
893 | 0 | gmtime_r(&now_sec, &result->tm); |
894 | 0 | result->msec = now % 1000; |
895 | 0 | return result; |
896 | 0 | } |