Coverage Report

Created: 2025-11-24 06:47

/src/openvswitch/lib/timeval.c
Line | Count | Source
1
/*
2
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at:
7
 *
8
 *     http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16
17
#include <config.h>
18
#include "timeval.h"
19
#include <errno.h>
20
#include <poll.h>
21
#include <pthread.h>
22
#include <signal.h>
23
#include <stdlib.h>
24
#include <string.h>
25
#include <sys/time.h>
26
#include <sys/resource.h>
27
#include <unistd.h>
28
#include "coverage.h"
29
#include "dummy.h"
30
#include "openvswitch/dynamic-string.h"
31
#include "fatal-signal.h"
32
#include "hash.h"
33
#include "openvswitch/hmap.h"
34
#include "ovs-rcu.h"
35
#include "ovs-thread.h"
36
#include "signals.h"
37
#include "seq.h"
38
#include "unixctl.h"
39
#include "util.h"
40
#include "openvswitch/vlog.h"
41
42
VLOG_DEFINE_THIS_MODULE(timeval);
43
44
COVERAGE_DEFINE(long_poll_interval);
45
46
#if !defined(HAVE_CLOCK_GETTIME)
47
typedef unsigned int clockid_t;
48
static int clock_gettime(clock_t id, struct timespec *ts);
49
50
#ifndef CLOCK_MONOTONIC
51
#define CLOCK_MONOTONIC 1
52
#endif
53
54
#ifndef CLOCK_REALTIME
55
#define CLOCK_REALTIME 2
56
#endif
57
#endif /* !defined(HAVE_CLOCK_GETTIME) */
58
59
#ifdef _WIN32
60
/* Number of 100 ns intervals from January 1, 1601 till January 1, 1970. */
61
const static unsigned long long unix_epoch = 116444736000000000;
62
#endif /* _WIN32 */
63
64
/* Structure set by unixctl time/warp command. */
65
struct large_warp {
66
    struct unixctl_conn *conn; /* Connection waiting for warp response. */
67
    long long int total_warp; /* Total offset to be added to monotonic time. */
68
    long long int warp;      /* 'total_warp' offset done in steps of 'warp'. */
69
    unsigned int main_thread_id; /* Identification for the main thread. */
70
};
71
72
struct clock {
73
    clockid_t id;               /* CLOCK_MONOTONIC or CLOCK_REALTIME. */
74
75
    /* Features for use by unit tests.  Protected by 'mutex'. */
76
    atomic_bool slow_path;             /* True if warped or stopped. */
77
    bool stopped OVS_GUARDED;          /* Disable real-time updates if true. */
78
    struct ovs_mutex mutex;
79
    struct timespec warp OVS_GUARDED;  /* Offset added for unit tests. */
80
    struct timespec cache OVS_GUARDED; /* Last time read from kernel. */
81
    struct large_warp large_warp OVS_GUARDED; /* Connection information waiting
82
                                                 for warp response. */
83
};
84
85
/* Our clocks. */
86
static struct clock monotonic_clock; /* CLOCK_MONOTONIC, if available. */
87
static struct clock wall_clock;      /* CLOCK_REALTIME. */
88
89
/* The monotonic time at which the time module was initialized. */
90
static long long int boot_time;
91
92
/* True only when timeval_dummy_register() is called. */
93
static bool timewarp_enabled;
94
/* Reference to the seq struct.  Threads other than the main thread can
95
 * wait on timewarp_seq and be woken up when time is warped. */
96
static struct seq *timewarp_seq;
97
/* Last value of 'timewarp_seq'. */
98
DEFINE_STATIC_PER_THREAD_DATA(uint64_t, last_seq, 0);
99
100
/* Monotonic time in milliseconds at which to die with SIGALRM (if not
101
 * LLONG_MAX). */
102
static long long int deadline = LLONG_MAX;
103
104
/* Monotonic time, in milliseconds, at which the last call to time_poll() woke
105
 * up. */
106
DEFINE_STATIC_PER_THREAD_DATA(long long int, last_wakeup, 0);
107
108
static void log_poll_interval(long long int last_wakeup);
109
static struct rusage *get_recent_rusage(void);
110
static int getrusage_thread(struct rusage *);
111
static void refresh_rusage(void);
112
static void timespec_add(struct timespec *sum,
113
                         const struct timespec *a, const struct timespec *b);
114
115
static void
116
init_clock(struct clock *c, clockid_t id)
117
    OVS_NO_THREAD_SAFETY_ANALYSIS
118
0
{
119
0
    memset(c, 0, sizeof *c);
120
0
    c->id = id;
121
0
    ovs_mutex_init(&c->mutex);
122
0
    atomic_init(&c->slow_path, false);
123
0
    xclock_gettime(c->id, &c->cache);
124
0
}
125
126
static void
127
do_init_time(void)
128
    OVS_NO_THREAD_SAFETY_ANALYSIS
129
0
{
130
0
    struct timespec ts;
131
132
0
    coverage_init();
133
134
0
    timewarp_seq = seq_create();
135
0
    init_clock(&monotonic_clock, (!clock_gettime(CLOCK_MONOTONIC, &ts)
136
0
                                  ? CLOCK_MONOTONIC
137
0
                                  : CLOCK_REALTIME));
138
0
    init_clock(&wall_clock, CLOCK_REALTIME);
139
0
    boot_time = timespec_to_msec(&monotonic_clock.cache);
140
0
}
141
142
/* Initializes the timetracking module, if not already initialized. */
143
static void
144
time_init(void)
145
0
{
146
0
    static pthread_once_t once = PTHREAD_ONCE_INIT;
147
0
    pthread_once(&once, do_init_time);
148
0
}
149
150
static void
151
time_timespec__(struct clock *c, struct timespec *ts)
152
0
{
153
0
    bool slow_path;
154
155
0
    time_init();
156
157
0
    atomic_read_relaxed(&c->slow_path, &slow_path);
158
0
    if (!slow_path) {
159
0
        xclock_gettime(c->id, ts);
160
0
    } else {
161
0
        struct timespec warp;
162
0
        struct timespec cache;
163
0
        bool stopped;
164
165
0
        ovs_mutex_lock(&c->mutex);
166
0
        stopped = c->stopped;
167
0
        warp = c->warp;
168
0
        cache = c->cache;
169
0
        ovs_mutex_unlock(&c->mutex);
170
171
0
        if (!stopped) {
172
0
            xclock_gettime(c->id, &cache);
173
0
        }
174
0
        timespec_add(ts, &cache, &warp);
175
0
    }
176
0
}
177
178
/* Stores the current monotonic time into '*ts'. */
179
void
180
time_timespec(struct timespec *ts)
181
0
{
182
0
    time_timespec__(&monotonic_clock, ts);
183
0
}
184
185
/* Stores the current time into '*ts'. */
186
void
187
time_wall_timespec(struct timespec *ts)
188
0
{
189
0
    time_timespec__(&wall_clock, ts);
190
0
}
191
192
static time_t
193
time_sec__(struct clock *c)
194
0
{
195
0
    struct timespec ts;
196
197
0
    time_timespec__(c, &ts);
198
0
    return ts.tv_sec;
199
0
}
200
201
/* Returns the current monotonic time, in seconds. */
202
time_t
203
time_now(void)
204
0
{
205
0
    return time_sec__(&monotonic_clock);
206
0
}
207
208
/* Returns the current time, in seconds. */
209
time_t
210
time_wall(void)
211
0
{
212
0
    return time_sec__(&wall_clock);
213
0
}
214
215
static long long int
216
time_msec__(struct clock *c)
217
0
{
218
0
    struct timespec ts;
219
220
0
    time_timespec__(c, &ts);
221
0
    return timespec_to_msec(&ts);
222
0
}
223
224
/* Returns the current monotonic time, in ms. */
225
long long int
226
time_msec(void)
227
0
{
228
0
    return time_msec__(&monotonic_clock);
229
0
}
230
231
/* Returns the current time, in ms. */
232
long long int
233
time_wall_msec(void)
234
0
{
235
0
    return time_msec__(&wall_clock);
236
0
}
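
/* Illustrative sketch, not part of the upstream file: time_msec() is a
 * monotonic millisecond counter, so elapsed intervals are computed by plain
 * subtraction and are unaffected by wall-clock steps. */
static void
example_measure_elapsed_msec(void)
{
    long long int start = time_msec();

    poll(NULL, 0, 250);              /* Stand-in for real work: sleep 250 ms. */
    VLOG_INFO("elapsed: %lld ms", time_msec() - start);
}
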
237
238
static long long int
239
time_usec__(struct clock *c)
240
0
{
241
0
    struct timespec ts;
242
243
0
    time_timespec__(c, &ts);
244
0
    return timespec_to_usec(&ts);
245
0
}
246
247
/* Returns the current monotonic time, in microseconds. */
248
long long int
249
time_usec(void)
250
0
{
251
0
    return time_usec__(&monotonic_clock);
252
0
}
253
254
/* Returns the current time, in microseconds. */
255
long long int
256
time_wall_usec(void)
257
0
{
258
0
    return time_usec__(&wall_clock);
259
0
}
260
261
/* Configures the program to die with SIGALRM 'secs' seconds from now, if
262
 * 'secs' is nonzero, or disables the feature if 'secs' is zero. */
263
void
264
time_alarm(unsigned int secs)
265
0
{
266
0
    long long int now;
267
0
    long long int msecs;
268
269
0
    assert_single_threaded();
270
0
    time_init();
271
272
0
    now = time_msec();
273
0
    msecs = secs * 1000LL;
274
0
    deadline = now < LLONG_MAX - msecs ? now + msecs : LLONG_MAX;
275
0
}
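
/* Illustrative sketch, not part of the upstream file: a single-threaded
 * daemon can arm a watchdog before a phase that must not hang; if 30 seconds
 * pass without the deadline being reset, time_poll() delivers SIGALRM via
 * fatal_signal_handler(). */
static void
example_arm_watchdog(void)
{
    time_alarm(30);
    /* ... enter the main loop, which calls time_poll() ... */
}
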
276
277
/* Like poll(), except:
278
 *
279
 *      - The timeout is specified as an absolute time, as defined by
280
 *        time_msec(), instead of a duration.
281
 *
282
 *      - On error, returns a negative error code (instead of setting errno).
283
 *
284
 *      - If interrupted by a signal, retries automatically until the original
285
 *        timeout is reached.  (Because of this property, this function will
286
 *        never return -EINTR.)
287
 *
288
 * Stores the number of milliseconds elapsed during poll in '*elapsed'. */
289
int
290
time_poll(struct pollfd *pollfds, int n_pollfds, HANDLE *handles OVS_UNUSED,
291
          long long int timeout_when, int *elapsed)
292
0
{
293
0
    long long int *last_wakeup = last_wakeup_get();
294
0
    long long int start;
295
0
    bool quiescent;
296
0
    int retval = 0;
297
298
0
    time_init();
299
0
    coverage_clear();
300
0
    coverage_run();
301
0
    if (*last_wakeup && !thread_is_pmd()) {
302
0
        log_poll_interval(*last_wakeup);
303
0
    }
304
0
    start = time_msec();
305
306
0
    timeout_when = MIN(timeout_when, deadline);
307
0
    quiescent = ovsrcu_is_quiescent();
308
309
0
    for (;;) {
310
0
        long long int now = time_msec();
311
0
        int time_left;
312
313
0
        if (now >= timeout_when) {
314
0
            time_left = 0;
315
0
        } else if ((unsigned long long int) timeout_when - now > INT_MAX) {
316
0
            time_left = INT_MAX;
317
0
        } else {
318
0
            time_left = timeout_when - now;
319
0
        }
320
321
0
        if (!quiescent) {
322
0
            if (!time_left) {
323
0
                ovsrcu_quiesce();
324
0
            } else {
325
0
                ovsrcu_quiesce_start();
326
0
            }
327
0
        }
328
329
0
#ifndef _WIN32
330
0
        retval = poll(pollfds, n_pollfds, time_left);
331
0
        if (retval < 0) {
332
0
            retval = -errno;
333
0
        }
334
#else
335
        if (n_pollfds > MAXIMUM_WAIT_OBJECTS) {
336
            VLOG_ERR("Cannot handle more than maximum wait objects\n");
337
        } else if (n_pollfds != 0) {
338
            retval = WaitForMultipleObjects(n_pollfds, handles, FALSE,
339
                                            time_left);
340
        }
341
        if (retval < 0) {
342
            /* XXX This will be replaced by a Windows error to errno
343
               conversion function. */
344
            retval = -WSAGetLastError();
345
            retval = -EINVAL;
346
        }
347
#endif
348
349
0
        if (!quiescent && time_left) {
350
0
            ovsrcu_quiesce_end();
351
0
        }
352
353
0
        if (deadline <= time_msec()) {
354
0
#ifndef _WIN32
355
0
            fatal_signal_handler(SIGALRM);
356
#else
357
            VLOG_ERR("wake up from WaitForMultipleObjects after deadline");
358
            fatal_signal_handler(SIGTERM);
359
#endif
360
0
            if (retval < 0) {
361
0
                retval = 0;
362
0
            }
363
0
            break;
364
0
        }
365
366
0
        if (retval != -EINTR) {
367
0
            break;
368
0
        }
369
0
    }
370
0
    *last_wakeup = time_msec();
371
0
    refresh_rusage();
372
0
    *elapsed = *last_wakeup - start;
373
0
    return retval;
374
0
}
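
/* Illustrative sketch, not part of the upstream file: unlike poll(),
 * time_poll() takes an absolute wake-up time based on time_msec(), so a
 * caller adds its relative timeout to "now".  (The NULL handles argument is
 * only used on Windows.) */
static void
example_wait_for_stdin(void)
{
    struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
    int elapsed;
    int retval;

    retval = time_poll(&pfd, 1, NULL, time_msec() + 500, &elapsed);
    VLOG_INFO("time_poll() returned %d after %d ms", retval, elapsed);
}
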
375
376
long long int
377
timespec_to_msec(const struct timespec *ts)
378
0
{
379
0
    return (long long int) ts->tv_sec * 1000 + ts->tv_nsec / (1000 * 1000);
380
0
}
381
382
long long int
383
timeval_to_msec(const struct timeval *tv)
384
0
{
385
0
    return (long long int) tv->tv_sec * 1000 + tv->tv_usec / 1000;
386
0
}
387
388
long long int
389
timespec_to_usec(const struct timespec *ts)
390
0
{
391
0
    return (long long int) ts->tv_sec * 1000 * 1000 + ts->tv_nsec / 1000;
392
0
}
393
394
long long int
395
timeval_to_usec(const struct timeval *tv)
396
0
{
397
0
    return (long long int) tv->tv_sec * 1000 * 1000 + tv->tv_usec;
398
0
}
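
/* Illustrative sketch, not part of the upstream file: the conversion helpers
 * above scale and truncate, so 1.5 s becomes 1500 ms or 1500000 us regardless
 * of the source representation. */
static void
example_time_conversions(void)
{
    struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000000 };
    struct timeval tv = { .tv_sec = 1, .tv_usec = 500000 };

    ovs_assert(timespec_to_msec(&ts) == 1500);
    ovs_assert(timespec_to_usec(&ts) == 1500000);
    ovs_assert(timeval_to_msec(&tv) == 1500);
    ovs_assert(timeval_to_usec(&tv) == 1500000);
}
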
399
400
/* Returns the monotonic time at which the "time" module was initialized, in
401
 * milliseconds. */
402
long long int
403
time_boot_msec(void)
404
0
{
405
0
    time_init();
406
0
    return boot_time;
407
0
}
408
409
#ifdef _WIN32
410
static ULARGE_INTEGER
411
xgetfiletime(void)
412
{
413
    ULARGE_INTEGER current_time;
414
    FILETIME current_time_ft;
415
416
    /* Returns current time in UTC as a 64-bit value representing the number
417
     * of 100-nanosecond intervals since January 1, 1601. */
418
    GetSystemTimePreciseAsFileTime(&current_time_ft);
419
    current_time.LowPart = current_time_ft.dwLowDateTime;
420
    current_time.HighPart = current_time_ft.dwHighDateTime;
421
422
    return current_time;
423
}
424
425
static int
426
clock_gettime(clock_t id, struct timespec *ts)
427
{
428
    if (id == CLOCK_MONOTONIC) {
429
        static LARGE_INTEGER freq;
430
        LARGE_INTEGER count;
431
        long long int ns;
432
433
        if (!freq.QuadPart) {
434
            /* Number of counts per second. */
435
            QueryPerformanceFrequency(&freq);
436
        }
437
        /* Total number of counts from a starting point. */
438
        QueryPerformanceCounter(&count);
439
440
        /* Total nanoseconds from a starting point. */
441
        ns = (double) count.QuadPart / freq.QuadPart * 1000000000;
442
443
        ts->tv_sec = count.QuadPart / freq.QuadPart;
444
        ts->tv_nsec = ns % 1000000000;
445
    } else if (id == CLOCK_REALTIME) {
446
        ULARGE_INTEGER current_time = xgetfiletime();
447
448
        /* Time from Epoch to now. */
449
        ts->tv_sec = (current_time.QuadPart - unix_epoch) / 10000000;
450
        ts->tv_nsec = ((current_time.QuadPart - unix_epoch) %
451
                       10000000) * 100;
452
    } else {
453
        return -1;
454
    }
455
456
    return 0;
457
}
458
#endif /* _WIN32 */
459
460
#if defined(__MACH__) && !defined(HAVE_CLOCK_GETTIME)
461
#include <mach/clock.h>
462
#include <mach/mach.h>
463
static int
464
clock_gettime(clock_t id, struct timespec *ts)
465
{
466
    mach_timespec_t mts;
467
    clock_serv_t clk;
468
    clock_id_t cid;
469
470
    if (id == CLOCK_MONOTONIC) {
471
        cid = SYSTEM_CLOCK;
472
    } else if (id == CLOCK_REALTIME) {
473
        cid = CALENDAR_CLOCK;
474
    } else {
475
        return -1;
476
    }
477
478
    host_get_clock_service(mach_host_self(), cid, &clk);
479
    clock_get_time(clk, &mts);
480
    mach_port_deallocate(mach_task_self(), clk);
481
    ts->tv_sec = mts.tv_sec;
482
    ts->tv_nsec = mts.tv_nsec;
483
484
    return 0;
485
}
486
#endif
487
488
void
489
xgettimeofday(struct timeval *tv)
490
0
{
491
0
#ifndef _WIN32
492
0
    if (gettimeofday(tv, NULL) == -1) {
493
0
        VLOG_FATAL("gettimeofday failed (%s)", ovs_strerror(errno));
494
0
    }
495
#else
496
    ULARGE_INTEGER current_time = xgetfiletime();
497
498
    tv->tv_sec = (current_time.QuadPart - unix_epoch) / 10000000;
499
    tv->tv_usec = ((current_time.QuadPart - unix_epoch) %
500
                   10000000) / 10;
501
#endif
502
0
}
503
504
void
505
xclock_gettime(clock_t id, struct timespec *ts)
506
0
{
507
0
    if (clock_gettime(id, ts) == -1) {
508
        /* It seems like a bad idea to try to use vlog here because it is
509
         * likely to try to check the current time. */
510
0
        ovs_abort(errno, "xclock_gettime() failed");
511
0
    }
512
0
}
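
/* Illustrative sketch, not part of the upstream file: xclock_gettime() aborts
 * on failure, so callers need no error handling; it also bypasses the
 * warp/stop machinery used by the unit tests. */
static long long int
example_raw_monotonic_msec(void)
{
    struct timespec ts;

    xclock_gettime(CLOCK_MONOTONIC, &ts);
    return timespec_to_msec(&ts);
}
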
513
514
static void
515
msec_to_timespec(long long int ms, struct timespec *ts)
516
0
{
517
0
    ts->tv_sec = ms / 1000;
518
0
    ts->tv_nsec = (ms % 1000) * 1000 * 1000;
519
0
}
520
521
void
522
nsec_to_timespec(long long int nsec, struct timespec *ts)
523
0
{
524
0
    if (!nsec) {
525
0
        ts->tv_sec = ts->tv_nsec = 0;
526
0
        return;
527
0
    }
528
0
    ts->tv_sec = nsec / (1000 * 1000 * 1000);
529
530
0
    nsec = nsec % (1000 * 1000 * 1000);
531
    /* This is to handle dates before epoch. */
532
0
    if (OVS_UNLIKELY(nsec < 0)) {
533
0
        nsec += 1000 * 1000 * 1000;
534
0
        ts->tv_sec--;
535
0
    }
536
537
0
    ts->tv_nsec = nsec;
538
0
}
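
/* Illustrative sketch, not part of the upstream file: for negative counts
 * (times before the epoch), nsec_to_timespec() borrows one second so that
 * tv_nsec stays within [0, 10^9). */
static void
example_nsec_to_timespec(void)
{
    struct timespec ts;

    nsec_to_timespec(-1, &ts);
    ovs_assert(ts.tv_sec == -1 && ts.tv_nsec == 999999999);
}
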
539
540
static void
541
timewarp_work(void)
542
0
{
543
0
    struct clock *c = &monotonic_clock;
544
0
    struct timespec warp;
545
546
0
    ovs_mutex_lock(&c->mutex);
547
0
    if (!c->large_warp.conn) {
548
0
        ovs_mutex_unlock(&c->mutex);
549
0
        return;
550
0
    }
551
552
0
    if (c->large_warp.total_warp >= c->large_warp.warp) {
553
0
        msec_to_timespec(c->large_warp.warp, &warp);
554
0
        timespec_add(&c->warp, &c->warp, &warp);
555
0
        c->large_warp.total_warp -= c->large_warp.warp;
556
0
    } else if (c->large_warp.total_warp) {
557
0
        msec_to_timespec(c->large_warp.total_warp, &warp);
558
0
        timespec_add(&c->warp, &c->warp, &warp);
559
0
        c->large_warp.total_warp = 0;
560
0
    } else {
561
        /* c->large_warp.total_warp is 0. */
562
0
        msec_to_timespec(c->large_warp.warp, &warp);
563
0
        timespec_add(&c->warp, &c->warp, &warp);
564
0
    }
565
566
0
    if (!c->large_warp.total_warp) {
567
0
        unixctl_command_reply(c->large_warp.conn, "warped");
568
0
        c->large_warp.conn = NULL;
569
0
    }
570
571
0
    ovs_mutex_unlock(&c->mutex);
572
0
    seq_change(timewarp_seq);
573
574
    /* Give threads (e.g. the monitor thread) a chance to run. */
575
0
#ifndef _WIN32
576
0
    poll(NULL, 0, 10);
577
#else
578
    Sleep(10);
579
#endif
580
0
}
581
582
/* Perform work needed for "timewarp_seq"'s producer and consumers. */
583
void
584
timewarp_run(void)
585
0
{
586
    /* The function is a no-op unless timeval_dummy_register() is called. */
587
0
    if (timewarp_enabled) {
588
0
        unsigned int thread_id;
589
0
        ovs_mutex_lock(&monotonic_clock.mutex);
590
0
        thread_id = monotonic_clock.large_warp.main_thread_id;
591
0
        ovs_mutex_unlock(&monotonic_clock.mutex);
592
593
0
        if (thread_id != ovsthread_id_self()) {
594
            /* For threads other than the thread that changes the sequence,
595
             * wait on it. */
596
0
            uint64_t *last_seq = last_seq_get();
597
598
0
            *last_seq = seq_read(timewarp_seq);
599
0
            seq_wait(timewarp_seq, *last_seq);
600
0
        } else {
601
            /* Work on adding the remaining warps. */
602
0
            timewarp_work();
603
0
        }
604
0
    }
605
0
}
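
/* Illustrative sketch, not part of the upstream file: a thread other than the
 * main thread calls timewarp_run() once per loop iteration so that, when time
 * warping is enabled, it blocks on 'timewarp_seq' and wakes up whenever the
 * main thread warps the clock. */
static void
example_secondary_thread_loop(void)
{
    for (;;) {
        timewarp_run();
        poll(NULL, 0, 100);          /* Stand-in for the thread's real wait. */
    }
}
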
606
607
static long long int
608
timeval_diff_msec(const struct timeval *a, const struct timeval *b)
609
0
{
610
0
    return timeval_to_msec(a) - timeval_to_msec(b);
611
0
}
612
613
static void
614
timespec_add(struct timespec *sum,
615
             const struct timespec *a,
616
             const struct timespec *b)
617
0
{
618
0
    struct timespec tmp;
619
620
0
    tmp.tv_sec = a->tv_sec + b->tv_sec;
621
0
    tmp.tv_nsec = a->tv_nsec + b->tv_nsec;
622
0
    if (tmp.tv_nsec >= 1000 * 1000 * 1000) {
623
0
        tmp.tv_nsec -= 1000 * 1000 * 1000;
624
0
        tmp.tv_sec++;
625
0
    }
626
627
0
    *sum = tmp;
628
0
}
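
/* Illustrative sketch, not part of the upstream file: the carry above keeps
 * tv_nsec below one second, e.g. 0.6 s + 0.7 s = 1.3 s. */
static void
example_timespec_add(void)
{
    struct timespec a = { .tv_sec = 0, .tv_nsec = 600000000 };
    struct timespec b = { .tv_sec = 0, .tv_nsec = 700000000 };
    struct timespec sum;

    timespec_add(&sum, &a, &b);
    ovs_assert(sum.tv_sec == 1 && sum.tv_nsec == 300000000);
}
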
629
630
static bool
631
is_warped(const struct clock *c)
632
0
{
633
0
    bool warped;
634
635
0
    ovs_mutex_lock(&c->mutex);
636
0
    warped = monotonic_clock.warp.tv_sec || monotonic_clock.warp.tv_nsec;
637
0
    ovs_mutex_unlock(&c->mutex);
638
639
0
    return warped;
640
0
}
641
642
static void
643
log_poll_interval(long long int last_wakeup)
644
0
{
645
0
    long long int interval = time_msec() - last_wakeup;
646
647
0
    if (interval >= 1000 && !is_warped(&monotonic_clock)) {
648
0
        const struct rusage *last_rusage = get_recent_rusage();
649
0
        struct rusage rusage;
650
651
0
        COVERAGE_INC(long_poll_interval);
652
653
0
        if (!getrusage_thread(&rusage)) {
654
0
            VLOG_WARN("Unreasonably long %lldms poll interval"
655
0
                      " (%lldms user, %lldms system)",
656
0
                      interval,
657
0
                      timeval_diff_msec(&rusage.ru_utime,
658
0
                                        &last_rusage->ru_utime),
659
0
                      timeval_diff_msec(&rusage.ru_stime,
660
0
                                        &last_rusage->ru_stime));
661
662
0
            if (rusage.ru_minflt > last_rusage->ru_minflt
663
0
                || rusage.ru_majflt > last_rusage->ru_majflt) {
664
0
                VLOG_WARN("faults: %ld minor, %ld major",
665
0
                          rusage.ru_minflt - last_rusage->ru_minflt,
666
0
                          rusage.ru_majflt - last_rusage->ru_majflt);
667
0
            }
668
0
            if (rusage.ru_inblock > last_rusage->ru_inblock
669
0
                || rusage.ru_oublock > last_rusage->ru_oublock) {
670
0
                VLOG_WARN("disk: %ld reads, %ld writes",
671
0
                          rusage.ru_inblock - last_rusage->ru_inblock,
672
0
                          rusage.ru_oublock - last_rusage->ru_oublock);
673
0
            }
674
0
            if (rusage.ru_nvcsw > last_rusage->ru_nvcsw
675
0
                || rusage.ru_nivcsw > last_rusage->ru_nivcsw) {
676
0
                VLOG_WARN("context switches: %ld voluntary, %ld involuntary",
677
0
                          rusage.ru_nvcsw - last_rusage->ru_nvcsw,
678
0
                          rusage.ru_nivcsw - last_rusage->ru_nivcsw);
679
0
            }
680
0
        } else {
681
0
            VLOG_WARN("Unreasonably long %lldms poll interval", interval);
682
0
        }
683
0
        coverage_log();
684
0
    }
685
0
}
686

687
/* CPU usage tracking. */
688
689
struct cpu_usage {
690
    long long int when;         /* Time that this sample was taken. */
691
    unsigned long long int cpu; /* Total user+system CPU usage when sampled. */
692
};
693
694
struct cpu_tracker {
695
    struct cpu_usage older;
696
    struct cpu_usage newer;
697
    int cpu_usage;
698
699
    struct rusage recent_rusage;
700
};
701
0
DEFINE_PER_THREAD_MALLOCED_DATA(struct cpu_tracker *, cpu_tracker_var);
702
703
static struct cpu_tracker *
704
get_cpu_tracker(void)
705
0
{
706
0
    struct cpu_tracker *t = cpu_tracker_var_get();
707
0
    if (!t) {
708
0
        t = xzalloc(sizeof *t);
709
0
        t->older.when = LLONG_MIN;
710
0
        t->newer.when = LLONG_MIN;
711
0
        cpu_tracker_var_set_unsafe(t);
712
0
    }
713
0
    return t;
714
0
}
715
716
static struct rusage *
717
get_recent_rusage(void)
718
0
{
719
0
    return &get_cpu_tracker()->recent_rusage;
720
0
}
721
722
static int
723
getrusage_thread(struct rusage *rusage OVS_UNUSED)
724
0
{
725
0
#ifdef RUSAGE_THREAD
726
0
    return getrusage(RUSAGE_THREAD, rusage);
727
#else
728
    errno = EINVAL;
729
    return -1;
730
#endif
731
0
}
732
733
static void
734
refresh_rusage(void)
735
0
{
736
0
    struct cpu_tracker *t = get_cpu_tracker();
737
0
    struct rusage *recent_rusage = &t->recent_rusage;
738
739
0
    if (!getrusage_thread(recent_rusage)) {
740
0
        long long int now = time_msec();
741
0
        if (now >= t->newer.when + 3 * 1000) {
742
0
            t->older = t->newer;
743
0
            t->newer.when = now;
744
0
            t->newer.cpu = (timeval_to_msec(&recent_rusage->ru_utime) +
745
0
                            timeval_to_msec(&recent_rusage->ru_stime));
746
747
0
            if (t->older.when != LLONG_MIN && t->newer.cpu > t->older.cpu) {
748
0
                unsigned int dividend = t->newer.cpu - t->older.cpu;
749
0
                unsigned int divisor = (t->newer.when - t->older.when) / 100;
750
0
                t->cpu_usage = divisor > 0 ? dividend / divisor : -1;
751
0
            } else {
752
0
                t->cpu_usage = -1;
753
0
            }
754
0
        }
755
0
    }
756
0
}
757
758
/* Returns an estimate of this process's CPU usage, as a percentage, over the
759
 * past few seconds of wall-clock time.  Returns -1 if no estimate is available
760
 * (which will happen if the process has not been running long enough to have
761
 * an estimate, and can happen for other reasons as well). */
762
int
763
get_cpu_usage(void)
764
0
{
765
0
    return get_cpu_tracker()->cpu_usage;
766
0
}
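
/* Illustrative sketch, not part of the upstream file: a monitoring loop might
 * log the estimate, treating -1 as "not yet known". */
static void
example_log_cpu_usage(void)
{
    int usage = get_cpu_usage();

    if (usage >= 0) {
        VLOG_INFO("approximate CPU usage: %d%%", usage);
    }
}
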
767

768
/* Unixctl interface. */
769
770
/* "time/stop" stops the monotonic time returned by e.g. time_msec() from
771
 * advancing, except due to later calls to "time/warp". */
772
void
773
timeval_stop(void)
774
0
{
775
0
    ovs_mutex_lock(&monotonic_clock.mutex);
776
0
    atomic_store_relaxed(&monotonic_clock.slow_path, true);
777
0
    monotonic_clock.stopped = true;
778
0
    xclock_gettime(monotonic_clock.id, &monotonic_clock.cache);
779
0
    ovs_mutex_unlock(&monotonic_clock.mutex);
780
0
}
781
782
static void
783
timeval_stop_cb(struct unixctl_conn *conn,
784
                int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
785
                void *aux OVS_UNUSED)
786
0
{
787
0
    timeval_stop();
788
0
    unixctl_command_reply(conn, NULL);
789
0
}
790
791
/* "time/warp MSECS" advances the current monotonic time by the specified
792
 * number of milliseconds.  Unless "time/stop" has also been executed, the
793
 * monotonic clock continues to tick forward at the normal rate afterward.
794
 *
795
 * "time/warp LARGE_MSECS MSECS" is a variation of the above command. It
796
 * advances the current monotonic time by LARGE_MSECS. This is done MSECS
797
 * at a time in each run of the main thread. This gives other threads
798
 * time to run after the clock has been advanced by MSECS.
799
 *
800
 * Does not affect wall clock readings. */
801
static void
802
timeval_warp_cb(struct unixctl_conn *conn,
803
                int argc OVS_UNUSED, const char *argv[], void *aux OVS_UNUSED)
804
0
{
805
0
    long long int total_warp = argc > 2 ? atoll(argv[1]) : 0;
806
0
    long long int msecs = argc > 2 ? atoll(argv[2]) : atoll(argv[1]);
807
0
    if (msecs <= 0 || total_warp < 0) {
808
0
        unixctl_command_reply_error(conn, "invalid MSECS");
809
0
        return;
810
0
    }
811
812
0
    ovs_mutex_lock(&monotonic_clock.mutex);
813
0
    if (monotonic_clock.large_warp.conn) {
814
0
        ovs_mutex_unlock(&monotonic_clock.mutex);
815
0
        unixctl_command_reply_error(conn, "A previous warp in progress");
816
0
        return;
817
0
    }
818
0
    atomic_store_relaxed(&monotonic_clock.slow_path, true);
819
0
    monotonic_clock.large_warp.conn = conn;
820
0
    monotonic_clock.large_warp.total_warp = total_warp;
821
0
    monotonic_clock.large_warp.warp = msecs;
822
0
    monotonic_clock.large_warp.main_thread_id = ovsthread_id_self();
823
0
    ovs_mutex_unlock(&monotonic_clock.mutex);
824
825
0
    timewarp_work();
826
0
}
827
828
/* Direct monotonic clock into slow path and advance the current monotonic
829
 * time by 'msecs' milliseconds directly.  This is for use in unit tests. */
830
void
831
timeval_warp(long long int msecs)
832
0
{
833
0
    struct clock *c = &monotonic_clock;
834
0
    struct timespec warp;
835
836
0
    ovs_mutex_lock(&monotonic_clock.mutex);
837
0
    atomic_store_relaxed(&monotonic_clock.slow_path, true);
838
0
    msec_to_timespec(msecs, &warp);
839
0
    timespec_add(&c->warp, &c->warp, &warp);
840
0
    ovs_mutex_unlock(&monotonic_clock.mutex);
841
0
}
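
/* Illustrative sketch, not part of the upstream file: a unit test can
 * fast-forward monotonic time without sleeping; afterward time_msec() reports
 * at least 5000 ms more than before the warp. */
static void
example_warp_for_test(void)
{
    long long int before = time_msec();

    timeval_warp(5000);
    ovs_assert(time_msec() - before >= 5000);
}
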
842
843
void
844
timeval_dummy_register(void)
845
0
{
846
0
    timewarp_enabled = true;
847
0
    unixctl_command_register("time/stop", "", 0, 0, timeval_stop_cb, NULL);
848
0
    unixctl_command_register("time/warp", "[large_msecs] msecs", 1, 2,
849
0
                             timeval_warp_cb, NULL);
850
0
}
851
852
853
854
/* strftime() with an extension for high-resolution timestamps.  Any '#'s in
855
 * 'format' will be replaced by subseconds, e.g. use "%S.###" to obtain results
856
 * like "01.123".  */
857
size_t
858
strftime_msec(char *s, size_t max, const char *format,
859
              const struct tm_msec *tm)
860
0
{
861
0
    size_t n;
862
863
    /* Visual Studio 2013's behavior is to crash when 0 is passed as the
864
     * second argument to strftime(). */
865
0
    n = max ? strftime(s, max, format, &tm->tm) : 0;
866
0
    if (n) {
867
0
        char decimals[4];
868
0
        char *p;
869
870
0
        sprintf(decimals, "%03d", tm->msec);
871
0
        for (p = strchr(s, '#'); p; p = strchr(p, '#')) {
872
0
            char *d = decimals;
873
0
            while (*p == '#')  {
874
0
                *p++ = *d ? *d++ : '0';
875
0
            }
876
0
        }
877
0
    }
878
879
0
    return n;
880
0
}
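
/* Illustrative sketch, not part of the upstream file: pairing
 * localtime_msec() with the '#' extension yields a millisecond timestamp such
 * as "2015-01-01 12:00:00.123". */
static void
example_format_timestamp(void)
{
    struct tm_msec tm;
    char buf[64];

    localtime_msec(time_wall_msec(), &tm);
    strftime_msec(buf, sizeof buf, "%Y-%m-%d %H:%M:%S.###", &tm);
    VLOG_INFO("now: %s", buf);
}
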
881
882
struct tm_msec *
883
localtime_msec(long long int now, struct tm_msec *result)
884
0
{
885
0
  time_t now_sec = now / 1000;
886
0
  localtime_r(&now_sec, &result->tm);
887
0
  result->msec = now % 1000;
888
0
  return result;
889
0
}
890
891
struct tm_msec *
892
gmtime_msec(long long int now, struct tm_msec *result)
893
0
{
894
0
  time_t now_sec = now / 1000;
895
0
  gmtime_r(&now_sec, &result->tm);
896
0
  result->msec = now % 1000;
897
0
  return result;
898
0
}