Line | Count | Source |
1 | | /* |
2 | | * General time-keeping code and variables |
3 | | * |
4 | | * Copyright 2000-2021 Willy Tarreau <w@1wt.eu> |
5 | | * |
6 | | * This program is free software; you can redistribute it and/or |
7 | | * modify it under the terms of the GNU General Public License |
8 | | * as published by the Free Software Foundation; either version |
9 | | * 2 of the License, or (at your option) any later version. |
10 | | * |
11 | | */ |
12 | | |
13 | | #include <sys/time.h> |
14 | | #include <signal.h> |
15 | | #include <time.h> |
16 | | |
17 | | #ifdef USE_THREAD |
18 | | #include <pthread.h> |
19 | | #endif |
20 | | |
21 | | #include <haproxy/api.h> |
22 | | #include <haproxy/activity.h> |
23 | | #include <haproxy/clock.h> |
24 | | #include <haproxy/signal-t.h> |
25 | | #include <haproxy/time.h> |
26 | | #include <haproxy/tinfo-t.h> |
27 | | #include <haproxy/tools.h> |
28 | | |
29 | | struct timeval start_date; /* the process's start date in wall-clock time */ |
30 | | struct timeval ready_date; /* date when the process was considered ready */ |
31 | | ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */ |
32 | | volatile ullong _global_now_ns; /* locally stored common monotonic date between all threads, in ns (wraps every 585 yr) */ |
33 | | volatile ullong *global_now_ns; /* common monotonic date, may point to _global_now_ns or shared memory */ |
34 | | volatile uint _global_now_ms; /* locally stored common monotonic date in milliseconds (may wrap) */ |
35 | | volatile uint *global_now_ms; /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */ |
36 | | |
37 | | /* when CLOCK_MONOTONIC is supported, the offset is applied from th_ctx->prev_mono_time instead */ |
38 | | THREAD_ALIGNED() static llong now_offset; /* global offset between system time and global time in ns */ |
39 | | |
40 | | THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */ |
41 | | THREAD_LOCAL uint now_ms; /* internal monotonic date in milliseconds (may wrap) */ |
42 | | THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */ |
43 | | |
44 | | static THREAD_LOCAL ullong before_poll_mono_ns; /* system wide monotonic time when entering poll last */ |
45 | | static THREAD_LOCAL struct timeval before_poll; /* system date before calling poll() */ |
46 | | static THREAD_LOCAL struct timeval after_poll; /* system date after leaving poll() */ |
47 | | static THREAD_LOCAL unsigned int samp_time; /* total elapsed time over current sample */ |
48 | | static THREAD_LOCAL unsigned int idle_time; /* total idle time over current sample */ |
49 | | static THREAD_LOCAL unsigned int iso_time_sec; /* last iso time value for this thread */ |
50 | | static THREAD_LOCAL char iso_time_str[34]; /* ISO time representation of gettimeofday() */ |
51 | | |
52 | | #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME) |
53 | | static clockid_t per_thread_clock_id[MAX_THREADS]; |
54 | | #endif |
55 | | |
56 | | /* returns the system's monotonic time in nanoseconds if supported, otherwise zero */ |
57 | | uint64_t now_mono_time(void) |
58 | 0 | { |
59 | 0 | uint64_t ret = 0; |
60 | 0 | #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_MONOTONIC_CLOCK) |
61 | 0 | struct timespec ts; |
62 | 0 | if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) |
63 | 0 | ret = ts.tv_sec * 1000000000ULL + ts.tv_nsec; |
64 | 0 | #endif |
65 | 0 | return ret; |
66 | 0 | } |
67 | | |
68 | | /* Returns the system's monotonic time in nanoseconds. |
69 | | * Uses the coarse clock source if supported (for fast but |
70 | | * less precise queries with limited resource usage). |
71 | | * Falls back to now_mono_time() if the coarse source is not supported,
72 | | * which may itself return 0 if not supported either. |
73 | | */ |
74 | | uint64_t now_mono_time_fast(void) |
75 | 0 | { |
76 | 0 | #if defined(CLOCK_MONOTONIC_COARSE) |
77 | 0 | struct timespec ts; |
78 | |
79 | 0 | if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) |
80 | 0 | return (ts.tv_sec * 1000000000ULL + ts.tv_nsec); |
81 | 0 | #endif |
82 | | /* fallback to regular mono time, |
83 | | * returns 0 if not supported |
84 | | */ |
85 | 0 | return now_mono_time(); |
86 | 0 | } |
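The trade-off between the two sources can be checked with the standard clock_getres() call. A minimal standalone sketch (not part of clock.c, assuming a Linux host where CLOCK_MONOTONIC_COARSE exists):

#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec res;

    /* CLOCK_MONOTONIC is typically nanosecond-grained... */
    if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
        printf("CLOCK_MONOTONIC resolution:        %ld ns\n", res.tv_nsec);
#ifdef CLOCK_MONOTONIC_COARSE
    /* ...while the coarse clock usually ticks with the timer interrupt
     * (commonly 1-10 ms), which is what makes it cheaper to read
     */
    if (clock_getres(CLOCK_MONOTONIC_COARSE, &res) == 0)
        printf("CLOCK_MONOTONIC_COARSE resolution: %ld ns\n", res.tv_nsec);
#endif
    return 0;
}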
87 | | |
88 | | /* returns the current thread's cumulated CPU time in nanoseconds if supported, otherwise zero */ |
89 | | uint64_t now_cpu_time(void) |
90 | 0 | { |
91 | 0 | uint64_t ret = 0; |
92 | 0 | #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME) |
93 | 0 | struct timespec ts; |
94 | 0 | if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) |
95 | 0 | ret = ts.tv_sec * 1000000000ULL + ts.tv_nsec; |
96 | 0 | #endif |
97 | 0 | return ret; |
98 | 0 | } |
99 | | |
100 | | /* Returns the current thread's cumulated CPU time in nanoseconds. |
101 | | * |
102 | | * The thread-local value is cached so that calls are less precise but also
103 | | * less expensive when heavily used.
104 | | * We use the mono time as a cache expiration hint since now_cpu_time() is |
105 | | * known to be much more expensive than now_mono_time_fast() on systems |
106 | | * supporting the COARSE clock source. |
107 | | * |
108 | | * Returns 0 if either now_mono_time_fast() or now_cpu_time() are not |
109 | | * supported. |
110 | | */ |
111 | | uint64_t now_cpu_time_fast(void) |
112 | 0 | { |
113 | 0 | static THREAD_LOCAL uint64_t mono_cache = 0; |
114 | 0 | static THREAD_LOCAL uint64_t cpu_cache = 0; |
115 | 0 | uint64_t mono_cur; |
116 | |
117 | 0 | mono_cur = now_mono_time_fast(); |
118 | 0 | if (unlikely(mono_cur != mono_cache)) { |
119 | | /* global mono clock was updated: local cache is outdated */ |
120 | 0 | cpu_cache = now_cpu_time(); |
121 | 0 | mono_cache = mono_cur; |
122 | 0 | } |
123 | 0 | return cpu_cache; |
124 | 0 | } |
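The caching pattern above can be expressed outside HAProxy's THREAD_LOCAL machinery; a hedged standalone sketch (the cached_thread_cpu_ns name is illustrative, not part of this file) that only pays the expensive CLOCK_THREAD_CPUTIME_ID read when the cheap coarse clock has moved:

#include <stdint.h>
#include <time.h>

static uint64_t ts_to_ns(const struct timespec *ts)
{
    return (uint64_t)ts->tv_sec * 1000000000ULL + ts->tv_nsec;
}

/* sketch of the same idea as now_cpu_time_fast(): refresh the per-thread
 * CPU clock only when the coarse monotonic clock has ticked
 */
uint64_t cached_thread_cpu_ns(void)
{
    static __thread uint64_t mono_cache, cpu_cache;
    struct timespec ts;
    uint64_t mono_cur = 0;

#ifdef CLOCK_MONOTONIC_COARSE
    if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0)
        mono_cur = ts_to_ns(&ts);
#endif
    if (mono_cur != mono_cache) {
        if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
            cpu_cache = ts_to_ns(&ts);
        mono_cache = mono_cur;
    }
    return cpu_cache;
}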
125 | | |
126 | | /* returns another thread's cumulated CPU time in nanoseconds if supported, otherwise zero */ |
127 | | uint64_t now_cpu_time_thread(int thr) |
128 | 0 | { |
129 | 0 | uint64_t ret = 0; |
130 | 0 | #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME) |
131 | 0 | struct timespec ts; |
132 | 0 | if (clock_gettime(per_thread_clock_id[thr], &ts) == 0) |
133 | 0 | ret = ts.tv_sec * 1000000000ULL + ts.tv_nsec; |
134 | 0 | #endif |
135 | 0 | return ret; |
136 | 0 | } |
137 | | |
138 | | /* set the clock source for the local thread */ |
139 | | void clock_set_local_source(void) |
140 | 0 | { |
141 | 0 | #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0) |
142 | | #ifdef USE_THREAD |
143 | | pthread_getcpuclockid(pthread_self(), &per_thread_clock_id[tid]); |
144 | | #else |
145 | 0 | per_thread_clock_id[tid] = CLOCK_THREAD_CPUTIME_ID; |
146 | 0 | #endif |
147 | 0 | #endif |
148 | 0 | } |
149 | | |
150 | | /* registers a timer <tmr> of type timer_t delivering signal <sig> with value |
151 | | * <val>. It tries on the current thread's clock ID first and falls back to |
152 | | * CLOCK_REALTIME. Returns non-zero on success, otherwise zero.
153 | | */ |
154 | | int clock_setup_signal_timer(void *tmr, int sig, int val) |
155 | 0 | { |
156 | 0 | int ret = 0; |
157 | |
158 | | #if defined(USE_RT) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME) |
159 | | struct sigevent sev = { }; |
160 | | timer_t *timer = tmr; |
161 | | sigset_t set; |
162 | | |
163 | | /* unblock the WDTSIG signal we intend to use */ |
164 | | sigemptyset(&set); |
165 | | sigaddset(&set, WDTSIG); |
166 | | ha_sigmask(SIG_UNBLOCK, &set, NULL); |
167 | | |
168 | | /* this timer will signal WDTSIG when it fires, with tid in the si_int |
169 | | * field (important since any thread will receive the signal). |
170 | | */ |
171 | | sev.sigev_notify = SIGEV_SIGNAL; |
172 | | sev.sigev_signo = sig; |
173 | | sev.sigev_value.sival_int = val; |
174 | | if (timer_create(per_thread_clock_id[tid], &sev, timer) != -1 || |
175 | | timer_create(CLOCK_REALTIME, &sev, timer) != -1) |
176 | | ret = 1; |
177 | | #endif |
178 | 0 | return ret; |
179 | 0 | } |
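Once created, the timer still has to be armed, which is done with the standard timer_settime() call. A hedged sketch (the arm_timer_ms helper and the one-shot policy are illustrative, not taken from this file):

#include <time.h>

/* arm <tmr> to fire once, <ms> milliseconds from now; 0 on success, -1 on error */
static int arm_timer_ms(timer_t tmr, unsigned int ms)
{
    struct itimerspec its = { };

    its.it_value.tv_sec  = ms / 1000;
    its.it_value.tv_nsec = (long)(ms % 1000) * 1000000L;
    /* it_interval stays zero: one-shot, to be re-armed by the signal handler */
    return timer_settime(tmr, 0, &its, NULL);
}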
180 | | |
181 | | /* clock_update_date: sets <date> to system time, and sets <now_ns> to something |
182 | | * as close as possible to real time, following a monotonic function. The main |
183 | | * principle consists in detecting backwards and forwards time jumps and adjust |
184 | | * an offset to correct them. This function should be called once after each |
185 | | * poll, and never farther apart than MAX_DELAY_MS*2. The poll's timeout should |
186 | | * be passed in <max_wait>, and the return value in <interrupted> (a non-zero |
187 | | * value means that we have not expired the timeout). |
188 | | * |
189 | | * clock_init_process_date() must have been called once first, and |
190 | | * clock_init_thread_date() must also have been called once for each thread. |
191 | | * |
192 | | * An offset is used to adjust the current time (date), to figure a monotonic |
193 | | * local time (now_ns). The offset is not critical, as it is only updated after |
194 | | * a clock jump is detected. From this point all threads will apply it to their |
195 | | * locally measured time, and will then agree around a common monotonic |
196 | | * global_now_ns value that serves to further refine their local time. Both |
197 | | * now_ns and global_now_ns are 64-bit integers counting nanoseconds since a |
198 | | * vague reference (it starts roughly 20s before the next wrap-around of the |
199 | | * millisecond counter after boot). The offset is also an integral number of |
200 | | * nanoseconds, but it's signed so that the clock can be adjusted in both
201 | | * directions.
202 | | */ |
203 | | void clock_update_local_date(int max_wait, int interrupted) |
204 | 0 | { |
205 | 0 | struct timeval min_deadline, max_deadline; |
206 | 0 | llong ofs = HA_ATOMIC_LOAD(&now_offset); |
207 | 0 | llong date_ns; |
208 | |
209 | 0 | gettimeofday(&date, NULL); |
210 | 0 | th_ctx->curr_mono_time = now_mono_time(); |
211 | |
212 | 0 | date_ns = th_ctx->curr_mono_time; |
213 | 0 | if (date_ns) { |
214 | | /* no need to go through complex calculations, we have |
215 | | * monotonic time. The offset will never change. |
216 | | */ |
217 | 0 | goto done; |
218 | 0 | } |
219 | | |
220 | | /* compute the minimum and maximum local date we may have reached based |
221 | | * on our past date and the associated timeout. There are three possible |
222 | | * extremities: |
223 | | * - the new date cannot be older than before_poll |
224 | | * - if not interrupted, the new date cannot be older than |
225 | | * before_poll+max_wait |
226 | | * - in any case the new date cannot be newer than |
227 | | * before_poll+max_wait+some margin (100ms used here). |
228 | | * In case of violation, we'll ignore the current date and instead |
229 | | * restart from the last date we knew. |
230 | | */ |
231 | 0 | _tv_ms_add(&min_deadline, &before_poll, max_wait); |
232 | 0 | _tv_ms_add(&max_deadline, &before_poll, max_wait + 100); |
233 | 0 | date_ns = tv_to_ns(&date); |
234 | |
235 | 0 | if (unlikely(__tv_islt(&date, &before_poll) || // big jump backwards |
236 | 0 | (!interrupted && __tv_islt(&date, &min_deadline)) || // small jump backwards |
237 | 0 | date_ns + ofs >= now_ns + ms_to_ns(max_wait + 100)|| // offset changed by another thread |
238 | 0 | __tv_islt(&max_deadline, &date))) { // big jump forwards |
239 | 0 | if (!interrupted) |
240 | 0 | now_ns += ms_to_ns(max_wait); |
241 | | |
242 | | /* consider the most recent known date */ |
243 | 0 | now_ns = MAX(now_ns, HA_ATOMIC_LOAD(global_now_ns)); |
244 | | |
245 | | /* this event is rare, but it requires proper handling because if |
246 | | * we just left now_ns where it was, the date would not be updated
247 | | * by clock_update_global_date(). |
248 | | */ |
249 | 0 | HA_ATOMIC_STORE(&now_offset, now_ns - date_ns); |
250 | 0 | } else { |
251 | 0 | done: |
252 | | /* The date is still within expectations. Let's apply the |
253 | | * now_offset to the system date. Note: ofs if made of two |
254 | | * now_offset to the system date. Note: ofs is made of two
255 | | */ |
256 | 0 | now_ns = date_ns + ofs; |
257 | 0 | } |
258 | 0 | now_ms = ns_to_ms(now_ns); |
259 | | |
260 | | /* correct for TICK_ETERNITY (0) */
261 | 0 | if (unlikely(now_ms == TICK_ETERNITY)) |
262 | 0 | now_ms++; |
263 | 0 | } |
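The offset mechanism can be illustrated with a toy standalone model (hypothetical values, not HAProxy code): when the wall clock jumps backwards between two polls, the jump is absorbed into the offset so that the derived time keeps moving forward.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t sys_ns = 1000000000000LL;  /* system date: 1000 s */
    int64_t offset = 0;
    int64_t now    = sys_ns + offset;  /* local date: 1000 s */

    /* the wall clock jumps 30 s backwards between two polls */
    sys_ns -= 30000000000LL;

    /* jump detected: keep the last known date and recompute the offset
     * instead of letting the local date go backwards
     */
    offset = now - sys_ns;             /* +30 s */

    /* 10 ms later the system clock advances normally again */
    sys_ns += 10000000LL;
    now = sys_ns + offset;             /* 1000.010 s: still monotonic */
    printf("offset=%lld ns, now=%lld ns\n", (long long)offset, (long long)now);
    return 0;
}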
264 | | |
265 | | void clock_update_global_date() |
266 | 0 | { |
267 | 0 | ullong old_now_ns; |
268 | 0 | uint old_now_ms; |
269 | | |
270 | | /* now that we have bounded the local time, let's check if it's |
271 | | * realistic regarding the global date, which only moves forward, |
272 | | * otherwise catch up. |
273 | | */ |
274 | 0 | old_now_ns = _HA_ATOMIC_LOAD(global_now_ns); |
275 | 0 | old_now_ms = _HA_ATOMIC_LOAD(global_now_ms); |
276 | |
277 | 0 | do { |
278 | 0 | if (now_ns < old_now_ns) |
279 | 0 | now_ns = old_now_ns; |
280 | | |
281 | | /* now <now_ns> is expected to be the most accurate date, |
282 | | * equal to <global_now_ns> or newer. Updating the global |
283 | | * date too often causes extreme contention and is not |
284 | | * needed: it's only used to help threads run at the |
285 | | * same date in case of local drift, and the global date, |
286 | | * which changes, is only used by freq counters (a choice |
287 | | * which is debatable by the way since it changes under us). |
288 | | * Tests have shown that the contention can be reduced from
289 | | * 37% in this function to almost 0% when keeping clocks |
290 | | * synchronized no better than 32 microseconds, so that's |
291 | | * what we're doing here. |
292 | | */ |
293 | 0 | now_ms = ns_to_ms(now_ns); |
294 | | /* correct for TICK_ETERNITY (0) */
295 | 0 | if (unlikely(now_ms == TICK_ETERNITY)) |
296 | 0 | now_ms++; |
297 | |
298 | 0 | if (!((now_ns ^ old_now_ns) & ~0x7FFFULL)) |
299 | 0 | return; |
300 | | |
301 | | /* let's try to update the global_now_ns (both in nanoseconds |
302 | | * and ms forms) or loop again. |
303 | | */ |
304 | 0 | } while ((!_HA_ATOMIC_CAS(global_now_ns, &old_now_ns, now_ns) || |
305 | 0 | (now_ms != old_now_ms && !_HA_ATOMIC_CAS(global_now_ms, &old_now_ms, now_ms))) && |
306 | 0 | __ha_cpu_relax()); |
307 | | |
308 | 0 | if (!th_ctx->curr_mono_time) { |
309 | | /* Only update the offset when monotonic time is not available. |
310 | | * <now_ns> and <now_ms> are now updated to the last value of |
311 | | * global_now_ns and global_now_ms, which were also monotonically |
312 | | * updated. We can compute the latest offset, we don't care who writes |
313 | | * it last, the variations will not break the monotonic property. |
314 | | */ |
315 | 0 | HA_ATOMIC_STORE(&now_offset, now_ns - tv_to_ns(&date)); |
316 | 0 | } |
317 | 0 | } |
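The 32-microsecond figure above follows directly from the ~0x7FFFULL mask: two dates that agree on every bit above bit 14 fall in the same 32768 ns (about 32.8 us) aligned bucket and the global variables are left untouched. A tiny standalone check of that arithmetic (illustrative values):

#include <stdio.h>

int main(void)
{
    unsigned long long a = 0x100000000ULL;  /* a date aligned on a 32768 ns boundary */
    unsigned long long b = a + 20000ULL;    /* 20 us later */
    unsigned long long c = a + 40000ULL;    /* 40 us later */

    /* prints 1: a and b share the same bucket, no global update attempted */
    printf("%d\n", !((a ^ b) & ~0x7FFFULL));
    /* prints 0: c crossed into the next bucket, the CAS loop runs */
    printf("%d\n", !((a ^ c) & ~0x7FFFULL));
    return 0;
}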
318 | | |
319 | | /* must be called once at boot to initialize some global variables */ |
320 | | void clock_init_process_date(void) |
321 | 0 | { |
322 | 0 | now_offset = 0; |
323 | 0 | before_poll_mono_ns = now_mono_time(); // 0 if not supported |
324 | 0 | th_ctx->prev_mono_time = th_ctx->curr_mono_time = before_poll_mono_ns; |
325 | 0 | gettimeofday(&date, NULL); |
326 | 0 | after_poll = before_poll = date; |
327 | 0 | _global_now_ns = th_ctx->curr_mono_time; |
328 | 0 | if (!_global_now_ns) // CLOCK_MONOTONIC not supported |
329 | 0 | _global_now_ns = tv_to_ns(&date); |
330 | 0 | now_ns = _global_now_ns; |
331 | |
332 | 0 | _global_now_ms = ns_to_ms(now_ns); |
333 | | |
334 | | /* force time to wrap 20s after boot: we first compute the time offset |
335 | | * that, once applied to the wall-clock date, will make the local time
336 | | * wrap BOOT_TIME_WRAP_SEC (20) seconds after startup. This offset is
337 | | * applied to the process-wide time, and will be used to recompute the
338 | | * local time, both of which will match and continue from this shifted date.
339 | | */ |
340 | 0 | now_offset = sec_to_ns((uint)((uint)(-_global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC)); |
341 | 0 | _global_now_ns += now_offset; |
342 | 0 | now_ns = _global_now_ns; |
343 | 0 | now_ms = ns_to_ms(now_ns); |
344 | | /* correct for TICK_ETERNITY (0) */
345 | 0 | if (now_ms == TICK_ETERNITY) |
346 | 0 | now_ms++; |
347 | 0 | _global_now_ms = now_ms; |
348 | | |
349 | | /* for now global_now_ms points to the process-local _global_now_ms */ |
350 | 0 | global_now_ms = &_global_now_ms; |
351 | | /* same goes for global_now_ns */
352 | 0 | global_now_ns = &_global_now_ns; |
353 | |
354 | 0 | th_ctx->idle_pct = 100; |
355 | 0 | clock_update_date(0, 1); |
356 | 0 | } |
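A worked instance of the wrap offset computation (illustrative boot date; BOOT_TIME_WRAP_SEC is taken as the 20 s mentioned in the comment above):

#include <stdint.h>
#include <stdio.h>

#define BOOT_TIME_WRAP_SEC 20U   /* assumed value, matching the 20s above */

int main(void)
{
    /* suppose the boot date, reduced to a 32-bit millisecond counter,
     * is 1048576 ms short of wrapping
     */
    unsigned int global_now_ms = 0xFFF00000U;
    unsigned int secs = (unsigned int)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC;
    uint64_t offset_ns = (uint64_t)secs * 1000000000ULL;

    /* 1048576 / 1000 - 20 = 1028 s: shifting by this amount makes the
     * millisecond clock wrap roughly 20 s after startup
     */
    printf("offset: %u s (%llu ns)\n", secs, (unsigned long long)offset_ns);
    return 0;
}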
357 | | |
358 | | void clock_adjust_now_offset(void) |
359 | 0 | { |
360 | | /* Only update the offset when monotonic time is not available. */ |
361 | 0 | if (th_ctx->curr_mono_time) |
362 | 0 | return; |
363 | | |
364 | 0 | HA_ATOMIC_STORE(&now_offset, now_ns - tv_to_ns(&date)); |
365 | 0 | } |
366 | | |
367 | | void clock_set_now_offset(llong ofs) |
368 | 0 | { |
369 | 0 | HA_ATOMIC_STORE(&now_offset, ofs); |
370 | 0 | } |
371 | | |
372 | | llong clock_get_now_offset(void) |
373 | 0 | { |
374 | 0 | return HA_ATOMIC_LOAD(&now_offset); |
375 | 0 | } |
376 | | |
377 | | /* must be called once per thread to initialize their thread-local variables. |
378 | | * Note that other threads might also be initializing and running in parallel. |
379 | | */ |
380 | | void clock_init_thread_date(void) |
381 | 0 | { |
382 | 0 | gettimeofday(&date, NULL); |
383 | 0 | after_poll = before_poll = date; |
384 | |
385 | 0 | now_ns = _HA_ATOMIC_LOAD(global_now_ns); |
386 | 0 | th_ctx->idle_pct = 100; |
387 | 0 | th_ctx->prev_cpu_time = now_cpu_time(); |
388 | 0 | th_ctx->prev_mono_time = now_mono_time(); |
389 | 0 | th_ctx->curr_mono_time = th_ctx->prev_mono_time; |
390 | 0 | before_poll_mono_ns = th_ctx->curr_mono_time; |
391 | 0 | clock_update_date(0, 1); |
392 | 0 | } |
393 | | |
394 | | /* report the average CPU idle percentage over all running threads, between 0 and 100 */ |
395 | | uint clock_report_idle(void) |
396 | 0 | { |
397 | 0 | uint total = 0; |
398 | 0 | uint rthr = 0; |
399 | 0 | uint thr; |
400 | |
401 | 0 | for (thr = 0; thr < MAX_THREADS; thr++) { |
402 | 0 | if (!ha_thread_info[thr].tg || |
403 | 0 | !(ha_thread_info[thr].tg->threads_enabled & ha_thread_info[thr].ltid_bit)) |
404 | 0 | continue; |
405 | 0 | total += HA_ATOMIC_LOAD(&ha_thread_ctx[thr].idle_pct); |
406 | 0 | rthr++; |
407 | 0 | } |
408 | 0 | return rthr ? total / rthr : 0; |
409 | 0 | } |
410 | | |
411 | | /* Update the idle time value twice a second, to be called after |
412 | | * clock_update_date() when called after poll(), and currently called only by |
413 | | * clock_leaving_poll() below. It relies on <before_poll> to be updated to |
414 | | * the system time before calling poll(). |
415 | | */ |
416 | | static inline void clock_measure_idle(void) |
417 | 0 | { |
418 | | /* Let's compute the idle to work ratio. We worked between after_poll |
419 | | * and before_poll, and slept between before_poll and date. The idle_pct |
420 | | * is updated at most twice every second. Note that the current second |
421 | | * rarely changes so we avoid a multiply when not needed. |
422 | | */ |
423 | 0 | int delta; |
424 | |
425 | 0 | if (before_poll_mono_ns) { |
426 | | /* CLOCK_MONOTONIC in use, use it and convert it to microseconds */ |
427 | |
428 | 0 | idle_time += (th_ctx->curr_mono_time - before_poll_mono_ns) / 1000ull; |
429 | 0 | samp_time += (th_ctx->curr_mono_time - th_ctx->prev_mono_time) / 1000ull; |
430 | 0 | } else { |
431 | | /* CLOCK_MONOTONIC not used */ |
432 | 0 | if ((delta = date.tv_sec - before_poll.tv_sec)) |
433 | 0 | delta *= 1000000; |
434 | 0 | idle_time += delta + (date.tv_usec - before_poll.tv_usec); |
435 | |
436 | 0 | if ((delta = date.tv_sec - after_poll.tv_sec)) |
437 | 0 | delta *= 1000000; |
438 | 0 | samp_time += delta + (date.tv_usec - after_poll.tv_usec); |
439 | |
440 | 0 | after_poll.tv_sec = date.tv_sec; after_poll.tv_usec = date.tv_usec; |
441 | 0 | } |
442 | 0 | if (samp_time < 500000) |
443 | 0 | return; |
444 | | |
445 | 0 | HA_ATOMIC_STORE(&th_ctx->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time); |
446 | 0 | idle_time = samp_time = 0; |
447 | 0 | } |
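Worked example of the published value (illustrative numbers): with 300 ms of the last ~600 ms sample spent sleeping in poll(), the 500000 us threshold is crossed and the thread publishes 50% idle.

#include <stdio.h>

int main(void)
{
    unsigned int idle_time = 300000;  /* us spent sleeping in poll() */
    unsigned int samp_time = 600000;  /* us elapsed over the whole sample */

    /* same rounding as above: the +samp_time/2 term rounds to the nearest percent */
    unsigned int idle_pct = (100ULL * idle_time + samp_time / 2) / samp_time;
    printf("idle_pct = %u\n", idle_pct);  /* 50 */
    return 0;
}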
448 | | |
449 | | /* Collect date and time information after leaving poll(). <timeout> must be |
450 | | * set to the maximum sleep time passed to poll (in milliseconds), and |
451 | | * <interrupted> must be zero if the poller reached the timeout or non-zero |
452 | | * otherwise, which generally is provided by the poller's return value. |
453 | | */ |
454 | | void clock_leaving_poll(int timeout, int interrupted) |
455 | 0 | { |
456 | 0 | clock_measure_idle(); |
457 | 0 | th_ctx->prev_cpu_time = now_cpu_time(); |
458 | 0 | th_ctx->prev_mono_time = th_ctx->curr_mono_time; |
459 | 0 | } |
460 | | |
461 | | /* Collect date and time information before calling poll(). This will be used |
462 | | * to count the run time of the past loop and the sleep time of the next poll. |
463 | | * It also compares the elapsed and cpu times during the activity period to |
464 | | * estimate the amount of stolen time, which is reported if higher than half |
465 | | * a millisecond. |
466 | | */ |
467 | | void clock_entering_poll(void) |
468 | 0 | { |
469 | 0 | uint64_t new_mono_time; |
470 | 0 | uint64_t new_cpu_time; |
471 | 0 | uint32_t run_time; |
472 | 0 | int64_t stolen; |
473 | |
474 | 0 | new_cpu_time = now_cpu_time(); |
475 | 0 | new_mono_time = now_mono_time(); |
476 | | |
477 | | /* note the time when we enter poll */
478 | 0 | before_poll_mono_ns = new_mono_time; |
479 | | |
480 | | /* The time might have jumped either backwards or forwards during tasks |
481 | | * processing. It's easy to detect a backwards jump, but a forward jump |
482 | | * needs a margin. Here the upper limit of 2 seconds corresponds to a |
483 | | * large margin at which the watchdog would already trigger so it looks |
484 | | * sufficient to avoid false positives most of the time. The goal here |
485 | | * is to make sure that before_poll can be trusted when entering |
486 | | * clock_update_local_date() so that we can detect and fix time jumps. |
487 | | * All this will also make sure we don't report idle/run times that are |
488 | | * too much wrong during such jumps. |
489 | | */ |
490 | |
491 | 0 | if (before_poll_mono_ns) |
492 | 0 | run_time = (before_poll_mono_ns - th_ctx->curr_mono_time) / 1000ull; |
493 | 0 | else { |
494 | 0 | gettimeofday(&before_poll, NULL); |
495 | |
496 | 0 | if (unlikely(__tv_islt(&before_poll, &after_poll))) |
497 | 0 | before_poll = after_poll; |
498 | 0 | else if (unlikely(__tv_ms_elapsed(&after_poll, &before_poll) >= 2000)) |
499 | 0 | tv_ms_add(&before_poll, &after_poll, 2000); |
500 | |
501 | 0 | run_time = (before_poll.tv_sec - after_poll.tv_sec) * 1000000U + (before_poll.tv_usec - after_poll.tv_usec); |
502 | 0 | } |
503 | |
504 | 0 | if (th_ctx->prev_cpu_time && th_ctx->prev_mono_time) { |
505 | 0 | new_cpu_time -= th_ctx->prev_cpu_time; |
506 | 0 | new_mono_time -= th_ctx->prev_mono_time; |
507 | 0 | stolen = new_mono_time - new_cpu_time; |
508 | 0 | if (unlikely(stolen >= 500000)) { |
509 | 0 | stolen /= 500000; |
510 | | /* more than half a millisecond difference might |
511 | | * indicate an undesired preemption. |
512 | | */ |
513 | 0 | report_stolen_time(stolen); |
514 | 0 | } |
515 | 0 | } |
516 | | |
517 | | /* update the average runtime */ |
518 | 0 | activity_count_runtime(run_time); |
519 | 0 | } |
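Worked example of the stolen-time report (illustrative numbers): if the loop accrued 1 ms of CPU time while 3 ms of wall-clock time elapsed, the 2 ms difference exceeds the half-millisecond threshold and is reported in half-millisecond units.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t mono_elapsed = 3000000;  /* ns of wall-clock time since last poll */
    uint64_t cpu_elapsed  = 1000000;  /* ns of CPU time actually consumed */
    int64_t  stolen = mono_elapsed - cpu_elapsed;

    if (stolen >= 500000)  /* more than half a millisecond: likely preempted */
        printf("stolen: %lld half-ms units\n", (long long)(stolen / 500000));
    return 0;
}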
520 | | |
521 | | /* returns the current date as returned by gettimeofday() in ISO+microsecond |
522 | | * format. It uses a thread-local static variable that the reader can consume |
523 | | * for as long as it wants until next call. Thus, do not call it from a signal |
524 | | * handler. If <pad> is non-0, a trailing space will be added. It will always |
525 | | * return exactly 32 or 33 characters (depending on padding) and will always be |
526 | | * zero-terminated, thus it will always fit into a 34-byte buffer.
527 | | * This also always includes the local timezone (in +/-HH:mm format).
528 | | */ |
529 | | char *timeofday_as_iso_us(int pad) |
530 | 0 | { |
531 | 0 | struct timeval new_date; |
532 | 0 | struct tm tm; |
533 | 0 | const char *offset; |
534 | 0 | char c; |
535 | |
536 | 0 | gettimeofday(&new_date, NULL); |
537 | 0 | if (new_date.tv_sec != iso_time_sec || !new_date.tv_sec) { |
538 | 0 | get_localtime(new_date.tv_sec, &tm); |
539 | 0 | offset = get_gmt_offset(new_date.tv_sec, &tm); |
540 | 0 | if (unlikely(strftime(iso_time_str, sizeof(iso_time_str), "%Y-%m-%dT%H:%M:%S.000000+00:00", &tm) != 32)) |
541 | 0 | strlcpy2(iso_time_str, "YYYY-mm-ddTHH:MM:SS.000000-00:00", sizeof(iso_time_str)); // make the failure visible but respect format. |
542 | 0 | iso_time_str[26] = offset[0]; |
543 | 0 | iso_time_str[27] = offset[1]; |
544 | 0 | iso_time_str[28] = offset[2]; |
545 | 0 | iso_time_str[30] = offset[3]; |
546 | 0 | iso_time_str[31] = offset[4]; |
547 | 0 | iso_time_sec = new_date.tv_sec; |
548 | 0 | } |
549 | | |
550 | | /* utoa_pad adds a trailing 0 so we save the char for restore */ |
551 | 0 | c = iso_time_str[26]; |
552 | 0 | utoa_pad(new_date.tv_usec, iso_time_str + 20, 7); |
553 | 0 | iso_time_str[26] = c; |
554 | 0 | if (pad) { |
555 | 0 | iso_time_str[32] = ' '; |
556 | 0 | iso_time_str[33] = 0; |
557 | 0 | } |
558 | 0 | return iso_time_str; |
559 | 0 | } |
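A hedged usage sketch (the emit_timestamped_line helper is illustrative; the prototype is assumed to come from haproxy/clock.h): since the returned buffer is thread-local and reused by the next call, it must be consumed or copied before calling the function again, and never from a signal handler.

#include <stdio.h>
#include <unistd.h>

#include <haproxy/clock.h>

/* emit a timestamped line; the returned pointer stays valid and unchanged
 * until the same thread calls timeofday_as_iso_us() again
 */
static void emit_timestamped_line(const char *msg)
{
    /* pad=1 appends a trailing space, so no extra separator is needed */
    printf("%s%s (pid %d)\n", timeofday_as_iso_us(1), msg, (int)getpid());
}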