/src/libevent/evutil_time.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson |
3 | | * |
4 | | * Redistribution and use in source and binary forms, with or without |
5 | | * modification, are permitted provided that the following conditions |
6 | | * are met: |
7 | | * 1. Redistributions of source code must retain the above copyright |
8 | | * notice, this list of conditions and the following disclaimer. |
9 | | * 2. Redistributions in binary form must reproduce the above copyright |
10 | | * notice, this list of conditions and the following disclaimer in the |
11 | | * documentation and/or other materials provided with the distribution. |
12 | | * 3. The name of the author may not be used to endorse or promote products |
13 | | * derived from this software without specific prior written permission. |
14 | | * |
15 | | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | | */ |
26 | | |
27 | | #include "event2/event-config.h" |
28 | | #include "evconfig-private.h" |
29 | | |
30 | | #ifdef _WIN32 |
31 | | #include <winsock2.h> |
32 | | #define WIN32_LEAN_AND_MEAN |
33 | | #include <windows.h> |
34 | | #undef WIN32_LEAN_AND_MEAN |
35 | | #endif |
36 | | |
37 | | #include <sys/types.h> |
38 | | #ifdef EVENT__HAVE_STDLIB_H |
39 | | #include <stdlib.h> |
40 | | #endif |
41 | | #include <errno.h> |
42 | | #include <limits.h> |
43 | | #ifndef EVENT__HAVE_GETTIMEOFDAY |
44 | | #include <sys/timeb.h> |
45 | | #endif |
46 | | #if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \ |
47 | | !defined(_WIN32) |
48 | | #include <sys/select.h> |
49 | | #endif |
50 | | #include <time.h> |
51 | | #include <sys/stat.h> |
52 | | #include <string.h> |
53 | | |
54 | | /** evutil_usleep_() */ |
55 | | #if defined(_WIN32) |
56 | | #elif defined(EVENT__HAVE_NANOSLEEP) |
57 | | #elif defined(EVENT__HAVE_USLEEP) |
58 | | #include <unistd.h> |
59 | | #endif |
60 | | |
61 | | #include "event2/util.h" |
62 | | #include "util-internal.h" |
63 | | #include "log-internal.h" |
64 | | #include "mm-internal.h" |
65 | | |
66 | | #ifndef EVENT__HAVE_GETTIMEOFDAY |
67 | | /* No gettimeofday; this must be windows. */ |
68 | | |
69 | | typedef void (WINAPI *GetSystemTimePreciseAsFileTime_fn_t) (LPFILETIME); |
70 | | |
/* gettimeofday() replacement for Windows: converts the system FILETIME
 * clock (100-nanosecond units since 1 January 1601 UTC) into a
 * Unix-epoch struct timeval.  Prefers GetSystemTimePreciseAsFileTime()
 * when the running kernel32.dll exports it, otherwise falls back to
 * GetSystemTimeAsFileTime().  The tz argument is ignored.
 * Returns 0 on success, -1 if tv is NULL or the reported time precedes
 * the Unix epoch. */
int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	/* Resolved at most once, on the first call (see check_precise). */
	static GetSystemTimePreciseAsFileTime_fn_t GetSystemTimePreciseAsFileTime_fn = NULL;
	static int check_precise = 1;

#ifdef _MSC_VER
#define U64_LITERAL(n) n##ui64
#else
#define U64_LITERAL(n) n##llu
#endif

	/* Conversion logic taken from Tor, which in turn took it
	 * from Perl. GetSystemTimeAsFileTime returns its value as
	 * an unaligned (!) 64-bit value containing the number of
	 * 100-nanosecond intervals since 1 January 1601 UTC. */
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
#define UNITS_PER_SEC U64_LITERAL(10000000)
#define USEC_PER_SEC U64_LITERAL(1000000)
#define UNITS_PER_USEC U64_LITERAL(10)
	/* Union lets us read the FILETIME as one 64-bit integer. */
	union {
		FILETIME ft_ft;
		ev_uint64_t ft_64;
	} ft;

	if (tv == NULL)
		return -1;

	/* One-time dynamic lookup of the precise clock; the function
	 * pointer stays NULL if it is unavailable. */
	if (EVUTIL_UNLIKELY(check_precise)) {
		HMODULE h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
		if (h != NULL)
			GetSystemTimePreciseAsFileTime_fn =
				(GetSystemTimePreciseAsFileTime_fn_t)
				GetProcAddress(h, "GetSystemTimePreciseAsFileTime");
		check_precise = 0;
	}

	if (GetSystemTimePreciseAsFileTime_fn != NULL)
		GetSystemTimePreciseAsFileTime_fn(&ft.ft_ft);
	else
		GetSystemTimeAsFileTime(&ft.ft_ft);

	if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
		/* Time before the unix epoch. */
		return -1;
	}
	/* Shift origin to 1970, then split into seconds and microseconds. */
	ft.ft_64 -= EPOCH_BIAS;
	tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
	tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
	return 0;
}
122 | | #endif |
123 | | |
/* Largest tv_sec whose millisecond conversion still fits in a long,
 * leaving room for the (up to 1000 msec) microsecond contribution. */
#define MAX_SECONDS_IN_MSEC_LONG \
	(((LONG_MAX) - 999) / 1000)

/* Convert *tv to a count of milliseconds, rounding microseconds up.
 * Returns -1 when the input is out of range (tv_usec above one second,
 * or tv_sec too large for the result to fit in a long). */
long
evutil_tv_to_msec_(const struct timeval *tv)
{
	long msec;

	if (tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG || tv->tv_usec > 1000000)
		return -1;

	msec = tv->tv_sec * 1000;
	msec += (tv->tv_usec + 999) / 1000;	/* round partial msec up */
	return msec;
}
135 | | |
/*
  Replacement for usleep on platforms that don't have one.  Not guaranteed to
  be any more finegrained than 1 msec.  Sleeps for the duration given in
  *tv; a NULL tv is a no-op.  The mechanism is chosen at compile time:
  a waitable timer on Windows, nanosleep() or sleep()+usleep() where
  available, and a timeout-only select() as the last resort.
*/
void
evutil_usleep_(const struct timeval *tv)
{
	if (!tv)
		return;
#if defined(_WIN32)
	{
		/* One-shot waitable timer; a negative due time means a
		 * relative interval, expressed in 100-nanosecond units. */
		__int64 usec;
		LARGE_INTEGER li;
		HANDLE timer;

		usec = tv->tv_sec * 1000000LL + tv->tv_usec;
		if (!usec)
			return;

		li.QuadPart = -10LL * usec;
		timer = CreateWaitableTimer(NULL, TRUE, NULL);
		if (!timer)
			return;

		SetWaitableTimer(timer, &li, 0, NULL, NULL, 0);
		WaitForSingleObject(timer, INFINITE);
		CloseHandle(timer);
	}
#elif defined(EVENT__HAVE_NANOSLEEP)
	{
		struct timespec ts;
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec*1000;
		/* Remaining time after an interruption is discarded. */
		nanosleep(&ts, NULL);
	}
#elif defined(EVENT__HAVE_USLEEP)
	/* Some systems don't like to usleep more than 999999 usec */
	sleep(tv->tv_sec);
	usleep(tv->tv_usec);
#else
	{
		/* select() with no fds simply blocks until the timeout.
		 * Copy tv first: select() may modify its timeout argument. */
		struct timeval tv2 = *tv;
		select(0, NULL, NULL, NULL, &tv2);
	}
#endif
}
182 | | |
/* Format `tm` as an RFC 1123 date string, e.g.
 * "Sun, 06 Nov 1994 08:49:37 GMT".  When tm is NULL, the current UTC
 * time is formatted instead.  Writes at most datelen bytes into date
 * and returns the result of evutil_snprintf(). */
int
evutil_date_rfc1123(char *date, const size_t datelen, const struct tm *tm)
{
	static const char *DAYS[] =
	    { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
	static const char *MONTHS[] =
	    { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };

	time_t now = time(NULL);

#if defined(EVENT__HAVE__GMTIME64_S) || !defined(_WIN32)
	struct tm gm;
#endif

	/* No caller-supplied broken-down time: use the current UTC time,
	 * via whichever gmtime variant this platform provides. */
	if (tm == NULL) {
#if !defined(_WIN32)
		gmtime_r(&now, &gm);
		tm = &gm;
#elif defined(EVENT__HAVE__GMTIME64_S)
		errno_t err;
		err = _gmtime64_s(&gm, &now);
		if (err) {
			event_errx(1, "Invalid argument to _gmtime64_s");
		} else {
			tm = &gm;
		}
#elif defined(EVENT__HAVE__GMTIME64)
		tm = _gmtime64(&now);
#else
		tm = gmtime(&now);
#endif
	}

	return evutil_snprintf(
	    date, datelen, "%s, %02d %s %4d %02d:%02d:%02d GMT",
	    DAYS[tm->tm_wday], tm->tm_mday, MONTHS[tm->tm_mon],
	    1900 + tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec);
}
223 | | |
224 | | /* |
225 | | This function assumes it's called repeatedly with a |
226 | | not-actually-so-monotonic time source whose outputs are in 'tv'. It |
227 | | implements a trivial ratcheting mechanism so that the values never go |
228 | | backwards. |
229 | | */ |
230 | | static void |
231 | | adjust_monotonic_time(struct evutil_monotonic_timer *base, |
232 | | struct timeval *tv) |
233 | 0 | { |
234 | 0 | evutil_timeradd(tv, &base->adjust_monotonic_clock, tv); |
235 | |
|
236 | 0 | if (evutil_timercmp(tv, &base->last_time, <)) { |
237 | | /* Guess it wasn't monotonic after all. */ |
238 | 0 | struct timeval adjust; |
239 | 0 | evutil_timersub(&base->last_time, tv, &adjust); |
240 | 0 | evutil_timeradd(&adjust, &base->adjust_monotonic_clock, |
241 | 0 | &base->adjust_monotonic_clock); |
242 | 0 | *tv = base->last_time; |
243 | 0 | } |
244 | 0 | base->last_time = *tv; |
245 | 0 | } |
246 | | |
247 | | /* |
248 | | Allocate a new struct evutil_monotonic_timer |
249 | | */ |
250 | | struct evutil_monotonic_timer * |
251 | | evutil_monotonic_timer_new(void) |
252 | 0 | { |
253 | 0 | struct evutil_monotonic_timer *p = NULL; |
254 | |
|
255 | 0 | p = mm_malloc(sizeof(*p)); |
256 | 0 | if (!p) goto done; |
257 | | |
258 | 0 | memset(p, 0, sizeof(*p)); |
259 | |
|
260 | 0 | done: |
261 | 0 | return p; |
262 | 0 | } |
263 | | |
264 | | /* |
265 | | Free a struct evutil_monotonic_timer |
266 | | */ |
267 | | void |
268 | | evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer) |
269 | 0 | { |
270 | 0 | if (timer) { |
271 | 0 | mm_free(timer); |
272 | 0 | } |
273 | 0 | } |
274 | | |
/*
  Set up a struct evutil_monotonic_timer for initial use.  Public
  wrapper around the per-platform evutil_configure_monotonic_time_();
  `flags` takes the EV_MONOT_* bits and is passed through unchanged.
*/
int
evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
    int flags)
{
	return evutil_configure_monotonic_time_(timer, flags);
}
284 | | |
/*
  Query the current monotonic time into *tp.  Public wrapper around the
  per-platform evutil_gettime_monotonic_(); returns 0 on success and
  -1 on failure, as the underlying implementation does.
*/
int
evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
    struct timeval *tp)
{
	return evutil_gettime_monotonic_(timer, tp);
}
294 | | |
295 | | |
296 | | #if defined(HAVE_POSIX_MONOTONIC) |
297 | | /* ===== |
298 | | The POSIX clock_gettime() interface provides a few ways to get at a |
299 | | monotonic clock. CLOCK_MONOTONIC is most widely supported. Linux also |
300 | | provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec. |
301 | | |
302 | | On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic. |
303 | | Platforms don't agree about whether it should jump on a sleep/resume. |
304 | | */ |
305 | | |
306 | | int |
307 | | evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, |
308 | | int flags) |
309 | 0 | { |
310 | | /* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris. You need to |
311 | | * check for it at runtime, because some older kernel versions won't |
312 | | * have it working. */ |
313 | 0 | #ifdef CLOCK_MONOTONIC_COARSE |
314 | 0 | const int precise = flags & EV_MONOT_PRECISE; |
315 | 0 | #endif |
316 | 0 | const int fallback = flags & EV_MONOT_FALLBACK; |
317 | 0 | struct timespec ts; |
318 | |
|
319 | 0 | memset(base, 0, sizeof(*base)); |
320 | |
|
321 | 0 | #ifdef CLOCK_MONOTONIC_COARSE |
322 | 0 | if (CLOCK_MONOTONIC_COARSE < 0) { |
323 | | /* Technically speaking, nothing keeps CLOCK_* from being |
324 | | * negative (as far as I know). This check and the one below |
325 | | * make sure that it's safe for us to use -1 as an "unset" |
326 | | * value. */ |
327 | 0 | event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0"); |
328 | 0 | } |
329 | 0 | if (! precise && ! fallback) { |
330 | 0 | if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) { |
331 | 0 | base->monotonic_clock = CLOCK_MONOTONIC_COARSE; |
332 | 0 | return 0; |
333 | 0 | } |
334 | 0 | } |
335 | 0 | #endif |
336 | 0 | if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) { |
337 | 0 | base->monotonic_clock = CLOCK_MONOTONIC; |
338 | 0 | return 0; |
339 | 0 | } |
340 | | |
341 | 0 | if (CLOCK_MONOTONIC < 0) { |
342 | 0 | event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0"); |
343 | 0 | } |
344 | | |
345 | 0 | base->monotonic_clock = -1; |
346 | 0 | return 0; |
347 | 0 | } |
348 | | |
349 | | int |
350 | | evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, |
351 | | struct timeval *tp) |
352 | 0 | { |
353 | 0 | struct timespec ts; |
354 | |
|
355 | 0 | if (base->monotonic_clock < 0) { |
356 | 0 | if (evutil_gettimeofday(tp, NULL) < 0) |
357 | 0 | return -1; |
358 | 0 | adjust_monotonic_time(base, tp); |
359 | 0 | return 0; |
360 | 0 | } |
361 | | |
362 | 0 | if (clock_gettime(base->monotonic_clock, &ts) == -1) |
363 | 0 | return -1; |
364 | 0 | tp->tv_sec = ts.tv_sec; |
365 | 0 | tp->tv_usec = ts.tv_nsec / 1000; |
366 | |
|
367 | 0 | return 0; |
368 | 0 | } |
369 | | #endif |
370 | | |
371 | | #if defined(HAVE_MACH_MONOTONIC) |
372 | | /* ====== |
373 | | Apple is a little late to the POSIX party. And why not? Instead of |
374 | | clock_gettime(), they provide mach_absolute_time(). Its units are not |
375 | | fixed; we need to use mach_timebase_info() to get the right functions to |
376 | | convert its units into nanoseconds. |
377 | | |
378 | | To all appearances, mach_absolute_time() seems to be honest-to-goodness |
379 | | monotonic. Whether it stops during sleep or not is unspecified in |
380 | | principle, and dependent on CPU architecture in practice. |
381 | | */ |
382 | | |
/* Mach setup: probe mach_absolute_time() and record the timebase needed
 * to convert its readings to microseconds.  With EV_MONOT_FALLBACK set
 * (or if the probe fails), numer is left 0 so the read path falls back
 * to a ratcheted gettimeofday().  Always returns 0. */
int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct mach_timebase_info mi;
	memset(base, 0, sizeof(*base));
	/* OSX has mach_absolute_time() */
	if (!fallback &&
	    mach_timebase_info(&mi) == 0 &&
	    mach_absolute_time() != 0) {
		/* mach_timebase_info tells us how to convert
		 * mach_absolute_time() into nanoseconds, but we
		 * want to use microseconds instead.
		 * NOTE(review): assumes mi.denom * 1000 fits in the
		 * denom field's type -- confirm for exotic timebases. */
		mi.denom *= 1000;
		memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
	} else {
		/* numer == 0 is the sentinel for "use gettimeofday()". */
		base->mach_timebase_units.numer = 0;
	}
	return 0;
}
404 | | |
/* Mach read: scale mach_absolute_time() ticks to microseconds using the
 * timebase recorded at configure time, or ratchet gettimeofday() when
 * no timebase is available.  Returns 0 on success, -1 on
 * gettimeofday() failure. */
int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t abstime, usec;
	/* numer == 0: probing failed or fallback requested at configure. */
	if (base->mach_timebase_units.numer == 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	abstime = mach_absolute_time();
	/* NOTE(review): abstime * numer can wrap for very large uptimes or
	 * unusual timebases -- confirm the acceptable range. */
	usec = (abstime * base->mach_timebase_units.numer)
	    / (base->mach_timebase_units.denom);
	tp->tv_sec = usec / 1000000;
	tp->tv_usec = usec % 1000000;

	return 0;
}
425 | | #endif |
426 | | |
427 | | #if defined(HAVE_WIN32_MONOTONIC) |
428 | | /* ===== |
   Turn we now to Windows. Want monotonic time on Windows?
430 | | |
   Windows has QueryPerformanceCounter(), which gives the most high-
   resolution time. It's a pity it's not so monotonic in practice; it's
433 | | also got some fun bugs, especially: with older Windowses, under |
434 | | virtualizations, with funny hardware, on multiprocessor systems, and so |
435 | | on. PEP418 [1] has a nice roundup of the issues here. |
436 | | |
437 | | There's GetTickCount64() on Vista and later, which gives a number of 1-msec |
438 | | ticks since startup. The accuracy here might be as bad as 10-20 msec, I |
439 | | hear. There's an undocumented function (NtSetTimerResolution) that |
440 | | allegedly increases the accuracy. Good luck! |
441 | | |
442 | | There's also GetTickCount(), which is only 32 bits, but seems to be |
443 | | supported on pre-Vista versions of Windows. Apparently, you can coax |
444 | | another 14 bits out of it, giving you 2231 years before rollover. |
445 | | |
446 | | The less said about timeGetTime() the better. |
447 | | |
448 | | "We don't care. We don't have to. We're the Phone Company." |
449 | | -- Lily Tomlin, SNL |
450 | | |
451 | | Our strategy, if precise timers are turned off, is to just use the best |
452 | | GetTickCount equivalent available. If we've been asked for precise timing, |
453 | | then we mostly[2] assume that GetTickCount is monotonic, and correct |
454 | | GetPerformanceCounter to approximate it. |
455 | | |
456 | | [1] http://www.python.org/dev/peps/pep-0418 |
457 | | [2] Of course, we feed the Windows stuff into adjust_monotonic_time() |
458 | | anyway, just in case it isn't. |
459 | | |
460 | | */ |
461 | | /* |
462 | | Parts of our logic in the win32 timer code here are closely based on |
463 | | BitTorrent's libUTP library. That code is subject to the following |
464 | | license: |
465 | | |
466 | | Copyright (c) 2010 BitTorrent, Inc. |
467 | | |
468 | | Permission is hereby granted, free of charge, to any person obtaining a |
469 | | copy of this software and associated documentation files (the |
470 | | "Software"), to deal in the Software without restriction, including |
471 | | without limitation the rights to use, copy, modify, merge, publish, |
472 | | distribute, sublicense, and/or sell copies of the Software, and to |
473 | | permit persons to whom the Software is furnished to do so, subject to |
474 | | the following conditions: |
475 | | |
476 | | The above copyright notice and this permission notice shall be included |
477 | | in all copies or substantial portions of the Software. |
478 | | |
479 | | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
480 | | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
481 | | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
482 | | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE |
483 | | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION |
484 | | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION |
485 | | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
486 | | */ |
487 | | |
/* Return a 64-bit count of milliseconds since system startup, using the
 * best tick source recorded in `base` at configure time. */
static ev_uint64_t
evutil_GetTickCount_(struct evutil_monotonic_timer *base)
{
	if (base->GetTickCount64_fn) {
		/* Let's just use GetTickCount64 if we can. */
		return base->GetTickCount64_fn();
	} else if (base->GetTickCount_fn) {
		/* Greg Hazel assures me that this works, that BitTorrent has
		 * done it for years, and that it won't turn around and
		 * bite us. He says they found it on some game programmers'
		 * forum some time around 2007.
		 * NOTE(review): relies on undocumented upper bits of the
		 * dynamically resolved GetTickCount -- see the note above. */
		ev_uint64_t v = base->GetTickCount_fn();
		return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
	} else {
		/* Here's the fallback implementation. We have to use
		 * GetTickCount() with its given signature, so we only get
		 * 32 bits worth of milliseconds, which will roll over every
		 * 49 days or so. */
		DWORD ticks = GetTickCount();
		/* Detect 32-bit wraparound and carry it into the adjustment
		 * so the returned value keeps increasing. */
		if (ticks < base->last_tick_count) {
			base->adjust_tick_count += ((ev_uint64_t)1) << 32;
		}
		base->last_tick_count = ticks;
		return ticks + base->adjust_tick_count;
	}
}
515 | | |
/* Windows setup: resolve the best tick-count function from kernel32
 * and, when EV_MONOT_PRECISE is requested, calibrate
 * QueryPerformanceCounter against the tick count.  With
 * EV_MONOT_FALLBACK set, only the plain GetTickCount() path is used.
 * Always returns 0. */
int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int precise = flags & EV_MONOT_PRECISE;
	const int fallback = flags & EV_MONOT_FALLBACK;
	HANDLE h;
	memset(base, 0, sizeof(*base));

	h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
	if (h != NULL && !fallback) {
		/* Either lookup may yield NULL (GetTickCount64 exists only on
		 * Vista and later); evutil_GetTickCount_() handles that. */
		base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
		base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
	}

	base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
	if (precise && !fallback) {
		LARGE_INTEGER freq;
		if (QueryPerformanceFrequency(&freq)) {
			/* Record the QPC origin and its usec-per-tick scale
			 * for the read path. */
			LARGE_INTEGER counter;
			QueryPerformanceCounter(&counter);
			base->first_counter = counter.QuadPart;
			base->usec_per_count = 1.0e6 / freq.QuadPart;
			base->use_performance_counter = 1;
		}
	}

	return 0;
}
545 | | |
546 | | static inline ev_int64_t |
547 | | abs64(ev_int64_t i) |
548 | | { |
549 | | return i < 0 ? -i : i; |
550 | | } |
551 | | |
552 | | |
/* Windows read: fill *tp with the current monotonic time.  When the
 * performance counter is enabled, QPC supplies the resolution but is
 * re-anchored to the tick count whenever the two drift more than one
 * second apart; the result is then ratcheted by
 * adjust_monotonic_time().  Always returns 0. */
int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t ticks = evutil_GetTickCount_(base);
	if (base->use_performance_counter) {
		/* Here's a trick we took from BitTorrent's libutp, at Greg
		 * Hazel's recommendation. We use QueryPerformanceCounter for
		 * our high-resolution timer, but use GetTickCount*() to keep
		 * it sane, and adjust_monotonic_time() to keep it monotonic.
		 */
		LARGE_INTEGER counter;
		ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
		QueryPerformanceCounter(&counter);
		counter_elapsed = (ev_int64_t)
		    (counter.QuadPart - base->first_counter);
		ticks_elapsed = ticks - base->first_tick;
		/* TODO: This may upset VC6. If you need this to work with
		 * VC6, please supply an appropriate patch. */
		counter_usec_elapsed = (ev_int64_t)
		    (counter_elapsed * base->usec_per_count);

		if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
			/* It appears that the QueryPerformanceCounter()
			 * result is more than 1 second away from
			 * GetTickCount() result. Let's adjust it to be as
			 * accurate as we can; adjust_monotonic_time() below
			 * will keep it monotonic. */
			counter_usec_elapsed = ticks_elapsed * 1000;
			/* Move the recorded QPC origin so that future
			 * readings agree with the tick count. */
			base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
		}
		tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
		tp->tv_usec = counter_usec_elapsed % 1000000;

	} else {
		/* We're just using GetTickCount(). */
		tp->tv_sec = (time_t) (ticks / 1000);
		tp->tv_usec = (ticks % 1000) * 1000;
	}
	adjust_monotonic_time(base, tp);

	return 0;
}
596 | | #endif |
597 | | |
598 | | #if defined(HAVE_FALLBACK_MONOTONIC) |
599 | | /* ===== |
600 | | And if none of the other options work, let's just use gettimeofday(), and |
601 | | ratchet it forward so that it acts like a monotonic timer, whether it |
602 | | wants to or not. |
603 | | */ |
604 | | |
/* Fallback setup: there is no monotonic clock to probe, so just zero
 * the timer state; evutil_gettime_monotonic_() will ratchet
 * gettimeofday() instead.  The `precise` argument is accepted for
 * interface compatibility but ignored.  Always returns 0. */
int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int precise)
{
	memset(base, 0, sizeof(*base));
	return 0;
}
612 | | |
613 | | int |
614 | | evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, |
615 | | struct timeval *tp) |
616 | | { |
617 | | if (evutil_gettimeofday(tp, NULL) < 0) |
618 | | return -1; |
619 | | adjust_monotonic_time(base, tp); |
620 | | return 0; |
621 | | |
622 | | } |
623 | | #endif |