/src/libevent/evutil_time.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson |
3 | | * |
4 | | * Redistribution and use in source and binary forms, with or without |
5 | | * modification, are permitted provided that the following conditions |
6 | | * are met: |
7 | | * 1. Redistributions of source code must retain the above copyright |
8 | | * notice, this list of conditions and the following disclaimer. |
9 | | * 2. Redistributions in binary form must reproduce the above copyright |
10 | | * notice, this list of conditions and the following disclaimer in the |
11 | | * documentation and/or other materials provided with the distribution. |
12 | | * 3. The name of the author may not be used to endorse or promote products |
13 | | * derived from this software without specific prior written permission. |
14 | | * |
15 | | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | | */ |
26 | | |
27 | | #include "event2/event-config.h" |
28 | | #include "evconfig-private.h" |
29 | | |
30 | | #ifdef _WIN32 |
31 | | #include <winsock2.h> |
32 | | #define WIN32_LEAN_AND_MEAN |
33 | | #include <windows.h> |
34 | | #undef WIN32_LEAN_AND_MEAN |
35 | | #endif |
36 | | |
37 | | #include <sys/types.h> |
38 | | #ifdef EVENT__HAVE_STDLIB_H |
39 | | #include <stdlib.h> |
40 | | #endif |
41 | | #include <errno.h> |
42 | | #include <limits.h> |
43 | | #ifndef EVENT__HAVE_GETTIMEOFDAY |
44 | | #include <sys/timeb.h> |
45 | | #endif |
46 | | #if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \ |
47 | | !defined(_WIN32) |
48 | | #include <sys/select.h> |
49 | | #endif |
50 | | #include <time.h> |
51 | | #include <sys/stat.h> |
52 | | #include <string.h> |
53 | | |
54 | | /** evutil_usleep_() */ |
55 | | #if defined(_WIN32) |
56 | | #elif defined(EVENT__HAVE_NANOSLEEP) |
57 | | #elif defined(EVENT__HAVE_USLEEP) |
58 | | #include <unistd.h> |
59 | | #endif |
60 | | |
61 | | #include "event2/util.h" |
62 | | #include "util-internal.h" |
63 | | #include "log-internal.h" |
64 | | #include "mm-internal.h" |
65 | | |
66 | | #ifndef EVENT__HAVE_GETTIMEOFDAY |
67 | | /* No gettimeofday; this must be Windows. */ |
68 | | int |
69 | | evutil_gettimeofday(struct timeval *tv, struct timezone *tz) |
70 | | { |
71 | | #ifdef _MSC_VER |
72 | | #define U64_LITERAL(n) n##ui64 |
73 | | #else |
74 | | #define U64_LITERAL(n) n##llu |
75 | | #endif |
76 | | |
77 | | /* Conversion logic taken from Tor, which in turn took it |
78 | | * from Perl. GetSystemTimeAsFileTime returns its value as |
79 | | * an unaligned (!) 64-bit value containing the number of |
80 | | * 100-nanosecond intervals since 1 January 1601 UTC. */ |
81 | | #define EPOCH_BIAS U64_LITERAL(116444736000000000) |
82 | | #define UNITS_PER_SEC U64_LITERAL(10000000) |
83 | | #define USEC_PER_SEC U64_LITERAL(1000000) |
84 | | #define UNITS_PER_USEC U64_LITERAL(10) |
85 | | union { |
86 | | FILETIME ft_ft; |
87 | | ev_uint64_t ft_64; |
88 | | } ft; |
89 | | |
90 | | if (tv == NULL) |
91 | | return -1; |
92 | | |
93 | | GetSystemTimeAsFileTime(&ft.ft_ft); |
94 | | |
95 | | if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) { |
96 | | /* Time before the unix epoch. */ |
97 | | return -1; |
98 | | } |
99 | | ft.ft_64 -= EPOCH_BIAS; |
100 | | tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC); |
101 | | tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC); |
102 | | return 0; |
103 | | } |
104 | | #endif |
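
As a standalone sketch of the same epoch-bias arithmetic (the sample FILETIME value below is a hypothetical input, not anything produced by the code above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 100-ns units since 1601-01-01 UTC; this sample value corresponds
	 * to Unix time 1234567890 (2009-02-13 23:31:30 UTC). */
	uint64_t ft64 = 116444736000000000ULL          /* EPOCH_BIAS */
	    + 1234567890ULL * 10000000ULL;             /* UNITS_PER_SEC */
	uint64_t since_epoch = ft64 - 116444736000000000ULL;
	printf("tv_sec=%llu tv_usec=%llu\n",
	    (unsigned long long)(since_epoch / 10000000),
	    (unsigned long long)((since_epoch / 10) % 1000000));
	return 0;
}
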
105 | | |
106 | | #define MAX_SECONDS_IN_MSEC_LONG \ |
107 | 0 | (((LONG_MAX) - 999) / 1000) |
108 | | |
109 | | long |
110 | | evutil_tv_to_msec_(const struct timeval *tv) |
111 | 0 | { |
112 | 0 | if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG) |
113 | 0 | return -1; |
114 | 0 | |
115 | 0 | return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000); |
116 | 0 | } |
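
A minimal sketch of the same arithmetic, inlined so it compiles outside the tree (tv_to_msec here is an illustrative copy, not the library function):

#include <limits.h>
#include <stdio.h>

/* Illustrative copy of the arithmetic in evutil_tv_to_msec_ above. */
static long tv_to_msec(long sec, long usec)
{
	if (usec > 1000000 || sec > (LONG_MAX - 999) / 1000)
		return -1;
	return sec * 1000 + (usec + 999) / 1000;
}

int main(void)
{
	printf("%ld\n", tv_to_msec(1, 1));        /* 1001: usec rounds up */
	printf("%ld\n", tv_to_msec(0, 999999));   /* 1000 */
	printf("%ld\n", tv_to_msec(LONG_MAX, 0)); /* -1: would overflow */
	return 0;
}
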
117 | | |
118 | | /* |
119 | | Replacement for usleep on platforms that don't have one. Not guaranteed to |
120 | | be any more fine-grained than 1 msec. |
121 | | */ |
122 | | void |
123 | | evutil_usleep_(const struct timeval *tv) |
124 | 0 | { |
125 | 0 | if (!tv) |
126 | 0 | return; |
127 | | #if defined(_WIN32) |
128 | | { |
129 | | long msec = evutil_tv_to_msec_(tv); |
130 | | Sleep((DWORD)msec); |
131 | | } |
132 | | #elif defined(EVENT__HAVE_NANOSLEEP) |
133 | 0 | { |
134 | 0 | struct timespec ts; |
135 | 0 | ts.tv_sec = tv->tv_sec; |
136 | 0 | ts.tv_nsec = tv->tv_usec*1000; |
137 | 0 | nanosleep(&ts, NULL); |
138 | 0 | } |
139 | | #elif defined(EVENT__HAVE_USLEEP) |
140 | | /* Some systems don't like to usleep more than 999999 usec */ |
141 | | sleep(tv->tv_sec); |
142 | | usleep(tv->tv_usec); |
143 | | #else |
144 | | { |
145 | | struct timeval tv2 = *tv; |
146 | | select(0, NULL, NULL, NULL, &tv2); |
147 | | } |
148 | | #endif |
149 | | } |
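
The select()-based fallback at the end works as a standalone sub-second sleep on POSIX systems; a minimal sketch:

#include <sys/select.h>
#include <stdio.h>

int main(void)
{
	struct timeval tv = { 1, 500000 };  /* 1.5 seconds */
	select(0, NULL, NULL, NULL, &tv);   /* no fds: acts as a pure timeout */
	puts("done");
	return 0;
}
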
150 | | |
151 | | int |
152 | | evutil_date_rfc1123(char *date, const size_t datelen, const struct tm *tm) |
153 | 0 | { |
154 | 0 | static const char *DAYS[] = |
155 | 0 | { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; |
156 | 0 | static const char *MONTHS[] = |
157 | 0 | { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; |
158 | 0 |
159 | 0 | time_t t = time(NULL); |
160 | 0 |
161 | 0 | #ifndef _WIN32 |
162 | 0 | struct tm sys; |
163 | 0 | #endif |
164 | 0 |
165 | 0 | /* If `tm` is NULL, use the system's current time. */ |
166 | 0 | if (tm == NULL) { |
167 | | #ifdef _WIN32 |
168 | | /** TODO: detect _gmtime64()/_gmtime64_s() */ |
169 | | tm = gmtime(&t); |
170 | | #else |
171 | | gmtime_r(&t, &sys); |
172 | 0 | tm = &sys; |
173 | 0 | #endif |
174 | 0 | } |
175 | 0 |
176 | 0 | return evutil_snprintf( |
177 | 0 | date, datelen, "%s, %02d %s %4d %02d:%02d:%02d GMT", |
178 | 0 | DAYS[tm->tm_wday], tm->tm_mday, MONTHS[tm->tm_mon], |
179 | 0 | 1900 + tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec); |
180 | 0 | } |
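
A usage sketch, assuming a libevent build that exports evutil_date_rfc1123 from event2/util.h (passing NULL for the tm argument means the current UTC time, as above):

#include <event2/util.h>
#include <stdio.h>

int main(void)
{
	char buf[30]; /* "Sun, 06 Nov 1994 08:49:37 GMT" plus NUL */
	evutil_date_rfc1123(buf, sizeof(buf), NULL); /* NULL: current UTC time */
	printf("%s\n", buf);
	return 0;
}
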
181 | | |
182 | | /* |
183 | | This function assumes it's called repeatedly with a |
184 | | not-actually-so-monotonic time source whose outputs are in 'tv'. It |
185 | | implements a trivial ratcheting mechanism so that the values never go |
186 | | backwards. |
187 | | */ |
188 | | static void |
189 | | adjust_monotonic_time(struct evutil_monotonic_timer *base, |
190 | | struct timeval *tv) |
191 | 0 | { |
192 | 0 | evutil_timeradd(tv, &base->adjust_monotonic_clock, tv); |
193 | 0 |
194 | 0 | if (evutil_timercmp(tv, &base->last_time, <)) { |
195 | 0 | /* Guess it wasn't monotonic after all. */ |
196 | 0 | struct timeval adjust; |
197 | 0 | evutil_timersub(&base->last_time, tv, &adjust); |
198 | 0 | evutil_timeradd(&adjust, &base->adjust_monotonic_clock, |
199 | 0 | &base->adjust_monotonic_clock); |
200 | 0 | *tv = base->last_time; |
201 | 0 | } |
202 | 0 | base->last_time = *tv; |
203 | 0 | } |
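
A self-contained replica of the ratchet, using doubles instead of struct timeval for clarity, to show how a backwards step is absorbed into the running adjustment:

#include <stdio.h>

static double last_time = 0.0, adjustment = 0.0;

static double ratchet(double raw)
{
	double t = raw + adjustment;
	if (t < last_time) {             /* clock went backwards: absorb it */
		adjustment += last_time - t;
		t = last_time;
	}
	return last_time = t;
}

int main(void)
{
	double samples[] = { 5.0, 4.8, 5.1 };  /* 4.8 jumps backwards */
	for (int i = 0; i < 3; i++)
		printf("%.1f -> %.1f\n", samples[i], ratchet(samples[i]));
	/* Output: 5.0 -> 5.0, 4.8 -> 5.0, 5.1 -> 5.3 */
	return 0;
}
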
204 | | |
205 | | /* |
206 | | Allocate a new struct evutil_monotonic_timer |
207 | | */ |
208 | | struct evutil_monotonic_timer * |
209 | | evutil_monotonic_timer_new(void) |
210 | 0 | { |
211 | 0 | struct evutil_monotonic_timer *p = NULL; |
212 | 0 |
213 | 0 | p = mm_malloc(sizeof(*p)); |
214 | 0 | if (!p) goto done; |
215 | 0 | |
216 | 0 | memset(p, 0, sizeof(*p)); |
217 | 0 |
218 | 0 | done: |
219 | 0 | return p; |
220 | 0 | } |
221 | | |
222 | | /* |
223 | | Free a struct evutil_monotonic_timer |
224 | | */ |
225 | | void |
226 | | evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer) |
227 | 0 | { |
228 | 0 | if (timer) { |
229 | 0 | mm_free(timer); |
230 | 0 | } |
231 | 0 | } |
232 | | |
233 | | /* |
234 | | Set up a struct evutil_monotonic_timer for initial use |
235 | | */ |
236 | | int |
237 | | evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer, |
238 | | int flags) |
239 | 0 | { |
240 | 0 | return evutil_configure_monotonic_time_(timer, flags); |
241 | 0 | } |
242 | | |
243 | | /* |
244 | | Query the current monotonic time |
245 | | */ |
246 | | int |
247 | | evutil_gettime_monotonic(struct evutil_monotonic_timer *timer, |
248 | | struct timeval *tp) |
249 | 0 | { |
250 | 0 | return evutil_gettime_monotonic_(timer, tp); |
251 | 0 | } |
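
A usage sketch of the public timer API declared in event2/util.h (flags may be 0, EV_MONOT_PRECISE, or EV_MONOT_FALLBACK):

#include <event2/util.h>
#include <stdio.h>

int main(void)
{
	struct timeval tv;
	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
	if (!timer)
		return 1;
	if (evutil_configure_monotonic_time(timer, 0) == 0 &&
	    evutil_gettime_monotonic(timer, &tv) == 0)
		printf("monotonic: %ld.%06ld\n",
		    (long)tv.tv_sec, (long)tv.tv_usec);
	evutil_monotonic_timer_free(timer);
	return 0;
}
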
252 | | |
253 | | |
254 | | #if defined(HAVE_POSIX_MONOTONIC) |
255 | | /* ===== |
256 | | The POSIX clock_gettime() interface provides a few ways to get at a |
257 | | monotonic clock. CLOCK_MONOTONIC is most widely supported. Linux also |
258 | | provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec. |
259 | | |
260 | | On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic. |
261 | | Platforms don't agree about whether it should jump on a sleep/resume. |
262 | | */ |
263 | | |
264 | | int |
265 | | evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, |
266 | | int flags) |
267 | 0 | { |
268 | 0 | /* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris. You need to |
269 | 0 | * check for it at runtime, because some older kernel versions won't |
270 | 0 | * have it working. */ |
271 | 0 | #ifdef CLOCK_MONOTONIC_COARSE |
272 | 0 | const int precise = flags & EV_MONOT_PRECISE; |
273 | 0 | #endif |
274 | 0 | const int fallback = flags & EV_MONOT_FALLBACK; |
275 | 0 | struct timespec ts; |
276 | 0 |
277 | 0 | #ifdef CLOCK_MONOTONIC_COARSE |
278 | 0 | if (CLOCK_MONOTONIC_COARSE < 0) { |
279 | 0 | /* Technically speaking, nothing keeps CLOCK_* from being |
280 | 0 | * negative (as far as I know). This check and the one below |
281 | 0 | * make sure that it's safe for us to use -1 as an "unset" |
282 | 0 | * value. */ |
283 | 0 | event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0"); |
284 | 0 | } |
285 | 0 | if (! precise && ! fallback) { |
286 | 0 | if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) { |
287 | 0 | base->monotonic_clock = CLOCK_MONOTONIC_COARSE; |
288 | 0 | return 0; |
289 | 0 | } |
290 | 0 | } |
291 | 0 | #endif |
292 | 0 | if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) { |
293 | 0 | base->monotonic_clock = CLOCK_MONOTONIC; |
294 | 0 | return 0; |
295 | 0 | } |
296 | 0 | |
297 | 0 | if (CLOCK_MONOTONIC < 0) { |
298 | 0 | event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0"); |
299 | 0 | } |
300 | 0 | |
301 | 0 | base->monotonic_clock = -1; |
302 | 0 | return 0; |
303 | 0 | } |
304 | | |
305 | | int |
306 | | evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, |
307 | | struct timeval *tp) |
308 | 0 | { |
309 | 0 | struct timespec ts; |
310 | 0 |
311 | 0 | if (base->monotonic_clock < 0) { |
312 | 0 | if (evutil_gettimeofday(tp, NULL) < 0) |
313 | 0 | return -1; |
314 | 0 | adjust_monotonic_time(base, tp); |
315 | 0 | return 0; |
316 | 0 | } |
317 | 0 | |
318 | 0 | if (clock_gettime(base->monotonic_clock, &ts) == -1) |
319 | 0 | return -1; |
320 | 0 | tp->tv_sec = ts.tv_sec; |
321 | 0 | tp->tv_usec = ts.tv_nsec / 1000; |
322 | 0 |
323 | 0 | return 0; |
324 | 0 | } |
325 | | #endif |
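
The timespec-to-timeval conversion at the end can be exercised standalone on any POSIX system with CLOCK_MONOTONIC; a minimal sketch:

#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main(void)
{
	struct timespec ts;
	struct timeval tv;
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
		return 1;
	tv.tv_sec = ts.tv_sec;          /* seconds carry straight over */
	tv.tv_usec = ts.tv_nsec / 1000; /* truncate nanoseconds to microseconds */
	printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}
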
326 | | |
327 | | #if defined(HAVE_MACH_MONOTONIC) |
328 | | /* ====== |
329 | | Apple is a little late to the POSIX party. And why not? Instead of |
330 | | clock_gettime(), they provide mach_absolute_time(). Its units are not |
331 | | fixed; we need to use mach_timebase_info() to get the right functions to |
332 | | convert its units into nanoseconds. |
333 | | |
334 | | To all appearances, mach_absolute_time() seems to be honest-to-goodness |
335 | | monotonic. Whether it stops during sleep or not is unspecified in |
336 | | principle, and dependent on CPU architecture in practice. |
337 | | */ |
338 | | |
339 | | int |
340 | | evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, |
341 | | int flags) |
342 | | { |
343 | | const int fallback = flags & EV_MONOT_FALLBACK; |
344 | | struct mach_timebase_info mi; |
345 | | memset(base, 0, sizeof(*base)); |
346 | | /* OSX has mach_absolute_time() */ |
347 | | if (!fallback && |
348 | | mach_timebase_info(&mi) == 0 && |
349 | | mach_absolute_time() != 0) { |
350 | | /* mach_timebase_info tells us how to convert |
351 | | * mach_absolute_time() into nanoseconds, but we |
352 | | * want to use microseconds instead. */ |
353 | | mi.denom *= 1000; |
354 | | memcpy(&base->mach_timebase_units, &mi, sizeof(mi)); |
355 | | } else { |
356 | | base->mach_timebase_units.numer = 0; |
357 | | } |
358 | | return 0; |
359 | | } |
360 | | |
361 | | int |
362 | | evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, |
363 | | struct timeval *tp) |
364 | | { |
365 | | ev_uint64_t abstime, usec; |
366 | | if (base->mach_timebase_units.numer == 0) { |
367 | | if (evutil_gettimeofday(tp, NULL) < 0) |
368 | | return -1; |
369 | | adjust_monotonic_time(base, tp); |
370 | | return 0; |
371 | | } |
372 | | |
373 | | abstime = mach_absolute_time(); |
374 | | usec = (abstime * base->mach_timebase_units.numer) |
375 | | / (base->mach_timebase_units.denom); |
376 | | tp->tv_sec = usec / 1000000; |
377 | | tp->tv_usec = usec % 1000000; |
378 | | |
379 | | return 0; |
380 | | } |
381 | | #endif |
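
A macOS-only sketch of the underlying timebase conversion, independent of libevent:

#include <mach/mach_time.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	mach_timebase_info_data_t mi;
	uint64_t abstime = mach_absolute_time();
	if (mach_timebase_info(&mi) != 0)
		return 1;
	/* numer/denom converts ticks to nanoseconds; the code above bakes
	 * the extra division by 1000 into denom so it lands in microseconds. */
	uint64_t usec = abstime * mi.numer / mi.denom / 1000;
	printf("%llu usec since boot\n", (unsigned long long)usec);
	return 0;
}
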
382 | | |
383 | | #if defined(HAVE_WIN32_MONOTONIC) |
384 | | /* ===== |
385 | | Turn we now to Windows. Want monotonic time on Windows? |
386 | | |
387 | | Windows has QueryPerformanceCounter(), which gives the most high- |
388 | | resolution time. It's a pity it's not so monotonic in practice; it's |
389 | | also got some fun bugs, especially: with older Windowses, under |
390 | | virtualizations, with funny hardware, on multiprocessor systems, and so |
391 | | on. PEP418 [1] has a nice roundup of the issues here. |
392 | | |
393 | | There's GetTickCount64() on Vista and later, which gives a number of 1-msec |
394 | | ticks since startup. The accuracy here might be as bad as 10-20 msec, I |
395 | | hear. There's an undocumented function (NtSetTimerResolution) that |
396 | | allegedly increases the accuracy. Good luck! |
397 | | |
398 | | There's also GetTickCount(), which is only 32 bits, but seems to be |
399 | | supported on pre-Vista versions of Windows. Apparently, you can coax |
400 | | another 14 bits out of it, giving you 2231 years before rollover. |
401 | | |
402 | | The less said about timeGetTime() the better. |
403 | | |
404 | | "We don't care. We don't have to. We're the Phone Company." |
405 | | -- Lily Tomlin, SNL |
406 | | |
407 | | Our strategy, if precise timers are turned off, is to just use the best |
408 | | GetTickCount equivalent available. If we've been asked for precise timing, |
409 | | then we mostly[2] assume that GetTickCount is monotonic, and correct |
410 | | GetPerformanceCounter to approximate it. |
411 | | |
412 | | [1] http://www.python.org/dev/peps/pep-0418 |
413 | | [2] Of course, we feed the Windows stuff into adjust_monotonic_time() |
414 | | anyway, just in case it isn't. |
415 | | |
416 | | */ |
417 | | /* |
418 | | Parts of our logic in the win32 timer code here are closely based on |
419 | | BitTorrent's libUTP library. That code is subject to the following |
420 | | license: |
421 | | |
422 | | Copyright (c) 2010 BitTorrent, Inc. |
423 | | |
424 | | Permission is hereby granted, free of charge, to any person obtaining a |
425 | | copy of this software and associated documentation files (the |
426 | | "Software"), to deal in the Software without restriction, including |
427 | | without limitation the rights to use, copy, modify, merge, publish, |
428 | | distribute, sublicense, and/or sell copies of the Software, and to |
429 | | permit persons to whom the Software is furnished to do so, subject to |
430 | | the following conditions: |
431 | | |
432 | | The above copyright notice and this permission notice shall be included |
433 | | in all copies or substantial portions of the Software. |
434 | | |
435 | | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
436 | | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
437 | | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
438 | | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE |
439 | | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION |
440 | | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION |
441 | | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
442 | | */ |
443 | | |
444 | | static ev_uint64_t |
445 | | evutil_GetTickCount_(struct evutil_monotonic_timer *base) |
446 | | { |
447 | | if (base->GetTickCount64_fn) { |
448 | | /* Let's just use GetTickCount64 if we can. */ |
449 | | return base->GetTickCount64_fn(); |
450 | | } else if (base->GetTickCount_fn) { |
451 | | /* Greg Hazel assures me that this works, that BitTorrent has |
452 | | * done it for years, and that it won't turn around and |
453 | | * bite us. He says they found it on some game programmers' |
454 | | * forum some time around 2007. |
455 | | */ |
456 | | ev_uint64_t v = base->GetTickCount_fn(); |
457 | | return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000); |
458 | | } else { |
459 | | /* Here's the fallback implementation. We have to use |
460 | | * GetTickCount() with its given signature, so we only get |
461 | | * 32 bits worth of milliseconds, which will roll over every |
462 | | * 49 days or so. */ |
463 | | DWORD ticks = GetTickCount(); |
464 | | if (ticks < base->last_tick_count) { |
465 | | base->adjust_tick_count += ((ev_uint64_t)1) << 32; |
466 | | } |
467 | | base->last_tick_count = ticks; |
468 | | return ticks + base->adjust_tick_count; |
469 | | } |
470 | | } |
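
A self-contained sketch of the rollover compensation in the final fallback branch (the bit-mixing trick in the middle branch is deliberately left alone, since it relies on undocumented behavior):

#include <stdint.h>
#include <stdio.h>

static uint32_t last_ticks = 0;
static uint64_t adjust = 0;

static uint64_t extend_ticks(uint32_t ticks)
{
	if (ticks < last_ticks)        /* wrapped past 2^32 msec (~49.7 days) */
		adjust += (uint64_t)1 << 32;
	last_ticks = ticks;
	return ticks + adjust;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)extend_ticks(0xFFFFFFF0u));
	printf("%llu\n", (unsigned long long)extend_ticks(0x00000010u)); /* after wrap */
	return 0;
}
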
471 | | |
472 | | int |
473 | | evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, |
474 | | int flags) |
475 | | { |
476 | | const int precise = flags & EV_MONOT_PRECISE; |
477 | | const int fallback = flags & EV_MONOT_FALLBACK; |
478 | | HANDLE h; |
479 | | memset(base, 0, sizeof(*base)); |
480 | | |
481 | | h = evutil_load_windows_system_library_(TEXT("kernel32.dll")); |
482 | | if (h != NULL && !fallback) { |
483 | | base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64"); |
484 | | base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount"); |
485 | | } |
486 | | |
487 | | base->first_tick = base->last_tick_count = evutil_GetTickCount_(base); |
488 | | if (precise && !fallback) { |
489 | | LARGE_INTEGER freq; |
490 | | if (QueryPerformanceFrequency(&freq)) { |
491 | | LARGE_INTEGER counter; |
492 | | QueryPerformanceCounter(&counter); |
493 | | base->first_counter = counter.QuadPart; |
494 | | base->usec_per_count = 1.0e6 / freq.QuadPart; |
495 | | base->use_performance_counter = 1; |
496 | | } |
497 | | } |
498 | | |
499 | | return 0; |
500 | | } |
501 | | |
502 | | static inline ev_int64_t |
503 | | abs64(ev_int64_t i) |
504 | | { |
505 | | return i < 0 ? -i : i; |
506 | | } |
507 | | |
508 | | |
509 | | int |
510 | | evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, |
511 | | struct timeval *tp) |
512 | | { |
513 | | ev_uint64_t ticks = evutil_GetTickCount_(base); |
514 | | if (base->use_performance_counter) { |
515 | | /* Here's a trick we took from BitTorrent's libutp, at Greg |
516 | | * Hazel's recommendation. We use QueryPerformanceCounter for |
517 | | * our high-resolution timer, but use GetTickCount*() to keep |
518 | | * it sane, and adjust_monotonic_time() to keep it monotonic. |
519 | | */ |
520 | | LARGE_INTEGER counter; |
521 | | ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed; |
522 | | QueryPerformanceCounter(&counter); |
523 | | counter_elapsed = (ev_int64_t) |
524 | | (counter.QuadPart - base->first_counter); |
525 | | ticks_elapsed = ticks - base->first_tick; |
526 | | /* TODO: This may upset VC6. If you need this to work with |
527 | | * VC6, please supply an appropriate patch. */ |
528 | | counter_usec_elapsed = (ev_int64_t) |
529 | | (counter_elapsed * base->usec_per_count); |
530 | | |
531 | | if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) { |
532 | | /* It appears that the QueryPerformanceCounter() |
533 | | * result is more than 1 second away from |
534 | | * GetTickCount() result. Let's adjust it to be as |
535 | | * accurate as we can; adjust_monotonic_time() below |
536 | | * will keep it monotonic. */ |
537 | | counter_usec_elapsed = ticks_elapsed * 1000; |
538 | | base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count); |
539 | | } |
540 | | tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000); |
541 | | tp->tv_usec = counter_usec_elapsed % 1000000; |
542 | | |
543 | | } else { |
544 | | /* We're just using GetTickCount(). */ |
545 | | tp->tv_sec = (time_t) (ticks / 1000); |
546 | | tp->tv_usec = (ticks % 1000) * 1000; |
547 | | } |
548 | | adjust_monotonic_time(base, tp); |
549 | | |
550 | | return 0; |
551 | | } |
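
A numeric sketch of the one-second clamp above: when the QueryPerformanceCounter result drifts more than a second from the tick count, the elapsed-microseconds value is snapped back to the ticks:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long long ticks_elapsed = 10000;           /* msec from GetTickCount */
	long long counter_usec_elapsed = 12500000; /* usec from QPC: 2.5 s ahead */
	if (llabs(ticks_elapsed * 1000 - counter_usec_elapsed) > 1000000)
		counter_usec_elapsed = ticks_elapsed * 1000; /* snap back to ticks */
	printf("%lld usec\n", counter_usec_elapsed);         /* prints 10000000 */
	return 0;
}
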
552 | | #endif |
553 | | |
554 | | #if defined(HAVE_FALLBACK_MONOTONIC) |
555 | | /* ===== |
556 | | And if none of the other options work, let's just use gettimeofday(), and |
557 | | ratchet it forward so that it acts like a monotonic timer, whether it |
558 | | wants to or not. |
559 | | */ |
560 | | |
561 | | int |
562 | | evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base, |
563 | | int precise) |
564 | | { |
565 | | memset(base, 0, sizeof(*base)); |
566 | | return 0; |
567 | | } |
568 | | |
569 | | int |
570 | | evutil_gettime_monotonic_(struct evutil_monotonic_timer *base, |
571 | | struct timeval *tp) |
572 | | { |
573 | | if (evutil_gettimeofday(tp, NULL) < 0) |
574 | | return -1; |
575 | | adjust_monotonic_time(base, tp); |
576 | | return 0; |
577 | | |
578 | | } |
579 | | #endif |