Line data Source code
1 : // Copyright 2013 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/base/platform/time.h"
6 :
7 : #if V8_OS_POSIX
8 : #include <fcntl.h> // for O_RDONLY
9 : #include <sys/time.h>
10 : #include <unistd.h>
11 : #endif
12 : #if V8_OS_MACOSX
13 : #include <mach/mach.h>
14 : #include <mach/mach_time.h>
15 : #include <pthread.h>
16 : #endif
17 :
18 : #include <cstring>
19 : #include <ostream>
20 :
21 : #if V8_OS_WIN
22 : #include "src/base/atomicops.h"
23 : #include "src/base/lazy-instance.h"
24 : #include "src/base/win32-headers.h"
25 : #endif
26 : #include "src/base/cpu.h"
27 : #include "src/base/logging.h"
28 : #include "src/base/platform/platform.h"
29 :
30 : namespace {
31 :
32 : #if V8_OS_MACOSX
// Returns the cumulative CPU time (user + system) consumed by the calling
// thread, in microseconds, as reported by the Mach thread_info() API.
int64_t ComputeThreadTicks() {
  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t thread_info_data;
  kern_return_t kr = thread_info(
      pthread_mach_thread_np(pthread_self()),
      THREAD_BASIC_INFO,
      reinterpret_cast<thread_info_t>(&thread_info_data),
      &thread_info_count);
  CHECK_EQ(kr, KERN_SUCCESS);

  // Checked arithmetic: an overflow when widening seconds to microseconds
  // aborts via ValueOrDie() instead of silently wrapping.
  v8::base::CheckedNumeric<int64_t> absolute_micros(
      thread_info_data.user_time.seconds +
      thread_info_data.system_time.seconds);
  absolute_micros *= v8::base::Time::kMicrosecondsPerSecond;
  absolute_micros += (thread_info_data.user_time.microseconds +
                      thread_info_data.system_time.microseconds);
  return absolute_micros.ValueOrDie();
}
51 : #elif V8_OS_POSIX
// Helper function to get results from clock_gettime() and convert to a
// microsecond timebase. Minimum requirement is MONOTONIC_CLOCK to be supported
// on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
// _POSIX_MONOTONIC_CLOCK to -1.
V8_INLINE int64_t ClockNow(clockid_t clk_id) {
#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
  defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
// On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with
// resolution of 10ms. thread_cputime API provides the time in ns
#if defined(V8_OS_AIX)
  thread_cputime_t tc;
  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
    if (thread_cputime(-1, &tc) != 0) {
      UNREACHABLE();
    }
  }
#endif
  struct timespec ts;
  if (clock_gettime(clk_id, &ts) != 0) {
    UNREACHABLE();
  }
  // Checked arithmetic: seconds * 1e6 could overflow int64_t only for absurd
  // clock values, but CheckedNumeric makes that a CHECK failure rather than
  // silent wraparound.
  v8::base::internal::CheckedNumeric<int64_t> result(ts.tv_sec);
  result *= v8::base::Time::kMicrosecondsPerSecond;
#if defined(V8_OS_AIX)
  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
    // Prefer the ns-resolution thread_cputime value gathered above.
    result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
  } else {
    result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
  }
#else
  result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
#endif
  return result.ValueOrDie();
#else  // Monotonic clock not supported.
  return 0;
#endif
}
89 :
// Empirically determines whether |clk_id| ticks with (roughly) microsecond
// granularity: returns true iff the smallest observable clock increment is
// at most one microsecond.
V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
  // Limit duration of timer resolution measurement to 100 ms. If we cannot
  // measure timer resolution within this time, we assume a low resolution
  // timer.
  int64_t end =
      ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond;
  int64_t start, delta;
  do {
    start = ClockNow(clk_id);
    // Loop until we can detect that the clock has changed. Non-HighRes timers
    // will increment in chunks, i.e. 15ms. By spinning until we see a clock
    // change, we detect the minimum time between measurements.
    do {
      delta = ClockNow(clk_id) - start;
    } while (delta == 0);
  } while (delta > 1 && start < end);
  return delta <= 1;
}
108 :
109 : #elif V8_OS_WIN
// NOTE(review): despite the name, this returns true when QPC is *unreliable*
// (buggy Athlon X2); the sole caller (ThreadTicks::IsSupportedWin) negates
// the result, so end-to-end behavior is correct. Same logic as
// IsBuggyAthlon() in the V8_OS_WIN section below — keep them in sync.
V8_INLINE bool IsQPCReliable() {
  v8::base::CPU cpu;
  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
  return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
}
115 :
// Returns the current value of the performance counter.
V8_INLINE uint64_t QPCNowRaw() {
  LARGE_INTEGER perf_counter_now = {};
  // According to the MSDN documentation for QueryPerformanceCounter(), this
  // will never fail on systems that run XP or later.
  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
  BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
  DCHECK(result);
  USE(result);  // |result| is only consumed by the debug-mode check above.
  return perf_counter_now.QuadPart;
}
127 : #endif // V8_OS_MACOSX
128 :
129 :
130 : } // namespace
131 :
132 : namespace v8 {
133 : namespace base {
134 :
135 2 : int TimeDelta::InDays() const {
136 2 : if (IsMax()) {
137 : // Preserve max to prevent overflow.
138 : return std::numeric_limits<int>::max();
139 : }
140 1 : return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
141 : }
142 :
143 2 : int TimeDelta::InHours() const {
144 2 : if (IsMax()) {
145 : // Preserve max to prevent overflow.
146 : return std::numeric_limits<int>::max();
147 : }
148 1 : return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
149 : }
150 :
151 2 : int TimeDelta::InMinutes() const {
152 2 : if (IsMax()) {
153 : // Preserve max to prevent overflow.
154 : return std::numeric_limits<int>::max();
155 : }
156 1 : return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
157 : }
158 :
159 2 : double TimeDelta::InSecondsF() const {
160 2 : if (IsMax()) {
161 : // Preserve max to prevent overflow.
162 : return std::numeric_limits<double>::infinity();
163 : }
164 1 : return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
165 : }
166 :
167 2 : int64_t TimeDelta::InSeconds() const {
168 2 : if (IsMax()) {
169 : // Preserve max to prevent overflow.
170 : return std::numeric_limits<int64_t>::max();
171 : }
172 1 : return delta_ / Time::kMicrosecondsPerSecond;
173 : }
174 :
175 6901619 : double TimeDelta::InMillisecondsF() const {
176 6901619 : if (IsMax()) {
177 : // Preserve max to prevent overflow.
178 : return std::numeric_limits<double>::infinity();
179 : }
180 6901625 : return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
181 : }
182 :
183 2 : int64_t TimeDelta::InMilliseconds() const {
184 2 : if (IsMax()) {
185 : // Preserve max to prevent overflow.
186 : return std::numeric_limits<int64_t>::max();
187 : }
188 1 : return delta_ / Time::kMicrosecondsPerMillisecond;
189 : }
190 :
191 1 : int64_t TimeDelta::InMillisecondsRoundedUp() const {
192 1 : if (IsMax()) {
193 : // Preserve max to prevent overflow.
194 : return std::numeric_limits<int64_t>::max();
195 : }
196 0 : return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
197 0 : Time::kMicrosecondsPerMillisecond;
198 : }
199 :
200 921048 : int64_t TimeDelta::InMicroseconds() const {
201 921048 : if (IsMax()) {
202 : // Preserve max to prevent overflow.
203 : return std::numeric_limits<int64_t>::max();
204 : }
205 921048 : return delta_;
206 : }
207 :
208 0 : int64_t TimeDelta::InNanoseconds() const {
209 0 : if (IsMax()) {
210 : // Preserve max to prevent overflow.
211 : return std::numeric_limits<int64_t>::max();
212 : }
213 0 : return delta_ * Time::kNanosecondsPerMicrosecond;
214 : }
215 :
216 :
217 : #if V8_OS_MACOSX
218 :
// Converts a Mach timespec duration to a TimeDelta. Sub-microsecond
// precision is truncated.
TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}
226 :
227 :
// Converts this (non-negative) delta to a Mach timespec, splitting the
// microsecond count into whole seconds and a nanosecond remainder.
struct mach_timespec TimeDelta::ToMachTimespec() const {
  struct mach_timespec ts;
  DCHECK_GE(delta_, 0);
  ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}
236 :
237 : #endif // V8_OS_MACOSX
238 :
239 :
240 : #if V8_OS_POSIX
241 :
242 0 : TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
243 : DCHECK_GE(ts.tv_nsec, 0);
244 : DCHECK_LT(ts.tv_nsec,
245 : static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
246 0 : return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
247 0 : ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
248 : }
249 :
250 :
251 0 : struct timespec TimeDelta::ToTimespec() const {
252 : struct timespec ts;
253 0 : ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
254 0 : ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
255 : Time::kNanosecondsPerMicrosecond;
256 0 : return ts;
257 : }
258 :
259 : #endif // V8_OS_POSIX
260 :
261 :
262 : #if V8_OS_WIN
263 :
// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock final {
 public:
  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}

  // Returns the current time, extrapolated from the last sync point using the
  // high-resolution tick counter. Resyncs against the system clock when the
  // system clock jumped backwards or the sync point is older than one minute.
  Time Now() {
    // Time between resampling the un-granular clock for this API (1 minute).
    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

    MutexGuard lock_guard(&mutex_);

    // Determine current time and ticks.
    TimeTicks ticks = GetSystemTicks();
    Time time = GetSystemTime();

    // Check if we need to synchronize with the system clock due to a backwards
    // time change or the amount of time elapsed.
    TimeDelta elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }

    return initial_time_ + elapsed;
  }

  // Forces a resync with the system clock and returns the freshly read time.
  Time NowFromSystemTime() {
    MutexGuard lock_guard(&mutex_);
    initial_ticks_ = GetSystemTicks();
    initial_time_ = GetSystemTime();
    return initial_time_;
  }

 private:
  static TimeTicks GetSystemTicks() {
    return TimeTicks::Now();
  }

  static Time GetSystemTime() {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    return Time::FromFiletime(ft);
  }

  // Tick/time pair captured at the last resync; |mutex_| guards both.
  TimeTicks initial_ticks_;
  Time initial_time_;
  Mutex mutex_;
};
315 :
316 : namespace {
317 : DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock);
318 : };
319 :
// Both Now() variants delegate to the lazily-created, leaked Clock singleton.
Time Time::Now() { return GetClock()->Now(); }

Time Time::NowFromSystemTime() { return GetClock()->NowFromSystemTime(); }
323 :
// Time between windows epoch (1601-01-01) and standard epoch (1970-01-01).
static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};

// Converts a Windows FILETIME (100ns units since 1601) to a Time. The
// all-zero and all-ones FILETIMEs map to the null and maximum Time.
Time Time::FromFiletime(FILETIME ft) {
  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
    return Time();
  }
  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
    return Max();
  }
  // 100ns intervals -> microseconds, then rebase from 1601 to the 1970 epoch.
  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
  return Time(us - kTimeToEpochInMicroseconds);
}
339 :
340 :
// Converts this Time to a Windows FILETIME (inverse of FromFiletime). Null
// and Max() map to the all-zero and all-ones FILETIMEs respectively.
FILETIME Time::ToFiletime() const {
  DCHECK_GE(us_, 0);
  FILETIME ft;
  if (IsNull()) {
    ft.dwLowDateTime = 0;
    ft.dwHighDateTime = 0;
    return ft;
  }
  if (IsMax()) {
    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
    return ft;
  }
  // Rebase to the 1601 epoch and convert microseconds -> 100ns intervals.
  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
  ft.dwLowDateTime = static_cast<DWORD>(us);
  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
  return ft;
}
359 :
360 : #elif V8_OS_POSIX
361 :
362 165707 : Time Time::Now() {
363 : struct timeval tv;
364 165707 : int result = gettimeofday(&tv, nullptr);
365 : DCHECK_EQ(0, result);
366 : USE(result);
367 165707 : return FromTimeval(tv);
368 : }
369 :
370 :
// On POSIX there is no separate "system time" source, so this is just Now().
Time Time::NowFromSystemTime() {
  return Now();
}
374 :
375 :
// Converts a POSIX timespec to a Time. The all-zero timespec maps to the
// null Time, and the maximal timespec maps to Time::Max().
Time Time::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
    return Time();
  }
  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
      ts.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  // Sub-microsecond precision is truncated.
  return Time(ts.tv_sec * kMicrosecondsPerSecond +
              ts.tv_nsec / kNanosecondsPerMicrosecond);
}
389 :
390 :
// Converts this Time to a POSIX timespec (inverse of FromTimespec). Null and
// Max() map to the all-zero and maximal timespecs respectively.
struct timespec Time::ToTimespec() const {
  struct timespec ts;
  if (IsNull()) {
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    return ts;
  }
  if (IsMax()) {
    ts.tv_sec = std::numeric_limits<time_t>::max();
    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
    return ts;
  }
  ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
  return ts;
}
407 :
408 :
// Converts a POSIX timeval to a Time. The all-zero timeval maps to the null
// Time, and the maximal timeval maps to Time::Max().
Time Time::FromTimeval(struct timeval tv) {
  DCHECK_GE(tv.tv_usec, 0);
  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
    return Time();
  }
  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
      tv.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
}
421 :
422 :
// Converts this Time to a POSIX timeval (inverse of FromTimeval). Null and
// Max() map to the all-zero and maximal timevals respectively.
struct timeval Time::ToTimeval() const {
  struct timeval tv;
  if (IsNull()) {
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    return tv;
  }
  if (IsMax()) {
    tv.tv_sec = std::numeric_limits<time_t>::max();
    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
    return tv;
  }
  tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  tv.tv_usec = us_ % kMicrosecondsPerSecond;
  return tv;
}
439 :
440 : #endif // V8_OS_WIN
441 :
// static
// Legacy alias for Now(); no longer guarantees a high-resolution source.
TimeTicks TimeTicks::HighResolutionNow() {
  // a DCHECK of TimeTicks::IsHighResolution() was removed from here
  // as it turns out this path is used in the wild for logs and counters.
  //
  // TODO(hpayer) We may eventually want to split TimedHistograms based
  // on low resolution clocks to avoid polluting metrics
  return TimeTicks::Now();
}
451 :
// Converts JavaScript time (fractional milliseconds since the epoch) to a
// Time; double max maps to Time::Max().
Time Time::FromJsTime(double ms_since_epoch) {
  // The epoch is a valid time, so this constructor doesn't interpret
  // 0 as the null time.
  if (ms_since_epoch == std::numeric_limits<double>::max()) {
    return Max();
  }
  return Time(
      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
}
461 :
462 :
// Converts this Time to JavaScript time (fractional milliseconds since the
// epoch). Inverse of FromJsTime for the null/max sentinels as well.
double Time::ToJsTime() const {
  if (IsNull()) {
    // Preserve 0 so the invalid result doesn't depend on the platform.
    return 0;
  }
  if (IsMax()) {
    // Preserve max without offset to prevent overflow.
    return std::numeric_limits<double>::max();
  }
  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
}
474 :
475 :
476 0 : std::ostream& operator<<(std::ostream& os, const Time& time) {
477 0 : return os << time.ToJsTime();
478 : }
479 :
480 :
481 : #if V8_OS_WIN
482 :
namespace {

// We define a wrapper to adapt between the __stdcall and __cdecl call of the
// mock function, and to avoid a static constructor. Assigning an import to a
// function pointer directly would require setup code to fetch from the IAT.
DWORD timeGetTimeWrapper() { return timeGetTime(); }

// Indirection point for the low-resolution tick source (tests can mock it).
DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;

// A structure holding the most significant bits of "last seen" and a
// "rollover" counter.
union LastTimeAndRolloversState {
  // The state as a single 32-bit opaque value.
  base::Atomic32 as_opaque_32;

  // The state as usable values.
  struct {
    // The top 8-bits of the "last" time. This is enough to check for rollovers
    // and the small bit-size means fewer CompareAndSwap operations to store
    // changes in state, which in turn makes for fewer retries.
    uint8_t last_8;
    // A count of the number of detected rollovers. Using this as bits 47-32
    // of the upper half of a 64-bit value results in a 48-bit tick counter.
    // This extends the total rollover period from about 49 days to about 8800
    // years while still allowing it to be stored with last_8 in a single
    // 32-bit value.
    uint16_t rollovers;
  } as_values;
};
// Packed {last_8, rollovers} state, updated atomically by all threads.
base::Atomic32 g_last_time_and_rollovers = 0;
static_assert(sizeof(LastTimeAndRolloversState) <=
                  sizeof(g_last_time_and_rollovers),
              "LastTimeAndRolloversState does not fit in a single atomic word");
516 :
// We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
// because it returns the number of milliseconds since Windows has started,
// which will roll over the 32-bit value every ~49 days. We try to track
// rollover ourselves, which works if TimeTicks::Now() is called at least every
// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
TimeTicks RolloverProtectedNow() {
  LastTimeAndRolloversState state;
  DWORD now;  // DWORD is always unsigned 32 bits.

  // Lock-free CAS loop: read state, fold in the current tick value, and try
  // to publish; retry if another thread raced us.
  while (true) {
    // Fetch the "now" and "last" tick values, updating "last" with "now" and
    // incrementing the "rollovers" counter if the tick-value has wrapped back
    // around. Atomic operations ensure that both "last" and "rollovers" are
    // always updated together.
    int32_t original = base::Acquire_Load(&g_last_time_and_rollovers);
    state.as_opaque_32 = original;
    now = g_tick_function();
    uint8_t now_8 = static_cast<uint8_t>(now >> 24);
    if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
    state.as_values.last_8 = now_8;

    // If the state hasn't changed, exit the loop.
    if (state.as_opaque_32 == original) break;

    // Save the changed state. If the existing value is unchanged from the
    // original, exit the loop.
    int32_t check = base::Release_CompareAndSwap(&g_last_time_and_rollovers,
                                                 original, state.as_opaque_32);
    if (check == original) break;

    // Another thread has done something in between so retry from the top.
  }

  // Compose the 48-bit tick count: 32 bits from timeGetTime() plus the
  // rollover count in bits 47-32 (the addend promotes to uint64_t).
  return TimeTicks() +
         TimeDelta::FromMilliseconds(
             now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
}
554 :
555 : // Discussion of tick counter options on Windows:
556 : //
557 : // (1) CPU cycle counter. (Retrieved via RDTSC)
558 : // The CPU counter provides the highest resolution time stamp and is the least
559 : // expensive to retrieve. However, on older CPUs, two issues can affect its
560 : // reliability: First it is maintained per processor and not synchronized
561 : // between processors. Also, the counters will change frequency due to thermal
562 : // and power changes, and stop in some states.
563 : //
564 : // (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
565 : // resolution (<1 microsecond) time stamp. On most hardware running today, it
566 : // auto-detects and uses the constant-rate RDTSC counter to provide extremely
567 : // efficient and reliable time stamps.
568 : //
569 : // On older CPUs where RDTSC is unreliable, it falls back to using more
570 : // expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
571 : // PM timer, and can involve system calls; and all this is up to the HAL (with
572 : // some help from ACPI). According to
573 : // http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
574 : // worst case, it gets the counter from the rollover interrupt on the
575 : // programmable interrupt timer. In best cases, the HAL may conclude that the
576 : // RDTSC counter runs at a constant frequency, then it uses that instead. On
577 : // multiprocessor machines, it will try to verify the values returned from
578 : // RDTSC on each processor are consistent with each other, and apply a handful
579 : // of workarounds for known buggy hardware. In other words, QPC is supposed to
580 : // give consistent results on a multiprocessor computer, but for older CPUs it
// can be unreliable due to bugs in BIOS or HAL.
582 : //
583 : // (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
584 : // milliseconds) time stamp but is comparatively less expensive to retrieve and
585 : // more reliable. Time::EnableHighResolutionTimer() and
586 : // Time::ActivateHighResolutionTimer() can be called to alter the resolution of
587 : // this timer; and also other Windows applications can alter it, affecting this
588 : // one.
589 :
// Forward declaration: the bootstrap implementation initially installed in
// g_time_ticks_now_function below.
TimeTicks InitialTimeTicksNowFunction();

// See "threading notes" in InitializeNowFunctionPointer() for details on how
// concurrent reads/writes to these globals has been made safe.
using TimeTicksNowFunction = decltype(&TimeTicks::Now);
TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
// QPC frequency; written once (behind a release fence) before the function
// pointer above is switched away from the bootstrap implementation.
int64_t g_qpc_ticks_per_second = 0;

// As of January 2015, use of <atomic> is forbidden in Chromium code. This is
// what std::atomic_thread_fence does on Windows on all Intel architectures when
// the memory_order argument is anything but std::memory_order_seq_cst:
#define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();
602 :
// Converts a raw QPC reading into a TimeDelta using the cached QPC frequency.
TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
  // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
  // InitializeNowFunctionPointer(), has happened by this point.
  ATOMIC_THREAD_FENCE(memory_order_acquire);

  DCHECK_GT(g_qpc_ticks_per_second, 0);

  // If the QPC Value is below the overflow threshold, we proceed with
  // simple multiply and divide.
  if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
    return TimeDelta::FromMicroseconds(
        qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
  }
  // Otherwise, calculate microseconds in a round about manner to avoid
  // overflow and precision issues.
  int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
  int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
  return TimeDelta::FromMicroseconds(
      (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
      ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
       g_qpc_ticks_per_second));
}
625 :
// High-resolution tick source backed by QueryPerformanceCounter.
TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }

// NOTE(review): duplicates the check in IsQPCReliable() earlier in this file
// (separate anonymous namespaces); keep the two in sync.
bool IsBuggyAthlon(const CPU& cpu) {
  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
  return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
}
632 :
// Selects the best available tick source (QPC or rollover-protected
// timeGetTime) and installs it into g_time_ticks_now_function.
void InitializeTimeTicksNowFunctionPointer() {
  LARGE_INTEGER ticks_per_sec = {};
  if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;

  // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
  // the low-resolution clock.
  //
  // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
  // will still use the low-resolution clock. A CPU lacking a non-stop time
  // counter will cause Windows to provide an alternate QPC implementation that
  // works, but is expensive to use. Certain Athlon CPUs are known to make the
  // QPC implementation unreliable.
  //
  // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
  // ~72% of users fall within this category.
  TimeTicksNowFunction now_function;
  CPU cpu;
  if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter() ||
      IsBuggyAthlon(cpu)) {
    now_function = &RolloverProtectedNow;
  } else {
    now_function = &QPCNow;
  }

  // Threading note 1: In an unlikely race condition, it's possible for two or
  // more threads to enter InitializeNowFunctionPointer() in parallel. This is
  // not a problem since all threads should end up writing out the same values
  // to the global variables.
  //
  // Threading note 2: A release fence is placed here to ensure, from the
  // perspective of other threads using the function pointers, that the
  // assignment to |g_qpc_ticks_per_second| happens before the function pointers
  // are changed.
  g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
  ATOMIC_THREAD_FENCE(memory_order_release);
  g_time_ticks_now_function = now_function;
}
670 :
// Bootstrap implementation: the first call through the function pointer picks
// the real tick source, installs it, then forwards this call to it.
TimeTicks InitialTimeTicksNowFunction() {
  InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function();
}
675 :
676 : #undef ATOMIC_THREAD_FENCE
677 :
678 : } // namespace
679 :
// static
// Dispatches through the lazily-selected tick source (QPC or timeGetTime).
TimeTicks TimeTicks::Now() {
  // Make sure we never return 0 here.
  TimeTicks ticks(g_time_ticks_now_function());
  DCHECK(!ticks.IsNull());
  return ticks;
}
687 :
// static
// True iff Now() is backed by QPC. Forces tick-source selection if Now() has
// never been called yet.
bool TimeTicks::IsHighResolution() {
  if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
    InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function == &QPCNow;
}
694 :
695 : #else // V8_OS_WIN
696 :
697 43497523 : TimeTicks TimeTicks::Now() {
698 : int64_t ticks;
699 : #if V8_OS_MACOSX
700 : static struct mach_timebase_info info;
701 : if (info.denom == 0) {
702 : kern_return_t result = mach_timebase_info(&info);
703 : DCHECK_EQ(KERN_SUCCESS, result);
704 : USE(result);
705 : }
706 : ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
707 : info.numer / info.denom);
708 : #elif V8_OS_SOLARIS
709 : ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
710 : #elif V8_OS_POSIX
711 : ticks = ClockNow(CLOCK_MONOTONIC);
712 : #else
713 : #error platform does not implement TimeTicks::HighResolutionNow.
714 : #endif // V8_OS_MACOSX
715 : // Make sure we never return 0 here.
716 43504451 : return TimeTicks(ticks + 1);
717 : }
718 :
// static
// True iff the monotonic clock ticks at (roughly) microsecond granularity.
bool TimeTicks::IsHighResolution() {
#if V8_OS_MACOSX
  return true;
#elif V8_OS_POSIX
  // Measured once; function-local static caches the (100ms-bounded) probe.
  static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
  return is_high_resolution;
#else
  return true;
#endif
}
730 :
731 : #endif // V8_OS_WIN
732 :
733 :
// Whether per-thread CPU-time ticks are available on this platform/build.
bool ThreadTicks::IsSupported() {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
  defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
  return true;
#elif defined(V8_OS_WIN)
  // Windows support additionally depends on the CPU/QPC (checked at runtime).
  return IsSupportedWin();
#else
  return false;
#endif
}
744 :
745 :
// Returns the CPU time consumed by the current thread, in microseconds,
// using the best per-platform source. Callers must check IsSupported().
ThreadTicks ThreadTicks::Now() {
#if V8_OS_MACOSX
  return ThreadTicks(ComputeThreadTicks());
#elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
  defined(V8_OS_ANDROID)
  return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
#elif V8_OS_SOLARIS
  return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_WIN
  return ThreadTicks::GetForThread(::GetCurrentThread());
#else
  UNREACHABLE();
#endif
}
760 :
761 :
762 : #if V8_OS_WIN
// Returns the CPU time consumed by |thread_handle|, derived from its TSC
// cycle count and the measured TSC frequency. Returns the null ThreadTicks
// until TSCTicksPerSecond() has gathered enough data (>= ~50ms of samples).
ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
  DCHECK(IsSupported());

  // Get the number of TSC ticks used by the current thread.
  ULONG64 thread_cycle_time = 0;
  ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);

  // Get the frequency of the TSC.
  double tsc_ticks_per_second = TSCTicksPerSecond();
  if (tsc_ticks_per_second == 0)
    return ThreadTicks();

  // Return the CPU time of the current thread.
  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
  return ThreadTicks(
      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
}

// static
// Supported when the TSC is invariant (non-stop) and QPC is trustworthy.
bool ThreadTicks::IsSupportedWin() {
  static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
                             !IsQPCReliable();
  return is_supported;
}

// static
// Blocks (polling every 10ms) until the TSC frequency measurement is ready.
void ThreadTicks::WaitUntilInitializedWin() {
  while (TSCTicksPerSecond() == 0)
    ::Sleep(10);
}
793 :
// Raw cycle-counter read: PMCCNTR_EL0 on ARM64, RDTSC elsewhere.
#ifdef V8_HOST_ARCH_ARM64
#define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
#else
#define ReadCycleCounter() __rdtsc()
#endif

// Measures (and caches) the TSC frequency by comparing TSC deltas against
// QPC deltas over at least 50ms. Returns 0 until enough time has elapsed
// between the first call and a subsequent one.
double ThreadTicks::TSCTicksPerSecond() {
  DCHECK(IsSupported());

  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
  // frequency, because there is no guarantee that the TSC frequency is equal to
  // the performance counter frequency.

  // The TSC frequency is cached in a static variable because it takes some time
  // to compute it.
  static double tsc_ticks_per_second = 0;
  if (tsc_ticks_per_second != 0)
    return tsc_ticks_per_second;

  // Increase the thread priority to reduces the chances of having a context
  // switch during a reading of the TSC and the performance counter.
  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);

  // The first time that this function is called, make an initial reading of the
  // TSC and the performance counter.
  static const uint64_t tsc_initial = ReadCycleCounter();
  static const uint64_t perf_counter_initial = QPCNowRaw();

  // Make a another reading of the TSC and the performance counter every time
  // that this function is called.
  uint64_t tsc_now = ReadCycleCounter();
  uint64_t perf_counter_now = QPCNowRaw();

  // Reset the thread priority.
  ::SetThreadPriority(::GetCurrentThread(), previous_priority);

  // Make sure that at least 50 ms elapsed between the 2 readings. The first
  // time that this function is called, we don't expect this to be the case.
  // Note: The longer the elapsed time between the 2 readings is, the more
  //   accurate the computed TSC frequency will be. The 50 ms value was
  //   chosen because local benchmarks show that it allows us to get a
  //   stddev of less than 1 tick/us between multiple runs.
  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
  //   this will never fail on systems that run XP or later.
  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
  LARGE_INTEGER perf_counter_frequency = {};
  ::QueryPerformanceFrequency(&perf_counter_frequency);
  DCHECK_GE(perf_counter_now, perf_counter_initial);
  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
  double elapsed_time_seconds =
      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);

  const double kMinimumEvaluationPeriodSeconds = 0.05;
  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
    return 0;

  // Compute the frequency of the TSC.
  DCHECK_GE(tsc_now, tsc_initial);
  uint64_t tsc_ticks = tsc_now - tsc_initial;
  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;

  return tsc_ticks_per_second;
}
#undef ReadCycleCounter
859 : #endif // V8_OS_WIN
860 :
861 : } // namespace base
862 : } // namespace v8
|