Line | Hit count | Source code
1 : // Copyright 2013 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/base/platform/time.h"
6 :
7 : #if V8_OS_POSIX
8 : #include <fcntl.h> // for O_RDONLY
9 : #include <sys/time.h>
10 : #include <unistd.h>
11 : #endif
12 : #if V8_OS_MACOSX
13 : #include <mach/mach.h>
14 : #include <mach/mach_time.h>
15 : #include <pthread.h>
16 : #endif
17 :
18 : #include <cstring>
19 : #include <ostream>
20 :
21 : #if V8_OS_WIN
22 : #include "src/base/lazy-instance.h"
23 : #include "src/base/win32-headers.h"
24 : #endif
25 : #include "src/base/cpu.h"
26 : #include "src/base/logging.h"
27 : #include "src/base/platform/platform.h"
28 :
29 : namespace {
30 :
31 : #if V8_OS_MACOSX
32 : int64_t ComputeThreadTicks() {
33 : mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
34 : thread_basic_info_data_t thread_info_data;
35 : kern_return_t kr = thread_info(
36 : pthread_mach_thread_np(pthread_self()),
37 : THREAD_BASIC_INFO,
38 : reinterpret_cast<thread_info_t>(&thread_info_data),
39 : &thread_info_count);
40 : CHECK_EQ(kr, KERN_SUCCESS);
41 :
42 : v8::base::CheckedNumeric<int64_t> absolute_micros(
43 : thread_info_data.user_time.seconds +
44 : thread_info_data.system_time.seconds);
45 : absolute_micros *= v8::base::Time::kMicrosecondsPerSecond;
46 : absolute_micros += (thread_info_data.user_time.microseconds +
47 : thread_info_data.system_time.microseconds);
48 : return absolute_micros.ValueOrDie();
49 : }
50 : #elif V8_OS_POSIX
51 : // Helper function to get results from clock_gettime() and convert them to a
52 : // microsecond timebase. The minimum requirement is that CLOCK_MONOTONIC is
53 : // supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
54 : // _POSIX_MONOTONIC_CLOCK to -1.
55 : V8_INLINE int64_t ClockNow(clockid_t clk_id) {
56 : #if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
57 : defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
58 : // On AIX, clock_gettime() for CLOCK_THREAD_CPUTIME_ID has a resolution of
59 : // only 10ms, so the thread_cputime() API is used to get the time in ns.
60 : #if defined(V8_OS_AIX)
61 : thread_cputime_t tc;
62 : if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
63 : if (thread_cputime(-1, &tc) != 0) {
64 : UNREACHABLE();
65 : }
66 : }
67 : #endif
68 : struct timespec ts;
69 45463912 : if (clock_gettime(clk_id, &ts) != 0) {
70 0 : UNREACHABLE();
71 : }
72 45483615 : v8::base::internal::CheckedNumeric<int64_t> result(ts.tv_sec);
73 45478650 : result *= v8::base::Time::kMicrosecondsPerSecond;
74 : #if defined(V8_OS_AIX)
75 : if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
76 : result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
77 : } else {
78 : result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
79 : }
80 : #else
81 45476804 : result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
82 : #endif
83 45474519 : return result.ValueOrDie();
84 : #else // Monotonic clock not supported.
85 : return 0;
86 : #endif
87 : }
88 :
89 : V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
90 : // Limit duration of timer resolution measurement to 100 ms. If we cannot
91 : // measure timer resolution within this time, we assume a low resolution
92 : // timer.
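// ClockNow() returns microseconds, so the clock is treated as high resolution
// only when the smallest observed nonzero delta is at most one microsecond.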
93 : int64_t end =
94 1 : ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond;
95 : int64_t start, delta;
96 : do {
97 : start = ClockNow(clk_id);
98 : // Loop until we can detect that the clock has changed. Non-HighRes timers
99 : // will increment in chunks, e.g. 15ms. By spinning until we see a clock
100 : // change, we detect the minimum time between measurements.
101 : do {
102 1 : delta = ClockNow(clk_id) - start;
103 1 : } while (delta == 0);
104 1 : } while (delta > 1 && start < end);
105 : return delta <= 1;
106 : }
107 :
108 : #elif V8_OS_WIN
109 : V8_INLINE bool IsQPCReliable() {
110 : v8::base::CPU cpu;
111 : // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
112 : return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
113 : }
114 :
115 : // Returns the current value of the performance counter.
116 : V8_INLINE uint64_t QPCNowRaw() {
117 : LARGE_INTEGER perf_counter_now = {};
118 : // According to the MSDN documentation for QueryPerformanceCounter(), this
119 : // will never fail on systems that run XP or later.
120 : // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
121 : BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
122 : DCHECK(result);
123 : USE(result);
124 : return perf_counter_now.QuadPart;
125 : }
126 : #endif // V8_OS_MACOSX
127 :
128 :
129 : } // namespace
130 :
131 : namespace v8 {
132 : namespace base {
133 :
134 2 : int TimeDelta::InDays() const {
135 2 : if (IsMax()) {
136 : // Preserve max to prevent overflow.
137 : return std::numeric_limits<int>::max();
138 : }
139 1 : return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
140 : }
141 :
142 2 : int TimeDelta::InHours() const {
143 2 : if (IsMax()) {
144 : // Preserve max to prevent overflow.
145 : return std::numeric_limits<int>::max();
146 : }
147 1 : return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
148 : }
149 :
150 2 : int TimeDelta::InMinutes() const {
151 2 : if (IsMax()) {
152 : // Preserve max to prevent overflow.
153 : return std::numeric_limits<int>::max();
154 : }
155 1 : return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
156 : }
157 :
158 2 : double TimeDelta::InSecondsF() const {
159 2 : if (IsMax()) {
160 : // Preserve max to prevent overflow.
161 : return std::numeric_limits<double>::infinity();
162 : }
163 1 : return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
164 : }
165 :
166 2 : int64_t TimeDelta::InSeconds() const {
167 2 : if (IsMax()) {
168 : // Preserve max to prevent overflow.
169 : return std::numeric_limits<int64_t>::max();
170 : }
171 1 : return delta_ / Time::kMicrosecondsPerSecond;
172 : }
173 :
174 6922775 : double TimeDelta::InMillisecondsF() const {
175 6922775 : if (IsMax()) {
176 : // Preserve max to prevent overflow.
177 : return std::numeric_limits<double>::infinity();
178 : }
179 6922780 : return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
180 : }
181 :
182 2 : int64_t TimeDelta::InMilliseconds() const {
183 2 : if (IsMax()) {
184 : // Preserve max to prevent overflow.
185 : return std::numeric_limits<int64_t>::max();
186 : }
187 1 : return delta_ / Time::kMicrosecondsPerMillisecond;
188 : }
189 :
190 1 : int64_t TimeDelta::InMillisecondsRoundedUp() const {
191 1 : if (IsMax()) {
192 : // Preserve max to prevent overflow.
193 : return std::numeric_limits<int64_t>::max();
194 : }
195 0 : return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
196 0 : Time::kMicrosecondsPerMillisecond;
197 : }
198 :
199 1172608 : int64_t TimeDelta::InMicroseconds() const {
200 1172608 : if (IsMax()) {
201 : // Preserve max to prevent overflow.
202 : return std::numeric_limits<int64_t>::max();
203 : }
204 1172608 : return delta_;
205 : }
206 :
207 0 : int64_t TimeDelta::InNanoseconds() const {
208 0 : if (IsMax()) {
209 : // Preserve max to prevent overflow.
210 : return std::numeric_limits<int64_t>::max();
211 : }
212 0 : return delta_ * Time::kNanosecondsPerMicrosecond;
213 : }
214 :
215 :
216 : #if V8_OS_MACOSX
217 :
218 : TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
219 : DCHECK_GE(ts.tv_nsec, 0);
220 : DCHECK_LT(ts.tv_nsec,
221 : static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
222 : return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
223 : ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
224 : }
225 :
226 :
227 : struct mach_timespec TimeDelta::ToMachTimespec() const {
228 : struct mach_timespec ts;
229 : DCHECK_GE(delta_, 0);
230 : ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
231 : ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
232 : Time::kNanosecondsPerMicrosecond;
233 : return ts;
234 : }
235 :
236 : #endif // V8_OS_MACOSX
237 :
238 :
239 : #if V8_OS_POSIX
240 :
241 0 : TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
242 : DCHECK_GE(ts.tv_nsec, 0);
243 : DCHECK_LT(ts.tv_nsec,
244 : static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
245 0 : return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
246 0 : ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
247 : }
248 :
249 :
250 0 : struct timespec TimeDelta::ToTimespec() const {
251 : struct timespec ts;
252 0 : ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
253 0 : ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
254 : Time::kNanosecondsPerMicrosecond;
255 0 : return ts;
256 : }
257 :
258 : #endif // V8_OS_POSIX
259 :
260 :
261 : #if V8_OS_WIN
262 :
263 : // We implement time using the high-resolution timers so that we can get
264 : // timeouts which are smaller than 10-15ms. To avoid any drift, we
265 : // periodically resync the internal clock to the system clock.
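// Now() returns the last system-time sample plus the high-resolution ticks
// elapsed since that sample, and resamples the system clock whenever time
// appears to move backwards or more than a minute has elapsed.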
266 : class Clock final {
267 : public:
268 : Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
269 :
270 : Time Now() {
271 : // Time between resampling the un-granular clock for this API (1 minute).
272 : const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
273 :
274 : MutexGuard lock_guard(&mutex_);
275 :
276 : // Determine current time and ticks.
277 : TimeTicks ticks = GetSystemTicks();
278 : Time time = GetSystemTime();
279 :
280 : // Check if we need to synchronize with the system clock due to a backwards
281 : // time change or the amount of time elapsed.
282 : TimeDelta elapsed = ticks - initial_ticks_;
283 : if (time < initial_time_ || elapsed > kMaxElapsedTime) {
284 : initial_ticks_ = ticks;
285 : initial_time_ = time;
286 : return time;
287 : }
288 :
289 : return initial_time_ + elapsed;
290 : }
291 :
292 : Time NowFromSystemTime() {
293 : MutexGuard lock_guard(&mutex_);
294 : initial_ticks_ = GetSystemTicks();
295 : initial_time_ = GetSystemTime();
296 : return initial_time_;
297 : }
298 :
299 : private:
300 : static TimeTicks GetSystemTicks() {
301 : return TimeTicks::Now();
302 : }
303 :
304 : static Time GetSystemTime() {
305 : FILETIME ft;
306 : ::GetSystemTimeAsFileTime(&ft);
307 : return Time::FromFiletime(ft);
308 : }
309 :
310 : TimeTicks initial_ticks_;
311 : Time initial_time_;
312 : Mutex mutex_;
313 : };
314 :
315 : namespace {
316 : DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock)
317 : }
318 :
319 : Time Time::Now() { return GetClock()->Now(); }
320 :
321 : Time Time::NowFromSystemTime() { return GetClock()->NowFromSystemTime(); }
322 :
323 : // Time between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01).
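// (369 years plus 89 leap days: 134774 days * 86400 s = 11644473600 seconds.)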
324 : static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};
325 :
326 : Time Time::FromFiletime(FILETIME ft) {
327 : if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
328 : return Time();
329 : }
330 : if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
331 : ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
332 : return Max();
333 : }
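// A FILETIME counts 100-nanosecond intervals since 1601-01-01; dividing the
// combined 64-bit value by 10 converts it to microseconds.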
334 : int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
335 : (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
336 : return Time(us - kTimeToEpochInMicroseconds);
337 : }
338 :
339 :
340 : FILETIME Time::ToFiletime() const {
341 : DCHECK_GE(us_, 0);
342 : FILETIME ft;
343 : if (IsNull()) {
344 : ft.dwLowDateTime = 0;
345 : ft.dwHighDateTime = 0;
346 : return ft;
347 : }
348 : if (IsMax()) {
349 : ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
350 : ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
351 : return ft;
352 : }
353 : uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
354 : ft.dwLowDateTime = static_cast<DWORD>(us);
355 : ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
356 : return ft;
357 : }
358 :
359 : #elif V8_OS_POSIX
360 :
361 1147927 : Time Time::Now() {
362 : struct timeval tv;
363 1147927 : int result = gettimeofday(&tv, nullptr);
364 : DCHECK_EQ(0, result);
365 : USE(result);
366 1147927 : return FromTimeval(tv);
367 : }
368 :
369 :
370 217 : Time Time::NowFromSystemTime() {
371 217 : return Now();
372 : }
373 :
374 :
375 74663 : Time Time::FromTimespec(struct timespec ts) {
376 : DCHECK_GE(ts.tv_nsec, 0);
377 : DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
378 74663 : if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
379 2 : return Time();
380 : }
381 74661 : if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT
382 : ts.tv_sec == std::numeric_limits<time_t>::max()) {
383 : return Max();
384 : }
385 74660 : return Time(ts.tv_sec * kMicrosecondsPerSecond +
386 74660 : ts.tv_nsec / kNanosecondsPerMicrosecond);
387 : }
388 :
389 :
390 74878 : struct timespec Time::ToTimespec() const {
391 : struct timespec ts;
392 74878 : if (IsNull()) {
393 : ts.tv_sec = 0;
394 : ts.tv_nsec = 0;
395 2 : return ts;
396 : }
397 74876 : if (IsMax()) {
398 : ts.tv_sec = std::numeric_limits<time_t>::max();
399 : ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
400 1 : return ts;
401 : }
402 74875 : ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
403 74875 : ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
404 74875 : return ts;
405 : }
406 :
407 :
408 5 : Time Time::FromTimeval(struct timeval tv) {
409 : DCHECK_GE(tv.tv_usec, 0);
410 : DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
411 1147932 : if (tv.tv_usec == 0 && tv.tv_sec == 0) {
412 2 : return Time();
413 : }
414 1147930 : if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
415 : tv.tv_sec == std::numeric_limits<time_t>::max()) {
416 : return Max();
417 : }
418 1147929 : return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
419 : }
420 :
421 :
422 5 : struct timeval Time::ToTimeval() const {
423 : struct timeval tv;
424 5 : if (IsNull()) {
425 : tv.tv_sec = 0;
426 : tv.tv_usec = 0;
427 2 : return tv;
428 : }
429 3 : if (IsMax()) {
430 : tv.tv_sec = std::numeric_limits<time_t>::max();
431 : tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
432 1 : return tv;
433 : }
434 2 : tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
435 2 : tv.tv_usec = us_ % kMicrosecondsPerSecond;
436 2 : return tv;
437 : }
438 :
439 : #endif // V8_OS_WIN
440 :
441 : // static
442 44966821 : TimeTicks TimeTicks::HighResolutionNow() {
443 : // A DCHECK of TimeTicks::IsHighResolution() was removed from here, as it
444 : // turns out this path is used in the wild for logs and counters.
445 : //
446 : // TODO(hpayer): We may eventually want to split TimedHistograms based on
447 : // low-resolution clocks to avoid polluting metrics.
448 44966821 : return TimeTicks::Now();
449 : }
450 :
451 1 : Time Time::FromJsTime(double ms_since_epoch) {
452 : // The epoch is a valid time, so this constructor doesn't interpret
453 : // 0 as the null time.
454 1 : if (ms_since_epoch == std::numeric_limits<double>::max()) {
455 : return Max();
456 : }
457 : return Time(
458 1 : static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
459 : }
460 :
461 :
462 1147705 : double Time::ToJsTime() const {
463 1147705 : if (IsNull()) {
464 : // Preserve 0 so the invalid result doesn't depend on the platform.
465 : return 0;
466 : }
467 1147705 : if (IsMax()) {
468 : // Preserve max without offset to prevent overflow.
469 : return std::numeric_limits<double>::max();
470 : }
471 1147705 : return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
472 : }
473 :
474 :
475 0 : std::ostream& operator<<(std::ostream& os, const Time& time) {
476 0 : return os << time.ToJsTime();
477 : }
478 :
479 :
480 : #if V8_OS_WIN
481 :
482 : namespace {
483 :
484 : // We define a wrapper to adapt between the __stdcall and __cdecl call of the
485 : // mock function, and to avoid a static constructor. Assigning an import to a
486 : // function pointer directly would require setup code to fetch from the IAT.
487 : DWORD timeGetTimeWrapper() { return timeGetTime(); }
488 :
489 : DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
490 :
491 : // A structure holding the most significant bits of "last seen" and a
492 : // "rollover" counter.
493 : union LastTimeAndRolloversState {
494 : // The state as a single 32-bit opaque value.
495 : int32_t as_opaque_32;
496 :
497 : // The state as usable values.
498 : struct {
499 : // The top 8 bits of the "last" time. This is enough to check for rollovers
500 : // and the small bit-size means fewer CompareAndSwap operations to store
501 : // changes in state, which in turn makes for fewer retries.
502 : uint8_t last_8;
503 : // A count of the number of detected rollovers. Using this as bits 47-32
504 : // of the upper half of a 64-bit value results in a 48-bit tick counter.
505 : // This extends the total rollover period from about 49 days to about 8800
506 : // years while still allowing it to be stored with last_8 in a single
507 : // 32-bit value.
508 : uint16_t rollovers;
509 : } as_values;
510 : };
511 : std::atomic<int32_t> g_last_time_and_rollovers{0};
512 : static_assert(sizeof(LastTimeAndRolloversState) <=
513 : sizeof(g_last_time_and_rollovers),
514 : "LastTimeAndRolloversState does not fit in a single atomic word");
515 :
516 : // We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
517 : // because it returns the number of milliseconds since Windows has started,
518 : // which will roll over the 32-bit value every ~49 days. We try to track
519 : // rollover ourselves, which works if TimeTicks::Now() is called at least every
520 : // 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
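// (2^32 ms is roughly 49.7 days; using the 16-bit rollover count as bits 47-32
// extends the effective counter to 48 bits of milliseconds.)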
521 : TimeTicks RolloverProtectedNow() {
522 : LastTimeAndRolloversState state;
523 : DWORD now; // DWORD is always unsigned 32 bits.
524 :
525 : // Fetch the "now" and "last" tick values, updating "last" with "now" and
526 : // incrementing the "rollovers" counter if the tick-value has wrapped back
527 : // around. Atomic operations ensure that both "last" and "rollovers" are
528 : // always updated together.
529 : int32_t original = g_last_time_and_rollovers.load(std::memory_order_acquire);
530 : while (true) {
531 : state.as_opaque_32 = original;
532 : now = g_tick_function();
533 : uint8_t now_8 = static_cast<uint8_t>(now >> 24);
534 : if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
535 : state.as_values.last_8 = now_8;
536 :
537 : // If the state hasn't changed, exit the loop.
538 : if (state.as_opaque_32 == original) break;
539 :
540 : // Save the changed state. If the existing value is unchanged from the
541 : // original, exit the loop.
542 : if (g_last_time_and_rollovers.compare_exchange_weak(
543 : original, state.as_opaque_32, std::memory_order_acq_rel)) {
544 : break;
545 : }
546 :
547 : // Another thread has done something in between so retry from the top.
548 : // {original} has been updated by the {compare_exchange_weak}.
549 : }
550 :
551 : return TimeTicks() +
552 : TimeDelta::FromMilliseconds(
553 : now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
554 : }
555 :
556 : // Discussion of tick counter options on Windows:
557 : //
558 : // (1) CPU cycle counter. (Retrieved via RDTSC)
559 : // The CPU counter provides the highest-resolution time stamp and is the least
560 : // expensive to retrieve. However, on older CPUs, two issues can affect its
561 : // reliability: first, it is maintained per processor and not synchronized
562 : // between processors; second, the counters can change frequency due to thermal
563 : // and power changes, and can stop in some states.
564 : //
565 : // (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
566 : // resolution (<1 microsecond) time stamp. On most hardware running today, it
567 : // auto-detects and uses the constant-rate RDTSC counter to provide extremely
568 : // efficient and reliable time stamps.
569 : //
570 : // On older CPUs where RDTSC is unreliable, it falls back to using more
571 : // expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
572 : // PM timer, and can involve system calls; and all this is up to the HAL (with
573 : // some help from ACPI). According to
574 : // http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
575 : // worst case, it gets the counter from the rollover interrupt on the
576 : // programmable interrupt timer. In best cases, the HAL may conclude that the
577 : // RDTSC counter runs at a constant frequency, then it uses that instead. On
578 : // multiprocessor machines, it will try to verify the values returned from
579 : // RDTSC on each processor are consistent with each other, and apply a handful
580 : // of workarounds for known buggy hardware. In other words, QPC is supposed to
581 : // give consistent results on a multiprocessor computer, but for older CPUs it
582 : // can be unreliable due to bugs in the BIOS or HAL.
583 : //
584 : // (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
585 : // milliseconds) time stamp but is comparatively less expensive to retrieve and
586 : // more reliable. Time::EnableHighResolutionTimer() and
587 : // Time::ActivateHighResolutionTimer() can be called to alter the resolution of
588 : // this timer; and also other Windows applications can alter it, affecting this
589 : // one.
590 :
591 : TimeTicks InitialTimeTicksNowFunction();
592 :
593 : // See the threading notes in InitializeTimeTicksNowFunctionPointer() for
594 : // details on how concurrent reads/writes to these globals have been made safe.
595 : using TimeTicksNowFunction = decltype(&TimeTicks::Now);
596 : TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
597 : int64_t g_qpc_ticks_per_second = 0;
598 :
599 : // As of January 2015, use of <atomic> is forbidden in Chromium code. This is
600 : // what std::atomic_thread_fence does on Windows on all Intel architectures when
601 : // the memory_order argument is anything but std::memory_order_seq_cst:
602 : #define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();
603 :
604 : TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
605 : // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
606 : // InitializeTimeTicksNowFunctionPointer(), has happened by this point.
607 : ATOMIC_THREAD_FENCE(memory_order_acquire);
608 :
609 : DCHECK_GT(g_qpc_ticks_per_second, 0);
610 :
611 : // If the QPC Value is below the overflow threshold, we proceed with
612 : // simple multiply and divide.
613 : if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
614 : return TimeDelta::FromMicroseconds(
615 : qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
616 : }
617 : // Otherwise, calculate microseconds in a roundabout manner to avoid
618 : // overflow and precision issues.
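// (For example, at a hypothetical QPC frequency of 10 MHz, the direct
// qpc_value * kMicrosecondsPerSecond product would overflow int64_t once
// qpc_value exceeds about 9.2e12 ticks, i.e. after roughly 10.7 days.)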
619 : int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
620 : int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
621 : return TimeDelta::FromMicroseconds(
622 : (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
623 : ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
624 : g_qpc_ticks_per_second));
625 : }
626 :
627 : TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }
628 :
629 : bool IsBuggyAthlon(const CPU& cpu) {
630 : // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
631 : return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
632 : }
633 :
634 : void InitializeTimeTicksNowFunctionPointer() {
635 : LARGE_INTEGER ticks_per_sec = {};
636 : if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;
637 :
638 : // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
639 : // the low-resolution clock.
640 : //
641 : // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
642 : // will still use the low-resolution clock. A CPU lacking a non-stop time
643 : // counter will cause Windows to provide an alternate QPC implementation that
644 : // works, but is expensive to use. Certain Athlon CPUs are known to make the
645 : // QPC implementation unreliable.
646 : //
647 : // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
648 : // ~72% of users fall within this category.
649 : TimeTicksNowFunction now_function;
650 : CPU cpu;
651 : if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter() ||
652 : IsBuggyAthlon(cpu)) {
653 : now_function = &RolloverProtectedNow;
654 : } else {
655 : now_function = &QPCNow;
656 : }
657 :
658 : // Threading note 1: In an unlikely race condition, it's possible for two or
659 : // more threads to enter InitializeTimeTicksNowFunctionPointer() in parallel.
660 : // This is not a problem since all threads should end up writing out the same
661 : // values to the global variables.
662 : //
663 : // Threading note 2: A release fence is placed here to ensure, from the
664 : // perspective of other threads using the function pointers, that the
665 : // assignment to |g_qpc_ticks_per_second| happens before the function pointers
666 : // are changed.
667 : g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
668 : ATOMIC_THREAD_FENCE(memory_order_release);
669 : g_time_ticks_now_function = now_function;
670 : }
671 :
672 : TimeTicks InitialTimeTicksNowFunction() {
673 : InitializeTimeTicksNowFunctionPointer();
674 : return g_time_ticks_now_function();
675 : }
676 :
677 : #undef ATOMIC_THREAD_FENCE
678 :
679 : } // namespace
680 :
681 : // static
682 : TimeTicks TimeTicks::Now() {
683 : // Make sure we never return 0 here.
684 : TimeTicks ticks(g_time_ticks_now_function());
685 : DCHECK(!ticks.IsNull());
686 : return ticks;
687 : }
688 :
689 : // static
690 : bool TimeTicks::IsHighResolution() {
691 : if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
692 : InitializeTimeTicksNowFunctionPointer();
693 : return g_time_ticks_now_function == &QPCNow;
694 : }
695 :
696 : #else // V8_OS_WIN
697 :
698 45461986 : TimeTicks TimeTicks::Now() {
699 : int64_t ticks;
700 : #if V8_OS_MACOSX
701 : static struct mach_timebase_info info;
702 : if (info.denom == 0) {
703 : kern_return_t result = mach_timebase_info(&info);
704 : DCHECK_EQ(KERN_SUCCESS, result);
705 : USE(result);
706 : }
707 : ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
708 : info.numer / info.denom);
709 : #elif V8_OS_SOLARIS
710 : ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
711 : #elif V8_OS_POSIX
712 : ticks = ClockNow(CLOCK_MONOTONIC);
713 : #else
714 : #error platform does not implement TimeTicks::HighResolutionNow.
715 : #endif // V8_OS_MACOSX
716 : // Make sure we never return 0 here.
717 45470931 : return TimeTicks(ticks + 1);
718 : }
719 :
720 : // static
721 1 : bool TimeTicks::IsHighResolution() {
722 : #if V8_OS_MACOSX
723 : return true;
724 : #elif V8_OS_POSIX
725 2 : static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
726 1 : return is_high_resolution;
727 : #else
728 : return true;
729 : #endif
730 : }
731 :
732 : #endif // V8_OS_WIN
733 :
734 :
735 1 : bool ThreadTicks::IsSupported() {
736 : #if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
737 : defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
738 1 : return true;
739 : #elif defined(V8_OS_WIN)
740 : return IsSupportedWin();
741 : #else
742 : return false;
743 : #endif
744 : }
745 :
746 :
747 1923 : ThreadTicks ThreadTicks::Now() {
748 : #if V8_OS_MACOSX
749 : return ThreadTicks(ComputeThreadTicks());
750 : #elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
751 : defined(V8_OS_ANDROID)
752 1923 : return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
753 : #elif V8_OS_SOLARIS
754 : return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
755 : #elif V8_OS_WIN
756 : return ThreadTicks::GetForThread(::GetCurrentThread());
757 : #else
758 : UNREACHABLE();
759 : #endif
760 : }
761 :
762 :
763 : #if V8_OS_WIN
764 : ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
765 : DCHECK(IsSupported());
766 :
767 : // Get the number of TSC ticks used by the current thread.
768 : ULONG64 thread_cycle_time = 0;
769 : ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);
770 :
771 : // Get the frequency of the TSC.
772 : double tsc_ticks_per_second = TSCTicksPerSecond();
773 : if (tsc_ticks_per_second == 0)
774 : return ThreadTicks();
775 :
776 : // Return the CPU time of the current thread.
777 : double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
778 : return ThreadTicks(
779 : static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
780 : }
781 :
782 : // static
783 : bool ThreadTicks::IsSupportedWin() {
784 : static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
785 : !IsQPCReliable();
786 : return is_supported;
787 : }
788 :
789 : // static
790 : void ThreadTicks::WaitUntilInitializedWin() {
791 : while (TSCTicksPerSecond() == 0)
792 : ::Sleep(10);
793 : }
794 :
795 : #ifdef V8_HOST_ARCH_ARM64
796 : #define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
797 : #else
798 : #define ReadCycleCounter() __rdtsc()
799 : #endif
800 :
801 : double ThreadTicks::TSCTicksPerSecond() {
802 : DCHECK(IsSupported());
803 :
804 : // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
805 : // frequency, because there is no guarantee that the TSC frequency is equal to
806 : // the performance counter frequency.
807 :
808 : // The TSC frequency is cached in a static variable because it takes some time
809 : // to compute it.
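// The frequency is estimated as (elapsed TSC ticks) / (elapsed seconds), where
// the elapsed seconds are measured with the performance counter between the
// first call to this function and the current call.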
810 : static double tsc_ticks_per_second = 0;
811 : if (tsc_ticks_per_second != 0)
812 : return tsc_ticks_per_second;
813 :
814 : // Increase the thread priority to reduce the chances of having a context
815 : // switch during a reading of the TSC and the performance counter.
816 : int previous_priority = ::GetThreadPriority(::GetCurrentThread());
817 : ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
818 :
819 : // The first time that this function is called, make an initial reading of the
820 : // TSC and the performance counter.
821 : static const uint64_t tsc_initial = ReadCycleCounter();
822 : static const uint64_t perf_counter_initial = QPCNowRaw();
823 :
824 : // Make another reading of the TSC and the performance counter every time
825 : // that this function is called.
826 : uint64_t tsc_now = ReadCycleCounter();
827 : uint64_t perf_counter_now = QPCNowRaw();
828 :
829 : // Reset the thread priority.
830 : ::SetThreadPriority(::GetCurrentThread(), previous_priority);
831 :
832 : // Make sure that at least 50 ms elapsed between the 2 readings. The first
833 : // time that this function is called, we don't expect this to be the case.
834 : // Note: The longer the elapsed time between the 2 readings is, the more
835 : // accurate the computed TSC frequency will be. The 50 ms value was
836 : // chosen because local benchmarks show that it allows us to get a
837 : // stddev of less than 1 tick/us between multiple runs.
838 : // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
839 : // this will never fail on systems that run XP or later.
840 : // https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
841 : LARGE_INTEGER perf_counter_frequency = {};
842 : ::QueryPerformanceFrequency(&perf_counter_frequency);
843 : DCHECK_GE(perf_counter_now, perf_counter_initial);
844 : uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
845 : double elapsed_time_seconds =
846 : perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
847 :
848 : const double kMinimumEvaluationPeriodSeconds = 0.05;
849 : if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
850 : return 0;
851 :
852 : // Compute the frequency of the TSC.
853 : DCHECK_GE(tsc_now, tsc_initial);
854 : uint64_t tsc_ticks = tsc_now - tsc_initial;
855 : tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
856 :
857 : return tsc_ticks_per_second;
858 : }
859 : #undef ReadCycleCounter
860 : #endif // V8_OS_WIN
861 :
862 : } // namespace base
863 : } // namespace v8