Line data Source code
1 : // Copyright 2013 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/base/platform/time.h"
6 :
7 : #if V8_OS_POSIX
8 : #include <fcntl.h> // for O_RDONLY
9 : #include <sys/time.h>
10 : #include <unistd.h>
11 : #endif
12 : #if V8_OS_MACOSX
13 : #include <mach/mach.h>
14 : #include <mach/mach_time.h>
15 : #include <pthread.h>
16 : #endif
17 :
18 : #include <cstring>
19 : #include <ostream>
20 :
21 : #if V8_OS_WIN
22 : #include "src/base/lazy-instance.h"
23 : #include "src/base/win32-headers.h"
24 : #endif
25 : #include "src/base/cpu.h"
26 : #include "src/base/logging.h"
27 : #include "src/base/platform/platform.h"
28 :
29 : namespace {
30 :
31 : #if V8_OS_MACOSX
32 : int64_t ComputeThreadTicks() {
33 : mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
34 : thread_basic_info_data_t thread_info_data;
35 : kern_return_t kr = thread_info(
36 : pthread_mach_thread_np(pthread_self()),
37 : THREAD_BASIC_INFO,
38 : reinterpret_cast<thread_info_t>(&thread_info_data),
39 : &thread_info_count);
40 : CHECK_EQ(kr, KERN_SUCCESS);
41 :
42 : // We can add the user and system seconds into an {int64_t} without overflow.
43 : CHECK_LE(thread_info_data.user_time.seconds,
44 : std::numeric_limits<int64_t>::max() -
45 : thread_info_data.system_time.seconds);
46 : int64_t seconds =
47 : thread_info_data.user_time.seconds + thread_info_data.system_time.seconds;
48 : // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
49 : // in [0, 2 * kMicrosecondsPerSecond) must result in a valid {int64_t}.
50 : static constexpr int64_t kSecondsLimit =
51 : (std::numeric_limits<int64_t>::max() /
52 : v8::base::Time::kMicrosecondsPerSecond) -
53 : 2;
54 : CHECK_GT(kSecondsLimit, seconds);
55 : int64_t micros = seconds * v8::base::Time::kMicrosecondsPerSecond;
56 : micros += (thread_info_data.user_time.microseconds +
57 : thread_info_data.system_time.microseconds);
58 : return micros;
59 : }
60 : #elif V8_OS_POSIX
61 : // Helper function to get results from clock_gettime() and convert them to a
62 : // microsecond timebase. The minimum requirement is that CLOCK_MONOTONIC is
63 : // supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
64 : // _POSIX_MONOTONIC_CLOCK to -1.
65 : V8_INLINE int64_t ClockNow(clockid_t clk_id) {
66 : #if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
67 : defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
68 : // On AIX, clock_gettime() for CLOCK_THREAD_CPUTIME_ID only has a resolution
69 : // of 10ms; the thread_cputime API provides the time in nanoseconds instead.
70 : #if defined(V8_OS_AIX)
71 : thread_cputime_t tc;
72 : if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
73 : if (thread_cputime(-1, &tc) != 0) {
74 : UNREACHABLE();
75 : }
76 : }
77 : #endif
78 : struct timespec ts;
79 54576864 : if (clock_gettime(clk_id, &ts) != 0) {
80 0 : UNREACHABLE();
81 : }
82 : // Multiplying the seconds by {kMicrosecondsPerSecond}, and adding something
83 : // in [0, kMicrosecondsPerSecond) must result in a valid {int64_t}.
84 : static constexpr int64_t kSecondsLimit =
85 : (std::numeric_limits<int64_t>::max() /
86 : v8::base::Time::kMicrosecondsPerSecond) -
87 : 1;
88 54621463 : CHECK_GT(kSecondsLimit, ts.tv_sec);
89 54621463 : int64_t result = int64_t{ts.tv_sec} * v8::base::Time::kMicrosecondsPerSecond;
90 : #if defined(V8_OS_AIX)
91 : if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
92 : result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
93 : } else {
94 : result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
95 : }
96 : #else
97 54621463 : result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
98 : #endif
99 : return result;
100 : #else // Monotonic clock not supported.
101 : return 0;
102 : #endif
103 : }
104 :
105 : V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
106 : // Limit the duration of the timer resolution measurement to 100 ms. If we
107 : // cannot measure the timer resolution within this time, we assume a low
108 : // resolution timer.
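// Descriptive note: the clock counts as high resolution if, within this
// window, we observe it advancing by at most 1 microsecond between two
// consecutive readings (see the return value below).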
109 : int64_t end =
110 21339 : ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond;
111 : int64_t start, delta;
112 : do {
113 : start = ClockNow(clk_id);
114 : // Loop until we can detect that the clock has changed. Non-high-resolution
115 : // timers will increment in chunks, e.g. 15ms. By spinning until we see a
116 : // clock change, we detect the minimum time between measurements.
117 : do {
118 209703 : delta = ClockNow(clk_id) - start;
119 209703 : } while (delta == 0);
120 21369 : } while (delta > 1 && start < end);
121 : return delta <= 1;
122 : }
123 :
124 : #elif V8_OS_WIN
125 : V8_INLINE bool IsQPCReliable() {
126 : v8::base::CPU cpu;
127 : // Despite its name, this returns true when QueryPerformanceCounter is unreliable, as on Athlon X2 CPUs (e.g. model 15).
128 : return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
129 : }
130 :
131 : // Returns the current value of the performance counter.
132 : V8_INLINE uint64_t QPCNowRaw() {
133 : LARGE_INTEGER perf_counter_now = {};
134 : // According to the MSDN documentation for QueryPerformanceCounter(), this
135 : // will never fail on systems that run XP or later.
136 : // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
137 : BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
138 : DCHECK(result);
139 : USE(result);
140 : return perf_counter_now.QuadPart;
141 : }
142 : #endif // V8_OS_MACOSX
143 :
144 :
145 : } // namespace
146 :
147 : namespace v8 {
148 : namespace base {
149 :
150 2 : int TimeDelta::InDays() const {
151 2 : if (IsMax()) {
152 : // Preserve max to prevent overflow.
153 : return std::numeric_limits<int>::max();
154 : }
155 1 : return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
156 : }
157 :
158 2 : int TimeDelta::InHours() const {
159 2 : if (IsMax()) {
160 : // Preserve max to prevent overflow.
161 : return std::numeric_limits<int>::max();
162 : }
163 1 : return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
164 : }
165 :
166 2 : int TimeDelta::InMinutes() const {
167 2 : if (IsMax()) {
168 : // Preserve max to prevent overflow.
169 : return std::numeric_limits<int>::max();
170 : }
171 1 : return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
172 : }
173 :
174 8121 : double TimeDelta::InSecondsF() const {
175 8121 : if (IsMax()) {
176 : // Preserve max to prevent overflow.
177 : return std::numeric_limits<double>::infinity();
178 : }
179 8120 : return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
180 : }
181 :
182 2 : int64_t TimeDelta::InSeconds() const {
183 2 : if (IsMax()) {
184 : // Preserve max to prevent overflow.
185 : return std::numeric_limits<int64_t>::max();
186 : }
187 1 : return delta_ / Time::kMicrosecondsPerSecond;
188 : }
189 :
190 6965816 : double TimeDelta::InMillisecondsF() const {
191 6965816 : if (IsMax()) {
192 : // Preserve max to prevent overflow.
193 : return std::numeric_limits<double>::infinity();
194 : }
195 6965823 : return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
196 : }
197 :
198 2 : int64_t TimeDelta::InMilliseconds() const {
199 2 : if (IsMax()) {
200 : // Preserve max to prevent overflow.
201 : return std::numeric_limits<int64_t>::max();
202 : }
203 1 : return delta_ / Time::kMicrosecondsPerMillisecond;
204 : }
205 :
206 1 : int64_t TimeDelta::InMillisecondsRoundedUp() const {
207 1 : if (IsMax()) {
208 : // Preserve max to prevent overflow.
209 : return std::numeric_limits<int64_t>::max();
210 : }
211 0 : return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
212 0 : Time::kMicrosecondsPerMillisecond;
213 : }
214 :
215 4020926 : int64_t TimeDelta::InMicroseconds() const {
216 4020926 : if (IsMax()) {
217 : // Preserve max to prevent overflow.
218 : return std::numeric_limits<int64_t>::max();
219 : }
220 4020926 : return delta_;
221 : }
222 :
223 0 : int64_t TimeDelta::InNanoseconds() const {
224 0 : if (IsMax()) {
225 : // Preserve max to prevent overflow.
226 : return std::numeric_limits<int64_t>::max();
227 : }
228 0 : return delta_ * Time::kNanosecondsPerMicrosecond;
229 : }
230 :
231 :
232 : #if V8_OS_MACOSX
233 :
234 : TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
235 : DCHECK_GE(ts.tv_nsec, 0);
236 : DCHECK_LT(ts.tv_nsec,
237 : static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
238 : return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
239 : ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
240 : }
241 :
242 :
243 : struct mach_timespec TimeDelta::ToMachTimespec() const {
244 : struct mach_timespec ts;
245 : DCHECK_GE(delta_, 0);
246 : ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
247 : ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
248 : Time::kNanosecondsPerMicrosecond;
249 : return ts;
250 : }
251 :
252 : #endif // V8_OS_MACOSX
253 :
254 :
255 : #if V8_OS_POSIX
256 :
257 0 : TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
258 : DCHECK_GE(ts.tv_nsec, 0);
259 : DCHECK_LT(ts.tv_nsec,
260 : static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
261 0 : return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
262 0 : ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
263 : }
264 :
265 :
266 0 : struct timespec TimeDelta::ToTimespec() const {
267 : struct timespec ts;
268 0 : ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
269 0 : ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
270 : Time::kNanosecondsPerMicrosecond;
271 0 : return ts;
272 : }
273 :
274 : #endif // V8_OS_POSIX
275 :
276 :
277 : #if V8_OS_WIN
278 :
279 : // We implement time using the high-resolution timers so that we can get
280 : // timeouts which are smaller than 10-15ms. To avoid any drift, we
281 : // periodically resync the internal clock to the system clock.
282 : class Clock final {
283 : public:
284 : Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
285 :
286 : Time Now() {
287 : // Time between resynchronizations of the tick-based clock with the system clock for this API (1 minute).
288 : const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
289 :
290 : MutexGuard lock_guard(&mutex_);
291 :
292 : // Determine current time and ticks.
293 : TimeTicks ticks = GetSystemTicks();
294 : Time time = GetSystemTime();
295 :
296 : // Check if we need to synchronize with the system clock due to a backwards
297 : // time change or the amount of time elapsed.
298 : TimeDelta elapsed = ticks - initial_ticks_;
299 : if (time < initial_time_ || elapsed > kMaxElapsedTime) {
300 : initial_ticks_ = ticks;
301 : initial_time_ = time;
302 : return time;
303 : }
304 :
305 : return initial_time_ + elapsed;
306 : }
307 :
308 : Time NowFromSystemTime() {
309 : MutexGuard lock_guard(&mutex_);
310 : initial_ticks_ = GetSystemTicks();
311 : initial_time_ = GetSystemTime();
312 : return initial_time_;
313 : }
314 :
315 : private:
316 : static TimeTicks GetSystemTicks() {
317 : return TimeTicks::Now();
318 : }
319 :
320 : static Time GetSystemTime() {
321 : FILETIME ft;
322 : ::GetSystemTimeAsFileTime(&ft);
323 : return Time::FromFiletime(ft);
324 : }
325 :
326 : TimeTicks initial_ticks_;
327 : Time initial_time_;
328 : Mutex mutex_;
329 : };
330 :
331 : namespace {
332 : DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock)
333 : }
334 :
335 : Time Time::Now() { return GetClock()->Now(); }
336 :
337 : Time Time::NowFromSystemTime() { return GetClock()->NowFromSystemTime(); }
338 :
339 : // Offset between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01): 11644473600 seconds, expressed in microseconds.
340 : static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};
341 :
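// FILETIME stores a 64-bit count of 100 ns intervals since the Windows epoch
// (1601-01-01 UTC); dividing by 10 converts it to microseconds, after which the
// epoch offset above rebases the value onto the Unix epoch.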
342 : Time Time::FromFiletime(FILETIME ft) {
343 : if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
344 : return Time();
345 : }
346 : if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
347 : ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
348 : return Max();
349 : }
350 : int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
351 : (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
352 : return Time(us - kTimeToEpochInMicroseconds);
353 : }
354 :
355 :
356 : FILETIME Time::ToFiletime() const {
357 : DCHECK_GE(us_, 0);
358 : FILETIME ft;
359 : if (IsNull()) {
360 : ft.dwLowDateTime = 0;
361 : ft.dwHighDateTime = 0;
362 : return ft;
363 : }
364 : if (IsMax()) {
365 : ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
366 : ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
367 : return ft;
368 : }
369 : uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
370 : ft.dwLowDateTime = static_cast<DWORD>(us);
371 : ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
372 : return ft;
373 : }
374 :
375 : #elif V8_OS_POSIX
376 :
377 1245814 : Time Time::Now() {
378 : struct timeval tv;
379 1245814 : int result = gettimeofday(&tv, nullptr);
380 : DCHECK_EQ(0, result);
381 : USE(result);
382 1245814 : return FromTimeval(tv);
383 : }
384 :
385 :
386 217 : Time Time::NowFromSystemTime() {
387 217 : return Now();
388 : }
389 :
390 :
391 77340 : Time Time::FromTimespec(struct timespec ts) {
392 : DCHECK_GE(ts.tv_nsec, 0);
393 : DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
394 77340 : if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
395 2 : return Time();
396 : }
397 77338 : if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT
398 : ts.tv_sec == std::numeric_limits<time_t>::max()) {
399 : return Max();
400 : }
401 77337 : return Time(ts.tv_sec * kMicrosecondsPerSecond +
402 77337 : ts.tv_nsec / kNanosecondsPerMicrosecond);
403 : }
404 :
405 :
406 77555 : struct timespec Time::ToTimespec() const {
407 : struct timespec ts;
408 77555 : if (IsNull()) {
409 : ts.tv_sec = 0;
410 : ts.tv_nsec = 0;
411 2 : return ts;
412 : }
413 77553 : if (IsMax()) {
414 : ts.tv_sec = std::numeric_limits<time_t>::max();
415 : ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
416 1 : return ts;
417 : }
418 77552 : ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
419 77552 : ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
420 77552 : return ts;
421 : }
422 :
423 :
424 5 : Time Time::FromTimeval(struct timeval tv) {
425 : DCHECK_GE(tv.tv_usec, 0);
426 : DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
427 1245819 : if (tv.tv_usec == 0 && tv.tv_sec == 0) {
428 2 : return Time();
429 : }
430 1245817 : if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
431 : tv.tv_sec == std::numeric_limits<time_t>::max()) {
432 : return Max();
433 : }
434 1245816 : return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
435 : }
436 :
437 :
438 5 : struct timeval Time::ToTimeval() const {
439 : struct timeval tv;
440 5 : if (IsNull()) {
441 : tv.tv_sec = 0;
442 : tv.tv_usec = 0;
443 2 : return tv;
444 : }
445 3 : if (IsMax()) {
446 : tv.tv_sec = std::numeric_limits<time_t>::max();
447 : tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
448 1 : return tv;
449 : }
450 2 : tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
451 2 : tv.tv_usec = us_ % kMicrosecondsPerSecond;
452 2 : return tv;
453 : }
454 :
455 : #endif // V8_OS_WIN
456 :
457 : // static
458 53774634 : TimeTicks TimeTicks::HighResolutionNow() {
459 : // A DCHECK of TimeTicks::IsHighResolution() was removed from here, as it
460 : // turns out this path is used in the wild for logs and counters.
461 : //
462 : // TODO(hpayer): We may eventually want to split TimedHistograms based on
463 : // low-resolution clocks to avoid polluting metrics.
464 53774634 : return TimeTicks::Now();
465 : }
466 :
467 1 : Time Time::FromJsTime(double ms_since_epoch) {
468 : // The epoch is a valid time, so this constructor doesn't interpret
469 : // 0 as the null time.
470 1 : if (ms_since_epoch == std::numeric_limits<double>::max()) {
471 : return Max();
472 : }
473 : return Time(
474 1 : static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
475 : }
476 :
477 :
478 1245576 : double Time::ToJsTime() const {
479 1245576 : if (IsNull()) {
480 : // Preserve 0 so the invalid result doesn't depend on the platform.
481 : return 0;
482 : }
483 1245576 : if (IsMax()) {
484 : // Preserve max without offset to prevent overflow.
485 : return std::numeric_limits<double>::max();
486 : }
487 1245576 : return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
488 : }
489 :
490 :
491 0 : std::ostream& operator<<(std::ostream& os, const Time& time) {
492 0 : return os << time.ToJsTime();
493 : }
494 :
495 :
496 : #if V8_OS_WIN
497 :
498 : namespace {
499 :
500 : // We define a wrapper to adapt between the __stdcall and __cdecl calling
501 : // conventions of the mock function, and to avoid a static constructor. Assigning
502 : // an import to a function pointer directly would require setup code to fetch from the IAT.
503 : DWORD timeGetTimeWrapper() { return timeGetTime(); }
504 :
505 : DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
506 :
507 : // A structure holding the most significant bits of "last seen" and a
508 : // "rollover" counter.
509 : union LastTimeAndRolloversState {
510 : // The state as a single 32-bit opaque value.
511 : int32_t as_opaque_32;
512 :
513 : // The state as usable values.
514 : struct {
515 : // The top 8 bits of the "last" time. This is enough to check for rollovers
516 : // and the small bit-size means fewer CompareAndSwap operations to store
517 : // changes in state, which in turn makes for fewer retries.
518 : uint8_t last_8;
519 : // A count of the number of detected rollovers. Using this as bits 47-32
520 : // of the upper half of a 64-bit value results in a 48-bit tick counter.
521 : // This extends the total rollover period from about 49 days to about 8800
522 : // years while still allowing it to be stored with last_8 in a single
523 : // 32-bit value.
524 : uint16_t rollovers;
525 : } as_values;
526 : };
527 : std::atomic<int32_t> g_last_time_and_rollovers{0};
528 : static_assert(sizeof(LastTimeAndRolloversState) <=
529 : sizeof(g_last_time_and_rollovers),
530 : "LastTimeAndRolloversState does not fit in a single atomic word");
531 :
532 : // We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
533 : // because it returns the number of milliseconds since Windows started, which
534 : // will roll over the 32-bit value every ~49 days. We try to track
535 : // rollover ourselves, which works if TimeTicks::Now() is called at least every
536 : // 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
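// Illustrative arithmetic: 2^32 ms is about 49.7 days, which is where the ~49
// day figure comes from; the extra 16 rollover bits extend the range to
// 2^48 ms, i.e. roughly 8,900 years.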
537 : TimeTicks RolloverProtectedNow() {
538 : LastTimeAndRolloversState state;
539 : DWORD now; // DWORD is always unsigned 32 bits.
540 :
541 : // Fetch the "now" and "last" tick values, updating "last" with "now" and
542 : // incrementing the "rollovers" counter if the tick-value has wrapped back
543 : // around. Atomic operations ensure that both "last" and "rollovers" are
544 : // always updated together.
545 : int32_t original = g_last_time_and_rollovers.load(std::memory_order_acquire);
546 : while (true) {
547 : state.as_opaque_32 = original;
548 : now = g_tick_function();
549 : uint8_t now_8 = static_cast<uint8_t>(now >> 24);
550 : if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
551 : state.as_values.last_8 = now_8;
552 :
553 : // If the state hasn't changed, exit the loop.
554 : if (state.as_opaque_32 == original) break;
555 :
556 : // Try to save the changed state. If the stored value is still {original},
557 : // the compare-and-swap succeeds and we exit the loop.
558 : if (g_last_time_and_rollovers.compare_exchange_weak(
559 : original, state.as_opaque_32, std::memory_order_acq_rel)) {
560 : break;
561 : }
562 :
563 : // Another thread has done something in between so retry from the top.
564 : // {original} has been updated by the {compare_exchange_weak}.
565 : }
566 :
567 : return TimeTicks() +
568 : TimeDelta::FromMilliseconds(
569 : now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
570 : }
571 :
572 : // Discussion of tick counter options on Windows:
573 : //
574 : // (1) CPU cycle counter. (Retrieved via RDTSC)
575 : // The CPU counter provides the highest resolution time stamp and is the least
576 : // expensive to retrieve. However, on older CPUs, two issues can affect its
577 : // reliability: first, it is maintained per processor and not synchronized
578 : // between processors; second, the counters can change frequency due to thermal
579 : // and power changes, and stop in some power states.
580 : //
581 : // (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
582 : // resolution (<1 microsecond) time stamp. On most hardware running today, it
583 : // auto-detects and uses the constant-rate RDTSC counter to provide extremely
584 : // efficient and reliable time stamps.
585 : //
586 : // On older CPUs where RDTSC is unreliable, it falls back to using more
587 : // expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
588 : // PM timer, and can involve system calls; and all this is up to the HAL (with
589 : // some help from ACPI). According to
590 : // http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
591 : // worst case, it gets the counter from the rollover interrupt on the
592 : // programmable interrupt timer. In the best cases, the HAL may conclude that the
593 : // RDTSC counter runs at a constant frequency, then it uses that instead. On
594 : // multiprocessor machines, it will try to verify the values returned from
595 : // RDTSC on each processor are consistent with each other, and apply a handful
596 : // of workarounds for known buggy hardware. In other words, QPC is supposed to
597 : // give consistent results on a multiprocessor computer, but on older CPUs it
598 : // can be unreliable due to bugs in the BIOS or HAL.
599 : //
600 : // (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
601 : // milliseconds) time stamp but is comparatively less expensive to retrieve and
602 : // more reliable. Time::EnableHighResolutionTimer() and
603 : // Time::ActivateHighResolutionTimer() can be called to alter the resolution of
604 : // this timer; other Windows applications can also alter it, which affects this
605 : // one.
606 :
607 : TimeTicks InitialTimeTicksNowFunction();
608 :
609 : // See "threading notes" in InitializeNowFunctionPointer() for details on how
610 : // concurrent reads/writes to these globals has been made safe.
611 : using TimeTicksNowFunction = decltype(&TimeTicks::Now);
612 : TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
613 : int64_t g_qpc_ticks_per_second = 0;
614 :
615 : // As of January 2015, use of <atomic> is forbidden in Chromium code. This is
616 : // what std::atomic_thread_fence does on Windows on all Intel architectures when
617 : // the memory_order argument is anything but std::memory_order_seq_cst:
618 : #define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();
619 :
620 : TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
621 : // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
622 : // InitializeTimeTicksNowFunctionPointer(), has happened by this point.
623 : ATOMIC_THREAD_FENCE(memory_order_acquire);
624 :
625 : DCHECK_GT(g_qpc_ticks_per_second, 0);
626 :
627 : // If the QPC value is below the overflow threshold, we proceed with a
628 : // simple multiply and divide.
629 : if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
630 : return TimeDelta::FromMicroseconds(
631 : qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
632 : }
633 : // Otherwise, calculate microseconds in a roundabout manner to avoid
634 : // overflow and precision issues.
635 : int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
636 : int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
637 : return TimeDelta::FromMicroseconds(
638 : (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
639 : ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
640 : g_qpc_ticks_per_second));
641 : }
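// For a sense of scale (illustrative only, assuming a 10 MHz QPC frequency,
// which is common on modern Windows): qpc_value * kMicrosecondsPerSecond
// overflows an int64_t once qpc_value exceeds roughly 2^63 / 10^6 ~= 9.2e12
// ticks, i.e. after about 10.7 days of uptime, which is why the slower
// two-step computation above is kept for large counter values.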
642 :
643 : TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }
644 :
645 : bool IsBuggyAthlon(const CPU& cpu) {
646 : // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
647 : return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
648 : }
649 :
650 : void InitializeTimeTicksNowFunctionPointer() {
651 : LARGE_INTEGER ticks_per_sec = {};
652 : if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;
653 :
654 : // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
655 : // the low-resolution clock.
656 : //
657 : // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
658 : // will still use the low-resolution clock. A CPU lacking a non-stop time
659 : // counter will cause Windows to provide an alternate QPC implementation that
660 : // works, but is expensive to use. Certain Athlon CPUs are known to make the
661 : // QPC implementation unreliable.
662 : //
663 : // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
664 : // ~72% of users fall within this category.
665 : TimeTicksNowFunction now_function;
666 : CPU cpu;
667 : if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter() ||
668 : IsBuggyAthlon(cpu)) {
669 : now_function = &RolloverProtectedNow;
670 : } else {
671 : now_function = &QPCNow;
672 : }
673 :
674 : // Threading note 1: In an unlikely race condition, it's possible for two or
675 : // more threads to enter InitializeTimeTicksNowFunctionPointer() in parallel. This is
676 : // not a problem since all threads should end up writing out the same values
677 : // to the global variables.
678 : //
679 : // Threading note 2: A release fence is placed here to ensure, from the
680 : // perspective of other threads using the function pointers, that the
681 : // assignment to |g_qpc_ticks_per_second| happens before the function pointer
682 : // is changed.
683 : g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
684 : ATOMIC_THREAD_FENCE(memory_order_release);
685 : g_time_ticks_now_function = now_function;
686 : }
687 :
688 : TimeTicks InitialTimeTicksNowFunction() {
689 : InitializeTimeTicksNowFunctionPointer();
690 : return g_time_ticks_now_function();
691 : }
692 :
693 : #undef ATOMIC_THREAD_FENCE
694 :
695 : } // namespace
696 :
697 : // static
698 : TimeTicks TimeTicks::Now() {
699 : // Make sure we never return 0 here.
700 : TimeTicks ticks(g_time_ticks_now_function());
701 : DCHECK(!ticks.IsNull());
702 : return ticks;
703 : }
704 :
705 : // static
706 : bool TimeTicks::IsHighResolution() {
707 : if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
708 : InitializeTimeTicksNowFunctionPointer();
709 : return g_time_ticks_now_function == &QPCNow;
710 : }
711 :
712 : #else // V8_OS_WIN
713 :
714 54322654 : TimeTicks TimeTicks::Now() {
715 : int64_t ticks;
716 : #if V8_OS_MACOSX
717 : static struct mach_timebase_info info;
718 : if (info.denom == 0) {
719 : kern_return_t result = mach_timebase_info(&info);
720 : DCHECK_EQ(KERN_SUCCESS, result);
721 : USE(result);
722 : }
723 : ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
724 : info.numer / info.denom);
725 : #elif V8_OS_SOLARIS
726 : ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
727 : #elif V8_OS_POSIX
728 : ticks = ClockNow(CLOCK_MONOTONIC);
729 : #else
730 : #error platform does not implement TimeTicks::HighResolutionNow.
731 : #endif // V8_OS_MACOSX
732 : // Make sure we never return 0 here.
733 54367253 : return TimeTicks(ticks + 1);
734 : }
735 :
736 : // static
737 483738 : bool TimeTicks::IsHighResolution() {
738 : #if V8_OS_MACOSX
739 : return true;
740 : #elif V8_OS_POSIX
741 505077 : static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
742 483738 : return is_high_resolution;
743 : #else
744 : return true;
745 : #endif
746 : }
747 :
748 : #endif // V8_OS_WIN
749 :
750 :
751 1 : bool ThreadTicks::IsSupported() {
752 : #if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
753 : defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
754 1 : return true;
755 : #elif defined(V8_OS_WIN)
756 : return IsSupportedWin();
757 : #else
758 : return false;
759 : #endif
760 : }
761 :
762 :
763 1799 : ThreadTicks ThreadTicks::Now() {
764 : #if V8_OS_MACOSX
765 : return ThreadTicks(ComputeThreadTicks());
766 : #elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
767 : defined(V8_OS_ANDROID)
768 1799 : return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
769 : #elif V8_OS_SOLARIS
770 : return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
771 : #elif V8_OS_WIN
772 : return ThreadTicks::GetForThread(::GetCurrentThread());
773 : #else
774 : UNREACHABLE();
775 : #endif
776 : }
777 :
778 :
779 : #if V8_OS_WIN
780 : ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
781 : DCHECK(IsSupported());
782 :
783 : // Get the number of TSC ticks used by the current thread.
784 : ULONG64 thread_cycle_time = 0;
785 : ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);
786 :
787 : // Get the frequency of the TSC.
788 : double tsc_ticks_per_second = TSCTicksPerSecond();
789 : if (tsc_ticks_per_second == 0)
790 : return ThreadTicks();
791 :
792 : // Return the CPU time of the current thread.
793 : double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
794 : return ThreadTicks(
795 : static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
796 : }
797 :
798 : // static
799 : bool ThreadTicks::IsSupportedWin() {
800 : static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
801 : !IsQPCReliable();
802 : return is_supported;
803 : }
804 :
805 : // static
806 : void ThreadTicks::WaitUntilInitializedWin() {
807 : while (TSCTicksPerSecond() == 0)
808 : ::Sleep(10);
809 : }
810 :
811 : #ifdef V8_HOST_ARCH_ARM64
812 : #define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
813 : #else
814 : #define ReadCycleCounter() __rdtsc()
815 : #endif
816 :
817 : double ThreadTicks::TSCTicksPerSecond() {
818 : DCHECK(IsSupported());
819 :
820 : // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
821 : // frequency, because there is no guarantee that the TSC frequency is equal to
822 : // the performance counter frequency.
823 :
824 : // The TSC frequency is cached in a static variable because it takes some time
825 : // to compute it.
826 : static double tsc_ticks_per_second = 0;
827 : if (tsc_ticks_per_second != 0)
828 : return tsc_ticks_per_second;
829 :
830 : // Increase the thread priority to reduce the chances of having a context
831 : // switch during a reading of the TSC and the performance counter.
832 : int previous_priority = ::GetThreadPriority(::GetCurrentThread());
833 : ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
834 :
835 : // The first time that this function is called, make an initial reading of the
836 : // TSC and the performance counter.
837 : static const uint64_t tsc_initial = ReadCycleCounter();
838 : static const uint64_t perf_counter_initial = QPCNowRaw();
839 :
840 : // Make another reading of the TSC and the performance counter every time
841 : // that this function is called.
842 : uint64_t tsc_now = ReadCycleCounter();
843 : uint64_t perf_counter_now = QPCNowRaw();
844 :
845 : // Reset the thread priority.
846 : ::SetThreadPriority(::GetCurrentThread(), previous_priority);
847 :
848 : // Make sure that at least 50 ms elapsed between the 2 readings. The first
849 : // time that this function is called, we don't expect this to be the case.
850 : // Note: The longer the elapsed time between the 2 readings is, the more
851 : // accurate the computed TSC frequency will be. The 50 ms value was
852 : // chosen because local benchmarks show that it allows us to get a
853 : // stddev of less than 1 tick/us between multiple runs.
854 : // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
855 : // this will never fail on systems that run XP or later.
856 : // https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
857 : LARGE_INTEGER perf_counter_frequency = {};
858 : ::QueryPerformanceFrequency(&perf_counter_frequency);
859 : DCHECK_GE(perf_counter_now, perf_counter_initial);
860 : uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
861 : double elapsed_time_seconds =
862 : perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
863 :
864 : const double kMinimumEvaluationPeriodSeconds = 0.05;
865 : if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
866 : return 0;
867 :
868 : // Compute the frequency of the TSC.
869 : DCHECK_GE(tsc_now, tsc_initial);
870 : uint64_t tsc_ticks = tsc_now - tsc_initial;
871 : tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
872 :
873 : return tsc_ticks_per_second;
874 : }
875 : #undef ReadCycleCounter
876 : #endif // V8_OS_WIN
877 :
878 : } // namespace base
879 : } // namespace v8
|