// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/time.h"

#if V8_OS_POSIX
#include <fcntl.h>  // for O_RDONLY
#include <sys/time.h>
#include <unistd.h>
#endif
#if V8_OS_MACOSX
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <pthread.h>
#endif

#include <cstring>
#include <ostream>

#if V8_OS_WIN
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"

namespace {

#if V8_OS_MACOSX
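// Returns the combined user and system CPU time consumed by the calling
// thread so far, in microseconds, as reported by the Mach thread_info() API.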
int64_t ComputeThreadTicks() {
  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t thread_info_data;
  kern_return_t kr = thread_info(
      pthread_mach_thread_np(pthread_self()),
      THREAD_BASIC_INFO,
      reinterpret_cast<thread_info_t>(&thread_info_data),
      &thread_info_count);
  CHECK(kr == KERN_SUCCESS);

  v8::base::CheckedNumeric<int64_t> absolute_micros(
      thread_info_data.user_time.seconds +
      thread_info_data.system_time.seconds);
  absolute_micros *= v8::base::Time::kMicrosecondsPerSecond;
  absolute_micros += (thread_info_data.user_time.microseconds +
                      thread_info_data.system_time.microseconds);
  return absolute_micros.ValueOrDie();
}
#elif V8_OS_POSIX
// Helper function to get results from clock_gettime() and convert them to a
// microsecond timebase. The minimum requirement is that CLOCK_MONOTONIC is
// supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
// _POSIX_MONOTONIC_CLOCK to -1.
V8_INLINE int64_t ClockNow(clockid_t clk_id) {
#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
  defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
// On AIX, clock_gettime() for CLOCK_THREAD_CPUTIME_ID only has a resolution
// of 10ms; the thread_cputime() API provides the time in nanoseconds instead.
#if defined(V8_OS_AIX)
  thread_cputime_t tc;
  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
    if (thread_cputime(-1, &tc) != 0) {
      UNREACHABLE();
      return 0;
    }
  }
#endif
  struct timespec ts;
  if (clock_gettime(clk_id, &ts) != 0) {
    UNREACHABLE();
    return 0;
  }
  v8::base::internal::CheckedNumeric<int64_t> result(ts.tv_sec);
  result *= v8::base::Time::kMicrosecondsPerSecond;
#if defined(V8_OS_AIX)
  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
    result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
  } else {
    result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
  }
#else
  result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
#endif
  return result.ValueOrDie();
#else  // Monotonic clock not supported.
  return 0;
#endif
}
#elif V8_OS_WIN
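// Returns true if QueryPerformanceCounter() is known to be unreliable on the
// current CPU (notably on Athlon X2 processors).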
V8_INLINE bool IsQPCReliable() {
  v8::base::CPU cpu;
  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
  return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
}

// Returns the current value of the performance counter.
V8_INLINE uint64_t QPCNowRaw() {
  LARGE_INTEGER perf_counter_now = {};
  // According to the MSDN documentation for QueryPerformanceCounter(), this
  // will never fail on systems that run XP or later.
  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
  BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
  DCHECK(result);
  USE(result);
  return perf_counter_now.QuadPart;
}
#endif  // V8_OS_MACOSX


}  // namespace

namespace v8 {
namespace base {

TimeDelta TimeDelta::FromDays(int days) {
  return TimeDelta(days * Time::kMicrosecondsPerDay);
}


TimeDelta TimeDelta::FromHours(int hours) {
  return TimeDelta(hours * Time::kMicrosecondsPerHour);
}


TimeDelta TimeDelta::FromMinutes(int minutes) {
  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
}


TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
  return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
}


TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
  return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
}


TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
  return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
}


int TimeDelta::InDays() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
}


int TimeDelta::InHours() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
}


int TimeDelta::InMinutes() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
}


double TimeDelta::InSecondsF() const {
  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
}


int64_t TimeDelta::InSeconds() const {
  return delta_ / Time::kMicrosecondsPerSecond;
}


double TimeDelta::InMillisecondsF() const {
  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
}


int64_t TimeDelta::InMilliseconds() const {
  return delta_ / Time::kMicrosecondsPerMillisecond;
}


int64_t TimeDelta::InNanoseconds() const {
  return delta_ * Time::kNanosecondsPerMicrosecond;
}


#if V8_OS_MACOSX

TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct mach_timespec TimeDelta::ToMachTimespec() const {
  struct mach_timespec ts;
  DCHECK(delta_ >= 0);
  ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_MACOSX


#if V8_OS_POSIX

TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}


struct timespec TimeDelta::ToTimespec() const {
  struct timespec ts;
  ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_POSIX


#if V8_OS_WIN

// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock final {
 public:
  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}

  Time Now() {
    // Time between resampling the un-granular clock for this API (1 minute).
    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

    LockGuard<Mutex> lock_guard(&mutex_);

    // Determine current time and ticks.
    TimeTicks ticks = GetSystemTicks();
    Time time = GetSystemTime();

    // Check if we need to synchronize with the system clock due to a backwards
    // time change or the amount of time elapsed.
    TimeDelta elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }

    return initial_time_ + elapsed;
  }

  Time NowFromSystemTime() {
    LockGuard<Mutex> lock_guard(&mutex_);
    initial_ticks_ = GetSystemTicks();
    initial_time_ = GetSystemTime();
    return initial_time_;
  }

 private:
  static TimeTicks GetSystemTicks() {
    return TimeTicks::Now();
  }

  static Time GetSystemTime() {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    return Time::FromFiletime(ft);
  }

  TimeTicks initial_ticks_;
  Time initial_time_;
  Mutex mutex_;
};


static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
                          ThreadSafeInitOnceTrait>::type clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


Time Time::Now() {
  return clock.Pointer()->Now();
}


Time Time::NowFromSystemTime() {
  return clock.Pointer()->NowFromSystemTime();
}


// The difference, in microseconds, between the Windows epoch
// (1601-01-01 00:00:00 UTC) and the Unix epoch (1970-01-01 00:00:00 UTC).
static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);


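// Converts a Windows FILETIME (100 ns ticks since 1601-01-01 UTC) to a Time.
// The all-zero and all-ones bit patterns map to the null and maximum Time
// values respectively.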
Time Time::FromFiletime(FILETIME ft) {
  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
    return Time();
  }
  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
    return Max();
  }
  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
  return Time(us - kTimeToEpochInMicroseconds);
}


FILETIME Time::ToFiletime() const {
  DCHECK(us_ >= 0);
  FILETIME ft;
  if (IsNull()) {
    ft.dwLowDateTime = 0;
    ft.dwHighDateTime = 0;
    return ft;
  }
  if (IsMax()) {
    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
    return ft;
  }
  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
  ft.dwLowDateTime = static_cast<DWORD>(us);
  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
  return ft;
}

#elif V8_OS_POSIX

Time Time::Now() {
  struct timeval tv;
  int result = gettimeofday(&tv, NULL);
  DCHECK_EQ(0, result);
  USE(result);
  return FromTimeval(tv);
}


Time Time::NowFromSystemTime() {
  return Now();
}


Time Time::FromTimespec(struct timespec ts) {
  DCHECK(ts.tv_nsec >= 0);
  DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond));  // NOLINT
  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
    return Time();
  }
  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
      ts.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(ts.tv_sec * kMicrosecondsPerSecond +
              ts.tv_nsec / kNanosecondsPerMicrosecond);
}


struct timespec Time::ToTimespec() const {
  struct timespec ts;
  if (IsNull()) {
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    return ts;
  }
  if (IsMax()) {
    ts.tv_sec = std::numeric_limits<time_t>::max();
    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
    return ts;
  }
  ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
  return ts;
}


Time Time::FromTimeval(struct timeval tv) {
  DCHECK(tv.tv_usec >= 0);
  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
    return Time();
  }
  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
      tv.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
}


struct timeval Time::ToTimeval() const {
  struct timeval tv;
  if (IsNull()) {
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    return tv;
  }
  if (IsMax()) {
    tv.tv_sec = std::numeric_limits<time_t>::max();
    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
    return tv;
  }
  tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  tv.tv_usec = us_ % kMicrosecondsPerSecond;
  return tv;
}

#endif  // V8_OS_WIN


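// JS time values are milliseconds since the Unix epoch, represented as a
// double; convert to a Time with microsecond resolution.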
Time Time::FromJsTime(double ms_since_epoch) {
  // The epoch is a valid time, so this constructor doesn't interpret
  // 0 as the null time.
  if (ms_since_epoch == std::numeric_limits<double>::max()) {
    return Max();
  }
  return Time(
      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
}


double Time::ToJsTime() const {
  if (IsNull()) {
    // Preserve 0 so the invalid result doesn't depend on the platform.
    return 0;
  }
  if (IsMax()) {
    // Preserve max without offset to prevent overflow.
    return std::numeric_limits<double>::max();
  }
  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
}


std::ostream& operator<<(std::ostream& os, const Time& time) {
  return os << time.ToJsTime();
}


#if V8_OS_WIN

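// Abstract interface for the clock sources that back TimeTicks on Windows.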
class TickClock {
 public:
  virtual ~TickClock() {}
  virtual int64_t Now() = 0;
  virtual bool IsHighResolution() = 0;
};


// Overview of time counters:
// (1) CPU cycle counter (retrieved via RDTSC).
// The CPU counter provides the highest-resolution time stamp and is the least
// expensive to retrieve. However, it is unreliable and should not be used in
// production. Its biggest issue is that it is per processor and is not
// synchronized between processors. Also, on some computers the counters
// change frequency due to thermal and power changes, and stop in some power
// states.
//
// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
// resolution (100 nanoseconds) time stamp but is comparatively more expensive
// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
// (with some help from ACPI). According to
// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
// in the worst case it gets the counter from the rollover interrupt on the
// programmable interrupt timer. In the best case the HAL may conclude that
// the RDTSC counter runs at a constant frequency and use that instead. On
// multiprocessor machines it will try to verify that the values returned from
// RDTSC on each processor are consistent with each other, and apply a handful
// of workarounds for known buggy hardware. In other words, QPC is supposed to
// give consistent results on a multiprocessor computer, but it is unreliable
// in practice due to bugs in the BIOS or HAL on some (especially older)
// machines. With recent HAL updates and newer BIOSes QPC has become more
// reliable, but it should still be used with caution.
//
// (3) System time. The system time provides a low-resolution (typically 10 to
// 55 milliseconds) time stamp but is comparatively less expensive to retrieve
// and more reliable.
class HighResolutionTickClock final : public TickClock {
 public:
  explicit HighResolutionTickClock(int64_t ticks_per_second)
      : ticks_per_second_(ticks_per_second) {
    DCHECK_LT(0, ticks_per_second);
  }
  virtual ~HighResolutionTickClock() {}

  int64_t Now() override {
    uint64_t now = QPCNowRaw();

    // Intentionally calculate microseconds in a roundabout manner to avoid
    // overflow and precision issues. Think twice before simplifying!
    int64_t whole_seconds = now / ticks_per_second_;
    int64_t leftover_ticks = now % ticks_per_second_;
    int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
        ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);

    // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
    // will never return 0.
    return ticks + 1;
  }

  bool IsHighResolution() override { return true; }

 private:
  int64_t ticks_per_second_;
};


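// Low-resolution clock based on timeGetTime(), which wraps around roughly
// every 49.7 days; the rollover is tracked manually so that the returned
// tick count keeps increasing monotonically.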
class RolloverProtectedTickClock final : public TickClock {
 public:
  RolloverProtectedTickClock() : rollover_(0) {}
  virtual ~RolloverProtectedTickClock() {}

  int64_t Now() override {
    // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
    // every ~49.7 days. We try to track rollover ourselves, which works if
    // TimeTicks::Now() is called at least every 24 days.
    // Note that we do not use GetTickCount() here, since timeGetTime() gives
    // more predictable delta values, as described here:
    // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
    // timeGetTime() provides 1ms granularity when combined with
    // timeBeginPeriod(). If the host application for V8 wants fast timers, it
    // can use timeBeginPeriod() to increase the resolution.
    // We use a lock-free implementation because the sampler thread calls this
    // while having the rest of the world stopped, which could otherwise cause
    // a deadlock.
    base::Atomic32 rollover = base::Acquire_Load(&rollover_);
    uint32_t now = static_cast<uint32_t>(timeGetTime());
    if ((now >> 31) != static_cast<uint32_t>(rollover & 1)) {
      base::Release_CompareAndSwap(&rollover_, rollover, rollover + 1);
      ++rollover;
    }
    uint64_t ms = (static_cast<uint64_t>(rollover) << 31) | now;
    return static_cast<int64_t>(ms * Time::kMicrosecondsPerMillisecond);
  }

  bool IsHighResolution() override { return false; }

 private:
  base::Atomic32 rollover_;
};


static LazyStaticInstance<RolloverProtectedTickClock,
                          DefaultConstructTrait<RolloverProtectedTickClock>,
                          ThreadSafeInitOnceTrait>::type tick_clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


struct CreateHighResTickClockTrait {
  static TickClock* Create() {
    // Check if the installed hardware supports a high-resolution performance
    // counter, and if not, fall back to the low-resolution tick clock.
    LARGE_INTEGER ticks_per_second;
    if (!QueryPerformanceFrequency(&ticks_per_second)) {
      return tick_clock.Pointer();
    }

    // If QPC is not reliable, fall back to the low-resolution tick clock.
    if (IsQPCReliable()) {
      return tick_clock.Pointer();
    }

    return new HighResolutionTickClock(ticks_per_second.QuadPart);
  }
};


static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
                           ThreadSafeInitOnceTrait>::type high_res_tick_clock =
    LAZY_DYNAMIC_INSTANCE_INITIALIZER;


TimeTicks TimeTicks::Now() {
  // Make sure we never return 0 here.
  TimeTicks ticks(tick_clock.Pointer()->Now());
  DCHECK(!ticks.IsNull());
  return ticks;
}


TimeTicks TimeTicks::HighResolutionNow() {
  // Make sure we never return 0 here.
  TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
  DCHECK(!ticks.IsNull());
  return ticks;
}


// static
bool TimeTicks::IsHighResolutionClockWorking() {
  return high_res_tick_clock.Pointer()->IsHighResolution();
}

#else  // V8_OS_WIN

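// On non-Windows platforms TimeTicks::Now() is already based on a monotonic
// clock, so it simply forwards to HighResolutionNow().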
TimeTicks TimeTicks::Now() {
  return HighResolutionNow();
}


TimeTicks TimeTicks::HighResolutionNow() {
  int64_t ticks;
#if V8_OS_MACOSX
  static struct mach_timebase_info info;
  if (info.denom == 0) {
    kern_return_t result = mach_timebase_info(&info);
    DCHECK_EQ(KERN_SUCCESS, result);
    USE(result);
  }
  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
           info.numer / info.denom);
#elif V8_OS_SOLARIS
  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_POSIX
  ticks = ClockNow(CLOCK_MONOTONIC);
#endif  // V8_OS_MACOSX
  // Make sure we never return 0 here.
  return TimeTicks(ticks + 1);
}


// static
bool TimeTicks::IsHighResolutionClockWorking() {
  return true;
}

#endif  // V8_OS_WIN


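// Returns true if the platform provides a way to measure per-thread CPU time.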
bool ThreadTicks::IsSupported() {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
    defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
  return true;
#elif defined(V8_OS_WIN)
  return IsSupportedWin();
#else
  return false;
#endif
}


ThreadTicks ThreadTicks::Now() {
#if V8_OS_MACOSX
  return ThreadTicks(ComputeThreadTicks());
#elif (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
  defined(V8_OS_ANDROID)
  return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
#elif V8_OS_SOLARIS
  return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_WIN
  return ThreadTicks::GetForThread(::GetCurrentThread());
#else
  UNREACHABLE();
  return ThreadTicks();
#endif
}


#if V8_OS_WIN
ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
  DCHECK(IsSupported());

  // Get the number of TSC ticks used by the current thread.
  ULONG64 thread_cycle_time = 0;
  ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);

  // Get the frequency of the TSC.
  double tsc_ticks_per_second = TSCTicksPerSecond();
  if (tsc_ticks_per_second == 0)
    return ThreadTicks();

  // Return the CPU time of the current thread.
  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
  return ThreadTicks(
      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
}

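// Thread CPU time is only reported on Windows when the CPU provides a
// non-stop time stamp counter and QPC is not known to be unreliable on this
// hardware.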
// static
bool ThreadTicks::IsSupportedWin() {
  static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
                             !IsQPCReliable();
  return is_supported;
}

// static
void ThreadTicks::WaitUntilInitializedWin() {
  while (TSCTicksPerSecond() == 0)
    ::Sleep(10);
}

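// Returns the frequency of the TSC in ticks per second, measured by comparing
// TSC readings against the performance counter. Returns 0 until at least
// 50 ms of performance-counter time separate the first and the most recent
// call.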
double ThreadTicks::TSCTicksPerSecond() {
  DCHECK(IsSupported());

  // The value returned by QueryPerformanceFrequency() cannot be used as the
  // TSC frequency, because there is no guarantee that the TSC frequency is
  // equal to the performance counter frequency.

  // The TSC frequency is cached in a static variable because it takes some
  // time to compute it.
  static double tsc_ticks_per_second = 0;
  if (tsc_ticks_per_second != 0)
    return tsc_ticks_per_second;

  // Increase the thread priority to reduce the chances of having a context
  // switch while reading the TSC and the performance counter.
  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);

  // The first time that this function is called, make an initial reading of
  // the TSC and the performance counter.
  static const uint64_t tsc_initial = __rdtsc();
  static const uint64_t perf_counter_initial = QPCNowRaw();

  // Make another reading of the TSC and the performance counter every time
  // this function is called.
  uint64_t tsc_now = __rdtsc();
  uint64_t perf_counter_now = QPCNowRaw();

  // Reset the thread priority.
  ::SetThreadPriority(::GetCurrentThread(), previous_priority);

  // Make sure that at least 50 ms elapsed between the 2 readings. The first
  // time that this function is called, we don't expect this to be the case.
  // Note: The longer the elapsed time between the 2 readings is, the more
  //   accurate the computed TSC frequency will be. The 50 ms value was
  //   chosen because local benchmarks show that it allows us to get a
  //   stddev of less than 1 tick/us between multiple runs.
  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
  //   this will never fail on systems that run XP or later.
  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
  LARGE_INTEGER perf_counter_frequency = {};
  ::QueryPerformanceFrequency(&perf_counter_frequency);
  DCHECK_GE(perf_counter_now, perf_counter_initial);
  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
  double elapsed_time_seconds =
      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);

  const double kMinimumEvaluationPeriodSeconds = 0.05;
  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
    return 0;

  // Compute the frequency of the TSC.
  DCHECK_GE(tsc_now, tsc_initial);
  uint64_t tsc_ticks = tsc_now - tsc_initial;
  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;

  return tsc_ticks_per_second;
}
#endif  // V8_OS_WIN

}  // namespace base
}  // namespace v8