// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This module contains the platform-specific code. It makes the rest of the
// code less dependent on the operating system, compiler and runtime
// libraries. This module specifically does not deal with differences between
// processor architectures.
// The platform classes have the same definition for all platforms. The
// implementation for a particular platform is put in platform_<os>.cc.
// The build system then uses the implementation for the target platform.
//
// This design has been chosen because it is simple and fast. Alternatively,
// the platform-dependent classes could have been implemented using abstract
// superclasses with virtual methods, with specializations for each platform.
// That design was rejected because it would have been more complicated and
// slower: it would require factory methods for selecting the right
// implementation and would add virtual-call overhead to performance-sensitive
// operations such as mutex locking/unlocking.

#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_

#include <cstdarg>

#include "platform/mutex.h"
#include "platform/semaphore.h"
#include "utils.h"
#include "v8globals.h"

#ifdef __sun
# ifndef signbit
namespace std {
int signbit(double x);
}
# endif
#endif

// Microsoft Visual C++ specific stuff.
#if V8_CC_MSVC

#include "win32-headers.h"
#include "win32-math.h"

int strncasecmp(const char* s1, const char* s2, int n);

// Visual C++ 2013 and higher implement this function.
#if (_MSC_VER < 1800)
inline int lrint(double flt) {
  int intgr;
#if V8_TARGET_ARCH_IA32
  __asm {
    fld flt
    fistp intgr
  };
#else
  intgr = static_cast<int>(flt + 0.5);
  if ((intgr & 1) != 0 && intgr - flt == 0.5) {
    // If the number is halfway between two integers, round to the even one.
    intgr--;
  }
#endif
  return intgr;
}

#endif  // _MSC_VER < 1800

#endif  // V8_CC_MSVC

namespace v8 {
namespace internal {

double modulo(double x, double y);

// Custom implementation of math functions.
double fast_sin(double input);
double fast_cos(double input);
double fast_tan(double input);
double fast_log(double input);
double fast_exp(double input);
double fast_sqrt(double input);
// The custom exp implementation needs 16KB of lookup data; initialize it
// on demand.
void lazily_initialize_fast_exp();
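
// Usage sketch (illustrative only, not part of the original documentation;
// it assumes the lookup table must be built explicitly before the first
// fast_exp() call):
//
//   lazily_initialize_fast_exp();   // builds the 16KB lookup table once
//   double approx = fast_exp(1.0);  // approximates e using that table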

// ----------------------------------------------------------------------------
// Fast TLS support

#ifndef V8_NO_FAST_TLS

#if defined(_MSC_VER) && V8_HOST_ARCH_IA32

#define V8_FAST_TLS_SUPPORTED 1

INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));

inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
  const intptr_t kTibInlineTlsOffset = 0xE10;
  const intptr_t kTibExtraTlsOffset = 0xF94;
  const intptr_t kMaxInlineSlots = 64;
  const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
  ASSERT(0 <= index && index < kMaxSlots);
  if (index < kMaxInlineSlots) {
    return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
                                               kPointerSize * index));
  }
  intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
  ASSERT(extra != 0);
  return *reinterpret_cast<intptr_t*>(extra +
                                      kPointerSize * (index - kMaxInlineSlots));
}

#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)

#define V8_FAST_TLS_SUPPORTED 1

extern intptr_t kMacTlsBaseOffset;

INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));

inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
  intptr_t result;
#if V8_HOST_ARCH_IA32
  asm("movl %%gs:(%1,%2,4), %0;"
      :"=r"(result)  // Output must be a writable register.
      :"r"(kMacTlsBaseOffset), "r"(index));
#else
  asm("movq %%gs:(%1,%2,8), %0;"
      :"=r"(result)
      :"r"(kMacTlsBaseOffset), "r"(index));
#endif
  return result;
}

#endif

#endif  // V8_NO_FAST_TLS


// ----------------------------------------------------------------------------
// OS
//
// This class has static methods for the different platform-specific
// functions. Add methods here to cope with differences between the
// supported platforms.

class OS {
 public:
  // Initializes the platform OS support that depends on CPU features. This is
  // called after CPU initialization.
  static void PostSetUp();

  // Returns the accumulated user time for the thread. This routine
  // can be used for profiling. The implementation should
  // strive for high-precision timer resolution, preferably
  // microsecond resolution.
  static int GetUserTime(uint32_t* secs,  uint32_t* usecs);

  // Returns the current time as the number of milliseconds since
  // 00:00:00 UTC, January 1, 1970.
  static double TimeCurrentMillis();

  // Returns a string identifying the current time zone. The
  // timestamp is used for determining if DST is in effect.
  static const char* LocalTimezone(double time);

  // Returns the local time offset in milliseconds east of UTC without
  // taking daylight saving time into account.
  static double LocalTimeOffset();

  // Returns the daylight saving time offset for the given time.
  static double DaylightSavingsOffset(double time);

  // Returns the last OS error.
  static int GetLastError();

  static FILE* FOpen(const char* path, const char* mode);
  static bool Remove(const char* path);

  // Opens a temporary file; the file is automatically removed on close.
  static FILE* OpenTemporaryFile();

  // Log file open mode is platform-dependent due to line-ending issues.
  static const char* const LogFileOpenMode;

  // Print output to console. This is mostly used for debugging output.
  // On platforms that have standard terminal output, the output
  // should go to stdout.
  static void Print(const char* format, ...);
  static void VPrint(const char* format, va_list args);

  // Print output to a file. This is mostly used for debugging output.
  static void FPrint(FILE* out, const char* format, ...);
  static void VFPrint(FILE* out, const char* format, va_list args);

  // Print error output to console. This is mostly used for error message
  // output. On platforms that have standard terminal output, the output
  // should go to stderr.
  static void PrintError(const char* format, ...);
  static void VPrintError(const char* format, va_list args);

  // Allocate/Free memory used by the JS heap. Pages are readable/writable, but
  // they are not guaranteed to be executable unless 'is_executable' is true.
  // Returns the address of the allocated memory, or NULL on failure.
  static void* Allocate(const size_t requested,
                        size_t* allocated,
                        bool is_executable);
  static void Free(void* address, const size_t size);

  // This is the granularity at which the ProtectCode(...) call can set page
  // permissions.
  static intptr_t CommitPageSize();

  // Mark code segments non-writable.
  static void ProtectCode(void* address, const size_t size);

  // Assign memory as a guard page so that access will cause an exception.
  static void Guard(void* address, const size_t size);

  // Generate a random address to be used for hinting mmap().
  static void* GetRandomMmapAddr();

  // Get the alignment guaranteed by Allocate().
  static size_t AllocateAlignment();

  // Sleep for a number of milliseconds.
  static void Sleep(const int milliseconds);

  // Abort the current process.
  static void Abort();

  // Debug break.
  static void DebugBreak();

  // Walk the stack.
  static const int kStackWalkError = -1;
  static const int kStackWalkMaxNameLen = 256;
  static const int kStackWalkMaxTextLen = 256;
  struct StackFrame {
    void* address;
    char text[kStackWalkMaxTextLen];
  };

  class MemoryMappedFile {
   public:
    static MemoryMappedFile* open(const char* name);
    static MemoryMappedFile* create(const char* name, int size, void* initial);
    virtual ~MemoryMappedFile() { }
    virtual void* memory() = 0;
    virtual int size() = 0;
  };

  // Safe formatting print. Ensures that str is always null-terminated.
  // Returns the number of chars written, or -1 if output was truncated.
  static int SNPrintF(Vector<char> str, const char* format, ...);
  static int VSNPrintF(Vector<char> str,
                       const char* format,
                       va_list args);

  static char* StrChr(char* str, int c);
  static void StrNCpy(Vector<char> dest, const char* src, size_t n);

  // Support for the profiler.  Can do nothing, in which case ticks
  // occurring in shared libraries will not be properly accounted for.
  static void LogSharedLibraryAddresses(Isolate* isolate);

  // Support for the profiler.  Notifies the external profiling
  // process that a code moving garbage collection starts.  Can do
  // nothing, in which case the code objects must not move (e.g., by
  // using --never-compact) if accurate profiling is desired.
  static void SignalCodeMovingGC();

  // The return value indicates the CPU features we are sure of because of the
  // OS.  For example, Mac OS X doesn't run on any x86 CPUs that don't have
  // SSE2 instructions.
  // This is a little messy because the interpretation depends on the
  // combination of CPU and OS.  The bits in the answer correspond to the bit
  // positions indicated by the members of the CpuFeature enum from globals.h.
  static uint64_t CpuFeaturesImpliedByPlatform();

  // The total amount of physical memory available on the current system.
  static uint64_t TotalPhysicalMemory();

  // Maximum size of the virtual memory.  0 means there is no artificial
  // limit.
  static intptr_t MaxVirtualMemory();

  // Returns the double constant NAN.
  static double nan_value();

  // Support runtime detection of whether the hard float option of the
  // EABI is used.
  static bool ArmUsingHardFloat();

  // Returns the activation frame alignment constraint or zero if
  // the platform doesn't care. Guaranteed to be a power of two.
  static int ActivationFrameAlignment();

#if defined(V8_TARGET_ARCH_IA32)
  // Limit below which the extra overhead of the MemCopy function is likely
  // to outweigh the benefits of faster copying.
  static const int kMinComplexMemCopy = 64;

  // Copy memory area. No restrictions.
  static void MemMove(void* dest, const void* src, size_t size);
  typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);

  // Keep the distinction of "move" vs. "copy" for the benefit of other
  // architectures.
  static void MemCopy(void* dest, const void* src, size_t size) {
    MemMove(dest, src, size);
  }
#elif defined(V8_HOST_ARCH_ARM)
  typedef void (*MemCopyUint8Function)(uint8_t* dest,
                                       const uint8_t* src,
                                       size_t size);
  static MemCopyUint8Function memcopy_uint8_function;
  static void MemCopyUint8Wrapper(uint8_t* dest,
                                  const uint8_t* src,
                                  size_t chars) {
    memcpy(dest, src, chars);
  }
  // For values < 16, the assembler function is slower than the inlined C code.
  static const int kMinComplexMemCopy = 16;
  static void MemCopy(void* dest, const void* src, size_t size) {
    (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                              reinterpret_cast<const uint8_t*>(src),
                              size);
  }
  static void MemMove(void* dest, const void* src, size_t size) {
    memmove(dest, src, size);
  }

  typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest,
                                             const uint8_t* src,
                                             size_t size);
  static MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
  static void MemCopyUint16Uint8Wrapper(uint16_t* dest,
                                        const uint8_t* src,
                                        size_t chars);
  // For values < 12, the assembler function is slower than the inlined C code.
  static const int kMinComplexConvertMemCopy = 12;
  static void MemCopyUint16Uint8(uint16_t* dest,
                                 const uint8_t* src,
                                 size_t size) {
    (*memcopy_uint16_uint8_function)(dest, src, size);
  }
#else
  // Copy memory area to disjoint memory area.
  static void MemCopy(void* dest, const void* src, size_t size) {
    memcpy(dest, src, size);
  }
  static void MemMove(void* dest, const void* src, size_t size) {
    memmove(dest, src, size);
  }
  static const int kMinComplexMemCopy = 16 * kPointerSize;
#endif  // V8_TARGET_ARCH_IA32

  static int GetCurrentProcessId();

 private:
  static const int msPerSecond = 1000;

  DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
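
// Example (illustrative sketch only, not normative documentation; it assumes
// the Vector(char*, int) constructor declared in utils.h):
//
//   char buffer[128];
//   Vector<char> buf(buffer, sizeof(buffer));
//   int written = OS::SNPrintF(buf, "pid: %d", OS::GetCurrentProcessId());
//   if (written >= 0) OS::Print("%s\n", buffer);  // -1 means truncated output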

// Represents and controls an area of reserved memory.
// Control of the reserved memory can be assigned to another VirtualMemory
// object by assignment or copy-constructing. This removes the reserved memory
// from the original object.
class VirtualMemory {
 public:
  // Empty VirtualMemory object, controlling no reserved memory.
  VirtualMemory();

  // Reserves virtual memory of the given size.
  explicit VirtualMemory(size_t size);

  // Reserves virtual memory containing an area of the given size that
  // is aligned to the given alignment. This may not be at the position
  // returned by address().
  VirtualMemory(size_t size, size_t alignment);

  // Releases the reserved memory, if any, controlled by this VirtualMemory
  // object.
  ~VirtualMemory();

  // Returns whether the memory has been reserved.
  bool IsReserved();

  // Initializes or resets an embedded VirtualMemory object.
  void Reset();

  // Returns the start address of the reserved memory.
  // If the memory was reserved with an alignment, this address is not
  // necessarily aligned. The user might need to round it up to a multiple of
  // the alignment to get the start of the aligned block.
  void* address() {
    ASSERT(IsReserved());
    return address_;
  }

  // Returns the size of the reserved memory. The returned value is only
  // meaningful when IsReserved() returns true.
  // If the memory was reserved with an alignment, this size may be larger
  // than the requested size.
  size_t size() { return size_; }

  // Commits real memory. Returns whether the operation succeeded.
  bool Commit(void* address, size_t size, bool is_executable);

  // Uncommits real memory.  Returns whether the operation succeeded.
  bool Uncommit(void* address, size_t size);

  // Creates a single guard page at the given address.
  bool Guard(void* address);

  void Release() {
    ASSERT(IsReserved());
    // Notice: Order is important here. The VirtualMemory object might live
    // inside the allocated region.
    void* address = address_;
    size_t size = size_;
    Reset();
    bool result = ReleaseRegion(address, size);
    USE(result);
    ASSERT(result);
  }

  // Assign control of the reserved region to a different VirtualMemory object.
  // The old object is no longer functional (IsReserved() returns false).
  void TakeControl(VirtualMemory* from) {
    ASSERT(!IsReserved());
    address_ = from->address_;
    size_ = from->size_;
    from->Reset();
  }

  static void* ReserveRegion(size_t size);

  static bool CommitRegion(void* base, size_t size, bool is_executable);

  static bool UncommitRegion(void* base, size_t size);

  // Must be called with a base pointer that has been returned by ReserveRegion
  // and the same size it was reserved with.
  static bool ReleaseRegion(void* base, size_t size);

  // Returns true if the OS performs lazy commits, i.e. the memory allocation
  // call defers actual physical memory allocation until the first memory
  // access. Otherwise returns false.
  static bool HasLazyCommits();

 private:
  void* address_;  // Start address of the virtual memory.
  size_t size_;  // Size of the virtual memory.
};
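
// Example (illustrative sketch only; the 1 * MB size assumes the MB constant
// from globals.h, the reservation size is arbitrary, and error handling is
// elided):
//
//   VirtualMemory reservation(1 * MB);  // reserve address space, not memory
//   if (reservation.IsReserved()) {
//     void* base = reservation.address();
//     if (reservation.Commit(base, OS::CommitPageSize(), false)) {
//       // ... use the committed, non-executable page ...
//       reservation.Uncommit(base, OS::CommitPageSize());
//     }
//   }  // the destructor releases the reservation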


// ----------------------------------------------------------------------------
// Thread
//
// Thread objects are used for creating and running threads. When the Start()
// method is called, the new thread starts running the Run() method in the new
// thread. The Thread object should not be deallocated before the thread has
// terminated.

class Thread {
 public:
  // Opaque data type for thread-local storage keys.
  // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
  // to ensure that the enumeration type has the correct value range (see
  // Issue 830 for more details).
  enum LocalStorageKey {
    LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
    LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
  };

  class Options {
   public:
    Options() : name_("v8:<unknown>"), stack_size_(0) {}
    Options(const char* name, int stack_size = 0)
        : name_(name), stack_size_(stack_size) {}

    const char* name() const { return name_; }
    int stack_size() const { return stack_size_; }

   private:
    const char* name_;
    int stack_size_;
  };

  // Create a new thread.
  explicit Thread(const Options& options);
  virtual ~Thread();

  // Start the new thread by calling the Run() method on the new thread.
  void Start();

  // Start the new thread and wait until Run() has been called on it.
  void StartSynchronously() {
    start_semaphore_ = new Semaphore(0);
    Start();
    start_semaphore_->Wait();
    delete start_semaphore_;
    start_semaphore_ = NULL;
  }

  // Wait until the thread terminates.
  void Join();

  inline const char* name() const {
    return name_;
  }

  // Abstract method for the run handler.
  virtual void Run() = 0;

  // Thread-local storage.
  static LocalStorageKey CreateThreadLocalKey();
  static void DeleteThreadLocalKey(LocalStorageKey key);
  static void* GetThreadLocal(LocalStorageKey key);
  static int GetThreadLocalInt(LocalStorageKey key) {
    return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
  }
  static void SetThreadLocal(LocalStorageKey key, void* value);
  static void SetThreadLocalInt(LocalStorageKey key, int value) {
    SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
  }
  static bool HasThreadLocal(LocalStorageKey key) {
    return GetThreadLocal(key) != NULL;
  }

#ifdef V8_FAST_TLS_SUPPORTED
  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
    void* result = reinterpret_cast<void*>(
        InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
    ASSERT(result == GetThreadLocal(key));
    return result;
  }
#else
  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
    return GetThreadLocal(key);
  }
#endif

  // A hint to the scheduler to let another thread run.
  static void YieldCPU();


  // The thread name length is limited to 16 based on Linux's implementation of
  // prctl().
  static const int kMaxThreadNameLength = 16;

  class PlatformData;
  PlatformData* data() { return data_; }

  void NotifyStartedAndRun() {
    if (start_semaphore_) start_semaphore_->Signal();
    Run();
  }

 private:
  void set_name(const char* name);

  PlatformData* data_;

  char name_[kMaxThreadNameLength];
  int stack_size_;
  Semaphore* start_semaphore_;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};
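
// Example (illustrative sketch only; "WorkerThread" is a hypothetical
// subclass used for demonstration, not something defined in V8):
//
//   class WorkerThread : public Thread {
//    public:
//     WorkerThread() : Thread(Options("WorkerThread")) {}
//     virtual void Run() { /* work to perform on the new thread */ }
//   };
//
//   WorkerThread worker;
//   worker.Start();  // Run() executes on the new thread
//   worker.Join();   // wait for the thread to terminate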

} }  // namespace v8::internal

#endif  // V8_PLATFORM_H_
