1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
// This module contains the platform-specific code. This makes the rest of the
// code less dependent on operating system, compilers and runtime libraries.
// This module specifically does not deal with differences between
// processor architectures.
32// The platform classes have the same definition for all platforms. The
33// implementation for a particular platform is put in platform_<os>.cc.
34// The build system then uses the implementation for the target platform.
35//
36// This design has been chosen because it is simple and fast. Alternatively,
37// the platform dependent classes could have been implemented using abstract
38// superclasses with virtual methods and having specializations for each
39// platform. This design was rejected because it was more complicated and
40// slower. It would require factory methods for selecting the right
// implementation and the overhead of virtual methods for performance
// sensitive operations like mutex locking/unlocking.
43
44#ifndef V8_PLATFORM_H_
45#define V8_PLATFORM_H_
46
47#ifdef __sun
48# ifndef signbit
49namespace std {
50int signbit(double x);
51}
52# endif
53#endif
54
55// GCC specific stuff
56#ifdef __GNUC__
57
58// Needed for va_list on at least MinGW and Android.
59#include <stdarg.h>
60
61#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
62
63#endif  // __GNUC__
64
65
66// Windows specific stuff.
67#ifdef WIN32
68
69// Microsoft Visual C++ specific stuff.
70#ifdef _MSC_VER
71
72#include "win32-headers.h"
73#include "win32-math.h"
74
75int strncasecmp(const char* s1, const char* s2, int n);
76
// Rounds |flt| to the nearest integer, resolving exact halfway cases to the
// even neighbour (round-half-to-even), matching C99 lrint() under the default
// rounding mode. Provided here because Visual C++ lacks lrint().
inline int lrint(double flt) {
  int intgr;
#if defined(V8_TARGET_ARCH_IA32)
  // fistp rounds according to the current x87 rounding mode, which defaults
  // to round-to-nearest-even, i.e. exactly the lrint() contract.
  __asm {
    fld flt
    fistp intgr
  };
#else
  // Round half away from zero first. Note: the previous code added 0.5
  // unconditionally; combined with truncation toward zero that mis-rounded
  // every negative input (e.g. -1.6 became -1 instead of -2).
  intgr = static_cast<int>(flt >= 0.0 ? flt + 0.5 : flt - 0.5);
  if ((intgr & 1) != 0) {
    // Exact halfway cases were rounded away from zero onto an odd value;
    // pull them back to the even neighbour.
    double diff = intgr - flt;
    if (diff == 0.5) {
      intgr--;  // e.g. 2.5 was rounded to 3; the even neighbour is 2.
    } else if (diff == -0.5) {
      intgr++;  // e.g. -2.5 was rounded to -3; the even neighbour is -2.
    }
  }
#endif
  return intgr;
}
93
94#endif  // _MSC_VER
95
96#ifndef __CYGWIN__
97// Random is missing on both Visual Studio and MinGW.
98int random();
99#endif
100
101#endif  // WIN32
102
103#include "lazy-instance.h"
104#include "utils.h"
105#include "v8globals.h"
106
107namespace v8 {
108namespace internal {
109
110class Semaphore;
111class Mutex;
112
113double ceiling(double x);
114double modulo(double x, double y);
115
116// Custom implementation of math functions.
117double fast_sin(double input);
118double fast_cos(double input);
119double fast_tan(double input);
120double fast_log(double input);
121double fast_exp(double input);
122double fast_sqrt(double input);
123// The custom exp implementation needs 16KB of lookup data; initialize it
124// on demand.
125void lazily_initialize_fast_exp();
126
127// Forward declarations.
128class Socket;
129
130// ----------------------------------------------------------------------------
131// Fast TLS support
132
133#ifndef V8_NO_FAST_TLS
134
135#if defined(_MSC_VER) && V8_HOST_ARCH_IA32
136
137#define V8_FAST_TLS_SUPPORTED 1
138
139INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
140
// Reads an already-created TLS slot directly from the x86 Thread Information
// Block via fs-relative loads, bypassing the TlsGetValue() API call.
// |index| must refer to an existing slot (see the ASSERTs).
inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
  // Offsets within the x86 TIB/TEB: TlsSlots (the 64 inline slots) and
  // TlsExpansionSlots (pointer to the overflow array), respectively.
  const intptr_t kTibInlineTlsOffset = 0xE10;
  const intptr_t kTibExtraTlsOffset = 0xF94;
  const intptr_t kMaxInlineSlots = 64;
  const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
  ASSERT(0 <= index && index < kMaxSlots);
  // Fast path: the first 64 slots live directly in the TIB and need only a
  // single fs-relative read.
  if (index < kMaxInlineSlots) {
    return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
                                               kPointerSize * index));
  }
  // Slow path: slots >= 64 live in a separately allocated expansion array.
  // It must already exist because the slot was created earlier.
  intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
  ASSERT(extra != 0);
  return *reinterpret_cast<intptr_t*>(extra +
                                      kPointerSize * (index - kMaxInlineSlots));
}
156
157#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
158
159#define V8_FAST_TLS_SUPPORTED 1
160
161extern intptr_t kMacTlsBaseOffset;
162
163INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
164
// Reads a TLS slot on Mac OS X with a single gs-relative load, i.e.
// result = *(gs:kMacTlsBaseOffset + index * kPointerSize).
// kMacTlsBaseOffset is determined at runtime (defined in the platform code).
inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
  intptr_t result;
#if V8_HOST_ARCH_IA32
  // 4-byte slots on ia32.
  asm("movl %%gs:(%1,%2,4), %0;"
      :"=r"(result)  // Output must be a writable register.
      :"r"(kMacTlsBaseOffset), "r"(index));
#else
  // Same load with 8-byte slots on x64.
  asm("movq %%gs:(%1,%2,8), %0;"
      :"=r"(result)
      :"r"(kMacTlsBaseOffset), "r"(index));
#endif
  return result;
}
178
179#endif
180
181#endif  // V8_NO_FAST_TLS
182
183
184// ----------------------------------------------------------------------------
185// OS
186//
187// This class has static methods for the different platform specific
188// functions. Add methods here to cope with differences between the
189// supported platforms.
190
class OS {
 public:
  // Initializes the platform OS support. Called once at VM startup.
  static void SetUp();

  // Initializes the platform OS support that depends on CPU features. This is
  // called after CPU initialization.
  static void PostSetUp();

  // Clean up platform-OS-related things. Called once at VM shutdown.
  static void TearDown();

  // Returns the accumulated user time for thread. This routine
  // can be used for profiling. The implementation should
  // strive for high-precision timer resolution, preferably
  // micro-second resolution.
  static int GetUserTime(uint32_t* secs,  uint32_t* usecs);

  // Get a tick counter normalized to one tick per microsecond.
  // Used for calculating time intervals.
  static int64_t Ticks();

  // Returns current time as the number of milliseconds since
  // 00:00:00 UTC, January 1, 1970.
  static double TimeCurrentMillis();

  // Returns a string identifying the current time zone. The
  // timestamp is used for determining if DST is in effect.
  static const char* LocalTimezone(double time);

  // Returns the local time offset in milliseconds east of UTC without
  // taking daylight savings time into account.
  static double LocalTimeOffset();

  // Returns the daylight savings offset for the given time.
  static double DaylightSavingsOffset(double time);

  // Returns last OS error.
  static int GetLastError();

  // Thin platform-independent wrappers around the C library file API.
  static FILE* FOpen(const char* path, const char* mode);
  static bool Remove(const char* path);

  // Opens a temporary file, the file is auto removed on close.
  static FILE* OpenTemporaryFile();

  // Log file open mode is platform-dependent due to line ends issues.
  static const char* const LogFileOpenMode;

  // Print output to console. This is mostly used for debugging output.
  // On platforms that has standard terminal output, the output
  // should go to stdout.
  static void Print(const char* format, ...);
  static void VPrint(const char* format, va_list args);

  // Print output to a file. This is mostly used for debugging output.
  static void FPrint(FILE* out, const char* format, ...);
  static void VFPrint(FILE* out, const char* format, va_list args);

  // Print error output to console. This is mostly used for error message
  // output. On platforms that has standard terminal output, the output
  // should go to stderr.
  static void PrintError(const char* format, ...);
  static void VPrintError(const char* format, va_list args);

  // Allocate/Free memory used by JS heap. Pages are readable/writable, but
  // they are not guaranteed to be executable unless 'executable' is true.
  // Returns the address of allocated memory, or NULL if failed.
  static void* Allocate(const size_t requested,
                        size_t* allocated,
                        bool is_executable);
  static void Free(void* address, const size_t size);

  // This is the granularity at which the ProtectCode(...) call can set page
  // permissions.
  static intptr_t CommitPageSize();

  // Mark code segments non-writable.
  static void ProtectCode(void* address, const size_t size);

  // Assign memory as a guard page so that access will cause an exception.
  static void Guard(void* address, const size_t size);

  // Generate a random address to be used for hinting mmap().
  static void* GetRandomMmapAddr();

  // Get the Alignment guaranteed by Allocate().
  static size_t AllocateAlignment();

  // Returns an indication of whether a pointer is in a space that
  // has been allocated by Allocate().  This method may conservatively
  // always return false, but giving more accurate information may
  // improve the robustness of the stack dump code in the presence of
  // heap corruption.
  static bool IsOutsideAllocatedSpace(void* pointer);

  // Sleep for a number of milliseconds.
  static void Sleep(const int milliseconds);

  // Returns the number of processor cores available.
  static int NumberOfCores();

  // Abort the current process.
  static void Abort();

  // Debug break.
  static void DebugBreak();

  // Dump C++ current stack trace (only functional on Linux).
  static void DumpBacktrace();

  // Walk the stack.
  static const int kStackWalkError = -1;
  static const int kStackWalkMaxNameLen = 256;
  static const int kStackWalkMaxTextLen = 256;
  // One captured stack frame: the code address plus a textual description,
  // truncated to kStackWalkMaxTextLen.
  struct StackFrame {
    void* address;
    char text[kStackWalkMaxTextLen];
  };

  // Fills 'frames' with the current call stack; kStackWalkError (-1)
  // indicates failure.
  static int StackWalk(Vector<StackFrame> frames);

  // Factory method for creating platform dependent Mutex.
  // Please use delete to reclaim the storage for the returned Mutex.
  static Mutex* CreateMutex();

  // Factory method for creating platform dependent Semaphore.
  // Please use delete to reclaim the storage for the returned Semaphore.
  static Semaphore* CreateSemaphore(int count);

  // Factory method for creating platform dependent Socket.
  // Please use delete to reclaim the storage for the returned Socket.
  static Socket* CreateSocket();

  // Abstraction of a memory mapped file; the concrete implementation is
  // platform specific.
  class MemoryMappedFile {
   public:
    static MemoryMappedFile* open(const char* name);
    static MemoryMappedFile* create(const char* name, int size, void* initial);
    virtual ~MemoryMappedFile() { }
    virtual void* memory() = 0;
    virtual int size() = 0;
  };

  // Safe formatting print. Ensures that str is always null-terminated.
  // Returns the number of chars written, or -1 if output was truncated.
  static int SNPrintF(Vector<char> str, const char* format, ...);
  static int VSNPrintF(Vector<char> str,
                       const char* format,
                       va_list args);

  // Safe string helpers operating on bounded buffers.
  static char* StrChr(char* str, int c);
  static void StrNCpy(Vector<char> dest, const char* src, size_t n);

  // Support for the profiler.  Can do nothing, in which case ticks
  // occurring in shared libraries will not be properly accounted for.
  static void LogSharedLibraryAddresses();

  // Support for the profiler.  Notifies the external profiling
  // process that a code moving garbage collection starts.  Can do
  // nothing, in which case the code objects must not move (e.g., by
  // using --never-compact) if accurate profiling is desired.
  static void SignalCodeMovingGC();

  // The return value indicates the CPU features we are sure of because of the
  // OS.  For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
  // instructions.
  // This is a little messy because the interpretation is subject to the cross
  // of the CPU and the OS.  The bits in the answer correspond to the bit
  // positions indicated by the members of the CpuFeature enum from globals.h
  static uint64_t CpuFeaturesImpliedByPlatform();

  // Maximum size of the virtual memory.  0 means there is no artificial
  // limit.
  static intptr_t MaxVirtualMemory();

  // Returns the double constant NAN
  static double nan_value();

  // Support runtime detection of Cpu implementer
  static CpuImplementer GetCpuImplementer();

  // Support runtime detection of Cpu implementer
  static CpuPart GetCpuPart(CpuImplementer implementer);

  // Support runtime detection of VFP3 on ARM CPUs.
  static bool ArmCpuHasFeature(CpuFeature feature);

  // Support runtime detection of whether the hard float option of the
  // EABI is used.
  static bool ArmUsingHardFloat();

  // Support runtime detection of FPU on MIPS CPUs.
  static bool MipsCpuHasFeature(CpuFeature feature);

  // Returns the activation frame alignment constraint or zero if
  // the platform doesn't care. Guaranteed to be a power of two.
  static int ActivationFrameAlignment();

#if defined(V8_TARGET_ARCH_IA32)
  // Limit below which the extra overhead of the MemCopy function is likely
  // to outweigh the benefits of faster copying.
  static const int kMinComplexMemCopy = 64;

  // Copy memory area. No restrictions.
  static void MemMove(void* dest, const void* src, size_t size);
  typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);

  // Keep the distinction of "move" vs. "copy" for the benefit of other
  // architectures.
  static void MemCopy(void* dest, const void* src, size_t size) {
    MemMove(dest, src, size);
  }
#elif defined(V8_HOST_ARCH_ARM)
  // Pluggable uint8 copy routine used by MemCopy() below; the wrapper is the
  // plain-memcpy fallback implementation.
  typedef void (*MemCopyUint8Function)(uint8_t* dest,
                                       const uint8_t* src,
                                       size_t size);
  static MemCopyUint8Function memcopy_uint8_function;
  static void MemCopyUint8Wrapper(uint8_t* dest,
                                  const uint8_t* src,
                                  size_t chars) {
    memcpy(dest, src, chars);
  }
  // For values < 16, the assembler function is slower than the inlined C code.
  static const int kMinComplexMemCopy = 16;
  static void MemCopy(void* dest, const void* src, size_t size) {
    (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
                              reinterpret_cast<const uint8_t*>(src),
                              size);
  }
  static void MemMove(void* dest, const void* src, size_t size) {
    memmove(dest, src, size);
  }

  // Pluggable widening copy (Latin-1 -> UC16) used by MemCopyUint16Uint8().
  typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest,
                                             const uint8_t* src,
                                             size_t size);
  static MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
  static void MemCopyUint16Uint8Wrapper(uint16_t* dest,
                                        const uint8_t* src,
                                        size_t chars);
  // For values < 12, the assembler function is slower than the inlined C code.
  static const int kMinComplexConvertMemCopy = 12;
  static void MemCopyUint16Uint8(uint16_t* dest,
                                 const uint8_t* src,
                                 size_t size) {
    (*memcopy_uint16_uint8_function)(dest, src, size);
  }
#else
  // Copy memory area to disjoint memory area.
  static void MemCopy(void* dest, const void* src, size_t size) {
    memcpy(dest, src, size);
  }
  static void MemMove(void* dest, const void* src, size_t size) {
    memmove(dest, src, size);
  }
  static const int kMinComplexMemCopy = 16 * kPointerSize;
#endif  // V8_TARGET_ARCH_IA32

  static int GetCurrentProcessId();

 private:
  // Milliseconds per second; used by the time conversion routines.
  static const int msPerSecond = 1000;

  DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
455
456// Represents and controls an area of reserved memory.
457// Control of the reserved memory can be assigned to another VirtualMemory
// object by assignment or copy-constructing. This removes the reserved memory
459// from the original object.
class VirtualMemory {
 public:
  // Empty VirtualMemory object, controlling no reserved memory.
  VirtualMemory();

  // Reserves virtual memory with size.
  explicit VirtualMemory(size_t size);

  // Reserves virtual memory containing an area of the given size that
  // is aligned per alignment. This may not be at the position returned
  // by address().
  VirtualMemory(size_t size, size_t alignment);

  // Releases the reserved memory, if any, controlled by this VirtualMemory
  // object.
  ~VirtualMemory();

  // Returns whether the memory has been reserved.
  bool IsReserved();

  // Initialize or resets an embedded VirtualMemory object.
  void Reset();

  // Returns the start address of the reserved memory.
  // If the memory was reserved with an alignment, this address is not
  // necessarily aligned. The user might need to round it up to a multiple of
  // the alignment to get the start of the aligned block.
  void* address() {
    ASSERT(IsReserved());
    return address_;
  }

  // Returns the size of the reserved memory. The returned value is only
  // meaningful when IsReserved() returns true.
  // If the memory was reserved with an alignment, this size may be larger
  // than the requested size.
  size_t size() { return size_; }

  // Commits real memory. Returns whether the operation succeeded.
  bool Commit(void* address, size_t size, bool is_executable);

  // Uncommit real memory.  Returns whether the operation succeeded.
  bool Uncommit(void* address, size_t size);

  // Creates a single guard page at the given address.
  bool Guard(void* address);

  // Releases the whole reserved region. Requires IsReserved().
  void Release() {
    ASSERT(IsReserved());
    // Notice: Order is important here. The VirtualMemory object might live
    // inside the allocated region, so all members must be copied out and the
    // object reset before the region is actually released.
    void* address = address_;
    size_t size = size_;
    Reset();
    bool result = ReleaseRegion(address, size);
    USE(result);
    ASSERT(result);
  }

  // Assign control of the reserved region to a different VirtualMemory object.
  // The old object is no longer functional (IsReserved() returns false).
  void TakeControl(VirtualMemory* from) {
    ASSERT(!IsReserved());
    address_ = from->address_;
    size_ = from->size_;
    from->Reset();
  }

  // Static counterparts of the instance methods above, operating directly on
  // a raw region rather than a VirtualMemory object.
  static void* ReserveRegion(size_t size);

  static bool CommitRegion(void* base, size_t size, bool is_executable);

  static bool UncommitRegion(void* base, size_t size);

  // Must be called with a base pointer that has been returned by ReserveRegion
  // and the same size it was reserved with.
  static bool ReleaseRegion(void* base, size_t size);

  // Returns true if OS performs lazy commits, i.e. the memory allocation call
  // defers actual physical memory allocation till the first memory access.
  // Otherwise returns false.
  static bool HasLazyCommits();

 private:
  void* address_;  // Start address of the virtual memory.
  size_t size_;  // Size of the virtual memory.
};
547
548
549// ----------------------------------------------------------------------------
550// Semaphore
551//
552// A semaphore object is a synchronization object that maintains a count. The
553// count is decremented each time a thread completes a wait for the semaphore
554// object and incremented each time a thread signals the semaphore. When the
// count reaches zero, threads waiting for the semaphore block until the
556// count becomes non-zero.
557
// Platform-independent counting-semaphore interface; concrete instances are
// created with OS::CreateSemaphore().
class Semaphore {
 public:
  virtual ~Semaphore() {}

  // Suspends the calling thread until the semaphore counter is non zero
  // and then decrements the semaphore counter.
  virtual void Wait() = 0;

  // Suspends the calling thread until the counter is non zero or the timeout
  // time has passed. If timeout happens the return value is false and the
  // counter is unchanged. Otherwise the semaphore counter is decremented and
  // true is returned. The timeout value is specified in microseconds.
  virtual bool Wait(int timeout) = 0;

  // Increments the semaphore counter.
  virtual void Signal() = 0;
};
575
// Creation trait for LazySemaphore: builds the semaphore with the
// compile-time initial counter value.
template <int InitialValue>
struct CreateSemaphoreTrait {
  static Semaphore* Create() {
    return OS::CreateSemaphore(InitialValue);
  }
};
582
583// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
584// Usage:
585//   // The following semaphore starts at 0.
586//   static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
587//
588//   void my_function() {
589//     // Do something with my_semaphore.Pointer().
590//   }
591//
template <int InitialValue>
struct LazySemaphore {
  // The underlying Semaphore is created on the first Pointer() call, guarded
  // by thread-safe once-initialization.
  typedef typename LazyDynamicInstance<
      Semaphore, CreateSemaphoreTrait<InitialValue>,
      ThreadSafeInitOnceTrait>::type type;
};
598
599#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
600
601
602// ----------------------------------------------------------------------------
603// Thread
604//
// Thread objects are used for creating and running threads. When the Start()
// method is called the new thread starts running the Run() method in the new
607// thread. The Thread object should not be deallocated before the thread has
608// terminated.
609
class Thread {
 public:
  // Opaque data type for thread-local storage keys.
  // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
  // to ensure that enumeration type has correct value range (see Issue 830 for
  // more details).
  enum LocalStorageKey {
    LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
    LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
  };

  // Thread creation parameters: a name (for debugging) and a stack size.
  // NOTE(review): stack_size 0 presumably selects the platform default --
  // confirm against the platform_<os>.cc implementations.
  class Options {
   public:
    Options() : name_("v8:<unknown>"), stack_size_(0) {}
    Options(const char* name, int stack_size = 0)
        : name_(name), stack_size_(stack_size) {}

    const char* name() const { return name_; }
    int stack_size() const { return stack_size_; }

   private:
    const char* name_;
    int stack_size_;
  };

  // Create new thread.
  explicit Thread(const Options& options);
  virtual ~Thread();

  // Start new thread by calling the Run() method on the new thread.
  void Start();

  // Start new thread and wait until Run() method is called on the new thread.
  // Uses a temporary semaphore that NotifyStartedAndRun() signals from the
  // new thread; the semaphore is deleted before this method returns.
  void StartSynchronously() {
    start_semaphore_ = OS::CreateSemaphore(0);
    Start();
    start_semaphore_->Wait();
    delete start_semaphore_;
    start_semaphore_ = NULL;
  }

  // Wait until thread terminates.
  void Join();

  // Returns the thread name given at construction time.
  inline const char* name() const {
    return name_;
  }

  // Abstract method for run handler.
  virtual void Run() = 0;

  // Thread-local storage.
  static LocalStorageKey CreateThreadLocalKey();
  static void DeleteThreadLocalKey(LocalStorageKey key);
  static void* GetThreadLocal(LocalStorageKey key);
  // Convenience wrapper: narrows the stored pointer back to the int that
  // SetThreadLocalInt() widened.
  static int GetThreadLocalInt(LocalStorageKey key) {
    return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
  }
  static void SetThreadLocal(LocalStorageKey key, void* value);
  // Convenience wrapper: stores an int in a TLS slot, widened to pointer size.
  static void SetThreadLocalInt(LocalStorageKey key, int value) {
    SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
  }
  // Returns whether the given TLS slot currently holds a non-NULL value.
  static bool HasThreadLocal(LocalStorageKey key) {
    return GetThreadLocal(key) != NULL;
  }

#ifdef V8_FAST_TLS_SUPPORTED
  // Fast path: reads the slot directly from the thread's TLS area and, in
  // debug mode, cross-checks the result against the portable implementation.
  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
    void* result = reinterpret_cast<void*>(
        InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
    ASSERT(result == GetThreadLocal(key));
    return result;
  }
#else
  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
    return GetThreadLocal(key);
  }
#endif

  // A hint to the scheduler to let another thread run.
  static void YieldCPU();


  // The thread name length is limited to 16 based on Linux's implementation of
  // prctl().
  static const int kMaxThreadNameLength = 16;

  // Platform-specific bookkeeping, defined in platform_<os>.cc.
  class PlatformData;
  PlatformData* data() { return data_; }

  // Entry point executed on the new thread: wakes StartSynchronously() if it
  // is waiting, then invokes the subclass Run() method.
  void NotifyStartedAndRun() {
    if (start_semaphore_) start_semaphore_->Signal();
    Run();
  }

 private:
  void set_name(const char* name);

  PlatformData* data_;

  // Thread name, kept in a fixed-size internal buffer.
  char name_[kMaxThreadNameLength];
  int stack_size_;
  // Non-NULL only while StartSynchronously() is waiting for the new thread;
  // see NotifyStartedAndRun().
  Semaphore* start_semaphore_;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};
716
717
718// ----------------------------------------------------------------------------
719// Mutex
720//
721// Mutexes are used for serializing access to non-reentrant sections of code.
722// The implementations of mutex should allow for nested/recursive locking.
723
// Platform-independent mutex interface; concrete instances are created with
// OS::CreateMutex(). Implementations should allow nested/recursive locking
// (see the section comment above).
class Mutex {
 public:
  virtual ~Mutex() {}

  // Locks the given mutex. If the mutex is currently unlocked, it becomes
  // locked and owned by the calling thread immediately. If the mutex
  // is already locked by another thread, suspends the calling thread until
  // the mutex is unlocked.
  virtual int Lock() = 0;

  // Unlocks the given mutex. The mutex is assumed to be locked and owned by
  // the calling thread on entrance.
  virtual int Unlock() = 0;

  // Tries to lock the given mutex. Returns whether the mutex was
  // successfully locked.
  virtual bool TryLock() = 0;
};
742
// Creation trait for LazyMutex: constructs the underlying Mutex on first use.
struct CreateMutexTrait {
  static Mutex* Create() {
    return OS::CreateMutex();
  }
};
748
749// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
750// Usage:
751//   static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
752//
753//   void my_function() {
754//     ScopedLock my_lock(my_mutex.Pointer());
755//     // Do something.
756//   }
757//
758typedef LazyDynamicInstance<
759    Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
760
761#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
762
763// ----------------------------------------------------------------------------
764// ScopedLock
765//
766// Stack-allocated ScopedLocks provide block-scoped locking and
767// unlocking of a mutex.
768class ScopedLock {
769 public:
770  explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
771    ASSERT(mutex_ != NULL);
772    mutex_->Lock();
773  }
774  ~ScopedLock() {
775    mutex_->Unlock();
776  }
777
778 private:
779  Mutex* mutex_;
780  DISALLOW_COPY_AND_ASSIGN(ScopedLock);
781};
782
783
784// ----------------------------------------------------------------------------
785// Socket
786//
787
// Platform-independent, connection-oriented socket interface; concrete
// instances are created with OS::CreateSocket().
class Socket {
 public:
  virtual ~Socket() {}

  // Server initialization.
  virtual bool Bind(const int port) = 0;
  virtual bool Listen(int backlog) const = 0;
  virtual Socket* Accept() const = 0;

  // Client initialization.
  virtual bool Connect(const char* host, const char* port) = 0;

  // Shutdown socket for both read and write. This causes blocking Send and
  // Receive calls to exit. After Shutdown the Socket object cannot be used for
  // any communication.
  virtual bool Shutdown() = 0;

  // Data transmission.
  // Return 0 on failure.
  virtual int Send(const char* data, int len) const = 0;
  virtual int Receive(char* data, int len) const = 0;

  // Set the value of the SO_REUSEADDR socket option.
  virtual bool SetReuseAddress(bool reuse_address) = 0;

  virtual bool IsValid() const = 0;

  // One-time process-wide socket support initialization; returns whether it
  // succeeded.
  static bool SetUp();
  // Returns the last socket error code.
  static int LastError();
  // Host-to-network and network-to-host byte order conversions for 16-bit
  // and 32-bit values.
  static uint16_t HToN(uint16_t value);
  static uint16_t NToH(uint16_t value);
  static uint32_t HToN(uint32_t value);
  static uint32_t NToH(uint32_t value);
};
822
823
824} }  // namespace v8::internal
825
826#endif  // V8_PLATFORM_H_
827