platform-linux.cc revision 257744e915dfc84d6d07a6b2accf8402d9ffc708
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28// Platform-specific code for Linux goes here. For the POSIX-compatible parts,
29// the implementation is in platform-posix.cc.
30
31#include <pthread.h>
32#include <semaphore.h>
33#include <signal.h>
34#include <sys/prctl.h>
35#include <sys/time.h>
36#include <sys/resource.h>
37#include <sys/syscall.h>
38#include <sys/types.h>
39#include <stdlib.h>
40
41// Ubuntu Dapper requires memory pages to be marked as
42// executable. Otherwise, the OS raises an exception when executing code
43// in that page.
44#include <sys/types.h>  // mmap & munmap
45#include <sys/mman.h>   // mmap & munmap
46#include <sys/stat.h>   // open
47#include <fcntl.h>      // open
48#include <unistd.h>     // sysconf
49#ifdef __GLIBC__
50#include <execinfo.h>   // backtrace, backtrace_symbols
51#endif  // def __GLIBC__
52#include <strings.h>    // index
53#include <errno.h>
54#include <stdarg.h>
55
56#undef MAP_TYPE
57
58#include "v8.h"
59
60#include "platform.h"
61#include "v8threads.h"
62#include "vm-state-inl.h"
63
64
65namespace v8 {
66namespace internal {
67
68// 0 is never a valid thread id on Linux since tids and pids share a
69// name space and pid 0 is reserved (see man 2 kill).
70static const pthread_t kNoThread = (pthread_t) 0;
71
72
73double ceiling(double x) {
74  return ceil(x);
75}
76
77
78static Mutex* limit_mutex = NULL;
79
80
81void OS::Setup() {
82  // Seed the random number generator.
83  // Convert the current time to a 64-bit integer first, before converting it
84  // to an unsigned. Going directly can cause an overflow and the seed to be
85  // set to all ones. The seed will be identical for different instances that
86  // call this setup code within the same millisecond.
87  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
88  srandom(static_cast<unsigned int>(seed));
89  limit_mutex = CreateMutex();
90
91#ifdef __arm__
92  // When running on ARM hardware check that the EABI used by V8 and
93  // by the C code is the same.
94  bool hard_float = OS::ArmUsingHardFloat();
95  if (hard_float) {
96#if !USE_EABI_HARDFLOAT
97    PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
98           "-DUSE_EABI_HARDFLOAT\n");
99    exit(1);
100#endif
101  } else {
102#if USE_EABI_HARDFLOAT
103    PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
104           "-DUSE_EABI_HARDFLOAT\n");
105    exit(1);
106#endif
107  }
108#endif
109}
110
111
112uint64_t OS::CpuFeaturesImpliedByPlatform() {
113#if (defined(__mips_hard_float) && __mips_hard_float != 0)
114  // Here gcc is telling us that we are on a MIPS and gcc is assuming that we
115  // have FPU instructions.  If gcc can assume it then so can we.
116  return 1u << FPU;
117#else
118  return 0;  // Linux runs on anything.
119#endif
120}
121
122
123#ifdef __arm__
124static bool CPUInfoContainsString(const char * search_string) {
125  const char* file_name = "/proc/cpuinfo";
126  // This is written as a straight-shot, one-pass parser
127  // and not using STL string and ifstream because,
128  // on Linux, it's reading from a (non-mmap-able)
129  // character special device.
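  // The scan keeps a pointer into the search string, advancing it on each
  // matching character and resetting it to the start on a mismatch.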
130  FILE* f = NULL;
131  const char* what = search_string;
132
133  if (NULL == (f = fopen(file_name, "r")))
134    return false;
135
136  int k;
137  while (EOF != (k = fgetc(f))) {
138    if (k == *what) {
139      ++what;
140      while ((*what != '\0') && (*what == fgetc(f))) {
141        ++what;
142      }
143      if (*what == '\0') {
144        fclose(f);
145        return true;
146      } else {
147        what = search_string;
148      }
149    }
150  }
151  fclose(f);
152
153  // Did not find string in the proc file.
154  return false;
155}
156
157
158bool OS::ArmCpuHasFeature(CpuFeature feature) {
159  const char* search_string = NULL;
160  // Simple detection of VFP at runtime for Linux.
161  // It is based on /proc/cpuinfo, which reveals hardware configuration
162  // to user-space applications.  According to ARM (mid 2009), no similar
163  // facility is universally available on the ARM architectures,
164  // so it's up to individual OSes to provide it.
165  switch (feature) {
166    case VFP3:
167      search_string = "vfpv3";
168      break;
169    case ARMv7:
170      search_string = "ARMv7";
171      break;
172    default:
173      UNREACHABLE();
174  }
175
176  if (CPUInfoContainsString(search_string)) {
177    return true;
178  }
179
180  if (feature == VFP3) {
181    // Some old kernels will report vfp not vfpv3. Here we make a last attempt
182    // to detect vfpv3 by checking for vfp *and* neon, since neon is only
183    // available on architectures with vfpv3.
184    // Checking neon on its own is not enough as it is possible to have neon
185    // without vfp.
186    if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
187      return true;
188    }
189  }
190
191  return false;
192}
193
194
195// Simple helper function to detect whether the C code is compiled with
196// option -mfloat-abi=hard. The register d0 is loaded with 1.0 and the register
197// pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is passed to GCC then
198// calling this will return 1.0, and otherwise 0.0.
199static void ArmUsingHardFloatHelper() {
200  asm("mov r0, #0");
201#if defined(__VFP_FP__) && !defined(__SOFTFP__)
202  // Load 0x3ff00000 into r1 using instructions available in both ARM
203  // and Thumb mode.
204  asm("mov r1, #3");
205  asm("mov r2, #255");
206  asm("lsl r1, r1, #8");
207  asm("orr r1, r1, r2");
208  asm("lsl r1, r1, #20");
209  // For vmov d0, r0, r1 use ARM mode.
210#ifdef __thumb__
211  asm volatile(
212    "@   Enter ARM Mode  \n\t"
213    "    adr r3, 1f      \n\t"
214    "    bx  r3          \n\t"
215    "    .ALIGN 4        \n\t"
216    "    .ARM            \n"
217    "1:  vmov d0, r0, r1 \n\t"
218    "@   Enter THUMB Mode\n\t"
219    "    adr r3, 2f+1    \n\t"
220    "    bx  r3          \n\t"
221    "    .THUMB          \n"
222    "2:                  \n\t");
223#else
224  asm("vmov d0, r0, r1");
225#endif  // __thumb__
226#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
227  asm("mov r1, #0");
228}
229
230
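// Under the hard-float calling convention a double result is returned in d0,
// which the helper sets to 1.0; under the soft-float convention it is returned
// in r0/r1, which the helper leaves as 0. Calling the helper as if it returned
// a double therefore reveals which ABI the C code was compiled for.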
231bool OS::ArmUsingHardFloat() {
232  // Cast helper function from returning void to returning double.
233  typedef double (*F)();
234  F f = FUNCTION_CAST<F>(FUNCTION_ADDR(ArmUsingHardFloatHelper));
235  return f() == 1.0;
236}
237#endif  // def __arm__
238
239
240#ifdef __mips__
241bool OS::MipsCpuHasFeature(CpuFeature feature) {
242  const char* search_string = NULL;
243  const char* file_name = "/proc/cpuinfo";
244  // Simple detection of FPU at runtime for Linux.
245  // It is based on /proc/cpuinfo, which reveals hardware configuration
246  // to user-space applications.  According to MIPS (early 2010), no similar
247  // facility is universally available on the MIPS architectures,
248  // so it's up to individual OSes to provide it.
249  //
250  // This is written as a straight-shot, one-pass parser
251  // and not using STL string and ifstream because,
252  // on Linux, it's reading from a (non-mmap-able)
253  // character special device.
254
255  switch (feature) {
256    case FPU:
257      search_string = "FPU";
258      break;
259    default:
260      UNREACHABLE();
261  }
262
263  FILE* f = NULL;
264  const char* what = search_string;
265
266  if (NULL == (f = fopen(file_name, "r")))
267    return false;
268
269  int k;
270  while (EOF != (k = fgetc(f))) {
271    if (k == *what) {
272      ++what;
273      while ((*what != '\0') && (*what == fgetc(f))) {
274        ++what;
275      }
276      if (*what == '\0') {
277        fclose(f);
278        return true;
279      } else {
280        what = search_string;
281      }
282    }
283  }
284  fclose(f);
285
286  // Did not find string in the proc file.
287  return false;
288}
289#endif  // def __mips__
290
291
292int OS::ActivationFrameAlignment() {
293#ifdef V8_TARGET_ARCH_ARM
294  // On EABI ARM targets this is required for fp correctness in the
295  // runtime system.
296  return 8;
297#elif V8_TARGET_ARCH_MIPS
298  return 8;
299#endif
300  // With gcc 4.4 the tree vectorization optimizer can generate code
301  // that requires 16-byte alignment, such as movdqa on x86.
302  return 16;
303}
304
305
306void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
307#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
308    (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
309  // Only use on ARM or MIPS hardware.
310  MemoryBarrier();
311#else
312  __asm__ __volatile__("" : : : "memory");
313  // An x86 store acts as a release barrier.
314#endif
315  *ptr = value;
316}
317
318
319const char* OS::LocalTimezone(double time) {
320  if (isnan(time)) return "";
321  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
322  struct tm* t = localtime(&tv);
323  if (NULL == t) return "";
324  return t->tm_zone;
325}
326
327
328double OS::LocalTimeOffset() {
329  time_t tv = time(NULL);
330  struct tm* t = localtime(&tv);
331  // tm_gmtoff includes any daylight savings offset, so subtract it.
332  return static_cast<double>(t->tm_gmtoff * msPerSecond -
333                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
334}
335
336
337// We keep the lowest and highest addresses mapped as a quick way of
338// determining that pointers are outside the heap (used mostly in assertions
339// and verification).  The estimate is conservative, i.e., not all addresses in
340// 'allocated' space are actually allocated to our heap.  The range is
341// [lowest, highest), inclusive on the low end and exclusive on the high end.
342static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
343static void* highest_ever_allocated = reinterpret_cast<void*>(0);
344
345
346static void UpdateAllocatedSpaceLimits(void* address, int size) {
347  ASSERT(limit_mutex != NULL);
348  ScopedLock lock(limit_mutex);
349
350  lowest_ever_allocated = Min(lowest_ever_allocated, address);
351  highest_ever_allocated =
352      Max(highest_ever_allocated,
353          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
354}
355
356
357bool OS::IsOutsideAllocatedSpace(void* address) {
358  return address < lowest_ever_allocated || address >= highest_ever_allocated;
359}
360
361
362size_t OS::AllocateAlignment() {
363  return sysconf(_SC_PAGESIZE);
364}
365
366
367void* OS::Allocate(const size_t requested,
368                   size_t* allocated,
369                   bool is_executable) {
370  // TODO(805): Port randomization of allocated executable memory to Linux.
371  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
372  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
373  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
374  if (mbase == MAP_FAILED) {
375    LOG(i::Isolate::Current(),
376        StringEvent("OS::Allocate", "mmap failed"));
377    return NULL;
378  }
379  *allocated = msize;
380  UpdateAllocatedSpaceLimits(mbase, msize);
381  return mbase;
382}
383
384
385void OS::Free(void* address, const size_t size) {
386  // TODO(1240712): munmap has a return value which is ignored here.
387  int result = munmap(address, size);
388  USE(result);
389  ASSERT(result == 0);
390}
391
392
393#ifdef ENABLE_HEAP_PROTECTION
394
395void OS::Protect(void* address, size_t size) {
396  // TODO(1240712): mprotect has a return value which is ignored here.
397  mprotect(address, size, PROT_READ);
398}
399
400
401void OS::Unprotect(void* address, size_t size, bool is_executable) {
402  // TODO(1240712): mprotect has a return value which is ignored here.
403  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
404  mprotect(address, size, prot);
405}
406
407#endif
408
409
410void OS::Sleep(int milliseconds) {
411  unsigned int ms = static_cast<unsigned int>(milliseconds);
412  usleep(1000 * ms);
413}
414
415
416void OS::Abort() {
417  // Redirect to std abort to signal abnormal program termination.
418  abort();
419}
420
421
422void OS::DebugBreak() {
423// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
424//  which is the architecture of generated code).
425#if (defined(__arm__) || defined(__thumb__))
426# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
427  asm("bkpt 0");
428# endif
429#elif defined(__mips__)
430  asm("break");
431#else
432  asm("int $3");
433#endif
434}
435
436
437class PosixMemoryMappedFile : public OS::MemoryMappedFile {
438 public:
439  PosixMemoryMappedFile(FILE* file, void* memory, int size)
440    : file_(file), memory_(memory), size_(size) { }
441  virtual ~PosixMemoryMappedFile();
442  virtual void* memory() { return memory_; }
443  virtual int size() { return size_; }
444 private:
445  FILE* file_;
446  void* memory_;
447  int size_;
448};
449
450
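// Maps an existing file shared and read/write, so stores through the mapping
// are written back to the underlying file.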
451OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
452  FILE* file = fopen(name, "r+");
453  if (file == NULL) return NULL;
454
455  fseek(file, 0, SEEK_END);
456  int size = ftell(file);
457
458  void* memory =
459      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
460  return new PosixMemoryMappedFile(file, memory, size);
461}
462
463
464OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
465    void* initial) {
466  FILE* file = fopen(name, "w+");
467  if (file == NULL) return NULL;
468  int result = fwrite(initial, size, 1, file);
469  if (result < 1) {
470    fclose(file);
471    return NULL;
472  }
473  void* memory =
474      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
475  return new PosixMemoryMappedFile(file, memory, size);
476}
477
478
479PosixMemoryMappedFile::~PosixMemoryMappedFile() {
480  if (memory_) munmap(memory_, size_);
481  fclose(file_);
482}
483
484
485void OS::LogSharedLibraryAddresses() {
486#ifdef ENABLE_LOGGING_AND_PROFILING
487  // This function assumes that the layout of the file is as follows:
488  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
489  // If we encounter an unexpected situation we abort scanning further entries.
490  FILE* fp = fopen("/proc/self/maps", "r");
491  if (fp == NULL) return;
492
493  // Allocate enough room to be able to store a full file name.
494  const int kLibNameLen = FILENAME_MAX + 1;
495  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
496
497  i::Isolate* isolate = ISOLATE;
498  // This loop will terminate once the scanning hits an EOF.
499  while (true) {
500    uintptr_t start, end;
501    char attr_r, attr_w, attr_x, attr_p;
502    // Parse the addresses and permission bits at the beginning of the line.
503    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
504    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
505
506    int c;
507    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
508      // Found a read-only executable entry. Skip characters until we reach
509      // the beginning of the filename or the end of the line.
510      do {
511        c = getc(fp);
512      } while ((c != EOF) && (c != '\n') && (c != '/'));
513      if (c == EOF) break;  // EOF: Was unexpected, just exit.
514
515      // Process the filename if found.
516      if (c == '/') {
517        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
518
519        // Read to the end of the line. Exit if the read fails.
520        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
521
522        // Drop the newline character read by fgets. We do not need to check
523        // for a zero-length string because we know that we at least read the
524        // '/' character.
525        lib_name[strlen(lib_name) - 1] = '\0';
526      } else {
527        // No library name found, just record the raw address range.
528        snprintf(lib_name, kLibNameLen,
529                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
530      }
531      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
532    } else {
533      // Entry not describing executable data. Skip to end of line to setup
534      // reading the next entry.
535      do {
536        c = getc(fp);
537      } while ((c != EOF) && (c != '\n'));
538      if (c == EOF) break;
539    }
540  }
541  free(lib_name);
542  fclose(fp);
543#endif
544}
545
546
547static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
548
549
550void OS::SignalCodeMovingGC() {
551#ifdef ENABLE_LOGGING_AND_PROFILING
552  // Support for ll_prof.py.
553  //
554  // The Linux profiler built into the kernel logs all mmaps with
555  // PROT_EXEC so that analysis tools can properly attribute ticks. We
556  // do an mmap with a name known to ll_prof.py and immediately munmap
557  // it. This injects a GC marker into the stream of events generated
558  // by the kernel and allows us to synchronize the V8 code log and the
559  // kernel log.
560  int size = sysconf(_SC_PAGESIZE);
561  FILE* f = fopen(kGCFakeMmap, "w+");
562  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
563                    fileno(f), 0);
564  ASSERT(addr != MAP_FAILED);
565  munmap(addr, size);
566  fclose(f);
567#endif
568}
569
570
571int OS::StackWalk(Vector<OS::StackFrame> frames) {
572  // backtrace is a glibc extension.
573#ifdef __GLIBC__
574  int frames_size = frames.length();
575  ScopedVector<void*> addresses(frames_size);
576
577  int frames_count = backtrace(addresses.start(), frames_size);
578
579  char** symbols = backtrace_symbols(addresses.start(), frames_count);
580  if (symbols == NULL) {
581    return kStackWalkError;
582  }
583
584  for (int i = 0; i < frames_count; i++) {
585    frames[i].address = addresses[i];
586    // Format a text representation of the frame based on the information
587    // available.
588    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
589             "%s",
590             symbols[i]);
591    // Make sure line termination is in place.
592    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
593  }
594
595  free(symbols);
596
597  return frames_count;
598#else  // ndef __GLIBC__
599  return 0;
600#endif  // ndef __GLIBC__
601}
602
603
604// Constants used for mmap.
605static const int kMmapFd = -1;
606static const int kMmapFdOffset = 0;
607
608
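// Reserves an address range without committing it: PROT_NONE keeps the pages
// inaccessible and MAP_NORESERVE avoids reserving swap space for them until
// Commit() maps them in.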
609VirtualMemory::VirtualMemory(size_t size) {
610  address_ = mmap(NULL, size, PROT_NONE,
611                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
612                  kMmapFd, kMmapFdOffset);
613  size_ = size;
614}
615
616
617VirtualMemory::~VirtualMemory() {
618  if (IsReserved()) {
619    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
620  }
621}
622
623
624bool VirtualMemory::IsReserved() {
625  return address_ != MAP_FAILED;
626}
627
628
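// Commits a sub-range of the reservation by remapping it in place (MAP_FIXED)
// with read/write (and optionally execute) permissions.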
629bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
630  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
631  if (MAP_FAILED == mmap(address, size, prot,
632                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
633                         kMmapFd, kMmapFdOffset)) {
634    return false;
635  }
636
637  UpdateAllocatedSpaceLimits(address, size);
638  return true;
639}
640
641
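// Returns the range to the reserved-but-inaccessible state; the address range
// itself stays reserved.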
642bool VirtualMemory::Uncommit(void* address, size_t size) {
643  return mmap(address, size, PROT_NONE,
644              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
645              kMmapFd, kMmapFdOffset) != MAP_FAILED;
646}
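// A minimal usage sketch of the reserve/commit pattern (assumed caller code,
// not part of this file):
//   VirtualMemory reservation(1024 * 1024);  // Reserve 1 MB of address space.
//   if (reservation.IsReserved()) {
//     // Back the first page with real, read/write memory.
//     reservation.Commit(reservation.address(), 4096, false);
//   }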
647
648
649class Thread::PlatformData : public Malloced {
650 public:
651  PlatformData() : thread_(kNoThread) {}
652
653  pthread_t thread_;  // Thread handle for pthread.
654};
655
656Thread::Thread(Isolate* isolate, const Options& options)
657    : data_(new PlatformData()),
658      isolate_(isolate),
659      stack_size_(options.stack_size) {
660  set_name(options.name);
661}
662
663
664Thread::Thread(Isolate* isolate, const char* name)
665    : data_(new PlatformData()),
666      isolate_(isolate),
667      stack_size_(0) {
668  set_name(name);
669}
670
671
672Thread::~Thread() {
673  delete data_;
674}
675
676
677static void* ThreadEntry(void* arg) {
678  Thread* thread = reinterpret_cast<Thread*>(arg);
679  // This is also initialized by the first argument to pthread_create() but we
680  // don't know which thread will run first (the original thread or the new
681  // one) so we initialize it here too.
682  prctl(PR_SET_NAME,
683        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
684        0, 0, 0);
685  thread->data()->thread_ = pthread_self();
686  ASSERT(thread->data()->thread_ != kNoThread);
687  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
688  thread->Run();
689  return NULL;
690}
691
692
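// Copies the name into the fixed-size buffer, guaranteeing NUL termination
// even when the name is truncated.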
693void Thread::set_name(const char* name) {
694  strncpy(name_, name, sizeof(name_));
695  name_[sizeof(name_) - 1] = '\0';
696}
697
698
699void Thread::Start() {
700  pthread_attr_t* attr_ptr = NULL;
701  pthread_attr_t attr;
702  if (stack_size_ > 0) {
703    pthread_attr_init(&attr);
704    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
705    attr_ptr = &attr;
706  }
707  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
708  ASSERT(data_->thread_ != kNoThread);
709}
710
711
712void Thread::Join() {
713  pthread_join(data_->thread_, NULL);
714}
715
716
717Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
718  pthread_key_t key;
719  int result = pthread_key_create(&key, NULL);
720  USE(result);
721  ASSERT(result == 0);
722  return static_cast<LocalStorageKey>(key);
723}
724
725
726void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
727  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
728  int result = pthread_key_delete(pthread_key);
729  USE(result);
730  ASSERT(result == 0);
731}
732
733
734void* Thread::GetThreadLocal(LocalStorageKey key) {
735  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
736  return pthread_getspecific(pthread_key);
737}
738
739
740void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
741  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
742  pthread_setspecific(pthread_key, value);
743}
744
745
746void Thread::YieldCPU() {
747  sched_yield();
748}
749
750
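// Mutex implementation backed by a recursive pthread mutex, so the same
// thread may lock it multiple times without deadlocking.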
751class LinuxMutex : public Mutex {
752 public:
753
754  LinuxMutex() {
755    pthread_mutexattr_t attrs;
756    int result = pthread_mutexattr_init(&attrs);
757    ASSERT(result == 0);
758    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
759    ASSERT(result == 0);
760    result = pthread_mutex_init(&mutex_, &attrs);
761    ASSERT(result == 0);
762  }
763
764  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
765
766  virtual int Lock() {
767    int result = pthread_mutex_lock(&mutex_);
768    return result;
769  }
770
771  virtual int Unlock() {
772    int result = pthread_mutex_unlock(&mutex_);
773    return result;
774  }
775
776  virtual bool TryLock() {
777    int result = pthread_mutex_trylock(&mutex_);
778    // Return false if the lock is busy and locking failed.
779    if (result == EBUSY) {
780      return false;
781    }
782    ASSERT(result == 0);  // Verify no other errors.
783    return true;
784  }
785
786 private:
787  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
788};
789
790
791Mutex* OS::CreateMutex() {
792  return new LinuxMutex();
793}
794
795
796class LinuxSemaphore : public Semaphore {
797 public:
798  explicit LinuxSemaphore(int count) {  sem_init(&sem_, 0, count); }
799  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
800
801  virtual void Wait();
802  virtual bool Wait(int timeout);
803  virtual void Signal() { sem_post(&sem_); }
804 private:
805  sem_t sem_;
806};
807
808
809void LinuxSemaphore::Wait() {
810  while (true) {
811    int result = sem_wait(&sem_);
812    if (result == 0) return;  // Successfully got semaphore.
813    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
814  }
815}
816
817
818#ifndef TIMEVAL_TO_TIMESPEC
819#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
820    (ts)->tv_sec = (tv)->tv_sec;                                    \
821    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
822} while (false)
823#endif
824
825
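// Waits with a timeout given in microseconds; returns false if the timeout
// expires before the semaphore is signalled.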
826bool LinuxSemaphore::Wait(int timeout) {
827  const long kOneSecondMicros = 1000000;  // NOLINT
828
829  // Split timeout into second and microsecond parts.
830  struct timeval delta;
831  delta.tv_usec = timeout % kOneSecondMicros;
832  delta.tv_sec = timeout / kOneSecondMicros;
833
834  struct timeval current_time;
835  // Get the current time.
836  if (gettimeofday(&current_time, NULL) == -1) {
837    return false;
838  }
839
840  // Calculate time for end of timeout.
841  struct timeval end_time;
842  timeradd(&current_time, &delta, &end_time);
843
844  struct timespec ts;
845  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
846  // Wait until the semaphore is signalled or the timeout expires.
847  while (true) {
848    int result = sem_timedwait(&sem_, &ts);
849    if (result == 0) return true;  // Successfully got semaphore.
850    if (result > 0) {
851      // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
852      errno = result;
853      result = -1;
854    }
855    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
856    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
857  }
858}
859
860
861Semaphore* OS::CreateSemaphore(int count) {
862  return new LinuxSemaphore(count);
863}
864
865
866#ifdef ENABLE_LOGGING_AND_PROFILING
867
868#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
869// Android runs a fairly new Linux kernel, so signal info is there,
870// but the C library doesn't have the structs defined.
871
872struct sigcontext {
873  uint32_t trap_no;
874  uint32_t error_code;
875  uint32_t oldmask;
876  uint32_t gregs[16];
877  uint32_t arm_cpsr;
878  uint32_t fault_address;
879};
880typedef uint32_t __sigset_t;
881typedef struct sigcontext mcontext_t;
882typedef struct ucontext {
883  uint32_t uc_flags;
884  struct ucontext* uc_link;
885  stack_t uc_stack;
886  mcontext_t uc_mcontext;
887  __sigset_t uc_sigmask;
888} ucontext_t;
889enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
890
891#endif
892
893
894static int GetThreadID() {
895  // Glibc doesn't provide a wrapper for gettid(2).
896#if defined(ANDROID)
897  return syscall(__NR_gettid);
898#else
899  return syscall(SYS_gettid);
900#endif
901}
902
903
904static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
905#ifndef V8_HOST_ARCH_MIPS
906  USE(info);
907  if (signal != SIGPROF) return;
908  Isolate* isolate = Isolate::UncheckedCurrent();
909  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
910    // We require a fully initialized and entered isolate.
911    return;
912  }
913  if (v8::Locker::IsActive() &&
914      !isolate->thread_manager()->IsLockedByCurrentThread()) {
915    return;
916  }
917
918  Sampler* sampler = isolate->logger()->sampler();
919  if (sampler == NULL || !sampler->IsActive()) return;
920
921  TickSample sample_obj;
922  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
923  if (sample == NULL) sample = &sample_obj;
924
925  // Extracting the sample from the context is extremely machine dependent.
926  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
927  mcontext_t& mcontext = ucontext->uc_mcontext;
928  sample->state = isolate->current_vm_state();
929#if V8_HOST_ARCH_IA32
930  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
931  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
932  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
933#elif V8_HOST_ARCH_X64
934  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
935  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
936  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
937#elif V8_HOST_ARCH_ARM
938// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
939#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
940  sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
941  sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
942  sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
943#else
944  sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
945  sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
946  sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
947#endif
948#elif V8_HOST_ARCH_MIPS
949  sample->pc = reinterpret_cast<Address>(mcontext.pc);
950  sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
951  sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
952#endif
953  sampler->SampleStack(sample);
954  sampler->Tick(sample);
955#endif
956}
957
958
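// Remembers the kernel thread id of the sampled VM thread so the signal
// sender can target it with tgkill.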
959class Sampler::PlatformData : public Malloced {
960 public:
961  PlatformData() : vm_tid_(GetThreadID()) {}
962
963  int vm_tid() const { return vm_tid_; }
964
965 private:
966  const int vm_tid_;
967};
968
969
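// A dedicated thread that periodically sends SIGPROF to the profiled VM
// threads; ProfilerSignalHandler above records a tick when the signal
// arrives.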
970class SignalSender : public Thread {
971 public:
972  enum SleepInterval {
973    HALF_INTERVAL,
974    FULL_INTERVAL
975  };
976
977  explicit SignalSender(int interval)
978      : Thread(NULL, "SignalSender"),
979        vm_tgid_(getpid()),
980        interval_(interval) {}
981
982  static void InstallSignalHandler() {
983    struct sigaction sa;
984    sa.sa_sigaction = ProfilerSignalHandler;
985    sigemptyset(&sa.sa_mask);
986    sa.sa_flags = SA_RESTART | SA_SIGINFO;
987    signal_handler_installed_ =
988        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
989  }
990
991  static void RestoreSignalHandler() {
992    if (signal_handler_installed_) {
993      sigaction(SIGPROF, &old_signal_handler_, 0);
994      signal_handler_installed_ = false;
995    }
996  }
997
998  static void AddActiveSampler(Sampler* sampler) {
999    ScopedLock lock(mutex_);
1000    SamplerRegistry::AddActiveSampler(sampler);
1001    if (instance_ == NULL) {
1002      // Start a thread that will send a SIGPROF signal to VM threads
1003      // when CPU profiling is enabled.
1004      instance_ = new SignalSender(sampler->interval());
1005      instance_->Start();
1006    } else {
1007      ASSERT(instance_->interval_ == sampler->interval());
1008    }
1009  }
1010
1011  static void RemoveActiveSampler(Sampler* sampler) {
1012    ScopedLock lock(mutex_);
1013    SamplerRegistry::RemoveActiveSampler(sampler);
1014    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
1015      RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
1016      instance_->Join();
1017      delete instance_;
1018      instance_ = NULL;
1019      RestoreSignalHandler();
1020    }
1021  }
1022
1023  // Implement Thread::Run().
1024  virtual void Run() {
1025    SamplerRegistry::State state;
1026    while ((state = SamplerRegistry::GetState()) !=
1027           SamplerRegistry::HAS_NO_SAMPLERS) {
1028      bool cpu_profiling_enabled =
1029          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
1030      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
1031      if (cpu_profiling_enabled && !signal_handler_installed_)
1032        InstallSignalHandler();
1033      else if (!cpu_profiling_enabled && signal_handler_installed_)
1034        RestoreSignalHandler();
1035      // When CPU profiling is enabled, both JavaScript and C++ code are
1036      // profiled. We must not suspend.
1037      if (!cpu_profiling_enabled) {
1038        if (rate_limiter_.SuspendIfNecessary()) continue;
1039      }
1040      if (cpu_profiling_enabled && runtime_profiler_enabled) {
1041        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
1042          return;
1043        }
1044        Sleep(HALF_INTERVAL);
1045        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
1046          return;
1047        }
1048        Sleep(HALF_INTERVAL);
1049      } else {
1050        if (cpu_profiling_enabled) {
1051          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
1052                                                      this)) {
1053            return;
1054          }
1055        }
1056        if (runtime_profiler_enabled) {
1057          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
1058                                                      NULL)) {
1059            return;
1060          }
1061        }
1062        Sleep(FULL_INTERVAL);
1063      }
1064    }
1065  }
1066
1067  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
1068    if (!sampler->IsProfiling()) return;
1069    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
1070    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
1071  }
1072
1073  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
1074    if (!sampler->isolate()->IsInitialized()) return;
1075    sampler->isolate()->runtime_profiler()->NotifyTick();
1076  }
1077
1078  void SendProfilingSignal(int tid) {
1079    if (!signal_handler_installed_) return;
1080    // Glibc doesn't provide a wrapper for tgkill(2).
1081#if defined(ANDROID)
1082    syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
1083#else
1084    syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
1085#endif
1086  }
1087
1088  void Sleep(SleepInterval full_or_half) {
1089    // Convert ms to us and subtract 100 us to compensate for delays
1090    // occurring during signal delivery.
1091    useconds_t interval = interval_ * 1000 - 100;
1092    if (full_or_half == HALF_INTERVAL) interval /= 2;
1093    int result = usleep(interval);
1094#ifdef DEBUG
1095    if (result != 0 && errno != EINTR) {
1096      fprintf(stderr,
1097              "SignalSender usleep error; interval = %u, errno = %d\n",
1098              interval,
1099              errno);
1100      ASSERT(result == 0 || errno == EINTR);
1101    }
1102#endif
1103    USE(result);
1104  }
1105
1106  const int vm_tgid_;
1107  const int interval_;
1108  RuntimeProfilerRateLimiter rate_limiter_;
1109
1110  // Protects the process wide state below.
1111  static Mutex* mutex_;
1112  static SignalSender* instance_;
1113  static bool signal_handler_installed_;
1114  static struct sigaction old_signal_handler_;
1115
1116  DISALLOW_COPY_AND_ASSIGN(SignalSender);
1117};
1118
1119
1120Mutex* SignalSender::mutex_ = OS::CreateMutex();
1121SignalSender* SignalSender::instance_ = NULL;
1122struct sigaction SignalSender::old_signal_handler_;
1123bool SignalSender::signal_handler_installed_ = false;
1124
1125
1126Sampler::Sampler(Isolate* isolate, int interval)
1127    : isolate_(isolate),
1128      interval_(interval),
1129      profiling_(false),
1130      active_(false),
1131      samples_taken_(0) {
1132  data_ = new PlatformData;
1133}
1134
1135
1136Sampler::~Sampler() {
1137  ASSERT(!IsActive());
1138  delete data_;
1139}
1140
1141
1142void Sampler::Start() {
1143  ASSERT(!IsActive());
1144  SetActive(true);
1145  SignalSender::AddActiveSampler(this);
1146}
1147
1148
1149void Sampler::Stop() {
1150  ASSERT(IsActive());
1151  SignalSender::RemoveActiveSampler(this);
1152  SetActive(false);
1153}
1154
1155#endif  // ENABLE_LOGGING_AND_PROFILING
1156
1157} }  // namespace v8::internal
1158