1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
// Platform specific code for Linux goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.
30
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/prctl.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdlib.h>
#include <time.h>       // localtime_r
40
41// Ubuntu Dapper requires memory pages to be marked as
42// executable. Otherwise, OS raises an exception when executing code
43// in that page.
44#include <sys/types.h>  // mmap & munmap
45#include <sys/mman.h>   // mmap & munmap
46#include <sys/stat.h>   // open
47#include <fcntl.h>      // open
48#include <unistd.h>     // sysconf
49#ifdef __GLIBC__
50#include <execinfo.h>   // backtrace, backtrace_symbols
51#endif  // def __GLIBC__
52#include <strings.h>    // index
53#include <errno.h>
54#include <stdarg.h>
55
56#undef MAP_TYPE
57
58#include "v8.h"
59
60#include "platform-posix.h"
61#include "platform.h"
62#include "v8threads.h"
63#include "vm-state-inl.h"
64
65
66namespace v8 {
67namespace internal {
68
// 0 is never a valid thread id on Linux since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
// On Linux pthread_t is an integral type, so casting 0 is well defined.
static const pthread_t kNoThread = (pthread_t) 0;
72
73
// Rounds x up to the nearest integral value.  Values that are already
// integral (as well as NaN and infinities) are returned unchanged.
double ceiling(double x) {
  const double rounded_up = ceil(x);
  return rounded_up;
}
77
78
79static Mutex* limit_mutex = NULL;
80
81
// One-time process setup: seeds random(), creates the mutex protecting the
// allocated-space limits, and on ARM verifies that the float ABI V8 was
// configured with matches the one the C code was compiled with (a mismatch
// terminates the process).
void OS::SetUp() {
  // Seed the random number generator. We preserve microsecond resolution.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();

#ifdef __arm__
  // When running on ARM hardware check that the EABI used by V8 and
  // by the C code is the same.
  bool hard_float = OS::ArmUsingHardFloat();
  if (hard_float) {
#if !USE_EABI_HARDFLOAT
    PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
           "-DUSE_EABI_HARDFLOAT\n");
    exit(1);
#endif
  } else {
#if USE_EABI_HARDFLOAT
    PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
           "-DUSE_EABI_HARDFLOAT\n");
    exit(1);
#endif
  }
#endif
}
107
108
// Second setup phase, run after CPU feature detection has completed.
void OS::PostSetUp() {
  // Math functions depend on CPU features therefore they are initialized after
  // CPU.
  MathSetup();
}
114
115
// Returns a bitmask of CPU features guaranteed by the platform itself.
uint64_t OS::CpuFeaturesImpliedByPlatform() {
  return 0;  // Linux runs on anything, so nothing can be assumed.
}
119
120
121#ifdef __arm__
// Returns true if /proc/cpuinfo contains |search_string| as a substring.
// NOTE(review): the scan does not push back characters consumed by a failed
// partial match, so overlapping occurrences can in theory be missed; this is
// adequate for the short, distinctive feature strings used by the callers.
static bool CPUInfoContainsString(const char * search_string) {
  const char* file_name = "/proc/cpuinfo";
  // This is written as a straight shot one pass parser
  // and not using STL string and ifstream because,
  // on Linux, it's reading from a (non-mmap-able)
  // character special device.
  FILE* f = NULL;
  const char* what = search_string;

  if (NULL == (f = fopen(file_name, "r")))
    return false;

  int k;
  while (EOF != (k = fgetc(f))) {
    if (k == *what) {
      // First character matched; try to match the remainder in place.
      ++what;
      while ((*what != '\0') && (*what == fgetc(f))) {
        ++what;
      }
      if (*what == '\0') {
        fclose(f);
        return true;
      } else {
        // Mismatch: restart matching from the beginning of the pattern.
        what = search_string;
      }
    }
  }
  fclose(f);

  // Did not find string in the proc file.
  return false;
}
154
155
156bool OS::ArmCpuHasFeature(CpuFeature feature) {
157  const char* search_string = NULL;
158  // Simple detection of VFP at runtime for Linux.
159  // It is based on /proc/cpuinfo, which reveals hardware configuration
160  // to user-space applications.  According to ARM (mid 2009), no similar
161  // facility is universally available on the ARM architectures,
162  // so it's up to individual OSes to provide such.
163  switch (feature) {
164    case VFP3:
165      search_string = "vfpv3";
166      break;
167    case ARMv7:
168      search_string = "ARMv7";
169      break;
170    default:
171      UNREACHABLE();
172  }
173
174  if (CPUInfoContainsString(search_string)) {
175    return true;
176  }
177
178  if (feature == VFP3) {
179    // Some old kernels will report vfp not vfpv3. Here we make a last attempt
180    // to detect vfpv3 by checking for vfp *and* neon, since neon is only
181    // available on architectures with vfpv3.
182    // Checking neon on its own is not enough as it is possible to have neon
183    // without vfp.
184    if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
185      return true;
186    }
187  }
188
189  return false;
190}
191
192
// Simple helper function to detect whether the C code is compiled with
// option -mfloat-abi=hard. The register d0 is loaded with 1.0 and the register
// pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is passed to GCC then
// calling this will return 1.0 and otherwise 0.0.
static void ArmUsingHardFloatHelper() {
  asm("mov r0, #0":::"r0");
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
  // Load 0x3ff00000 into r1 using instructions available in both ARM
  // and Thumb mode.
  asm("mov r1, #3":::"r1");
  asm("mov r2, #255":::"r2");
  asm("lsl r1, r1, #8":::"r1");
  asm("orr r1, r1, r2":::"r1");
  asm("lsl r1, r1, #20":::"r1");
  // For vmov d0, r0, r1 use ARM mode.
#ifdef __thumb__
  asm volatile(
    "@   Enter ARM Mode  \n\t"
    "    adr r3, 1f      \n\t"
    "    bx  r3          \n\t"
    "    .ALIGN 4        \n\t"
    "    .ARM            \n"
    "1:  vmov d0, r0, r1 \n\t"
    "@   Enter THUMB Mode\n\t"
    "    adr r3, 2f+1    \n\t"
    "    bx  r3          \n\t"
    "    .THUMB          \n"
    "2:                  \n\t":::"r3");
#else
  asm("vmov d0, r0, r1");
#endif  // __thumb__
#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
  asm("mov r1, #0":::"r1");
}
227
228
// Returns true if the C code was compiled with -mfloat-abi=hard.  The helper
// leaves 1.0 in d0 and 0.0 in r0/r1; which of the two a caller observes as
// the "return value" depends on the float ABI, which is exactly what we test.
bool OS::ArmUsingHardFloat() {
  // Cast helper function from returning void to returning double.
  typedef double (*F)();
  F f = FUNCTION_CAST<F>(FUNCTION_ADDR(ArmUsingHardFloatHelper));
  return f() == 1.0;
}
235#endif  // def __arm__
236
237
238#ifdef __mips__
// Runtime detection of MIPS CPU features, scanning /proc/cpuinfo for the
// feature's marker string (same approach and parser as the ARM variant above).
bool OS::MipsCpuHasFeature(CpuFeature feature) {
  const char* search_string = NULL;
  const char* file_name = "/proc/cpuinfo";
  // Simple detection of FPU at runtime for Linux.
  // It is based on /proc/cpuinfo, which reveals hardware configuration
  // to user-space applications.  According to MIPS (early 2010), no similar
  // facility is universally available on the MIPS architectures,
  // so it's up to individual OSes to provide such.
  //
  // This is written as a straight shot one pass parser
  // and not using STL string and ifstream because,
  // on Linux, it's reading from a (non-mmap-able)
  // character special device.

  switch (feature) {
    case FPU:
      search_string = "FPU";
      break;
    default:
      UNREACHABLE();
  }

  FILE* f = NULL;
  const char* what = search_string;

  if (NULL == (f = fopen(file_name, "r")))
    return false;

  int k;
  while (EOF != (k = fgetc(f))) {
    if (k == *what) {
      // First character matched; try to match the rest in place.
      ++what;
      while ((*what != '\0') && (*what == fgetc(f))) {
        ++what;
      }
      if (*what == '\0') {
        fclose(f);
        return true;
      } else {
        // Mismatch: restart from the beginning of the pattern.
        what = search_string;
      }
    }
  }
  fclose(f);

  // Did not find string in the proc file.
  return false;
}
287#endif  // def __mips__
288
289
// Returns the stack alignment (in bytes) required when calling into the
// runtime system from generated code.
int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#elif V8_TARGET_ARCH_MIPS
  return 8;
#endif
  // With gcc 4.4 the tree vectorization optimizer can generate code
  // that requires 16 byte alignment such as movdqa on x86.
  return 16;
}
302
303
// Stores |value| to |ptr| with release semantics: all prior writes become
// visible to other CPUs before the store itself.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
    (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
  // Only use on ARM or MIPS hardware.
  MemoryBarrier();
#else
  // Compiler barrier only; prevents reordering by the compiler.
  __asm__ __volatile__("" : : : "memory");
  // An x86 store acts as a release barrier.
#endif
  *ptr = value;
}
315
316
317const char* OS::LocalTimezone(double time) {
318  if (isnan(time)) return "";
319  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
320  struct tm* t = localtime(&tv);
321  if (NULL == t) return "";
322  return t->tm_zone;
323}
324
325
326double OS::LocalTimeOffset() {
327  time_t tv = time(NULL);
328  struct tm* t = localtime(&tv);
329  // tm_gmtoff includes any daylight savings offset, so subtract it.
330  return static_cast<double>(t->tm_gmtoff * msPerSecond -
331                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
332}
333
334
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
342
343
344static void UpdateAllocatedSpaceLimits(void* address, int size) {
345  ASSERT(limit_mutex != NULL);
346  ScopedLock lock(limit_mutex);
347
348  lowest_ever_allocated = Min(lowest_ever_allocated, address);
349  highest_ever_allocated =
350      Max(highest_ever_allocated,
351          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
352}
353
354
355bool OS::IsOutsideAllocatedSpace(void* address) {
356  return address < lowest_ever_allocated || address >= highest_ever_allocated;
357}
358
359
// Returns the allocation granularity, i.e. the system page size.
size_t OS::AllocateAlignment() {
  return sysconf(_SC_PAGESIZE);
}
363
364
// Allocates |requested| bytes (rounded up to page size) of anonymous memory,
// optionally executable.  On success stores the actual size in |*allocated|
// and returns the base address; returns NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  // Randomize the placement to make addresses harder to predict.
  void* addr = OS::GetRandomMmapAddr();
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(i::Isolate::Current(),
        StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
381
382
// Unmaps memory previously obtained from OS::Allocate or VirtualMemory.
// Failure is asserted in debug builds and silently ignored in release.
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}
389
390
// Suspends the calling thread for approximately |milliseconds|.
// NOTE(review): a negative argument wraps to a huge unsigned value — callers
// are expected to pass non-negative durations.
void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}
395
396
// Terminates the process abnormally, optionally trapping into the debugger
// first when --break-on-abort is set.
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  if (FLAG_break_on_abort) {
    DebugBreak();
  }
  abort();
}
404
405
// Issues the host CPU's software-breakpoint instruction.
void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
//  which is the architecture of generated code).
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
  asm("bkpt 0");
# endif
#elif defined(__mips__)
  asm("break");
#else
  asm("int $3");
#endif
}
419
420
// Memory-mapped file backed by mmap(); owns both the FILE* handle and the
// mapping and releases them in the destructor.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Open stream for the underlying file; closed in dtor.
  void* memory_;  // Base address of the mapping.
  int size_;      // Length of the mapping in bytes.
};
433
434
435OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
436  FILE* file = fopen(name, "r+");
437  if (file == NULL) return NULL;
438
439  fseek(file, 0, SEEK_END);
440  int size = ftell(file);
441
442  void* memory =
443      mmap(OS::GetRandomMmapAddr(),
444           size,
445           PROT_READ | PROT_WRITE,
446           MAP_SHARED,
447           fileno(file),
448           0);
449  return new PosixMemoryMappedFile(file, memory, size);
450}
451
452
453OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
454    void* initial) {
455  FILE* file = fopen(name, "w+");
456  if (file == NULL) return NULL;
457  int result = fwrite(initial, size, 1, file);
458  if (result < 1) {
459    fclose(file);
460    return NULL;
461  }
462  void* memory =
463      mmap(OS::GetRandomMmapAddr(),
464           size,
465           PROT_READ | PROT_WRITE,
466           MAP_SHARED,
467           fileno(file),
468           0);
469  return new PosixMemoryMappedFile(file, memory, size);
470}
471
472
473PosixMemoryMappedFile::~PosixMemoryMappedFile() {
474  if (memory_) OS::Free(memory_, size_);
475  fclose(file_);
476}
477
478
// Parses /proc/self/maps and logs a SharedLibraryEvent for every executable,
// non-writable mapping (shared libraries and the binary itself).
void OS::LogSharedLibraryAddresses() {
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return;

  // Allocate enough room to be able to store a full file name.
  // NOTE(review): the malloc result is not checked; on failure snprintf/fgets
  // below would receive NULL.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  i::Isolate* isolate = ISOLATE;
  // This loop will terminate once the scanning hits an EOF.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
      // Found a read-only executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/'));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if (c == '/') {
        ungetc(c, fp);  // Push the '/' back into the stream to be read below.

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to set up
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
}
537
538
539static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
540
541
542void OS::SignalCodeMovingGC() {
543  // Support for ll_prof.py.
544  //
545  // The Linux profiler built into the kernel logs all mmap's with
546  // PROT_EXEC so that analysis tools can properly attribute ticks. We
547  // do a mmap with a name known by ll_prof.py and immediately munmap
548  // it. This injects a GC marker into the stream of events generated
549  // by the kernel and allows us to synchronize V8 code log and the
550  // kernel log.
551  int size = sysconf(_SC_PAGESIZE);
552  FILE* f = fopen(kGCFakeMmap, "w+");
553  void* addr = mmap(OS::GetRandomMmapAddr(),
554                    size,
555                    PROT_READ | PROT_EXEC,
556                    MAP_PRIVATE,
557                    fileno(f),
558                    0);
559  ASSERT(addr != MAP_FAILED);
560  OS::Free(addr, size);
561  fclose(f);
562}
563
564
// Fills |frames| with the current native call stack using glibc's
// backtrace(); returns the number of frames captured, kStackWalkError on
// symbolization failure, or 0 on non-glibc C libraries.
int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // backtrace is a glibc extension.
#ifdef __GLIBC__
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  // backtrace_symbols mallocs one block holding all strings; freed below.
  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);

  return frames_count;
#else  // ndef __GLIBC__
  return 0;
#endif  // ndef __GLIBC__
}
596
597
// Constants used for mmap: anonymous mappings take fd -1 and offset 0.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
601
// Empty reservation; IsReserved() will return false.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

// Reserves (but does not commit) |size| bytes of address space.
// NOTE(review): size_ is set even when ReserveRegion fails (address_ NULL);
// IsReserved() only consults address_, so this is benign but inconsistent.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}
608
609
// Reserves |size| bytes of address space aligned to |alignment| by
// over-reserving size + alignment, then unmapping the misaligned prefix and
// the surplus suffix.  On failure the object is left unreserved.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE + MAP_NORESERVE: reserve address space only, no memory yet.
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After trimming, exactly the aligned block remains.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
648
649
// Releases the reserved region, if any.
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}
657
658
// True when a reservation is currently held (non-NULL base address).
bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


// Forgets the reservation without releasing it (ownership transferred away).
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
668
669
// Commits |size| bytes at |address| inside the reservation, making them
// readable/writable (and executable when requested).
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


// Returns committed pages to reserved-only state; contents are discarded.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


// Turns one page at |address| into an inaccessible guard page.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}
684
685
// Reserves |size| bytes of address space (PROT_NONE, no backing store).
// Returns NULL on failure.
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}
698
699
// Commits pages in a reserved region by remapping them with MAP_FIXED and
// the desired protection; updates the tracked allocation limits on success.
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}
714
715
// Uncommits pages by remapping them PROT_NONE/MAP_NORESERVE in place,
// discarding their contents while keeping the address range reserved.
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


// Fully releases a reserved region back to the OS.
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}
729
730
// Linux-specific per-thread state: just the pthread handle.
class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}

  pthread_t thread_;  // Thread handle for pthread.
};
737
// Constructs an unstarted thread; the pthread is created in Start().
Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


// Frees the platform data; does not join or stop the underlying thread.
Thread::~Thread() {
  delete data_;
}
748
749
// pthread entry trampoline: publishes the thread name to the kernel, records
// the pthread handle, and runs the Thread's virtual Run() method.
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
#ifdef PR_SET_NAME
  // Makes the name visible in /proc and tools such as top and gdb.
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
        0, 0, 0);
#endif
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
765
766
// Copies |name| into the fixed-size buffer, truncating if necessary and
// guaranteeing NUL termination (strncpy alone does not).
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}
771
772
773void Thread::Start() {
774  pthread_attr_t* attr_ptr = NULL;
775  pthread_attr_t attr;
776  if (stack_size_ > 0) {
777    pthread_attr_init(&attr);
778    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
779    attr_ptr = &attr;
780  }
781  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
782  CHECK_EQ(0, result);
783  ASSERT(data_->thread_ != kNoThread);
784}
785
786
// Blocks until the thread's Run() method has returned.
void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}
790
791
// Thin wrappers mapping V8's thread-local-storage API onto pthread keys.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);  // No per-thread destructor.
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}
819
820
// Offers the processor to other runnable threads.
void Thread::YieldCPU() {
  sched_yield();
}
824
825
826class LinuxMutex : public Mutex {
827 public:
828  LinuxMutex() {
829    pthread_mutexattr_t attrs;
830    int result = pthread_mutexattr_init(&attrs);
831    ASSERT(result == 0);
832    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
833    ASSERT(result == 0);
834    result = pthread_mutex_init(&mutex_, &attrs);
835    ASSERT(result == 0);
836    USE(result);
837  }
838
839  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
840
841  virtual int Lock() {
842    int result = pthread_mutex_lock(&mutex_);
843    return result;
844  }
845
846  virtual int Unlock() {
847    int result = pthread_mutex_unlock(&mutex_);
848    return result;
849  }
850
851  virtual bool TryLock() {
852    int result = pthread_mutex_trylock(&mutex_);
853    // Return false if the lock is busy and locking failed.
854    if (result == EBUSY) {
855      return false;
856    }
857    ASSERT(result == 0);  // Verify no other errors.
858    return true;
859  }
860
861 private:
862  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
863};
864
865
// Factory for the platform mutex; caller owns the returned object.
Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}
869
870
// Counting semaphore backed by a POSIX unnamed semaphore (process-local).
class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);  // |timeout| in microseconds.
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;  // The underlying POSIX semaphore.
};
882
883
884void LinuxSemaphore::Wait() {
885  while (true) {
886    int result = sem_wait(&sem_);
887    if (result == 0) return;  // Successfully got semaphore.
888    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
889  }
890}
891
892
// Converts a timeval (microseconds) to a timespec (nanoseconds); provided
// here because not all C libraries define this BSD macro.
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    (ts)->tv_sec = (tv)->tv_sec;                                    \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
} while (false)
#endif
899
900
// Waits for the semaphore for at most |timeout| microseconds.  Returns true
// if the semaphore was acquired, false on timeout (or on clock failure).
bool LinuxSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and nanosecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.  sem_timedwait takes an absolute
  // deadline, so the loop below can safely retry after EINTR.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result > 0) {
      // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
      errno = result;
      result = -1;
    }
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
934
935
// Factory for the platform semaphore; caller owns the returned object.
Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}
939
940
// Fallback signal-context definitions for non-glibc C libraries (Android's
// Bionic): the kernel delivers the information, but the libc headers lack
// the structs, so we mirror the kernel layout per architecture here.
#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.

struct sigcontext {
  uint32_t trap_no;
  uint32_t error_code;
  uint32_t oldmask;
  uint32_t gregs[16];
  uint32_t arm_cpsr;
  uint32_t fault_address;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  __sigset_t uc_sigmask;
} ucontext_t;
// Indices into gregs for the registers the profiler samples (pc/sp/fp).
enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};

#elif !defined(__GLIBC__) && defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
struct sigcontext {
  uint32_t regmask;
  uint32_t status;
  uint64_t pc;
  uint64_t gregs[32];
  uint64_t fpregs[32];
  uint32_t acx;
  uint32_t fpc_csr;
  uint32_t fpc_eir;
  uint32_t used_math;
  uint32_t dsp;
  uint64_t mdhi;
  uint64_t mdlo;
  uint32_t hi1;
  uint32_t lo1;
  uint32_t hi2;
  uint32_t lo2;
  uint32_t hi3;
  uint32_t lo3;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  __sigset_t uc_sigmask;
} ucontext_t;

#elif !defined(__GLIBC__) && defined(__i386__)
// x86 version for Android.
struct sigcontext {
  uint32_t gregs[19];
  void* fpregs;
  uint32_t oldmask;
  uint32_t cr2;
};

typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  __sigset_t uc_sigmask;
} ucontext_t;
// Indices into gregs for the registers the profiler samples.
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif
1016
1017
// Returns the kernel thread id (tid) of the calling thread.
static int GetThreadID() {
  // Glibc doesn't provide a wrapper for gettid(2).
#if defined(ANDROID)
  return syscall(__NR_gettid);
#else
  return syscall(SYS_gettid);
#endif
}
1026
1027
// SIGPROF handler: captures pc/sp/fp of the interrupted thread from the
// signal context and feeds a tick sample to the profiler.  Runs in signal
// context, so it only touches async-signal-safe state and bails out early
// whenever the isolate is not in a sampleable state.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
  sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
  sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
  sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif  // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
  sample->pc = reinterpret_cast<Address>(mcontext.pc);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif  // V8_HOST_ARCH_*
  sampler->SampleStack(sample);
  sampler->Tick(sample);
}
1079
1080
// Linux-specific per-sampler state: remembers the kernel thread id of the
// thread that created the sampler, so SignalSender can target that thread
// with tgkill/SIGPROF.
class Sampler::PlatformData : public Malloced {
 public:
  // Captures the calling thread's tid; constructed on the VM thread.
  PlatformData() : vm_tid_(GetThreadID()) {}

  int vm_tid() const { return vm_tid_; }

 private:
  const int vm_tid_;  // Immutable after construction.
};
1090
1091
1092class SignalSender : public Thread {
1093 public:
1094  enum SleepInterval {
1095    HALF_INTERVAL,
1096    FULL_INTERVAL
1097  };
1098
1099  static const int kSignalSenderStackSize = 64 * KB;
1100
1101  explicit SignalSender(int interval)
1102      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
1103        vm_tgid_(getpid()),
1104        interval_(interval) {}
1105
1106  static void InstallSignalHandler() {
1107    struct sigaction sa;
1108    sa.sa_sigaction = ProfilerSignalHandler;
1109    sigemptyset(&sa.sa_mask);
1110    sa.sa_flags = SA_RESTART | SA_SIGINFO;
1111    signal_handler_installed_ =
1112        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
1113  }
1114
1115  static void RestoreSignalHandler() {
1116    if (signal_handler_installed_) {
1117      sigaction(SIGPROF, &old_signal_handler_, 0);
1118      signal_handler_installed_ = false;
1119    }
1120  }
1121
1122  static void AddActiveSampler(Sampler* sampler) {
1123    ScopedLock lock(mutex_.Pointer());
1124    SamplerRegistry::AddActiveSampler(sampler);
1125    if (instance_ == NULL) {
1126      // Start a thread that will send SIGPROF signal to VM threads,
1127      // when CPU profiling will be enabled.
1128      instance_ = new SignalSender(sampler->interval());
1129      instance_->Start();
1130    } else {
1131      ASSERT(instance_->interval_ == sampler->interval());
1132    }
1133  }
1134
1135  static void RemoveActiveSampler(Sampler* sampler) {
1136    ScopedLock lock(mutex_.Pointer());
1137    SamplerRegistry::RemoveActiveSampler(sampler);
1138    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
1139      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
1140      delete instance_;
1141      instance_ = NULL;
1142      RestoreSignalHandler();
1143    }
1144  }
1145
1146  // Implement Thread::Run().
1147  virtual void Run() {
1148    SamplerRegistry::State state;
1149    while ((state = SamplerRegistry::GetState()) !=
1150           SamplerRegistry::HAS_NO_SAMPLERS) {
1151      bool cpu_profiling_enabled =
1152          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
1153      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
1154      if (cpu_profiling_enabled && !signal_handler_installed_) {
1155        InstallSignalHandler();
1156      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
1157        RestoreSignalHandler();
1158      }
1159      // When CPU profiling is enabled both JavaScript and C++ code is
1160      // profiled. We must not suspend.
1161      if (!cpu_profiling_enabled) {
1162        if (rate_limiter_.SuspendIfNecessary()) continue;
1163      }
1164      if (cpu_profiling_enabled && runtime_profiler_enabled) {
1165        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
1166          return;
1167        }
1168        Sleep(HALF_INTERVAL);
1169        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
1170          return;
1171        }
1172        Sleep(HALF_INTERVAL);
1173      } else {
1174        if (cpu_profiling_enabled) {
1175          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
1176                                                      this)) {
1177            return;
1178          }
1179        }
1180        if (runtime_profiler_enabled) {
1181          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
1182                                                      NULL)) {
1183            return;
1184          }
1185        }
1186        Sleep(FULL_INTERVAL);
1187      }
1188    }
1189  }
1190
1191  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
1192    if (!sampler->IsProfiling()) return;
1193    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
1194    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
1195  }
1196
1197  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
1198    if (!sampler->isolate()->IsInitialized()) return;
1199    sampler->isolate()->runtime_profiler()->NotifyTick();
1200  }
1201
1202  void SendProfilingSignal(int tid) {
1203    if (!signal_handler_installed_) return;
1204    // Glibc doesn't provide a wrapper for tgkill(2).
1205#if defined(ANDROID)
1206    syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
1207#else
1208    syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
1209#endif
1210  }
1211
1212  void Sleep(SleepInterval full_or_half) {
1213    // Convert ms to us and subtract 100 us to compensate delays
1214    // occuring during signal delivery.
1215    useconds_t interval = interval_ * 1000 - 100;
1216    if (full_or_half == HALF_INTERVAL) interval /= 2;
1217#if defined(ANDROID)
1218    usleep(interval);
1219#else
1220    int result = usleep(interval);
1221#ifdef DEBUG
1222    if (result != 0 && errno != EINTR) {
1223      fprintf(stderr,
1224              "SignalSender usleep error; interval = %u, errno = %d\n",
1225              interval,
1226              errno);
1227      ASSERT(result == 0 || errno == EINTR);
1228    }
1229#endif  // DEBUG
1230    USE(result);
1231#endif  // ANDROID
1232  }
1233
1234  const int vm_tgid_;
1235  const int interval_;
1236  RuntimeProfilerRateLimiter rate_limiter_;
1237
1238  // Protects the process wide state below.
1239  static LazyMutex mutex_;
1240  static SignalSender* instance_;
1241  static bool signal_handler_installed_;
1242  static struct sigaction old_signal_handler_;
1243
1244 private:
1245  DISALLOW_COPY_AND_ASSIGN(SignalSender);
1246};
1247
1248
1249LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
1250SignalSender* SignalSender::instance_ = NULL;
1251struct sigaction SignalSender::old_signal_handler_;
1252bool SignalSender::signal_handler_installed_ = false;
1253
1254
// Creates a sampler for |isolate| that, once started, ticks every
// |interval| milliseconds. Samplers begin inactive and non-profiling.
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;  // Records the creating (VM) thread's tid.
}
1263
1264
// A sampler must be stopped (see Stop()) before it is destroyed.
Sampler::~Sampler() {
  ASSERT(!IsActive());
  delete data_;
}
1269
1270
// Activates this sampler and registers it with the SignalSender thread.
// SetActive(true) happens before registration so the signal handler, which
// checks IsActive(), accepts samples as soon as signals start arriving.
void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SignalSender::AddActiveSampler(this);
}
1276
1277
// Unregisters this sampler and then deactivates it; unregistering first
// ensures no further SIGPROF is aimed at this sampler's thread before the
// active flag is cleared.
void Sampler::Stop() {
  ASSERT(IsActive());
  SignalSender::RemoveActiveSampler(this);
  SetActive(false);
}
1283
1284
1285} }  // namespace v8::internal
1286