platform-linux.cc revision 9fac840a46e8b7e26894f4792ba26dde14c56b04
1// Copyright 2006-2008 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
// Platform specific code for Linux goes here. For the POSIX compatible parts
29// the implementation is in platform-posix.cc.
30
31#include <pthread.h>
32#include <semaphore.h>
33#include <signal.h>
34#include <sys/prctl.h>
35#include <sys/time.h>
36#include <sys/resource.h>
37#include <sys/syscall.h>
38#include <sys/types.h>
39#include <stdlib.h>
40
41// Ubuntu Dapper requires memory pages to be marked as
42// executable. Otherwise, OS raises an exception when executing code
43// in that page.
44#include <sys/types.h>  // mmap & munmap
45#include <sys/mman.h>   // mmap & munmap
46#include <sys/stat.h>   // open
47#include <fcntl.h>      // open
48#include <unistd.h>     // sysconf
49#ifdef __GLIBC__
50#include <execinfo.h>   // backtrace, backtrace_symbols
51#endif  // def __GLIBC__
#include <string.h>     // strstr, strlen, strncpy
#include <strings.h>    // index
#include <errno.h>
#include <stdarg.h>
55
56#undef MAP_TYPE
57
58#include "v8.h"
59
60#include "platform.h"
61#include "top.h"
62#include "v8threads.h"
63#include "vm-state-inl.h"
64
65
66namespace v8 {
67namespace internal {
68
69// 0 is never a valid thread id on Linux since tids and pids share a
70// name space and pid 0 is reserved (see man 2 kill).
71static const pthread_t kNoThread = (pthread_t) 0;
72
73
// Rounds x upward, returning the smallest integral value not less than x.
double ceiling(double x) {
  const double rounded_up = ceil(x);
  return rounded_up;
}
77
78
79void OS::Setup() {
80  // Seed the random number generator.
81  // Convert the current time to a 64-bit integer first, before converting it
82  // to an unsigned. Going directly can cause an overflow and the seed to be
83  // set to all ones. The seed will be identical for different instances that
84  // call this setup code within the same millisecond.
85  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
86  srandom(static_cast<unsigned int>(seed));
87}
88
89
// Returns the bitset of CPU features that the compiler's target settings
// already guarantee, so runtime detection can be skipped for them.
uint64_t OS::CpuFeaturesImpliedByPlatform() {
#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
  // Here gcc is telling us that we are on an ARM and gcc is assuming that we
  // have VFP3 instructions.  If gcc can assume it then so can we.
  return 1u << VFP3;
#elif CAN_USE_ARMV7_INSTRUCTIONS
  return 1u << ARMv7;
#else
  return 0;  // Linux runs on anything.
#endif
}
101
102
103#ifdef __arm__
// Returns true if /proc/cpuinfo contains |search_string|.
//
// The previous one-pass fgetc matcher discarded the character that caused
// a partial-match failure, so overlapping occurrences (e.g. finding
// "vfpv3" in "...vfvfpv3...") were missed. Instead, read the whole file
// into a buffer and use strstr. /proc files report a size of zero, so the
// file is read in chunks and the buffer grown as needed.
static bool CPUInfoContainsString(const char* search_string) {
  const char* file_name = "/proc/cpuinfo";
  FILE* f = fopen(file_name, "r");
  if (f == NULL) return false;

  size_t capacity = 1024;
  size_t length = 0;
  char* buffer = reinterpret_cast<char*>(malloc(capacity));
  if (buffer == NULL) {
    fclose(f);
    return false;
  }
  for (;;) {
    // Leave one byte for the NUL terminator.
    size_t bytes = fread(buffer + length, 1, capacity - length - 1, f);
    length += bytes;
    if (bytes == 0) break;  // EOF (or read error): stop reading.
    if (length == capacity - 1) {
      // Buffer full; double it and keep reading.
      capacity *= 2;
      char* grown = reinterpret_cast<char*>(realloc(buffer, capacity));
      if (grown == NULL) {
        free(buffer);
        fclose(f);
        return false;
      }
      buffer = grown;
    }
  }
  fclose(f);
  buffer[length] = '\0';

  bool found = strstr(buffer, search_string) != NULL;
  free(buffer);
  return found;
}
136
137bool OS::ArmCpuHasFeature(CpuFeature feature) {
138  const char* search_string = NULL;
139  // Simple detection of VFP at runtime for Linux.
140  // It is based on /proc/cpuinfo, which reveals hardware configuration
141  // to user-space applications.  According to ARM (mid 2009), no similar
142  // facility is universally available on the ARM architectures,
143  // so it's up to individual OSes to provide such.
144  switch (feature) {
145    case VFP3:
146      search_string = "vfpv3";
147      break;
148    case ARMv7:
149      search_string = "ARMv7";
150      break;
151    default:
152      UNREACHABLE();
153  }
154
155  if (CPUInfoContainsString(search_string)) {
156    return true;
157  }
158
159  if (feature == VFP3) {
160    // Some old kernels will report vfp not vfpv3. Here we make a last attempt
161    // to detect vfpv3 by checking for vfp *and* neon, since neon is only
162    // available on architectures with vfpv3.
163    // Checking neon on its own is not enough as it is possible to have neon
164    // without vfp.
165    if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
166      return true;
167    }
168  }
169
170  return false;
171}
172#endif  // def __arm__
173
174
// Returns the stack alignment in bytes required at call boundaries.
int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#elif V8_TARGET_ARCH_MIPS
  return 8;
#endif
  // With gcc 4.4 the tree vectorization optimizer can generate code
  // that requires 16 byte alignment such as movdqa on x86.
  return 16;
}


// Stores |value| to |*ptr| with release semantics: prior memory writes are
// made visible before the store itself.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)
  // Only use on ARM hardware.
  MemoryBarrier();
#else
  // Compiler-only barrier: prevents compile-time reordering of the store.
  __asm__ __volatile__("" : : : "memory");
  // An x86 store acts as a release barrier.
#endif
  *ptr = value;
}


// Returns the abbreviation of the local time zone in effect at |time|
// (milliseconds since the epoch), or "" on failure.
const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  // NOTE(review): localtime() uses a shared static buffer (not thread-safe).
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}
208
209
210double OS::LocalTimeOffset() {
211  time_t tv = time(NULL);
212  struct tm* t = localtime(&tv);
213  // tm_gmtoff includes any daylight savings offset, so subtract it.
214  return static_cast<double>(t->tm_gmtoff * msPerSecond -
215                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
216}
217
218
219// We keep the lowest and highest addresses mapped as a quick way of
220// determining that pointers are outside the heap (used mostly in assertions
221// and verification).  The estimate is conservative, ie, not all addresses in
222// 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


// Grows the [lowest, highest) envelope to cover [address, address + size).
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
234
235
236bool OS::IsOutsideAllocatedSpace(void* address) {
237  return address < lowest_ever_allocated || address >= highest_ever_allocated;
238}
239
240
241size_t OS::AllocateAlignment() {
242  return sysconf(_SC_PAGESIZE);
243}
244
245
// Allocates at least |requested| bytes of page-aligned memory via mmap.
// On success stores the page-rounded size in |*allocated| and returns the
// base address; returns NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  // TODO(805): Port randomization of allocated executable memory to Linux.
  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}


// Releases memory previously obtained from OS::Allocate.
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is only checked in
  // debug builds (via the ASSERT below); release builds ignore it.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}
269
270
271#ifdef ENABLE_HEAP_PROTECTION
272
// Makes the region [address, address + size) read-only.
void OS::Protect(void* address, size_t size) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  mprotect(address, size, PROT_READ);
}


// Restores read/write (and optionally execute) access to the region.
void OS::Unprotect(void* address, size_t size, bool is_executable) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  mprotect(address, size, prot);
}
284
285#endif
286
287
288void OS::Sleep(int milliseconds) {
289  unsigned int ms = static_cast<unsigned int>(milliseconds);
290  usleep(1000 * ms);
291}
292
293
// Terminates the process abnormally (raises SIGABRT); never returns.
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


// Triggers a debugger breakpoint using the host architecture's trap
// instruction.
void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
//  which is the architecture of generated code).
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
  asm("bkpt 0");
# endif
#elif defined(__mips__)
  asm("break");
#else
  asm("int $3");
#endif
}
313
314
// Pairs an open FILE* with its mmap'ed contents; the out-of-line
// destructor below unmaps the memory and closes the file.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
 private:
  FILE* file_;    // Owned; closed by the destructor.
  void* memory_;  // Mapped region of size_ bytes; checked by the destructor.
  int size_;
};
326
327
328OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
329    void* initial) {
330  FILE* file = fopen(name, "w+");
331  if (file == NULL) return NULL;
332  int result = fwrite(initial, size, 1, file);
333  if (result < 1) {
334    fclose(file);
335    return NULL;
336  }
337  void* memory =
338      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
339  return new PosixMemoryMappedFile(file, memory, size);
340}
341
342
343PosixMemoryMappedFile::~PosixMemoryMappedFile() {
344  if (memory_) munmap(memory_, size_);
345  fclose(file_);
346}
347
348
// Logs the address range of every executable mapping of this process
// (parsed from /proc/self/maps) so the profiler can attribute ticks to
// shared libraries.
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return;

  // Allocate enough room to be able to store a full file name.
  // NOTE(review): the malloc result is not checked for NULL.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  // This loop will terminate once the scanning hits an EOF.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
      // Found a read-only executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/'));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if (c == '/') {
        ungetc(c, fp);  // Push the '/' back into the stream to be read below.

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      LOG(SharedLibraryEvent(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to setup
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
#endif
}
408
409
410static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
411
412
413void OS::SignalCodeMovingGC() {
414#ifdef ENABLE_LOGGING_AND_PROFILING
415  // Support for ll_prof.py.
416  //
417  // The Linux profiler built into the kernel logs all mmap's with
418  // PROT_EXEC so that analysis tools can properly attribute ticks. We
419  // do a mmap with a name known by ll_prof.py and immediately munmap
420  // it. This injects a GC marker into the stream of events generated
421  // by the kernel and allows us to synchronize V8 code log and the
422  // kernel log.
423  int size = sysconf(_SC_PAGESIZE);
424  FILE* f = fopen(kGCFakeMmap, "w+");
425  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
426                    fileno(f), 0);
427  ASSERT(addr != MAP_FAILED);
428  munmap(addr, size);
429  fclose(f);
430#endif
431}
432
433
// Captures and symbolizes the current call stack into |frames|. Returns
// the number of frames written, kStackWalkError on failure, or 0 on
// non-glibc C libraries where backtrace() is unavailable.
int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // backtrace is a glibc extension.
#ifdef __GLIBC__
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  // backtrace_symbols mallocs a single block holding all strings; it is
  // freed below after the text has been copied out.
  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);

  return frames_count;
#else  // ndef __GLIBC__
  return 0;
#endif  // ndef __GLIBC__
}
465
466
// Constants used for mmap.
static const int kMmapFd = -1;       // Anonymous mapping: no backing file.
static const int kMmapFdOffset = 0;


// Reserves |size| bytes of address space without committing any memory
// (PROT_NONE + MAP_NORESERVE). On failure address_ holds MAP_FAILED,
// which IsReserved() detects.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = mmap(NULL, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}


// Unmaps the reservation; on success address_ is reset to MAP_FAILED so
// IsReserved() becomes false.
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}


// True while the address range claimed by the constructor is still held.
bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}


// Commits real (zero-filled) pages over part of the reservation; MAP_FIXED
// replaces the PROT_NONE mapping in place. Returns false if mmap fails.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}


// Returns a committed range to its reserved-only state, releasing the
// backing pages while keeping the address space claimed.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
510
511
// Linux-specific state behind ThreadHandle: just the pthread handle.
class ThreadHandle::PlatformData : public Malloced {
 public:
  explicit PlatformData(ThreadHandle::Kind kind) {
    Initialize(kind);
  }

  // SELF binds the handle to the calling thread; INVALID marks it unbound
  // (kNoThread, see above).
  void Initialize(ThreadHandle::Kind kind) {
    switch (kind) {
      case ThreadHandle::SELF: thread_ = pthread_self(); break;
      case ThreadHandle::INVALID: thread_ = kNoThread; break;
    }
  }

  pthread_t thread_;  // Thread handle for pthread.
};
527
528
// Allocates platform data initialized for |kind| (SELF or INVALID).
ThreadHandle::ThreadHandle(Kind kind) {
  data_ = new PlatformData(kind);
}


// Re-initializes an existing handle without reallocating its data.
void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}


ThreadHandle::~ThreadHandle() {
  delete data_;
}


// True if this handle refers to the calling thread.
bool ThreadHandle::IsSelf() const {
  return pthread_equal(data_->thread_, pthread_self());
}


// True if the handle has been bound to a real thread (cf. kNoThread).
bool ThreadHandle::IsValid() const {
  return data_->thread_ != kNoThread;
}


// Creates an unstarted thread with a placeholder name.
Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
  set_name("v8:<unknown>");
}


// Creates an unstarted thread named |name| (truncated to fit name_).
Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
  set_name(name);
}


Thread::~Thread() {
}
566
567
// Entry trampoline handed to pthread_create; runs on the new thread and
// dispatches to Thread::Run().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  // Expose the thread name to the kernel (visible in ps/top/debuggers).
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
        0, 0, 0);
  thread->thread_handle_data()->thread_ = pthread_self();
  ASSERT(thread->IsValid());
  thread->Run();
  return NULL;
}
581
582
// Copies |name| into the fixed-size name_ buffer, truncating long names
// and always NUL-terminating.
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


// Starts the thread; ThreadEntry will call Run().
// NOTE(review): the pthread_create result is only checked via the ASSERT.
void Thread::Start() {
  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
  ASSERT(IsValid());
}


// Blocks until the thread's Run() has returned.
void Thread::Join() {
  pthread_join(thread_handle_data()->thread_, NULL);
}


// Allocates a thread-local storage slot (no destructor callback).
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


// Releases a slot created by CreateThreadLocalKey.
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


// Reads the calling thread's value for |key| (NULL if never set).
void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


// Sets the calling thread's value for |key|.
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


// Yields the processor to another runnable thread, if any.
void Thread::YieldCPU() {
  sched_yield();
}
632
633
634class LinuxMutex : public Mutex {
635 public:
636
637  LinuxMutex() {
638    pthread_mutexattr_t attrs;
639    int result = pthread_mutexattr_init(&attrs);
640    ASSERT(result == 0);
641    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
642    ASSERT(result == 0);
643    result = pthread_mutex_init(&mutex_, &attrs);
644    ASSERT(result == 0);
645  }
646
647  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
648
649  virtual int Lock() {
650    int result = pthread_mutex_lock(&mutex_);
651    return result;
652  }
653
654  virtual int Unlock() {
655    int result = pthread_mutex_unlock(&mutex_);
656    return result;
657  }
658
659  virtual bool TryLock() {
660    int result = pthread_mutex_trylock(&mutex_);
661    // Return false if the lock is busy and locking failed.
662    if (result == EBUSY) {
663      return false;
664    }
665    ASSERT(result == 0);  // Verify no other errors.
666    return true;
667  }
668
669 private:
670  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
671};
672
673
// Factory for the platform mutex; the caller owns the returned object.
Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}


// Counting semaphore backed by an unnamed, process-local POSIX semaphore.
class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};


// Blocks until signalled, retrying when interrupted by a signal (EINTR).
void LinuxSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


// Converts a timeval (microseconds) into a timespec (nanoseconds); defined
// here because some C libraries lack this macro.
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    (ts)->tv_sec = (tv)->tv_sec;                                    \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
} while (false)
#endif
707
708
// Waits for the semaphore with |timeout| given in microseconds. Returns
// true if acquired, false on timeout (or if the current time cannot be
// read).
bool LinuxSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and nanosecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result > 0) {
      // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
      errno = result;
      result = -1;
    }
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


// Factory for the platform semaphore; the caller owns the returned object.
Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}
747
748
749#ifdef ENABLE_LOGGING_AND_PROFILING
750
// The single active sampler (at most one at a time on POSIX) and the
// kernel thread id of the VM thread it samples.
static Sampler* active_sampler_ = NULL;
static int vm_tid_ = 0;


#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.

struct sigcontext {
  uint32_t trap_no;
  uint32_t error_code;
  uint32_t oldmask;
  uint32_t gregs[16];
  uint32_t arm_cpsr;
  uint32_t fault_address;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  __sigset_t uc_sigmask;
} ucontext_t;
// Indices into gregs used by the signal handler for pc, sp and fp.
enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};

#endif
779
780
// Returns the kernel thread id of the calling thread.
static int GetThreadID() {
  // There is no glibc wrapper for gettid(2), so issue the raw syscall.
  return static_cast<int>(syscall(SYS_gettid));
}
785
786
// SIGPROF handler. Runs on the interrupted thread and records a tick
// sample from the register state captured in |context|.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#ifndef V8_HOST_ARCH_MIPS
  USE(info);
  if (signal != SIGPROF) return;
  // Ignore ticks when no sampler is active, or when the signal was
  // delivered to a thread other than the VM thread being profiled.
  if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
  if (vm_tid_ != GetThreadID()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent();
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = Top::current_vm_state();
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
  sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
  sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
  sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#elif V8_HOST_ARCH_MIPS
  // Implement this on MIPS.
  UNIMPLEMENTED();
#endif
  active_sampler_->SampleStack(sample);
  active_sampler_->Tick(sample);
#endif
}
829
830
// Platform state for the sampler: owns the signal-sending thread and the
// previously installed SIGPROF handler.
class Sampler::PlatformData : public Malloced {
 public:
  enum SleepInterval {
    FULL_INTERVAL,
    HALF_INTERVAL
  };

  explicit PlatformData(Sampler* sampler)
      : sampler_(sampler),
        signal_handler_installed_(false),
        vm_tgid_(getpid()),
        signal_sender_launched_(false) {
  }

  // Body of the sender thread: periodically delivers SIGPROF to the VM
  // thread and/or notifies the runtime profiler until the sampler stops.
  void SignalSender() {
    while (sampler_->IsActive()) {
      if (rate_limiter_.SuspendIfNecessary()) continue;
      if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
        // Interleave both activities at half intervals each.
        SendProfilingSignal();
        Sleep(HALF_INTERVAL);
        RuntimeProfiler::NotifyTick();
        Sleep(HALF_INTERVAL);
      } else {
        if (sampler_->IsProfiling()) SendProfilingSignal();
        if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
        Sleep(FULL_INTERVAL);
      }
    }
  }

  void SendProfilingSignal() {
    // Glibc doesn't provide a wrapper for tgkill(2).
    syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
  }

  void Sleep(SleepInterval full_or_half) {
    // Convert ms to us and subtract 100 us to compensate delays
    // occurring during signal delivery.
    useconds_t interval = sampler_->interval_ * 1000 - 100;
    if (full_or_half == HALF_INTERVAL) interval /= 2;
    int result = usleep(interval);
#ifdef DEBUG
    if (result != 0 && errno != EINTR) {
      fprintf(stderr,
              "SignalSender usleep error; interval = %u, errno = %d\n",
              interval,
              errno);
      ASSERT(result == 0 || errno == EINTR);
    }
#endif
    USE(result);
  }

  Sampler* sampler_;
  bool signal_handler_installed_;
  struct sigaction old_signal_handler_;  // Restored when sampling stops.
  int vm_tgid_;                          // Process (thread group) id.
  bool signal_sender_launched_;
  pthread_t signal_sender_thread_;
  RuntimeProfilerRateLimiter rate_limiter_;
};


// Entry trampoline for the signal sender thread.
static void* SenderEntry(void* arg) {
  Sampler::PlatformData* data =
      reinterpret_cast<Sampler::PlatformData*>(arg);
  data->SignalSender();
  return 0;
}
900
901
// Creates an inactive sampler that, once started, ticks every |interval| ms.
Sampler::Sampler(int interval)
    : interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData(this);
}


Sampler::~Sampler() {
  // Stop() must have been called (or Start() never called).
  ASSERT(!data_->signal_sender_launched_);
  delete data_;
}


// Installs the SIGPROF handler and launches the signal-sender thread.
void Sampler::Start() {
  // There can only be one active sampler at the time on POSIX
  // platforms.
  ASSERT(!IsActive());
  vm_tid_ = GetThreadID();

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;

  // Start a thread that sends SIGPROF signal to VM thread.
  // Sending the signal ourselves instead of relying on itimer provides
  // much better accuracy.
  SetActive(true);
  if (pthread_create(
          &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
    data_->signal_sender_launched_ = true;
  }

  // Set this sampler as the active sampler.
  active_sampler_ = this;
}


// Stops sampling: joins the sender thread and restores the old handler.
void Sampler::Stop() {
  SetActive(false);

  // Wait for signal sender termination (it will exit after setting
  // active_ to false).
  if (data_->signal_sender_launched_) {
    Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
    pthread_join(data_->signal_sender_thread_, NULL);
    data_->signal_sender_launched_ = false;
  }

  // Restore old signal handler
  if (data_->signal_handler_installed_) {
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
}
965
966
967#endif  // ENABLE_LOGGING_AND_PROFILING
968
969} }  // namespace v8::internal
970