platform-linux.cc revision 888f6729be6a6f6fbe246cb5a9f122e2dbe455b7
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for Linux goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <stdlib.h>

// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, OS raises an exception when executing code
// in that page.
#include <sys/types.h>  // mmap & munmap
#include <sys/mman.h>   // mmap & munmap
#include <sys/stat.h>   // open
#include <fcntl.h>      // open
#include <unistd.h>     // sysconf
#ifdef __GLIBC__
#include <execinfo.h>   // backtrace, backtrace_symbols
#endif  // def __GLIBC__
#include <strings.h>    // index
#include <errno.h>
#include <stdarg.h>

#undef MAP_TYPE

#include "v8.h"

#include "platform.h"
#include "top.h"
#include "v8threads.h"


namespace v8 {
namespace internal {

// 0 is never a valid thread id on Linux since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;


double ceiling(double x) {
  return ceil(x);
}


void OS::Setup() {
  // Seed the random number generator.
  // Convert the current time to a 64-bit integer first, before converting it
  // to an unsigned. Going directly can cause an overflow and the seed to be
  // set to all ones. The seed will be identical for different instances that
  // call this setup code within the same millisecond.
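  // Illustrative magnitudes for the conversion above (approximate figures,
  // not from the original comment): TimeCurrentMillis() is on the order of
  // 1.2e12 ms, well above UINT_MAX (~4.29e9), so a direct double -> unsigned
  // conversion would be out of range; truncating via uint64_t simply keeps
  // the low 32 bits as the seed.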
  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
  srandom(static_cast<unsigned int>(seed));
}


uint64_t OS::CpuFeaturesImpliedByPlatform() {
#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
  // Here gcc is telling us that we are on an ARM and gcc is assuming that we
  // have VFP3 instructions.  If gcc can assume it then so can we.
  return 1u << VFP3;
#else
  return 0;  // Linux runs on anything.
#endif
}


#ifdef __arm__
bool OS::ArmCpuHasFeature(CpuFeature feature) {
  const char* search_string = NULL;
  const char* file_name = "/proc/cpuinfo";
  // Simple detection of VFP at runtime for Linux.
  // It is based on /proc/cpuinfo, which reveals hardware configuration
  // to user-space applications.  According to ARM (mid 2009), no similar
  // facility is universally available on the ARM architectures,
  // so it's up to individual OSes to provide one.
  //
  // This is written as a straight shot one pass parser
  // and not using STL string and ifstream because,
  // on Linux, it's reading from a (non-mmap-able)
  // character special device.
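  //
  // For reference, on a VFP-capable device /proc/cpuinfo typically contains a
  // line roughly like (illustrative; the exact feature list varies by CPU):
  //   Features        : swp half thumb fastmult vfp edsp
  // so scanning the raw text for the substring "vfp" is sufficient here.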
  switch (feature) {
    case VFP3:
      search_string = "vfp";
      break;
    default:
      UNREACHABLE();
  }

  FILE* f = NULL;
  const char* what = search_string;

  if (NULL == (f = fopen(file_name, "r")))
    return false;

  int k;
  while (EOF != (k = fgetc(f))) {
    if (k == *what) {
      ++what;
      while ((*what != '\0') && (*what == fgetc(f))) {
        ++what;
      }
      if (*what == '\0') {
        fclose(f);
        return true;
      } else {
        what = search_string;
      }
    }
  }
  fclose(f);

  // Did not find string in the proc file.
  return false;
}
#endif  // def __arm__


int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#else
  // With gcc 4.4 the tree vectorization optimiser can generate code
  // that requires 16 byte alignment such as movdqa on x86.
  return 16;
#endif
}


const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}


double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
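  // For example (illustrative): for Central European Summer Time tm_gmtoff is
  // 7200 seconds (UTC+2 including DST); subtracting the 3600-second DST
  // component yields the standard offset of 3600 seconds, i.e. 3600000 ms.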
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


size_t OS::AllocateAlignment() {
  return sysconf(_SC_PAGESIZE);
}


void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
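
// Illustrative usage sketch of OS::Allocate (hypothetical caller, not part of
// this file): requesting an executable region for generated code, as the
// Ubuntu Dapper note near the includes requires.
//
//   size_t actual = 0;
//   void* code_space = OS::Allocate(4096, &actual, true);  // is_executable
//   if (code_space != NULL) {
//     // ... emit machine code into [code_space, code_space + actual) ...
//     OS::Free(code_space, actual);
//   }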


void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


#ifdef ENABLE_HEAP_PROTECTION

void OS::Protect(void* address, size_t size) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  mprotect(address, size, PROT_READ);
}


void OS::Unprotect(void* address, size_t size, bool is_executable) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  mprotect(address, size, prot);
}

#endif


void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
//  which is the architecture of generated code).
#if defined(__arm__) || defined(__thumb__)
  asm("bkpt 0");
#else
  asm("int $3");
#endif
}


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}


void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
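  //
  // A typical matching line looks roughly like this (illustrative example,
  // field widths vary):
  //   08048000-08056000 r-xp 00000000 03:0c 64593   /lib/ld-2.7.so
  // Only the address range, the permission flags and the trailing file name
  // are used; the fields in between are skipped.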
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return;

  // Allocate enough room to be able to store a full file name.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  // This loop will terminate once the scanning hits an EOF.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_x == 'x') {
      // Found a readable and executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/'));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if (c == '/') {
        ungetc(c, fp);  // Push the '/' back into the stream to be read below.

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      LOG(SharedLibraryEvent(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to set up
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
#endif
}


int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // backtrace is a glibc extension.
#ifdef __GLIBC__
  int frames_size = frames.length();
  void** addresses = NewArray<void*>(frames_size);

  int frames_count = backtrace(addresses, frames_size);

  char** symbols;
  symbols = backtrace_symbols(addresses, frames_count);
  if (symbols == NULL) {
    DeleteArray(addresses);
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  DeleteArray(addresses);
  free(symbols);

  return frames_count;
#else  // ndef __GLIBC__
  return 0;
#endif  // ndef __GLIBC__
}


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


VirtualMemory::VirtualMemory(size_t size) {
  address_ = mmap(NULL, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
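
// Illustrative sketch of the reserve/commit pattern above (hypothetical
// caller; it assumes the address() and size() accessors declared in
// platform.h):
//
//   VirtualMemory reservation(1024 * 1024);          // reserve, PROT_NONE
//   if (reservation.IsReserved()) {
//     // Commit the first page as read/write before touching it.
//     reservation.Commit(reservation.address(), OS::AllocateAlignment(), false);
//   }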


class ThreadHandle::PlatformData : public Malloced {
 public:
  explicit PlatformData(ThreadHandle::Kind kind) {
    Initialize(kind);
  }

  void Initialize(ThreadHandle::Kind kind) {
    switch (kind) {
      case ThreadHandle::SELF: thread_ = pthread_self(); break;
      case ThreadHandle::INVALID: thread_ = kNoThread; break;
    }
  }

  pthread_t thread_;  // Thread handle for pthread.
};


ThreadHandle::ThreadHandle(Kind kind) {
  data_ = new PlatformData(kind);
}


void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}


ThreadHandle::~ThreadHandle() {
  delete data_;
}


bool ThreadHandle::IsSelf() const {
  return pthread_equal(data_->thread_, pthread_self());
}


bool ThreadHandle::IsValid() const {
  return data_->thread_ != kNoThread;
}


Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
}


Thread::~Thread() {
}


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->thread_handle_data()->thread_ = pthread_self();
  ASSERT(thread->IsValid());
  thread->Run();
  return NULL;
}


void Thread::Start() {
  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
  ASSERT(IsValid());
}


void Thread::Join() {
  pthread_join(thread_handle_data()->thread_, NULL);
}


Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}
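
// Illustrative sketch of the thread-local storage wrappers above
// (hypothetical caller, assuming they are exposed as static members of
// Thread in platform.h):
//
//   Thread::LocalStorageKey key = Thread::CreateThreadLocalKey();
//   Thread::SetThreadLocal(key, some_pointer);     // per-thread value
//   void* value = Thread::GetThreadLocal(key);     // some_pointer again
//   Thread::DeleteThreadLocalKey(key);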


void Thread::YieldCPU() {
  sched_yield();
}


class LinuxMutex : public Mutex {
 public:

  LinuxMutex() {
    pthread_mutexattr_t attrs;
    int result = pthread_mutexattr_init(&attrs);
    ASSERT(result == 0);
    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    ASSERT(result == 0);
    result = pthread_mutex_init(&mutex_, &attrs);
    ASSERT(result == 0);
  }

  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() {
    int result = pthread_mutex_lock(&mutex_);
    return result;
  }

  virtual int Unlock() {
    int result = pthread_mutex_unlock(&mutex_);
    return result;
  }

 private:
  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
};


Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}


class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};


void LinuxSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    (ts)->tv_sec = (tv)->tv_sec;                                    \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
} while (false)
#endif


bool LinuxSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split the timeout into second and microsecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result > 0) {
      // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
      errno = result;
      result = -1;
    }
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
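
// Illustrative usage sketch (hypothetical caller): note that the timeout
// argument of Wait(int) is in microseconds, as kOneSecondMicros above implies.
//
//   Semaphore* sem = OS::CreateSemaphore(0);
//   ...
//   bool signalled = sem->Wait(50 * 1000);  // wait up to 50 ms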


Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}


#ifdef ENABLE_LOGGING_AND_PROFILING

static Sampler* active_sampler_ = NULL;
static pthread_t vm_thread_ = 0;


#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.

struct sigcontext {
  uint32_t trap_no;
  uint32_t error_code;
  uint32_t oldmask;
  uint32_t gregs[16];
  uint32_t arm_cpsr;
  uint32_t fault_address;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  __sigset_t uc_sigmask;
} ucontext_t;
enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};

#endif


// A function that determines if a signal handler is called in the context
// of a VM thread.
//
// The problem is that SIGPROF signal can be delivered to an arbitrary thread
// (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2)
// So, if the signal is being handled in the context of a non-VM thread,
// it means that the VM thread is running, and trying to sample its stack can
// cause a crash.
static inline bool IsVmThread() {
  // In the case of a single VM thread, this check is enough.
  if (pthread_equal(pthread_self(), vm_thread_)) return true;
  // If there are multiple threads that use VM, they must have a thread id
  // stored in TLS. To verify that the thread is really executing VM,
  // we check Top's data. Because ThreadManager::RestoreThread first
  // restores ThreadLocalTop from TLS and only then erases the TLS value,
  // reading Top::thread_id() should not be affected by races.
  if (ThreadManager::HasId() && !ThreadManager::IsArchived() &&
      ThreadManager::CurrentId() == Top::thread_id()) {
    return true;
  }
  return false;
}


static void* ProfilerSignalHandler(int signal, siginfo_t* info, void* context);
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  if (active_sampler_ == NULL) return;

  TickSample sample;

  // If profiling, we extract the current pc and sp.
  if (active_sampler_->IsProfiling()) {
    // Extracting the sample from the context is extremely machine dependent.
    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
    mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
    sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
    sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
    sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
    sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
    sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
    sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    sample.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
    sample.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
    sample.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
    sample.pc = reinterpret_cast<Address>(mcontext.arm_pc);
    sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
    sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#endif
    if (IsVmThread())
      active_sampler_->SampleStack(&sample);
  }

  // We always sample the VM state.
  sample.state = Logger::state();

  active_sampler_->Tick(&sample);
}


class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() {
    signal_handler_installed_ = false;
  }

  bool signal_handler_installed_;
  struct sigaction old_signal_handler_;
  struct itimerval old_timer_value_;
};


Sampler::Sampler(int interval, bool profiling)
    : interval_(interval), profiling_(profiling), active_(false) {
  data_ = new PlatformData();
}


Sampler::~Sampler() {
  delete data_;
}


void Sampler::Start() {
  // There can only be one active sampler at a time on POSIX
  // platforms.
  if (active_sampler_ != NULL) return;

  vm_thread_ = pthread_self();

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;

  // Set the itimer to generate a tick for each interval.
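  // For example (illustrative): with interval_ == 10 ms the fields below come
  // out as it_interval.tv_sec == 0 and it_interval.tv_usec == 10000, i.e. a
  // SIGPROF roughly every 10 ms.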
  itimerval itimer;
  itimer.it_interval.tv_sec = interval_ / 1000;
  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);

  // Set this sampler as the active sampler.
  active_sampler_ = this;
  active_ = true;
}


void Sampler::Stop() {
  // Restore the old signal handler.
  if (data_->signal_handler_installed_) {
    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
  active_ = false;
}


#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal
