platform-solaris.cc revision 5d4cdbf7a67d3662fa0bee4efdb7edd8daec9b0b
1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
// Platform specific code for Solaris 10 goes here. For the POSIX compatible
29// parts the implementation is in platform-posix.cc.
30
31#ifdef __sparc
32# error "V8 does not support the SPARC CPU architecture."
33#endif
34
35#include <sys/stack.h>  // for stack alignment
36#include <unistd.h>  // getpagesize(), usleep()
37#include <sys/mman.h>  // mmap()
38#include <ucontext.h>  // walkstack(), getcontext()
39#include <dlfcn.h>     // dladdr
40#include <pthread.h>
41#include <sched.h>  // for sched_yield
42#include <semaphore.h>
43#include <time.h>
44#include <sys/time.h>  // gettimeofday(), timeradd()
45#include <errno.h>
46#include <ieeefp.h>  // finite()
47#include <signal.h>  // sigemptyset(), etc
48#include <sys/regset.h>
49
50
51#undef MAP_TYPE
52
53#include "v8.h"
54
55#include "platform.h"
56#include "v8threads.h"
57#include "vm-state-inl.h"
58
59
60// It seems there is a bug in some Solaris distributions (experienced in
61// SunOS 5.10 Generic_141445-09) which make it difficult or impossible to
62// access signbit() despite the availability of other C99 math functions.
63#ifndef signbit
64// Test sign - usually defined in math.h
65int signbit(double x) {
66  // We need to take care of the special case of both positive and negative
67  // versions of zero.
68  if (x == 0) {
69    return fpclass(x) & FP_NZERO;
70  } else {
71    // This won't detect negative NaN but that should be okay since we don't
72    // assume that behavior.
73    return x < 0;
74  }
75}
76#endif  // signbit
77
78namespace v8 {
79namespace internal {
80
81
// 0 is never a valid thread id on Solaris since the main thread is 1 and
// subsequent threads have their ids incremented from there.
static const pthread_t kNoThread = (pthread_t) 0;
85
86
// Returns the smallest integral value not less than x.
double ceiling(double x) {
  const double rounded_up = ceil(x);
  return rounded_up;
}
90
91
// Guards updates to the allocated-space limits below; created in SetUp().
static Mutex* limit_mutex = NULL;
// One-time process initialization, run before any V8 allocation happens.
void OS::SetUp() {
  // Seed the random number generator.
  // Convert the current time to a 64-bit integer first, before converting it
  // to an unsigned. Going directly will cause an overflow and the seed to be
  // set to all ones. The seed will be identical for different instances that
  // call this setup code within the same millisecond.
  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}


// No OS-probed CPU features beyond the compile-time defaults on Solaris.
uint64_t OS::CpuFeaturesImpliedByPlatform() {
  return 0;  // Solaris runs on a lot of things.
}


// Stack alignment required when calling into generated code.
int OS::ActivationFrameAlignment() {
  // GCC generates code that requires 16 byte alignment such as movdqa.
  return Max(STACK_ALIGN, 16);
}


// Store with release semantics. The empty asm with a "memory" clobber is
// a compiler-level barrier: it keeps preceding writes from being
// reordered past the store. No hardware fence is issued here.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  __asm__ __volatile__("" : : : "memory");
  *ptr = value;
}
120
121
122const char* OS::LocalTimezone(double time) {
123  if (isnan(time)) return "";
124  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
125  struct tm* t = localtime(&tv);
126  if (NULL == t) return "";
127  return tzname[0];  // The location of the timezone string on Solaris.
128}
129
130
131double OS::LocalTimeOffset() {
132  // On Solaris, struct tm does not contain a tm_gmtoff field.
133  time_t utc = time(NULL);
134  ASSERT(utc != -1);
135  struct tm* loc = localtime(&utc);
136  ASSERT(loc != NULL);
137  return static_cast<double>((mktime(loc) - utc) * msPerSecond);
138}
139
140
141// We keep the lowest and highest addresses mapped as a quick way of
142// determining that pointers are outside the heap (used mostly in assertions
143// and verification).  The estimate is conservative, i.e., not all addresses in
144// 'allocated' space are actually allocated to our heap.  The range is
145// [lowest, highest), inclusive on the low and and exclusive on the high end.
// Initialized to the extreme values so the first update always tightens them.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


// Widens the [lowest, highest) envelope to cover [address, address + size).
// Requires OS::SetUp() to have created limit_mutex already.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
159
160
161bool OS::IsOutsideAllocatedSpace(void* address) {
162  return address < lowest_ever_allocated || address >= highest_ever_allocated;
163}
164
165
166size_t OS::AllocateAlignment() {
167  return static_cast<size_t>(getpagesize());
168}
169
170
// Maps at least |requested| bytes (rounded up to whole pages) of
// anonymous memory. The actual size is reported through |allocated|.
// Returns NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);

  if (mbase == MAP_FAILED) {
    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  // Track the extremes for IsOutsideAllocatedSpace().
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}


// Releases memory obtained from OS::Allocate; |size| must match the
// value reported through |allocated| there.
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}
194
195
196void OS::Sleep(int milliseconds) {
197  useconds_t ms = static_cast<useconds_t>(milliseconds);
198  usleep(1000 * ms);
199}
200
201
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


// Traps into an attached debugger.
void OS::DebugBreak() {
  // ia32/x64 software breakpoint (the SPARC port is rejected at the top
  // of this file).
  asm("int $3");
}


// Memory-mapped file backed by mmap(); owns both the stdio stream and
// the mapping, releasing them in the destructor.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  // Takes ownership of |file| and of the |size|-byte mapping at |memory|.
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Underlying stdio stream, closed by the destructor.
  void* memory_;  // Start of the mapping.
  int size_;      // Mapping length in bytes.
};
225
226
227OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
228  FILE* file = fopen(name, "r+");
229  if (file == NULL) return NULL;
230
231  fseek(file, 0, SEEK_END);
232  int size = ftell(file);
233
234  void* memory =
235      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
236  return new PosixMemoryMappedFile(file, memory, size);
237}
238
239
240OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
241    void* initial) {
242  FILE* file = fopen(name, "w+");
243  if (file == NULL) return NULL;
244  int result = fwrite(initial, size, 1, file);
245  if (result < 1) {
246    fclose(file);
247    return NULL;
248  }
249  void* memory =
250      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
251  return new PosixMemoryMappedFile(file, memory, size);
252}
253
254
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  // Unmap before closing the underlying stream.
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}


// Not implemented on Solaris: shared library addresses are not logged.
void OS::LogSharedLibraryAddresses() {
}


// Not implemented on Solaris: no code-moving GC marker is emitted.
void OS::SignalCodeMovingGC() {
}


// Bookkeeping passed through walkcontext() to StackWalkCallback: the
// caller's output frame buffer and the number of frames filled so far.
struct StackWalker {
  Vector<OS::StackFrame>& frames;
  int index;
};
273
274
275static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
276  struct StackWalker* walker = static_cast<struct StackWalker*>(data);
277  Dl_info info;
278
279  int i = walker->index;
280
281  walker->frames[i].address = reinterpret_cast<void*>(pc);
282
283  // Make sure line termination is in place.
284  walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
285
286  Vector<char> text = MutableCStrVector(walker->frames[i].text,
287                                        OS::kStackWalkMaxTextLen);
288
289  if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) {
290    OS::SNPrintF(text, "[0x%p]", pc);
291  } else if ((info.dli_fname != NULL && info.dli_sname != NULL)) {
292    // We have symbol info.
293    OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc);
294  } else {
295    // No local symbol info.
296    OS::SNPrintF(text,
297                 "%s'0x%p [0x%p]",
298                 info.dli_fname,
299                 pc - reinterpret_cast<uintptr_t>(info.dli_fbase),
300                 pc);
301  }
302  walker->index++;
303  return 0;
304}
305
306
307int OS::StackWalk(Vector<OS::StackFrame> frames) {
308  ucontext_t ctx;
309  struct StackWalker walker = { frames, 0 };
310
311  if (getcontext(&ctx) < 0) return kStackWalkError;
312
313  if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
314    return kStackWalkError;
315  }
316
317  return walker.index;
318}
319
320
// Constants used for mmap.
static const int kMmapFd = -1;  // Anonymous mappings are made with fd -1...
static const int kMmapFdOffset = 0;  // ...and offset 0.


// An empty placeholder; a region can be attached later.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

// Reserves |size| bytes of address space (no pages committed);
// address_ remains NULL if the reservation fails.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}
332
333
// Reserves |size| bytes aligned to |alignment| by over-reserving
// size + alignment bytes and unmapping the excess on both sides.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE + MAP_NORESERVE: claim address space only; actual pages
  // are committed later through Commit().
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;  // Leaves the object unreserved.

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // Exactly the aligned block should remain at this point.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
372
373
// Releases the reservation, if any.
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


// True if this object currently holds a reserved address range.
bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


// Detaches from the reservation without unmapping it.
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


// Makes [address, address + size) accessible (read/write, and execute
// when requested).
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


// Returns the range to reserved-but-inaccessible state.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


// Installs an inaccessible guard page at |address|.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


// Reserves |size| bytes of address space without committing any pages.
// Returns NULL on failure.
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


// Commits pages inside a reservation by remapping them over the
// PROT_NONE mapping with real access permissions (MAP_FIXED).
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  // Track the extremes for OS::IsOutsideAllocatedSpace().
  UpdateAllocatedSpaceLimits(base, size);
  return true;
}


// Replaces committed pages with a fresh PROT_NONE mapping, discarding
// their contents while keeping the address range reserved.
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


// Unmaps the whole range, handing the addresses back to the OS.
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}
453
454
// Per-thread platform state: just the pthread handle.
class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {  }

  pthread_t thread_;  // Thread handle for pthread.
};


// Creates a not-yet-started thread; the OS thread is spawned by Start().
Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}


// Entry trampoline handed to pthread_create().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
485
486
487void Thread::set_name(const char* name) {
488  strncpy(name_, name, sizeof(name_));
489  name_[sizeof(name_) - 1] = '\0';
490}
491
492
493void Thread::Start() {
494  pthread_attr_t* attr_ptr = NULL;
495  pthread_attr_t attr;
496  if (stack_size_ > 0) {
497    pthread_attr_init(&attr);
498    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
499    attr_ptr = &attr;
500  }
501  pthread_create(&data_->thread_, NULL, ThreadEntry, this);
502  ASSERT(data_->thread_ != kNoThread);
503}
504
505
// Blocks until the thread's Run() has finished.
void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}


// Allocates a process-wide TLS key (no destructor callback).
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


// Returns the calling thread's value for |key| (NULL if never set).
void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


// Gives up the remainder of the calling thread's time slice.
void Thread::YieldCPU() {
  sched_yield();
}
543
544
// Mutex implementation on top of a recursive pthread mutex.
class SolarisMutex : public Mutex {
 public:
  SolarisMutex() {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    // Recursive, so the same thread may lock repeatedly without deadlock.
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex_, &attr);
  }

  ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }

  int Lock() { return pthread_mutex_lock(&mutex_); }

  int Unlock() { return pthread_mutex_unlock(&mutex_); }

  // Non-blocking acquisition; true when the lock was taken.
  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;
};


Mutex* OS::CreateMutex() {
  return new SolarisMutex();
}
578
579
// Counting semaphore backed by a POSIX sem_t (process-private).
class SolarisSemaphore : public Semaphore {
 public:
  explicit SolarisSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~SolarisSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};


// Blocks until the semaphore can be decremented, retrying across signal
// interruptions (EINTR).
void SolarisSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
599}
600
601
// Fallback: convert a timeval (usec) to a timespec (nsec) when the
// system headers do not provide this conversion macro.
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    (ts)->tv_sec = (tv)->tv_sec;                                    \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
} while (false)
#endif


// Fallback timeval addition (with microsecond carry) when the BSD
// timeradd macro is unavailable.
#ifndef timeradd
#define timeradd(a, b, result) \
  do { \
    (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
    (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
    if ((result)->tv_usec >= 1000000) { \
      ++(result)->tv_sec; \
      (result)->tv_usec -= 1000000; \
    } \
  } while (0)
#endif
621
622
// Waits up to |timeout| microseconds for the semaphore. Returns true if
// it was acquired, false on timeout (or if the current time cannot be
// read).
bool SolarisSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and microsecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout (sem_timedwait takes an absolute
  // deadline, not a relative duration).
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


Semaphore* OS::CreateSemaphore(int count) {
  return new SolarisSemaphore(count);
}


// Identifies the sampled (VM) thread so SIGPROF can be directed at it.
static pthread_t GetThreadID() {
  return pthread_self();
}
661
// SIGPROF handler: captures pc/sp/fp of the interrupted thread from the
// signal context and feeds a TickSample to the profiler. Runs in signal
// context, so every bail-out below is checked before touching VM state.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    // Under the Locker, only the thread holding it may be sampled.
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = isolate->current_vm_state();

  // Program counter, stack and frame pointers of the interrupted code.
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);

  sampler->SampleStack(sample);
  sampler->Tick(sample);
}

// Remembers the thread a Sampler was created on (the VM thread).
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};
704
705
// Singleton background thread that periodically delivers SIGPROF to VM
// threads (driving CPU profiling) and/or ticks the runtime profiler.
class SignalSender : public Thread {
 public:
  // Sleep granularity: the full sampling interval, or half of it when
  // CPU and runtime profiling are interleaved.
  enum SleepInterval {
    HALF_INTERVAL,
    FULL_INTERVAL
  };

  static const int kSignalSenderStackSize = 64 * KB;

  explicit SignalSender(int interval)
      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
        interval_(interval) {}

  // Installs ProfilerSignalHandler for SIGPROF, remembering the previous
  // disposition so it can be restored later.
  static void InstallSignalHandler() {
    struct sigaction sa;
    sa.sa_sigaction = ProfilerSignalHandler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
    signal_handler_installed_ =
        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
  }

  static void RestoreSignalHandler() {
    if (signal_handler_installed_) {
      sigaction(SIGPROF, &old_signal_handler_, 0);
      signal_handler_installed_ = false;
    }
  }

  // Registers |sampler| and lazily starts the singleton sender thread.
  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_.Pointer());
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      // Start a thread that will send SIGPROF signal to VM threads,
      // when CPU profiling will be enabled.
      instance_ = new SignalSender(sampler->interval());
      instance_->Start();
    } else {
      // All samplers must share one interval; the singleton enforces it.
      ASSERT(instance_->interval_ == sampler->interval());
    }
  }

  // Unregisters |sampler|; tears down the sender thread and the signal
  // handler once the last sampler is gone.
  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_.Pointer());
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
      delete instance_;
      instance_ = NULL;
      RestoreSignalHandler();
    }
  }

  // Implement Thread::Run().
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      // Keep the SIGPROF handler in sync with the profiling state.
      if (cpu_profiling_enabled && !signal_handler_installed_) {
        InstallSignalHandler();
      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
        RestoreSignalHandler();
      }

      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled && runtime_profiler_enabled) {
        // Interleave both activities at half the sampling interval.
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
          return;
        }
        Sleep(HALF_INTERVAL);
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
          return;
        }
        Sleep(HALF_INTERVAL);
      } else {
        if (cpu_profiling_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
                                                      this)) {
            return;
          }
        }
        if (runtime_profiler_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
                                                      NULL)) {
            return;
          }
        }
        Sleep(FULL_INTERVAL);
      }
    }
  }

  // Sends SIGPROF to the sampler's VM thread, if it is being profiled.
  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
    if (!sampler->IsProfiling()) return;
    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
  }

  // Ticks the runtime profiler of the sampler's isolate.
  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    if (!sampler->isolate()->IsInitialized()) return;
    sampler->isolate()->runtime_profiler()->NotifyTick();
  }

  void SendProfilingSignal(pthread_t tid) {
    if (!signal_handler_installed_) return;
    pthread_kill(tid, SIGPROF);
  }

  void Sleep(SleepInterval full_or_half) {
    // Convert ms to us and subtract 100 us to compensate delays
    // occurring during signal delivery.
    useconds_t interval = interval_ * 1000 - 100;
    if (full_or_half == HALF_INTERVAL) interval /= 2;
    int result = usleep(interval);
#ifdef DEBUG
    if (result != 0 && errno != EINTR) {
      fprintf(stderr,
              "SignalSender usleep error; interval = %u, errno = %d\n",
              interval,
              errno);
      ASSERT(result == 0 || errno == EINTR);
    }
#endif
    USE(result);
  }

  const int interval_;  // Sampling interval in milliseconds.
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static LazyMutex mutex_;
  static SignalSender* instance_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SignalSender);
};

// Definitions of SignalSender's process-wide static members.
LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
856
857
// A Sampler is bound at construction to the thread that creates it (its
// PlatformData records that thread id for SIGPROF delivery).
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}


Sampler::~Sampler() {
  ASSERT(!IsActive());  // Stop() must have been called first.
  delete data_;
}


// Activates the sampler and registers it with the SignalSender thread.
void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SignalSender::AddActiveSampler(this);
}


// Unregisters from the SignalSender thread and deactivates the sampler.
void Sampler::Stop() {
  ASSERT(IsActive());
  SignalSender::RemoveActiveSampler(this);
  SetActive(false);
}
886
887} }  // namespace v8::internal
888