1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
// Platform specific code for FreeBSD goes here. For the POSIX compatible parts
29// the implementation is in platform-posix.cc.
30
31#include <pthread.h>
32#include <semaphore.h>
33#include <signal.h>
34#include <sys/time.h>
35#include <sys/resource.h>
36#include <sys/types.h>
37#include <sys/ucontext.h>
38#include <stdlib.h>
39
40#include <sys/types.h>  // mmap & munmap
41#include <sys/mman.h>   // mmap & munmap
42#include <sys/stat.h>   // open
43#include <sys/fcntl.h>  // open
44#include <unistd.h>     // getpagesize
45// If you don't have execinfo.h then you need devel/libexecinfo from ports.
46#include <execinfo.h>   // backtrace, backtrace_symbols
47#include <strings.h>    // index
48#include <errno.h>
49#include <stdarg.h>
50#include <limits.h>
51
52#undef MAP_TYPE
53
54#include "v8.h"
55#include "v8threads.h"
56
57#include "platform-posix.h"
58#include "platform.h"
59#include "vm-state-inl.h"
60
61
62namespace v8 {
63namespace internal {
64
// 0 is never a valid thread id on FreeBSD since tids and pids share a
// name space and pid 0 is used to kill the group (see man 2 kill).
// Used as the "no thread" sentinel for pthread_t handles below.
static const pthread_t kNoThread = (pthread_t) 0;
68
69
// Rounds x toward positive infinity.  Values strictly between -1 and 0
// yield negative zero, matching the OS X behavior.
double ceiling(double x) {
  return (-1.0 < x && x < 0.0) ? -0.0 : ceil(x);
}
78
79
// Guards the allocated-space limit variables below; created in OS::SetUp().
static Mutex* limit_mutex = NULL;
81
82
// One-time process setup: seeds the libc RNG and creates the mutex that
// protects the allocated-space limits.
void OS::SetUp() {
  // Seed the random number generator.
  // Convert the current time to a 64-bit integer first, before converting it
  // to an unsigned. Going directly can cause an overflow and the seed to be
  // set to all ones. The seed will be identical for different instances that
  // call this setup code within the same millisecond.
  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}
93
94
// Second-stage setup, run after CPU feature detection has completed.
void OS::PostSetUp() {
  // Math functions depend on CPU features therefore they are initialized after
  // CPU.
  MathSetup();
}
100
101
// Store with release semantics: the empty asm is a compiler-level barrier
// that keeps preceding writes from being reordered after this store.  No
// hardware fence is emitted — NOTE(review): this assumes the hardware
// store ordering of the targets built here is sufficient; confirm for ARM.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  __asm__ __volatile__("" : : : "memory");  // Compiler barrier only.
  *ptr = value;
}
106
107
// No CPU features are implied by targeting FreeBSD itself; features are
// detected at runtime instead.
uint64_t OS::CpuFeaturesImpliedByPlatform() {
  return 0;  // FreeBSD runs on anything.
}
111
112
// Stack frame alignment (in bytes) required by the platform ABI.
int OS::ActivationFrameAlignment() {
  // 16 byte alignment on FreeBSD
  return 16;
}
117
118
// Returns the abbreviated local time zone name in effect at `time`
// (milliseconds since epoch), or "" if the time is NaN or cannot be
// converted.  The pointer refers to localtime()'s internal data and is
// only valid until the next localtime() call.
const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}
126
127
// Returns the local standard-time offset from UTC in milliseconds, with
// any daylight-saving adjustment removed so the value is DST-stable.
double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
135
136
137// We keep the lowest and highest addresses mapped as a quick way of
138// determining that pointers are outside the heap (used mostly in assertions
139// and verification).  The estimate is conservative, i.e., not all addresses in
140// 'allocated' space are actually allocated to our heap.  The range is
141// [lowest, highest), inclusive on the low and and exclusive on the high end.
// Initialized to the opposite extremes so the first allocation narrows both.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
144
145
// Widens [lowest_ever_allocated, highest_ever_allocated) to cover
// [address, address + size).  Serialized by limit_mutex.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
155
156
// Conservative containment test: true only if `address` lies outside the
// range ever covered by our allocations (see UpdateAllocatedSpaceLimits).
bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}
160
161
// Allocation granularity: one VM page.
size_t OS::AllocateAlignment() {
  return getpagesize();
}
165
166
// Allocates committed, page-aligned memory of at least `requested` bytes
// via an anonymous mmap.  On success stores the page-rounded size in
// *allocated and returns the base address; returns NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);

  if (mbase == MAP_FAILED) {
    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
182
183
// Releases memory obtained from OS::Allocate.  The munmap result is
// checked in debug builds only; failure would mean the arguments did not
// come from a successful mmap.
void OS::Free(void* buf, const size_t length) {
  int result = munmap(buf, length);
  USE(result);
  ASSERT(result == 0);
}
190
191
192void OS::Sleep(int milliseconds) {
193  unsigned int ms = static_cast<unsigned int>(milliseconds);
194  usleep(1000 * ms);
195}
196
197
// Terminates the process immediately (raises SIGABRT).
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}
202
203
// Emits a breakpoint instruction for the host architecture so an
// attached debugger traps here.
void OS::DebugBreak() {
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
  asm("bkpt 0");
# endif
#else
  asm("int $3");  // x86/x64 software breakpoint.
#endif
}
213
214
// Pairs a stdio FILE with the mmap'ed view of its contents; the
// destructor unmaps the memory and closes the file.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  // Takes ownership of `file` and of the mapping at `memory`.
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Owned; closed by the destructor.
  void* memory_;  // Owned mapping; unmapped by the destructor.
  int size_;      // Size of the mapping in bytes.
};
227
228
229OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
230  FILE* file = fopen(name, "r+");
231  if (file == NULL) return NULL;
232
233  fseek(file, 0, SEEK_END);
234  int size = ftell(file);
235
236  void* memory =
237      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
238  return new PosixMemoryMappedFile(file, memory, size);
239}
240
241
242OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
243    void* initial) {
244  FILE* file = fopen(name, "w+");
245  if (file == NULL) return NULL;
246  int result = fwrite(initial, size, 1, file);
247  if (result < 1) {
248    fclose(file);
249    return NULL;
250  }
251  void* memory =
252      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
253  return new PosixMemoryMappedFile(file, memory, size);
254}
255
256
// Unmaps the file contents (if mapped) and closes the file.
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  // NOTE(review): a MAP_FAILED ((void*)-1) value would pass this NULL
  // check; the factories must not construct the object with one.
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}
261
262
// Parses a leading hexadecimal number (e.g. "0xdeadbeef") from buffer.
// Uses strtoul: with strtol, addresses above LONG_MAX on 32-bit targets
// would saturate at LONG_MAX instead of converting correctly.
static unsigned StringToLong(char* buffer) {
  return static_cast<unsigned>(strtoul(buffer, NULL, 16));  // NOLINT
}
266
267
// Logs the address ranges of executable mappings by hand-parsing
// /proc/self/maps (requires procfs mounted; silently returns otherwise).
void OS::LogSharedLibraryAddresses() {
  static const int MAP_LENGTH = 1024;
  int fd = open("/proc/self/maps", O_RDONLY);
  if (fd < 0) return;
  while (true) {
    // Each line starts with "start-end" as two 8-digit hex numbers.  A
    // "0x" prefix is pre-filled so StringToLong can parse the buffer.
    // NOTE(review): the fixed 8-digit width assumes 32-bit addresses.
    char addr_buffer[11];
    addr_buffer[0] = '0';
    addr_buffer[1] = 'x';
    addr_buffer[10] = 0;
    int result = read(fd, addr_buffer + 2, 8);
    if (result < 8) break;
    unsigned start = StringToLong(addr_buffer);
    result = read(fd, addr_buffer + 2, 1);
    if (result < 1) break;
    if (addr_buffer[2] != '-') break;
    result = read(fd, addr_buffer + 2, 8);
    if (result < 8) break;
    unsigned end = StringToLong(addr_buffer);
    // Read the remainder of the line (permissions, offset, path) one
    // byte at a time, up to MAP_LENGTH - 1 characters.
    char buffer[MAP_LENGTH];
    int bytes_read = -1;
    do {
      bytes_read++;
      if (bytes_read >= MAP_LENGTH - 1)
        break;
      result = read(fd, buffer + bytes_read, 1);
      if (result < 1) break;
    } while (buffer[bytes_read] != '\n');
    buffer[bytes_read] = 0;
    // Ignore mappings that are not executable.
    if (buffer[3] != 'x') continue;
    char* start_of_path = index(buffer, '/');
    // There may be no filename in this line.  Skip to next.
    if (start_of_path == NULL) continue;
    buffer[bytes_read] = 0;  // Redundant: already terminated above.
    LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
  }
  close(fd);
}
306
307
// Hook for notifying profilers that code objects moved during GC; no
// support needed on FreeBSD, so this is a no-op.
void OS::SignalCodeMovingGC() {
}
310
311
// Captures up to frames.length() stack frames of the current thread via
// backtrace(3) and fills in address plus a symbolized text line for
// each.  Returns the number of frames captured, or kStackWalkError if
// symbolization fails.
int OS::StackWalk(Vector<OS::StackFrame> frames) {
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  // backtrace_symbols allocates the whole array with a single malloc.
  free(symbols);

  return frames_count;
}
338
339
// Constants used for mmap: anonymous mappings use fd -1 and offset 0.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
343
// Creates an empty, unreserved VirtualMemory object.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
345
346VirtualMemory::VirtualMemory(size_t size) {
347  address_ = ReserveRegion(size);
348  size_ = size;
349}
350
351
// Reserves `size` bytes aligned to `alignment` by over-reserving
// size + alignment bytes, then unmapping the unaligned prefix and the
// unused suffix.  Leaves the object unreserved if the initial mmap
// fails.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,  // Reserved, not committed.
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After trimming, exactly the aligned block remains mapped.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
390
391
// Releases the reserved region, if any.
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}
399
400
// A region is reserved iff a base address has been recorded.
bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}
404
405
// Forgets the region without unmapping it (ownership is transferred
// elsewhere; the destructor will then do nothing).
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
410
411
// Makes [address, address + size) accessible (read/write, plus execute
// if requested).  Returns false on failure.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
415
416
// Returns committed pages to the reserved (inaccessible) state.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
420
421
// Turns one commit-page at `address` into an inaccessible guard page.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;  // NOTE(review): any failure inside OS::Guard is not propagated.
}
426
427
// Reserves (but does not commit) `size` bytes of address space.
// PROT_NONE + MAP_NORESERVE keeps the range inaccessible and without
// swap backing until CommitRegion remaps it.  Returns NULL on failure.
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}
440
441
// Commits a previously reserved range by remapping it in place
// (MAP_FIXED) with real access permissions, then records the range in
// the allocated-space limits.  Returns false if the mmap fails.
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}
456
457
// Uncommits a range by remapping it in place as inaccessible and
// unbacked, discarding its contents.  Returns true on success.
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}
466
467
// Unmaps the entire region.  Returns true on success.
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}
471
472
// Per-thread platform state: just the underlying pthread handle.
class Thread::PlatformData : public Malloced {
 public:
  pthread_t thread_;  // Thread handle for pthread.
};
477
478
// Creates the thread object; the OS thread is started later by Start().
Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}
484
485
// Frees platform data only; does not join or stop a running thread.
Thread::~Thread() {
  delete data_;
}
489
490
// Entry trampoline handed to pthread_create; records the thread handle
// and runs the Thread's Run() method.
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
501
502
// Copies `name` into the fixed-size name_ buffer, truncating if needed
// and always NUL-terminating (strncpy alone does not guarantee it).
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}
507
508
509void Thread::Start() {
510  pthread_attr_t* attr_ptr = NULL;
511  pthread_attr_t attr;
512  if (stack_size_ > 0) {
513    pthread_attr_init(&attr);
514    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
515    attr_ptr = &attr;
516  }
517  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
518  ASSERT(data_->thread_ != kNoThread);
519}
520
521
// Blocks until the thread's Run() has returned.
void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}
525
526
// Allocates a thread-local storage key (no destructor callback).
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}
534
535
// Releases a key obtained from CreateThreadLocalKey.
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}
542
543
// Reads the calling thread's value for `key` (NULL if never set).
void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}
548
549
// Sets the calling thread's value for `key`.
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}
554
555
// Yields the processor to another runnable thread.
void Thread::YieldCPU() {
  sched_yield();
}
559
560
561class FreeBSDMutex : public Mutex {
562 public:
563  FreeBSDMutex() {
564    pthread_mutexattr_t attrs;
565    int result = pthread_mutexattr_init(&attrs);
566    ASSERT(result == 0);
567    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
568    ASSERT(result == 0);
569    result = pthread_mutex_init(&mutex_, &attrs);
570    ASSERT(result == 0);
571  }
572
573  virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
574
575  virtual int Lock() {
576    int result = pthread_mutex_lock(&mutex_);
577    return result;
578  }
579
580  virtual int Unlock() {
581    int result = pthread_mutex_unlock(&mutex_);
582    return result;
583  }
584
585  virtual bool TryLock() {
586    int result = pthread_mutex_trylock(&mutex_);
587    // Return false if the lock is busy and locking failed.
588    if (result == EBUSY) {
589      return false;
590    }
591    ASSERT(result == 0);  // Verify no other errors.
592    return true;
593  }
594
595 private:
596  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
597};
598
599
// Factory for the platform mutex implementation; caller owns the result.
Mutex* OS::CreateMutex() {
  return new FreeBSDMutex();
}
603
604
// Counting semaphore backed by a POSIX sem_t.
class FreeBSDSemaphore : public Semaphore {
 public:
  // NOTE(review): the sem_init result is not checked.
  explicit FreeBSDSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  // Waits up to `timeout` microseconds; returns false on timeout.
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};
616
617
// Decrements the semaphore, retrying if interrupted by a signal.
void FreeBSDSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
625
626
// Timed wait: `timeout` is in microseconds.  Returns true if the
// semaphore was acquired, false on timeout (or if the current time
// cannot be read).  Retries on signal interruption.
bool FreeBSDSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and microsecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  // sem_timedwait takes an absolute deadline as a timespec.
  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
654
655
// Factory for the platform semaphore; caller owns the result.
Semaphore* OS::CreateSemaphore(int count) {
  return new FreeBSDSemaphore(count);
}
659
660
// Returns the pthread identifier of the calling thread.
static pthread_t GetThreadID() {
  return pthread_self();
}
665
666
// Per-sampler platform data: the pthread id of the thread that created
// the sampler, i.e. the thread SIGPROF will be delivered to.
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};
676
677
// SIGPROF handler: captures a tick sample (VM state plus pc/sp/fp pulled
// from the interrupted thread's machine context) and feeds it to the
// active sampler.  Must stay async-signal-safe in what it touches.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    // Locking is in use but this thread does not hold the lock; sampling
    // here would observe another thread's VM state.
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
  sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
  sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
  sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
  sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
  // On ARM: r15 = pc, r13 = sp, r11 = fp.
  sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
  sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
  sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif
  sampler->SampleStack(sample);
  sampler->Tick(sample);
}
718
719
// Background thread that periodically delivers SIGPROF to sampled VM
// threads (driving ProfilerSignalHandler) and ticks the runtime
// profiler.  A single process-wide instance exists while any sampler is
// active; all static state is guarded by mutex_.
class SignalSender : public Thread {
 public:
  enum SleepInterval {
    HALF_INTERVAL,
    FULL_INTERVAL
  };

  static const int kSignalSenderStackSize = 64 * KB;

  // `interval` is the sampling period in milliseconds.
  explicit SignalSender(int interval)
      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
        interval_(interval) {}

  // Registers a sampler; lazily installs the SIGPROF handler and starts
  // the sender thread on the first registration.
  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_.Pointer());
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      // Install a signal handler.
      struct sigaction sa;
      sa.sa_sigaction = ProfilerSignalHandler;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART | SA_SIGINFO;
      signal_handler_installed_ =
          (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);

      // Start a thread that sends SIGPROF signal to VM threads.
      instance_ = new SignalSender(sampler->interval());
      instance_->Start();
    } else {
      // All samplers are expected to share one sampling interval.
      ASSERT(instance_->interval_ == sampler->interval());
    }
  }

  // Unregisters a sampler; tears down the sender thread and restores the
  // previous SIGPROF handler once no samplers remain.
  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_.Pointer());
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
      delete instance_;
      instance_ = NULL;

      // Restore the old signal handler.
      if (signal_handler_installed_) {
        sigaction(SIGPROF, &old_signal_handler_, 0);
        signal_handler_installed_ = false;
      }
    }
  }

  // Implement Thread::Run().  Loops until every sampler is removed,
  // alternating signal delivery and runtime-profiler ticks.
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled && runtime_profiler_enabled) {
        // Interleave the two activities at half the interval each.
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
          return;
        }
        Sleep(HALF_INTERVAL);
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
          return;
        }
        Sleep(HALF_INTERVAL);
      } else {
        if (cpu_profiling_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
                                                      this)) {
            return;
          }
        }
        if (runtime_profiler_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
                                                      NULL)) {
            return;
          }
        }
        Sleep(FULL_INTERVAL);
      }
    }
  }

  // Callback: sends SIGPROF to the sampler's VM thread if it is profiling.
  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
    if (!sampler->IsProfiling()) return;
    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
  }

  // Callback: ticks the runtime profiler of the sampler's isolate.
  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    if (!sampler->isolate()->IsInitialized()) return;
    sampler->isolate()->runtime_profiler()->NotifyTick();
  }

  // Delivers SIGPROF to `tid`; no-op if the handler was never installed.
  void SendProfilingSignal(pthread_t tid) {
    if (!signal_handler_installed_) return;
    pthread_kill(tid, SIGPROF);
  }

  // Sleeps for the full or half sampling interval.
  void Sleep(SleepInterval full_or_half) {
    // Convert ms to us and subtract 100 us to compensate delays
    // occuring during signal delivery.
    useconds_t interval = interval_ * 1000 - 100;
    if (full_or_half == HALF_INTERVAL) interval /= 2;
    int result = usleep(interval);
#ifdef DEBUG
    if (result != 0 && errno != EINTR) {
      fprintf(stderr,
              "SignalSender usleep error; interval = %u, errno = %d\n",
              interval,
              errno);
      ASSERT(result == 0 || errno == EINTR);
    }
#endif
    USE(result);
  }

  const int interval_;  // Sampling period in milliseconds.
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static LazyMutex mutex_;
  static SignalSender* instance_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
855
// Definitions of SignalSender's process-wide static state.
LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
860
861
// Creates an inactive sampler for `isolate` with the given sampling
// interval (ms); activation happens in Start().
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}
870
871
// The sampler must be stopped (inactive) before destruction.
Sampler::~Sampler() {
  ASSERT(!IsActive());
  delete data_;
}
876
877
// Activates the sampler and registers it with the SignalSender thread.
void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SignalSender::AddActiveSampler(this);
}
883
884
// Unregisters from the SignalSender thread, then deactivates.
void Sampler::Stop() {
  ASSERT(IsActive());
  SignalSender::RemoveActiveSampler(this);
  SetActive(false);
}
890
891
892} }  // namespace v8::internal
893