platform-macos.cc revision 6ded16be15dd865a9b21ea304d5273c8be299c87
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform-specific code for MacOS goes here. For the POSIX-compatible parts
// the implementation is in platform-posix.cc.

#include <unistd.h>
#include <sys/mman.h>
#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>

#include <AvailabilityMacros.h>

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_statistics.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <stdarg.h>
#include <stdlib.h>

#include <errno.h>

#undef MAP_TYPE

#include "v8.h"

#include "platform.h"

// Manually define these here as weak imports, rather than including execinfo.h.
// This lets us launch on 10.4, which does not have these calls.
extern "C" {
  extern int backtrace(void**, int) __attribute__((weak_import));
  extern char** backtrace_symbols(void* const*, int)
      __attribute__((weak_import));
  extern void backtrace_symbols_fd(void* const*, int, int)
      __attribute__((weak_import));
}

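
// Because the declarations above are weak imports, each symbol resolves to
// NULL at runtime on a system that lacks execinfo (i.e. 10.4). Callers must
// therefore NULL-check before use, as OS::StackWalk does below:
//   if (backtrace == NULL) return 0;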

namespace v8 {
namespace internal {

// 0 is never a valid thread id on Mac OS X since a pthread_t is
// a pointer.
static const pthread_t kNoThread = (pthread_t) 0;


double ceiling(double x) {
  // Correct Mac OS X Leopard 'ceil' behavior.
  if (-1.0 < x && x < 0.0) {
    return -0.0;
  } else {
    return ceil(x);
  }
}


void OS::Setup() {
  // Seed the random number generator.
  // Convert the current time to a 64-bit integer first, before converting it
  // to an unsigned. Going directly will cause an overflow and the seed to be
  // set to all ones. The seed will be identical for different instances that
  // call this setup code within the same millisecond.
  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
  srandom(static_cast<unsigned int>(seed));
}


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


size_t OS::AllocateAlignment() {
  return getpagesize();
}


// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the
// user-defined tag 255. This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;


void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot,
                     MAP_PRIVATE | MAP_ANON,
                     kMmapFd, kMmapFdOffset);
  if (mbase == MAP_FAILED) {
    LOG(StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}

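
// Illustrative caller pattern for the allocator above (a sketch, not code
// from this file): the rounded-up size is returned through |allocated| and
// must be passed back to OS::Free().
//   size_t actual;
//   void* block = OS::Allocate(4096, &actual, false /* is_executable */);
//   if (block != NULL) OS::Free(block, actual);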

void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


#ifdef ENABLE_HEAP_PROTECTION

void OS::Protect(void* address, size_t size) {
  UNIMPLEMENTED();
}


void OS::Unprotect(void* address, size_t size, bool is_executable) {
  UNIMPLEMENTED();
}

#endif


void OS::Sleep(int milliseconds) {
  usleep(1000 * milliseconds);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination
  abort();
}


void OS::DebugBreak() {
  asm("int $3");
}


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


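// Creates the file |name|, writes |size| bytes of |initial| data into it and
// maps the file contents as a shared, read/write mapping. Note that the
// result of mmap() is not checked against MAP_FAILED here.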
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  fwrite(initial, size, 1, file);
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}


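// Logs the name and [start, end) code range of every Mach-O image loaded
// into the process, using the dyld image APIs. Compiled in only when
// ENABLE_LOGGING_AND_PROFILING is defined.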
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    LOG(SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
  }
#endif  // ENABLE_LOGGING_AND_PROFILING
}


uint64_t OS::CpuFeaturesImpliedByPlatform() {
  // Mac OS X requires all of these features in order to install, so we can
  // assume they are present. These constants are defined by the cpuid
  // instruction.
  const uint64_t one = 1;
  return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
}


int OS::ActivationFrameAlignment() {
  // OS X activation frames must be 16-byte aligned; see "Mac OS X ABI
  // Function Call Guide".
  return 16;
}


const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}


double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight saving offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}


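// Fills |frames| with up to frames.length() entries describing the current
// call stack, using the weakly imported backtrace() functions declared at
// the top of this file, and returns the number of frames captured.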
int OS::StackWalk(Vector<StackFrame> frames) {
  // If the weak link to the execinfo library has failed (i.e. we are on
  // 10.4), bail out and report no frames.
  if (backtrace == NULL)
    return 0;

  int frames_size = frames.length();
  void** addresses = NewArray<void*>(frames_size);
  int frames_count = backtrace(addresses, frames_size);

  char** symbols;
  symbols = backtrace_symbols(addresses, frames_count);
  if (symbols == NULL) {
    DeleteArray(addresses);
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text,
                               kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  DeleteArray(addresses);
  free(symbols);

  return frames_count;
}


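// Reserves |size| bytes of address space without committing it: the region
// is mapped PROT_NONE with MAP_NORESERVE, so it only becomes usable after a
// later call to Commit().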
VirtualMemory::VirtualMemory(size_t size) {
  address_ = mmap(NULL, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd, kMmapFdOffset) != MAP_FAILED;
}

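
// Illustrative reserve/commit sequence for the class above (a sketch, not
// code from this file):
//   VirtualMemory reservation(1024 * 1024);
//   if (reservation.IsReserved() &&
//       reservation.Commit(reservation.address(), OS::AllocateAlignment(),
//                          false /* not executable */)) {
//     // The first page of the reservation is now readable and writable.
//   }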

class ThreadHandle::PlatformData : public Malloced {
 public:
  explicit PlatformData(ThreadHandle::Kind kind) {
    Initialize(kind);
  }

  void Initialize(ThreadHandle::Kind kind) {
    switch (kind) {
      case ThreadHandle::SELF: thread_ = pthread_self(); break;
      case ThreadHandle::INVALID: thread_ = kNoThread; break;
    }
  }
  pthread_t thread_;  // Thread handle for pthread.
};


ThreadHandle::ThreadHandle(Kind kind) {
  data_ = new PlatformData(kind);
}


void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}


ThreadHandle::~ThreadHandle() {
  delete data_;
}


bool ThreadHandle::IsSelf() const {
  return pthread_equal(data_->thread_, pthread_self());
}


bool ThreadHandle::IsValid() const {
  return data_->thread_ != kNoThread;
}


Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
}


Thread::~Thread() {
}


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->thread_handle_data()->thread_ = pthread_self();
  ASSERT(thread->IsValid());
  thread->Run();
  return NULL;
}


void Thread::Start() {
  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
}


void Thread::Join() {
  pthread_join(thread_handle_data()->thread_, NULL);
}


Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}

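
// Illustrative use of the thread-local storage wrappers above (a sketch;
// |my_key| and |data| are hypothetical):
//   static Thread::LocalStorageKey my_key = Thread::CreateThreadLocalKey();
//   Thread::SetThreadLocal(my_key, data);
//   void* stored = Thread::GetThreadLocal(my_key);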

void Thread::YieldCPU() {
  sched_yield();
}


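// Mutex backed by a recursive pthread mutex, so the thread that already
// holds it may lock it again without deadlocking.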
class MacOSMutex : public Mutex {
 public:
  MacOSMutex() {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex_, &attr);
  }

  ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }

  int Lock() { return pthread_mutex_lock(&mutex_); }

  int Unlock() { return pthread_mutex_unlock(&mutex_); }

 private:
  pthread_mutex_t mutex_;
};


Mutex* OS::CreateMutex() {
  return new MacOSMutex();
}


class MacOSSemaphore : public Semaphore {
 public:
  explicit MacOSSemaphore(int count) {
    semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);
  }

  ~MacOSSemaphore() {
    semaphore_destroy(mach_task_self(), semaphore_);
  }

  // The Mac OS X Mach semaphore documentation claims it does not have
  // spurious wakeups the way pthreads semaphores do, so the code from the
  // Linux platform is not needed here.
  void Wait() { semaphore_wait(semaphore_); }

  bool Wait(int timeout);

  void Signal() { semaphore_signal(semaphore_); }

 private:
  semaphore_t semaphore_;
};


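// Note: the conversion below treats |timeout| as microseconds; this is an
// inference from the 1000000 divisor, as the unit is not documented here.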
bool MacOSSemaphore::Wait(int timeout) {
  mach_timespec_t ts;
  ts.tv_sec = timeout / 1000000;
  ts.tv_nsec = (timeout % 1000000) * 1000;
  return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
}


Semaphore* OS::CreateSemaphore(int count) {
  return new MacOSSemaphore(count);
}

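
// Illustrative use of the semaphore factory above (a sketch, not code from
// this file):
//   Semaphore* ready = OS::CreateSemaphore(0);
//   ready->Signal();  // Producer side.
//   ready->Wait();    // Consumer side blocks until signaled.
//   delete ready;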

#ifdef ENABLE_LOGGING_AND_PROFILING

class Sampler::PlatformData : public Malloced {
 public:
  explicit PlatformData(Sampler* sampler)
      : sampler_(sampler),
        task_self_(mach_task_self()),
        profiled_thread_(0),
        sampler_thread_(0) {
  }

  Sampler* sampler_;
  // Note: for profiled_thread_, Mach primitives are used instead of pthreads
  // because the latter do not provide the required thread manipulation
  // primitives. For details, consult the "Mac OS X Internals" book,
  // Section 7.3.
  mach_port_t task_self_;
  thread_act_t profiled_thread_;
  pthread_t sampler_thread_;

  // Sampler thread handler.
  void Runner() {
    // Loop until the sampler is disengaged, keeping the specified sampling
    // frequency.
    for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
      TickSample sample_obj;
      TickSample* sample = CpuProfiler::TickSampleEvent();
      if (sample == NULL) sample = &sample_obj;

      // We always sample the VM state.
      sample->state = VMState::current_state();
      // If profiling, we record the pc and sp of the profiled thread.
      if (sampler_->IsProfiling()
          && KERN_SUCCESS == thread_suspend(profiled_thread_)) {
#if V8_HOST_ARCH_X64
        thread_state_flavor_t flavor = x86_THREAD_STATE64;
        x86_thread_state64_t state;
        mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __r ## name
#else
#define REGISTER_FIELD(name) r ## name
#endif  // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
        thread_state_flavor_t flavor = i386_THREAD_STATE;
        i386_thread_state_t state;
        mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __e ## name
#else
#define REGISTER_FIELD(name) e ## name
#endif  // __DARWIN_UNIX03
#else
#error Unsupported Mac OS X host architecture.
#endif  // V8_HOST_ARCH

        if (thread_get_state(profiled_thread_,
                             flavor,
                             reinterpret_cast<natural_t*>(&state),
                             &count) == KERN_SUCCESS) {
          sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
          sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
          sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
          sampler_->SampleStack(sample);
        }
        thread_resume(profiled_thread_);
      }

      // Invoke tick handler with program counter and stack pointer.
      sampler_->Tick(sample);
    }
  }
};

#undef REGISTER_FIELD


// Entry point for sampler thread.
static void* SamplerEntry(void* arg) {
  Sampler::PlatformData* data =
      reinterpret_cast<Sampler::PlatformData*>(arg);
  data->Runner();
  return 0;
}


Sampler::Sampler(int interval, bool profiling)
    : interval_(interval), profiling_(profiling), active_(false) {
  data_ = new PlatformData(this);
}


Sampler::~Sampler() {
  delete data_;
}


void Sampler::Start() {
  // If we are profiling, we need to be able to access the calling
  // thread.
  if (IsProfiling()) {
    data_->profiled_thread_ = mach_thread_self();
  }

  // Create sampler thread with high priority.
  // According to the POSIX spec, when the SCHED_FIFO policy is used, a
  // thread runs until it exits or blocks.
  pthread_attr_t sched_attr;
  sched_param fifo_param;
  pthread_attr_init(&sched_attr);
  pthread_attr_setinheritsched(&sched_attr, PTHREAD_EXPLICIT_SCHED);
  pthread_attr_setschedpolicy(&sched_attr, SCHED_FIFO);
  fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO);
  pthread_attr_setschedparam(&sched_attr, &fifo_param);

  active_ = true;
  pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_);
}


void Sampler::Stop() {
  // Setting active to false triggers termination of the sampler
  // thread.
  active_ = false;

  // Wait for sampler thread to terminate.
  pthread_join(data_->sampler_thread_, NULL);

  // Deallocate Mach port for thread.
  if (IsProfiling()) {
    mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
  }
}

#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal