1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
// Platform specific code for MacOS goes here. For the POSIX compatible parts
29// the implementation is in platform-posix.cc.
30
#include <dlfcn.h>
#include <unistd.h>
#include <sys/mman.h>
#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>

#include <AvailabilityMacros.h>

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_statistics.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>  // For localtime_r().
#include <errno.h>
#include <cxxabi.h>
57
58#undef MAP_TYPE
59
60#include "v8.h"
61
62#include "platform-posix.h"
63#include "platform.h"
64#include "simulator.h"
65#include "vm-state-inl.h"
66
67// Manually define these here as weak imports, rather than including execinfo.h.
68// This lets us launch on 10.4 which does not have these calls.
69extern "C" {
70  extern int backtrace(void**, int) __attribute__((weak_import));
71  extern char** backtrace_symbols(void* const*, int)
72      __attribute__((weak_import));
73  extern void backtrace_symbols_fd(void* const*, int, int)
74      __attribute__((weak_import));
75}
76
77
78namespace v8 {
79namespace internal {
80
81
// Protects the allocated-space limit globals below; created in OS::SetUp()
// and destroyed in OS::TearDown().
static Mutex* limit_mutex = NULL;


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
92
93
94static void UpdateAllocatedSpaceLimits(void* address, int size) {
95  ASSERT(limit_mutex != NULL);
96  ScopedLock lock(limit_mutex);
97
98  lowest_ever_allocated = Min(lowest_ever_allocated, address);
99  highest_ever_allocated =
100      Max(highest_ever_allocated,
101          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
102}
103
104
105bool OS::IsOutsideAllocatedSpace(void* address) {
106  return address < lowest_ever_allocated || address >= highest_ever_allocated;
107}
108
109
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255.  This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
// All mappings are anonymous, so the file offset is always zero.
static const off_t kMmapFdOffset = 0;
116
117
118void* OS::Allocate(const size_t requested,
119                   size_t* allocated,
120                   bool is_executable) {
121  const size_t msize = RoundUp(requested, getpagesize());
122  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
123  void* mbase = mmap(OS::GetRandomMmapAddr(),
124                     msize,
125                     prot,
126                     MAP_PRIVATE | MAP_ANON,
127                     kMmapFd,
128                     kMmapFdOffset);
129  if (mbase == MAP_FAILED) {
130    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
131    return NULL;
132  }
133  *allocated = msize;
134  UpdateAllocatedSpaceLimits(mbase, msize);
135  return mbase;
136}
137
138
139void OS::DumpBacktrace() {
140  // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
141  if (backtrace == NULL) return;
142
143  POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
144}
145
146
// Pairs a stdio FILE* with the mmap()ed view of its contents; the mapping is
// freed and the file closed in the destructor (defined below).
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  // Takes ownership of |file| and of the |size|-byte mapping at |memory|.
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Owned; closed in the destructor.
  void* memory_;  // Mapping of file_'s contents; freed in the destructor.
  int size_;      // Size of the mapping in bytes.
};
159
160
161OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
162  FILE* file = fopen(name, "r+");
163  if (file == NULL) return NULL;
164
165  fseek(file, 0, SEEK_END);
166  int size = ftell(file);
167
168  void* memory =
169      mmap(OS::GetRandomMmapAddr(),
170           size,
171           PROT_READ | PROT_WRITE,
172           MAP_SHARED,
173           fileno(file),
174           0);
175  return new PosixMemoryMappedFile(file, memory, size);
176}
177
178
179OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
180    void* initial) {
181  FILE* file = fopen(name, "w+");
182  if (file == NULL) return NULL;
183  int result = fwrite(initial, size, 1, file);
184  if (result < 1) {
185    fclose(file);
186    return NULL;
187  }
188  void* memory =
189      mmap(OS::GetRandomMmapAddr(),
190          size,
191          PROT_READ | PROT_WRITE,
192          MAP_SHARED,
193          fileno(file),
194          0);
195  return new PosixMemoryMappedFile(file, memory, size);
196}
197
198
199PosixMemoryMappedFile::~PosixMemoryMappedFile() {
200  if (memory_) OS::Free(memory_, size_);
201  fclose(file_);
202}
203
204
// Emits a SharedLibraryEvent for the __TEXT section of every Mach-O image
// loaded into the process, so addresses inside shared libraries can be
// attributed during log analysis.
void OS::LogSharedLibraryAddresses() {
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    // 64-bit images use the mach_header_64 layout and 64-bit section sizes.
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    // getsectdatafromheader returns an unslid address; add the image's
    // virtual-memory slide to obtain the actual load address.
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    LOG(Isolate::Current(),
        SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
  }
}
228
229
// Intentionally a no-op on this platform.
void OS::SignalCodeMovingGC() {
}
232
233
234const char* OS::LocalTimezone(double time) {
235  if (std::isnan(time)) return "";
236  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
237  struct tm* t = localtime(&tv);
238  if (NULL == t) return "";
239  return t->tm_zone;
240}
241
242
243double OS::LocalTimeOffset() {
244  time_t tv = time(NULL);
245  struct tm* t = localtime(&tv);
246  // tm_gmtoff includes any daylight savings offset, so subtract it.
247  return static_cast<double>(t->tm_gmtoff * msPerSecond -
248                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
249}
250
251
252int OS::StackWalk(Vector<StackFrame> frames) {
253  // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
254  if (backtrace == NULL) return 0;
255
256  return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
257}
258
259
// Constructs an empty object with no reservation (IsReserved() is false).
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
261
262
// Reserves |size| bytes of address space; check IsReserved() for success.
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }
265
266
// Reserves |size| bytes whose base address is a multiple of |alignment| by
// over-reserving |size + alignment| bytes and unmapping the unaligned prefix
// and excess suffix.  On failure the object is left unreserved.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE + MAP_NORESERVE: claim address space only; pages are committed
  // later through CommitRegion().
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After trimming, exactly the aligned block [aligned_base, +aligned_size)
  // must remain.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
305
306
307VirtualMemory::~VirtualMemory() {
308  if (IsReserved()) {
309    bool result = ReleaseRegion(address(), size());
310    ASSERT(result);
311    USE(result);
312  }
313}
314
315
// Returns true when this object currently holds a reserved region.
bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}
319
320
// Forgets the current region without unmapping it; the destructor will then
// not release it.
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
325
326
// Commits |size| bytes at |address| within the reservation; see CommitRegion.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
330
331
// Uncommits |size| bytes at |address|, keeping the address space reserved.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
335
336
// Turns one commit-page at |address| into an inaccessible guard page.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;  // OS::Guard reports no failure.
}
341
342
343void* VirtualMemory::ReserveRegion(size_t size) {
344  void* result = mmap(OS::GetRandomMmapAddr(),
345                      size,
346                      PROT_NONE,
347                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
348                      kMmapFd,
349                      kMmapFdOffset);
350
351  if (result == MAP_FAILED) return NULL;
352
353  return result;
354}
355
356
357bool VirtualMemory::CommitRegion(void* address,
358                                 size_t size,
359                                 bool is_executable) {
360  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
361  if (MAP_FAILED == mmap(address,
362                         size,
363                         prot,
364                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
365                         kMmapFd,
366                         kMmapFdOffset)) {
367    return false;
368  }
369
370  UpdateAllocatedSpaceLimits(address, size);
371  return true;
372}
373
374
375bool VirtualMemory::UncommitRegion(void* address, size_t size) {
376  return mmap(address,
377              size,
378              PROT_NONE,
379              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
380              kMmapFd,
381              kMmapFdOffset) != MAP_FAILED;
382}
383
384
// Unmaps the whole region; returns true on success.
bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}
388
389
// Lazy commits are not supported on this platform.
bool VirtualMemory::HasLazyCommits() {
  return false;
}
393
394
// Counting semaphore built on the Mach semaphore_* kernel API.
class MacOSSemaphore : public Semaphore {
 public:
  // Creates a FIFO-ordered semaphore with the given initial count.
  explicit MacOSSemaphore(int count) {
    int r;
    r = semaphore_create(mach_task_self(),
                         &semaphore_,
                         SYNC_POLICY_FIFO,
                         count);
    ASSERT(r == KERN_SUCCESS);
  }

  ~MacOSSemaphore() {
    int r;
    r = semaphore_destroy(mach_task_self(), semaphore_);
    ASSERT(r == KERN_SUCCESS);
  }

  // Blocks until the semaphore can be decremented.
  void Wait() {
    int r;
    do {
      r = semaphore_wait(semaphore_);
      // KERN_ABORTED indicates the wait was interrupted; retry until the
      // decrement actually succeeds.
      ASSERT(r == KERN_SUCCESS || r == KERN_ABORTED);
    } while (r == KERN_ABORTED);
  }

  // Waits for at most |timeout| microseconds; defined out of line below.
  bool Wait(int timeout);

  void Signal() { semaphore_signal(semaphore_); }

 private:
  semaphore_t semaphore_;
};
427
428
429bool MacOSSemaphore::Wait(int timeout) {
430  mach_timespec_t ts;
431  ts.tv_sec = timeout / 1000000;
432  ts.tv_nsec = (timeout % 1000000) * 1000;
433  return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
434}
435
436
// Factory for the platform semaphore implementation; caller owns the result.
Semaphore* OS::CreateSemaphore(int count) {
  return new MacOSSemaphore(count);
}
440
441
// One-time platform initialization: seeds random() and creates the mutex
// guarding the allocated-space limit globals.
void OS::SetUp() {
  // Seed the random number generator. We preserve microsecond resolution.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}
448
449
450void OS::TearDown() {
451  delete limit_mutex;
452}
453
454
455} }  // namespace v8::internal
456