1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// Platform-specific code for MacOS goes here. For the POSIX-compatible
6// parts, the implementation is in platform-posix.cc.
7
8#include <dlfcn.h>
9#include <unistd.h>
10#include <sys/mman.h>
11#include <mach/mach_init.h>
12#include <mach-o/dyld.h>
13#include <mach-o/getsect.h>
14
15#include <AvailabilityMacros.h>
16
17#include <pthread.h>
18#include <semaphore.h>
19#include <signal.h>
20#include <libkern/OSAtomic.h>
21#include <mach/mach.h>
22#include <mach/semaphore.h>
23#include <mach/task.h>
24#include <mach/vm_statistics.h>
25#include <sys/time.h>
26#include <sys/resource.h>
27#include <sys/types.h>
28#include <sys/sysctl.h>
29#include <stdarg.h>
30#include <stdlib.h>
31#include <string.h>
32#include <errno.h>
33
34#undef MAP_TYPE
35
36#include "src/v8.h"
37
38#include "src/platform.h"
39
40
41namespace v8 {
42namespace internal {
43
44
// Constants used for mmap.
// kMmapFd passes a VM_MAKE_TAG value in place of a file descriptor so that
// anonymous mappings are tagged with the user-defined tag 255.  This helps
// identify V8-allocated regions in memory analysis tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;
51
52
53void* OS::Allocate(const size_t requested,
54                   size_t* allocated,
55                   bool is_executable) {
56  const size_t msize = RoundUp(requested, getpagesize());
57  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
58  void* mbase = mmap(OS::GetRandomMmapAddr(),
59                     msize,
60                     prot,
61                     MAP_PRIVATE | MAP_ANON,
62                     kMmapFd,
63                     kMmapFdOffset);
64  if (mbase == MAP_FAILED) return NULL;
65  *allocated = msize;
66  return mbase;
67}
68
69
// Memory-mapped file backed by a stdio FILE*.  Takes ownership of the
// FILE* and the mapping; both are released by the destructor (defined
// further down in this file).
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  // |file| is the open stream; |memory| is the mmap'ed view of its
  // contents; |size| is the length of the mapping in bytes.
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Owned; closed in the destructor.
  void* memory_;  // Owned mapping; freed in the destructor when non-NULL.
  int size_;      // Mapping length in bytes.
};
82
83
84OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
85  FILE* file = fopen(name, "r+");
86  if (file == NULL) return NULL;
87
88  fseek(file, 0, SEEK_END);
89  int size = ftell(file);
90
91  void* memory =
92      mmap(OS::GetRandomMmapAddr(),
93           size,
94           PROT_READ | PROT_WRITE,
95           MAP_SHARED,
96           fileno(file),
97           0);
98  return new PosixMemoryMappedFile(file, memory, size);
99}
100
101
102OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
103    void* initial) {
104  FILE* file = fopen(name, "w+");
105  if (file == NULL) return NULL;
106  int result = fwrite(initial, size, 1, file);
107  if (result < 1) {
108    fclose(file);
109    return NULL;
110  }
111  void* memory =
112      mmap(OS::GetRandomMmapAddr(),
113          size,
114          PROT_READ | PROT_WRITE,
115          MAP_SHARED,
116          fileno(file),
117          0);
118  return new PosixMemoryMappedFile(file, memory, size);
119}
120
121
122PosixMemoryMappedFile::~PosixMemoryMappedFile() {
123  if (memory_) OS::Free(memory_, size_);
124  fclose(file_);
125}
126
127
// Enumerates every Mach-O image loaded by dyld and reports the in-memory
// extent of each image's __TEXT,__text section (adjusted by the ASLR
// slide), keyed by the image path.
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  std::vector<SharedLibraryAddress> result;
  const unsigned int image_count = _dyld_image_count();
  for (unsigned int image = 0; image < image_count; ++image) {
    const mach_header* header = _dyld_get_image_header(image);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t text_size;
    char* text = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &text_size);
#else
    unsigned int text_size;
    char* text = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT,
                                       &text_size);
#endif
    if (text == NULL) continue;
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(image);
    const uintptr_t start = reinterpret_cast<uintptr_t>(text) + slide;
    result.push_back(SharedLibraryAddress(_dyld_get_image_name(image),
                                          start, start + text_size));
  }
  return result;
}
153
154
// Intentionally a no-op on MacOS.
void OS::SignalCodeMovingGC() {
}
157
158
159const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
160  if (std::isnan(time)) return "";
161  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
162  struct tm* t = localtime(&tv);
163  if (NULL == t) return "";
164  return t->tm_zone;
165}
166
167
168double OS::LocalTimeOffset(TimezoneCache* cache) {
169  time_t tv = time(NULL);
170  struct tm* t = localtime(&tv);
171  // tm_gmtoff includes any daylight savings offset, so subtract it.
172  return static_cast<double>(t->tm_gmtoff * msPerSecond -
173                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
174}
175
176
// Creates an empty VirtualMemory object with no reservation.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
179
// Reserves (but does not commit) |size| bytes of address space; on
// failure ReserveRegion returns NULL and the object reports unreserved.
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }
182
183
184VirtualMemory::VirtualMemory(size_t size, size_t alignment)
185    : address_(NULL), size_(0) {
186  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
187  size_t request_size = RoundUp(size + alignment,
188                                static_cast<intptr_t>(OS::AllocateAlignment()));
189  void* reservation = mmap(OS::GetRandomMmapAddr(),
190                           request_size,
191                           PROT_NONE,
192                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
193                           kMmapFd,
194                           kMmapFdOffset);
195  if (reservation == MAP_FAILED) return;
196
197  Address base = static_cast<Address>(reservation);
198  Address aligned_base = RoundUp(base, alignment);
199  ASSERT_LE(base, aligned_base);
200
201  // Unmap extra memory reserved before and after the desired block.
202  if (aligned_base != base) {
203    size_t prefix_size = static_cast<size_t>(aligned_base - base);
204    OS::Free(base, prefix_size);
205    request_size -= prefix_size;
206  }
207
208  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
209  ASSERT_LE(aligned_size, request_size);
210
211  if (aligned_size != request_size) {
212    size_t suffix_size = request_size - aligned_size;
213    OS::Free(aligned_base + aligned_size, suffix_size);
214    request_size -= suffix_size;
215  }
216
217  ASSERT(aligned_size == request_size);
218
219  address_ = static_cast<void*>(aligned_base);
220  size_ = aligned_size;
221}
222
223
224VirtualMemory::~VirtualMemory() {
225  if (IsReserved()) {
226    bool result = ReleaseRegion(address(), size());
227    ASSERT(result);
228    USE(result);
229  }
230}
231
232
233bool VirtualMemory::IsReserved() {
234  return address_ != NULL;
235}
236
237
238void VirtualMemory::Reset() {
239  address_ = NULL;
240  size_ = 0;
241}
242
243
// Commits |size| bytes at |address| with read/write (and optionally
// execute) permissions; delegates to CommitRegion.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
247
248
// Returns |size| bytes at |address| to the reserved-but-uncommitted
// state; delegates to UncommitRegion.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
252
253
// Turns one commit page at |address| into an inaccessible guard page.
// Always reports success on this platform.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}
258
259
260void* VirtualMemory::ReserveRegion(size_t size) {
261  void* result = mmap(OS::GetRandomMmapAddr(),
262                      size,
263                      PROT_NONE,
264                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
265                      kMmapFd,
266                      kMmapFdOffset);
267
268  if (result == MAP_FAILED) return NULL;
269
270  return result;
271}
272
273
274bool VirtualMemory::CommitRegion(void* address,
275                                 size_t size,
276                                 bool is_executable) {
277  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
278  if (MAP_FAILED == mmap(address,
279                         size,
280                         prot,
281                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
282                         kMmapFd,
283                         kMmapFdOffset)) {
284    return false;
285  }
286  return true;
287}
288
289
290bool VirtualMemory::UncommitRegion(void* address, size_t size) {
291  return mmap(address,
292              size,
293              PROT_NONE,
294              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
295              kMmapFd,
296              kMmapFdOffset) != MAP_FAILED;
297}
298
299
300bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
301  return munmap(address, size) == 0;
302}
303
304
// The MacOS port does not support lazy commits.
bool VirtualMemory::HasLazyCommits() {
  return false;
}
308
309} }  // namespace v8::internal
310