// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for MacOS goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.
#include <dlfcn.h>
#include <unistd.h>
#include <sys/mman.h>
#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>

#include <AvailabilityMacros.h>

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_statistics.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

// sys/mman.h defines MAP_TYPE, which collides with a V8 identifier —
// presumably the MAP_TYPE instance type; TODO confirm against objects.h.
#undef MAP_TYPE

#include "v8.h"

#include "platform-posix.h"
#include "platform.h"
#include "vm-state-inl.h"

// Manually define these here as weak imports, rather than including execinfo.h.
// This lets us launch on 10.4 which does not have these calls.
// A weak import resolves to NULL when the symbol is absent at load time,
// which OS::StackWalk checks for before using them.
extern "C" {
  extern int backtrace(void**, int) __attribute__((weak_import));
  extern char** backtrace_symbols(void* const*, int)
      __attribute__((weak_import));
  extern void backtrace_symbols_fd(void* const*, int, int)
      __attribute__((weak_import));
}


namespace v8 {
namespace internal {

// 0 is never a valid thread id on MacOSX since a pthread_t is
// a pointer.
static const pthread_t kNoThread = (pthread_t) 0;


// Ceiling with a workaround for a platform quirk: returns -0.0 (not 0.0)
// for inputs in (-1, 0), which 'ceil' on Mac OS X Leopard got wrong.
double ceiling(double x) {
  // Correct Mac OS X Leopard 'ceil' behavior.
  if (-1.0 < x && x < 0.0) {
    return -0.0;
  } else {
    return ceil(x);
  }
}


// Guards updates to the allocated-space limits below.
static Mutex* limit_mutex = NULL;


// One-time process setup: seeds random() (used e.g. for mmap address
// randomization) and creates the limit mutex.
void OS::SetUp() {
  // Seed the random number generator. We preserve microsecond resolution.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}


void OS::PostSetUp() {
  // Math functions depend on CPU features therefore they are initialized after
  // CPU.
  MathSetup();
}


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


// Widens the [lowest, highest) envelope to cover a newly mapped region.
// Takes limit_mutex; callers must have run OS::SetUp() first.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


// Conservative check: true only if |address| lies outside everything V8
// has ever mapped (false positives for non-heap addresses are possible).
bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


size_t OS::AllocateAlignment() {
  return getpagesize();
}


// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255 This helps identify V8-allocated regions in memory analysis
// tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;


// Maps |requested| bytes (rounded up to page size) of anonymous memory at a
// randomized address. On success stores the actual size in *allocated and
// returns the base; on failure logs and returns NULL.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ?
PROT_EXEC : 0); 155 void* mbase = mmap(OS::GetRandomMmapAddr(), 156 msize, 157 prot, 158 MAP_PRIVATE | MAP_ANON, 159 kMmapFd, 160 kMmapFdOffset); 161 if (mbase == MAP_FAILED) { 162 LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); 163 return NULL; 164 } 165 *allocated = msize; 166 UpdateAllocatedSpaceLimits(mbase, msize); 167 return mbase; 168} 169 170 171void OS::Free(void* address, const size_t size) { 172 // TODO(1240712): munmap has a return value which is ignored here. 173 int result = munmap(address, size); 174 USE(result); 175 ASSERT(result == 0); 176} 177 178 179void OS::Sleep(int milliseconds) { 180 usleep(1000 * milliseconds); 181} 182 183 184void OS::Abort() { 185 // Redirect to std abort to signal abnormal program termination 186 abort(); 187} 188 189 190void OS::DebugBreak() { 191 asm("int $3"); 192} 193 194 195class PosixMemoryMappedFile : public OS::MemoryMappedFile { 196 public: 197 PosixMemoryMappedFile(FILE* file, void* memory, int size) 198 : file_(file), memory_(memory), size_(size) { } 199 virtual ~PosixMemoryMappedFile(); 200 virtual void* memory() { return memory_; } 201 virtual int size() { return size_; } 202 private: 203 FILE* file_; 204 void* memory_; 205 int size_; 206}; 207 208 209OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { 210 FILE* file = fopen(name, "r+"); 211 if (file == NULL) return NULL; 212 213 fseek(file, 0, SEEK_END); 214 int size = ftell(file); 215 216 void* memory = 217 mmap(OS::GetRandomMmapAddr(), 218 size, 219 PROT_READ | PROT_WRITE, 220 MAP_SHARED, 221 fileno(file), 222 0); 223 return new PosixMemoryMappedFile(file, memory, size); 224} 225 226 227OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, 228 void* initial) { 229 FILE* file = fopen(name, "w+"); 230 if (file == NULL) return NULL; 231 int result = fwrite(initial, size, 1, file); 232 if (result < 1) { 233 fclose(file); 234 return NULL; 235 } 236 void* memory = 237 mmap(OS::GetRandomMmapAddr(), 
238 size, 239 PROT_READ | PROT_WRITE, 240 MAP_SHARED, 241 fileno(file), 242 0); 243 return new PosixMemoryMappedFile(file, memory, size); 244} 245 246 247PosixMemoryMappedFile::~PosixMemoryMappedFile() { 248 if (memory_) OS::Free(memory_, size_); 249 fclose(file_); 250} 251 252 253void OS::LogSharedLibraryAddresses() { 254 unsigned int images_count = _dyld_image_count(); 255 for (unsigned int i = 0; i < images_count; ++i) { 256 const mach_header* header = _dyld_get_image_header(i); 257 if (header == NULL) continue; 258#if V8_HOST_ARCH_X64 259 uint64_t size; 260 char* code_ptr = getsectdatafromheader_64( 261 reinterpret_cast<const mach_header_64*>(header), 262 SEG_TEXT, 263 SECT_TEXT, 264 &size); 265#else 266 unsigned int size; 267 char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); 268#endif 269 if (code_ptr == NULL) continue; 270 const uintptr_t slide = _dyld_get_image_vmaddr_slide(i); 271 const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide; 272 LOG(Isolate::Current(), 273 SharedLibraryEvent(_dyld_get_image_name(i), start, start + size)); 274 } 275} 276 277 278void OS::SignalCodeMovingGC() { 279} 280 281 282uint64_t OS::CpuFeaturesImpliedByPlatform() { 283 // MacOSX requires all these to install so we can assume they are present. 284 // These constants are defined by the CPUid instructions. 285 const uint64_t one = 1; 286 return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID); 287} 288 289 290int OS::ActivationFrameAlignment() { 291 // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI 292 // Function Call Guide". 
  return 16;
}


// Publishes |value| with release semantics: the preceding barrier makes all
// earlier writes visible before the store to *ptr.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  OSMemoryBarrier();
  *ptr = value;
}


// Returns the local time zone abbreviation for the given time (ms since
// epoch), or "" if the time is NaN or cannot be converted.
const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}


// Returns the standard (non-DST) offset of local time from UTC, in ms.
double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}


// Captures and symbolizes the current native stack into |frames|.
// Returns the number of frames captured, 0 when backtrace() is unavailable
// (weak import unresolved, e.g. on 10.4), or kStackWalkError on failure.
int OS::StackWalk(Vector<StackFrame> frames) {
  // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
  if (backtrace == NULL)
    return 0;

  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  // backtrace_symbols mallocs one block holding all strings; freed below.
  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text,
                               kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);

  return frames_count;
}


// Empty reservation; Reset()/ReserveRegion() style default.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


// Reserves (but does not commit) |size| bytes of address space.
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


// Reserves |size| bytes aligned to |alignment| by over-reserving
// size + alignment bytes and then trimming the excess below. On mmap
// failure the object is left unreserved (address_ == NULL).
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE + MAP_NORESERVE: reserve address space only, no backing store.
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    // Drop the unaligned prefix of the over-sized reservation.
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    // Drop whatever remains past the requested (page-rounded) size.
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}


// Releases the reservation, if any.
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


// Forgets the reservation without unmapping it (ownership transferred).
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


// Reserves |size| bytes of address space with no access and no backing
// store. Returns NULL on failure.
void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


// Turns one page at |address| into a guard page (no access).
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


// Commits a sub-range of a reservation by re-mapping it MAP_FIXED with
// read/write (and optionally execute) protection.
bool VirtualMemory::CommitRegion(void* address,
                                 size_t size,
                                 bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ?
                                       PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


// Uncommits by re-mapping the range PROT_NONE/MAP_NORESERVE in place,
// releasing the backing store while keeping the address range reserved.
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
  return mmap(address,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}


// Per-thread platform state: just the pthread handle.
class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
};


Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}


// Names the calling thread for debuggers/profilers. pthread_setname_np is
// looked up dynamically because it only exists on 10.6+.
static void SetThreadName(const char* name) {
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
    dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (!dynamic_pthread_setname_np)
    return;

  // Mac OS X does not expose the length limit of the name, so hardcode it.
513 static const int kMaxNameLength = 63; 514 USE(kMaxNameLength); 515 ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength); 516 dynamic_pthread_setname_np(name); 517} 518 519 520static void* ThreadEntry(void* arg) { 521 Thread* thread = reinterpret_cast<Thread*>(arg); 522 // This is also initialized by the first argument to pthread_create() but we 523 // don't know which thread will run first (the original thread or the new 524 // one) so we initialize it here too. 525 thread->data()->thread_ = pthread_self(); 526 SetThreadName(thread->name()); 527 ASSERT(thread->data()->thread_ != kNoThread); 528 thread->Run(); 529 return NULL; 530} 531 532 533void Thread::set_name(const char* name) { 534 strncpy(name_, name, sizeof(name_)); 535 name_[sizeof(name_) - 1] = '\0'; 536} 537 538 539void Thread::Start() { 540 pthread_attr_t* attr_ptr = NULL; 541 pthread_attr_t attr; 542 if (stack_size_ > 0) { 543 pthread_attr_init(&attr); 544 pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_)); 545 attr_ptr = &attr; 546 } 547 pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this); 548 ASSERT(data_->thread_ != kNoThread); 549} 550 551 552void Thread::Join() { 553 pthread_join(data_->thread_, NULL); 554} 555 556 557#ifdef V8_FAST_TLS_SUPPORTED 558 559static Atomic32 tls_base_offset_initialized = 0; 560intptr_t kMacTlsBaseOffset = 0; 561 562// It's safe to do the initialization more that once, but it has to be 563// done at least once. 564static void InitializeTlsBaseOffset() { 565 const size_t kBufferSize = 128; 566 char buffer[kBufferSize]; 567 size_t buffer_size = kBufferSize; 568 int ctl_name[] = { CTL_KERN , KERN_OSRELEASE }; 569 if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) { 570 V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version"); 571 } 572 // The buffer now contains a string of the form XX.YY.ZZ, where 573 // XX is the major kernel version component. 574 // Make sure the buffer is 0-terminated. 
575 buffer[kBufferSize - 1] = '\0'; 576 char* period_pos = strchr(buffer, '.'); 577 *period_pos = '\0'; 578 int kernel_version_major = 579 static_cast<int>(strtol(buffer, NULL, 10)); // NOLINT 580 // The constants below are taken from pthreads.s from the XNU kernel 581 // sources archive at www.opensource.apple.com. 582 if (kernel_version_major < 11) { 583 // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the 584 // same offsets. 585#if defined(V8_HOST_ARCH_IA32) 586 kMacTlsBaseOffset = 0x48; 587#else 588 kMacTlsBaseOffset = 0x60; 589#endif 590 } else { 591 // 11.x.x (Lion) changed the offset. 592 kMacTlsBaseOffset = 0; 593 } 594 595 Release_Store(&tls_base_offset_initialized, 1); 596} 597 598static void CheckFastTls(Thread::LocalStorageKey key) { 599 void* expected = reinterpret_cast<void*>(0x1234CAFE); 600 Thread::SetThreadLocal(key, expected); 601 void* actual = Thread::GetExistingThreadLocal(key); 602 if (expected != actual) { 603 V8_Fatal(__FILE__, __LINE__, 604 "V8 failed to initialize fast TLS on current kernel"); 605 } 606 Thread::SetThreadLocal(key, NULL); 607} 608 609#endif // V8_FAST_TLS_SUPPORTED 610 611 612Thread::LocalStorageKey Thread::CreateThreadLocalKey() { 613#ifdef V8_FAST_TLS_SUPPORTED 614 bool check_fast_tls = false; 615 if (tls_base_offset_initialized == 0) { 616 check_fast_tls = true; 617 InitializeTlsBaseOffset(); 618 } 619#endif 620 pthread_key_t key; 621 int result = pthread_key_create(&key, NULL); 622 USE(result); 623 ASSERT(result == 0); 624 LocalStorageKey typed_key = static_cast<LocalStorageKey>(key); 625#ifdef V8_FAST_TLS_SUPPORTED 626 // If we just initialized fast TLS support, make sure it works. 
  if (check_fast_tls) CheckFastTls(typed_key);
#endif
  return typed_key;
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


void Thread::YieldCPU() {
  sched_yield();
}


// Recursive pthread mutex (the same thread may lock it more than once).
class MacOSMutex : public Mutex {
 public:
  MacOSMutex() {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex_, &attr);
  }

  virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() { return pthread_mutex_lock(&mutex_); }
  virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;
};


Mutex* OS::CreateMutex() {
  return new MacOSMutex();
}


// Counting semaphore built on Mach semaphores.
class MacOSSemaphore : public Semaphore {
 public:
  explicit MacOSSemaphore(int count) {
    semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);
  }

  ~MacOSSemaphore() {
    semaphore_destroy(mach_task_self(), semaphore_);
  }

  // The MacOS mach semaphore documentation claims it does not have spurious
  // wakeups, the way pthreads semaphores do.  So the code from the linux
  // platform is not needed here.
  void Wait() { semaphore_wait(semaphore_); }

  bool Wait(int timeout);

  void Signal() { semaphore_signal(semaphore_); }

 private:
  semaphore_t semaphore_;
};


// Timed wait. The conversions below (/1000000 to seconds, %1000000*1000 to
// nanoseconds) show |timeout| is interpreted as microseconds.
// NOTE(review): only KERN_OPERATION_TIMED_OUT maps to false — any other
// kernel error (e.g. KERN_ABORTED) is reported as success; confirm callers
// tolerate that.
bool MacOSSemaphore::Wait(int timeout) {
  mach_timespec_t ts;
  ts.tv_sec = timeout / 1000000;
  ts.tv_nsec = (timeout % 1000000) * 1000;
  return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
}


Semaphore* OS::CreateSemaphore(int count) {
  return new MacOSSemaphore(count);
}


// Per-sampler platform state: the Mach port of the thread being profiled,
// captured at construction time on that thread.
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : profiled_thread_(mach_thread_self()) {}

  ~PlatformData() {
    // Deallocate Mach port for thread.
    mach_port_deallocate(mach_task_self(), profiled_thread_);
  }

  thread_act_t profiled_thread() { return profiled_thread_; }

 private:
  // Note: for profiled_thread_ Mach primitives are used instead of PThread's
  // because the latter doesn't provide thread manipulation primitives required.
  // For details, consult "Mac OS X Internals" book, Section 7.3.
  thread_act_t profiled_thread_;
};


// Singleton background thread that periodically samples all registered
// Sampler instances (CPU profiling) and ticks the runtime profiler.
class SamplerThread : public Thread {
 public:
  static const int kSamplerThreadStackSize = 64 * KB;

  explicit SamplerThread(int interval)
      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
        interval_(interval) {}

  // Registers |sampler| and lazily starts the singleton thread. All samplers
  // must share one interval (asserted below).
  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_.Pointer());
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      instance_ = new SamplerThread(sampler->interval());
      instance_->Start();
    } else {
      ASSERT(instance_->interval_ == sampler->interval());
    }
  }

  // Unregisters |sampler| and tears the thread down once none remain.
  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_.Pointer());
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
      delete instance_;
      instance_ = NULL;
    }
  }

  // Implement Thread::Run(). Loops until no samplers remain, sampling and/or
  // ticking the runtime profiler every interval_ ms.
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
          return;
        }
      }
      if (runtime_profiler_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
          return;
        }
      }
      OS::Sleep(interval_);
    }
  }

  // Callback for IterateActiveSamplers: sample one profiling sampler.
  static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
    if (!sampler->isolate()->IsInitialized()) return;
    if (!sampler->IsProfiling()) return;
    SamplerThread* sampler_thread =
        reinterpret_cast<SamplerThread*>(raw_sampler_thread);
    sampler_thread->SampleContext(sampler);
  }

  // Callback for IterateActiveSamplers: tick the runtime profiler.
  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    if (!sampler->isolate()->IsInitialized()) return;
    sampler->isolate()->runtime_profiler()->NotifyTick();
  }

  // Suspends the profiled thread, reads its pc/sp/fp registers via
  // thread_get_state, records a tick sample, then resumes it. Register field
  // names differ by architecture and UNIX03 conformance, hence the macros.
  void SampleContext(Sampler* sampler) {
    thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
    TickSample sample_obj;
    TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
    if (sample == NULL) sample = &sample_obj;

    if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;

#if V8_HOST_ARCH_X64
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
    x86_thread_state64_t state;
    mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __r ## name
#else
#define REGISTER_FIELD(name) r ## name
#endif  // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
    thread_state_flavor_t flavor = i386_THREAD_STATE;
    i386_thread_state_t state;
    mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __e ## name
#else
#define REGISTER_FIELD(name) e ## name
#endif  // __DARWIN_UNIX03
#else
#error Unsupported Mac OS X host architecture.
#endif  // V8_HOST_ARCH

    if (thread_get_state(profiled_thread,
                         flavor,
                         reinterpret_cast<natural_t*>(&state),
                         &count) == KERN_SUCCESS) {
      sample->state = sampler->isolate()->current_vm_state();
      sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
      sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
      sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
      sampler->SampleStack(sample);
      sampler->Tick(sample);
    }
    thread_resume(profiled_thread);
  }

  const int interval_;
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static LazyMutex mutex_;
  static SamplerThread* instance_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};

#undef REGISTER_FIELD


LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
SamplerThread* SamplerThread::instance_ = NULL;


Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}


Sampler::~Sampler() {
  ASSERT(!IsActive());
  delete data_;
}


// Marks the sampler active and registers it with the sampler thread.
void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SamplerThread::AddActiveSampler(this);
}


// Unregisters the sampler and marks it inactive.
void Sampler::Stop() {
  ASSERT(IsActive());
  SamplerThread::RemoveActiveSampler(this);
  SetActive(false);
}


} }  // namespace v8::internal