platform-linux.cc revision 1a80c996a0cb6c5ac739148352552ab47038ccc3
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for Linux goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.
30 31#include <pthread.h> 32#include <semaphore.h> 33#include <signal.h> 34#include <sys/time.h> 35#include <sys/resource.h> 36#include <sys/syscall.h> 37#include <sys/types.h> 38#include <stdlib.h> 39 40// Ubuntu Dapper requires memory pages to be marked as 41// executable. Otherwise, OS raises an exception when executing code 42// in that page. 43#include <sys/types.h> // mmap & munmap 44#include <sys/mman.h> // mmap & munmap 45#include <sys/stat.h> // open 46#include <fcntl.h> // open 47#include <unistd.h> // sysconf 48#ifdef __GLIBC__ 49#include <execinfo.h> // backtrace, backtrace_symbols 50#endif // def __GLIBC__ 51#include <strings.h> // index 52#include <errno.h> 53#include <stdarg.h> 54 55#undef MAP_TYPE 56 57#include "v8.h" 58 59#include "platform.h" 60#include "top.h" 61#include "v8threads.h" 62 63 64namespace v8 { 65namespace internal { 66 67// 0 is never a valid thread id on Linux since tids and pids share a 68// name space and pid 0 is reserved (see man 2 kill). 69static const pthread_t kNoThread = (pthread_t) 0; 70 71 72double ceiling(double x) { 73 return ceil(x); 74} 75 76 77void OS::Setup() { 78 // Seed the random number generator. 79 // Convert the current time to a 64-bit integer first, before converting it 80 // to an unsigned. Going directly can cause an overflow and the seed to be 81 // set to all ones. The seed will be identical for different instances that 82 // call this setup code within the same millisecond. 83 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); 84 srandom(static_cast<unsigned int>(seed)); 85} 86 87 88uint64_t OS::CpuFeaturesImpliedByPlatform() { 89#if (defined(__VFP_FP__) && !defined(__SOFTFP__)) 90 // Here gcc is telling us that we are on an ARM and gcc is assuming that we 91 // have VFP3 instructions. If gcc can assume it then so can we. 92 return 1u << VFP3; 93#elif CAN_USE_ARMV7_INSTRUCTIONS 94 return 1u << ARMv7; 95#else 96 return 0; // Linux runs on anything. 
97#endif 98} 99 100 101#ifdef __arm__ 102static bool CPUInfoContainsString(const char * search_string) { 103 const char* file_name = "/proc/cpuinfo"; 104 // This is written as a straight shot one pass parser 105 // and not using STL string and ifstream because, 106 // on Linux, it's reading from a (non-mmap-able) 107 // character special device. 108 FILE* f = NULL; 109 const char* what = search_string; 110 111 if (NULL == (f = fopen(file_name, "r"))) 112 return false; 113 114 int k; 115 while (EOF != (k = fgetc(f))) { 116 if (k == *what) { 117 ++what; 118 while ((*what != '\0') && (*what == fgetc(f))) { 119 ++what; 120 } 121 if (*what == '\0') { 122 fclose(f); 123 return true; 124 } else { 125 what = search_string; 126 } 127 } 128 } 129 fclose(f); 130 131 // Did not find string in the proc file. 132 return false; 133} 134 135bool OS::ArmCpuHasFeature(CpuFeature feature) { 136 const char* search_string = NULL; 137 // Simple detection of VFP at runtime for Linux. 138 // It is based on /proc/cpuinfo, which reveals hardware configuration 139 // to user-space applications. According to ARM (mid 2009), no similar 140 // facility is universally available on the ARM architectures, 141 // so it's up to individual OSes to provide such. 142 switch (feature) { 143 case VFP3: 144 search_string = "vfpv3"; 145 break; 146 case ARMv7: 147 search_string = "ARMv7"; 148 break; 149 default: 150 UNREACHABLE(); 151 } 152 153 if (CPUInfoContainsString(search_string)) { 154 return true; 155 } 156 157 if (feature == VFP3) { 158 // Some old kernels will report vfp not vfpv3. Here we make a last attempt 159 // to detect vfpv3 by checking for vfp *and* neon, since neon is only 160 // available on architectures with vfpv3. 161 // Checking neon on its own is not enough as it is possible to have neon 162 // without vfp. 
163 if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) { 164 return true; 165 } 166 } 167 168 return false; 169} 170#endif // def __arm__ 171 172 173int OS::ActivationFrameAlignment() { 174#ifdef V8_TARGET_ARCH_ARM 175 // On EABI ARM targets this is required for fp correctness in the 176 // runtime system. 177 return 8; 178#elif V8_TARGET_ARCH_MIPS 179 return 8; 180#endif 181 // With gcc 4.4 the tree vectorization optimizer can generate code 182 // that requires 16 byte alignment such as movdqa on x86. 183 return 16; 184} 185 186 187#ifdef V8_TARGET_ARCH_ARM 188// 0xffff0fa0 is the hard coded address of a function provided by 189// the kernel which implements a memory barrier. On older 190// ARM architecture revisions (pre-v6) this may be implemented using 191// a syscall. This address is stable, and in active use (hard coded) 192// by at least glibc-2.7 and the Android C library. 193typedef void (*LinuxKernelMemoryBarrierFunc)(void); 194LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = 195 (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; 196#endif 197 198void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { 199#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__) 200 // Only use on ARM hardware. 201 pLinuxKernelMemoryBarrier(); 202#else 203 __asm__ __volatile__("" : : : "memory"); 204 // An x86 store acts as a release barrier. 205#endif 206 *ptr = value; 207} 208 209 210const char* OS::LocalTimezone(double time) { 211 if (isnan(time)) return ""; 212 time_t tv = static_cast<time_t>(floor(time/msPerSecond)); 213 struct tm* t = localtime(&tv); 214 if (NULL == t) return ""; 215 return t->tm_zone; 216} 217 218 219double OS::LocalTimeOffset() { 220 time_t tv = time(NULL); 221 struct tm* t = localtime(&tv); 222 // tm_gmtoff includes any daylight savings offset, so subtract it. 223 return static_cast<double>(t->tm_gmtoff * msPerSecond - 224 (t->tm_isdst > 0 ? 
3600 * msPerSecond : 0)); 225} 226 227 228// We keep the lowest and highest addresses mapped as a quick way of 229// determining that pointers are outside the heap (used mostly in assertions 230// and verification). The estimate is conservative, ie, not all addresses in 231// 'allocated' space are actually allocated to our heap. The range is 232// [lowest, highest), inclusive on the low and and exclusive on the high end. 233static void* lowest_ever_allocated = reinterpret_cast<void*>(-1); 234static void* highest_ever_allocated = reinterpret_cast<void*>(0); 235 236 237static void UpdateAllocatedSpaceLimits(void* address, int size) { 238 lowest_ever_allocated = Min(lowest_ever_allocated, address); 239 highest_ever_allocated = 240 Max(highest_ever_allocated, 241 reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); 242} 243 244 245bool OS::IsOutsideAllocatedSpace(void* address) { 246 return address < lowest_ever_allocated || address >= highest_ever_allocated; 247} 248 249 250size_t OS::AllocateAlignment() { 251 return sysconf(_SC_PAGESIZE); 252} 253 254 255void* OS::Allocate(const size_t requested, 256 size_t* allocated, 257 bool is_executable) { 258 // TODO(805): Port randomization of allocated executable memory to Linux. 259 const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE)); 260 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); 261 void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 262 if (mbase == MAP_FAILED) { 263 LOG(StringEvent("OS::Allocate", "mmap failed")); 264 return NULL; 265 } 266 *allocated = msize; 267 UpdateAllocatedSpaceLimits(mbase, msize); 268 return mbase; 269} 270 271 272void OS::Free(void* address, const size_t size) { 273 // TODO(1240712): munmap has a return value which is ignored here. 
274 int result = munmap(address, size); 275 USE(result); 276 ASSERT(result == 0); 277} 278 279 280#ifdef ENABLE_HEAP_PROTECTION 281 282void OS::Protect(void* address, size_t size) { 283 // TODO(1240712): mprotect has a return value which is ignored here. 284 mprotect(address, size, PROT_READ); 285} 286 287 288void OS::Unprotect(void* address, size_t size, bool is_executable) { 289 // TODO(1240712): mprotect has a return value which is ignored here. 290 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); 291 mprotect(address, size, prot); 292} 293 294#endif 295 296 297void OS::Sleep(int milliseconds) { 298 unsigned int ms = static_cast<unsigned int>(milliseconds); 299 usleep(1000 * ms); 300} 301 302 303void OS::Abort() { 304 // Redirect to std abort to signal abnormal program termination. 305 abort(); 306} 307 308 309void OS::DebugBreak() { 310// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x, 311// which is the architecture of generated code). 312#if (defined(__arm__) || defined(__thumb__)) 313# if defined(CAN_USE_ARMV5_INSTRUCTIONS) 314 asm("bkpt 0"); 315# endif 316#elif defined(__mips__) 317 asm("break"); 318#else 319 asm("int $3"); 320#endif 321} 322 323 324class PosixMemoryMappedFile : public OS::MemoryMappedFile { 325 public: 326 PosixMemoryMappedFile(FILE* file, void* memory, int size) 327 : file_(file), memory_(memory), size_(size) { } 328 virtual ~PosixMemoryMappedFile(); 329 virtual void* memory() { return memory_; } 330 private: 331 FILE* file_; 332 void* memory_; 333 int size_; 334}; 335 336 337OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, 338 void* initial) { 339 FILE* file = fopen(name, "w+"); 340 if (file == NULL) return NULL; 341 int result = fwrite(initial, size, 1, file); 342 if (result < 1) { 343 fclose(file); 344 return NULL; 345 } 346 void* memory = 347 mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); 348 return new PosixMemoryMappedFile(file, memory, 
size); 349} 350 351 352PosixMemoryMappedFile::~PosixMemoryMappedFile() { 353 if (memory_) munmap(memory_, size_); 354 fclose(file_); 355} 356 357 358void OS::LogSharedLibraryAddresses() { 359#ifdef ENABLE_LOGGING_AND_PROFILING 360 // This function assumes that the layout of the file is as follows: 361 // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] 362 // If we encounter an unexpected situation we abort scanning further entries. 363 FILE* fp = fopen("/proc/self/maps", "r"); 364 if (fp == NULL) return; 365 366 // Allocate enough room to be able to store a full file name. 367 const int kLibNameLen = FILENAME_MAX + 1; 368 char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen)); 369 370 // This loop will terminate once the scanning hits an EOF. 371 while (true) { 372 uintptr_t start, end; 373 char attr_r, attr_w, attr_x, attr_p; 374 // Parse the addresses and permission bits at the beginning of the line. 375 if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; 376 if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; 377 378 int c; 379 if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { 380 // Found a read-only executable entry. Skip characters until we reach 381 // the beginning of the filename or the end of the line. 382 do { 383 c = getc(fp); 384 } while ((c != EOF) && (c != '\n') && (c != '/')); 385 if (c == EOF) break; // EOF: Was unexpected, just exit. 386 387 // Process the filename if found. 388 if (c == '/') { 389 ungetc(c, fp); // Push the '/' back into the stream to be read below. 390 391 // Read to the end of the line. Exit if the read fails. 392 if (fgets(lib_name, kLibNameLen, fp) == NULL) break; 393 394 // Drop the newline character read by fgets. We do not need to check 395 // for a zero-length string because we know that we at least read the 396 // '/' character. 397 lib_name[strlen(lib_name) - 1] = '\0'; 398 } else { 399 // No library name found, just record the raw address range. 
400 snprintf(lib_name, kLibNameLen, 401 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); 402 } 403 LOG(SharedLibraryEvent(lib_name, start, end)); 404 } else { 405 // Entry not describing executable data. Skip to end of line to setup 406 // reading the next entry. 407 do { 408 c = getc(fp); 409 } while ((c != EOF) && (c != '\n')); 410 if (c == EOF) break; 411 } 412 } 413 free(lib_name); 414 fclose(fp); 415#endif 416} 417 418 419static const char kGCFakeMmap[] = "/tmp/__v8_gc__"; 420 421 422void OS::SignalCodeMovingGC() { 423#ifdef ENABLE_LOGGING_AND_PROFILING 424 // Support for ll_prof.py. 425 // 426 // The Linux profiler built into the kernel logs all mmap's with 427 // PROT_EXEC so that analysis tools can properly attribute ticks. We 428 // do a mmap with a name known by ll_prof.py and immediately munmap 429 // it. This injects a GC marker into the stream of events generated 430 // by the kernel and allows us to synchronize V8 code log and the 431 // kernel log. 432 int size = sysconf(_SC_PAGESIZE); 433 FILE* f = fopen(kGCFakeMmap, "w+"); 434 void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, 435 fileno(f), 0); 436 ASSERT(addr != MAP_FAILED); 437 munmap(addr, size); 438 fclose(f); 439#endif 440} 441 442 443int OS::StackWalk(Vector<OS::StackFrame> frames) { 444 // backtrace is a glibc extension. 445#ifdef __GLIBC__ 446 int frames_size = frames.length(); 447 ScopedVector<void*> addresses(frames_size); 448 449 int frames_count = backtrace(addresses.start(), frames_size); 450 451 char** symbols = backtrace_symbols(addresses.start(), frames_count); 452 if (symbols == NULL) { 453 return kStackWalkError; 454 } 455 456 for (int i = 0; i < frames_count; i++) { 457 frames[i].address = addresses[i]; 458 // Format a text representation of the frame based on the information 459 // available. 460 SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen), 461 "%s", 462 symbols[i]); 463 // Make sure line termination is in place. 
464 frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; 465 } 466 467 free(symbols); 468 469 return frames_count; 470#else // ndef __GLIBC__ 471 return 0; 472#endif // ndef __GLIBC__ 473} 474 475 476// Constants used for mmap. 477static const int kMmapFd = -1; 478static const int kMmapFdOffset = 0; 479 480 481VirtualMemory::VirtualMemory(size_t size) { 482 address_ = mmap(NULL, size, PROT_NONE, 483 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, 484 kMmapFd, kMmapFdOffset); 485 size_ = size; 486} 487 488 489VirtualMemory::~VirtualMemory() { 490 if (IsReserved()) { 491 if (0 == munmap(address(), size())) address_ = MAP_FAILED; 492 } 493} 494 495 496bool VirtualMemory::IsReserved() { 497 return address_ != MAP_FAILED; 498} 499 500 501bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { 502 int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); 503 if (MAP_FAILED == mmap(address, size, prot, 504 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 505 kMmapFd, kMmapFdOffset)) { 506 return false; 507 } 508 509 UpdateAllocatedSpaceLimits(address, size); 510 return true; 511} 512 513 514bool VirtualMemory::Uncommit(void* address, size_t size) { 515 return mmap(address, size, PROT_NONE, 516 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, 517 kMmapFd, kMmapFdOffset) != MAP_FAILED; 518} 519 520 521class ThreadHandle::PlatformData : public Malloced { 522 public: 523 explicit PlatformData(ThreadHandle::Kind kind) { 524 Initialize(kind); 525 } 526 527 void Initialize(ThreadHandle::Kind kind) { 528 switch (kind) { 529 case ThreadHandle::SELF: thread_ = pthread_self(); break; 530 case ThreadHandle::INVALID: thread_ = kNoThread; break; 531 } 532 } 533 534 pthread_t thread_; // Thread handle for pthread. 
535}; 536 537 538ThreadHandle::ThreadHandle(Kind kind) { 539 data_ = new PlatformData(kind); 540} 541 542 543void ThreadHandle::Initialize(ThreadHandle::Kind kind) { 544 data_->Initialize(kind); 545} 546 547 548ThreadHandle::~ThreadHandle() { 549 delete data_; 550} 551 552 553bool ThreadHandle::IsSelf() const { 554 return pthread_equal(data_->thread_, pthread_self()); 555} 556 557 558bool ThreadHandle::IsValid() const { 559 return data_->thread_ != kNoThread; 560} 561 562 563Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) { 564} 565 566 567Thread::~Thread() { 568} 569 570 571static void* ThreadEntry(void* arg) { 572 Thread* thread = reinterpret_cast<Thread*>(arg); 573 // This is also initialized by the first argument to pthread_create() but we 574 // don't know which thread will run first (the original thread or the new 575 // one) so we initialize it here too. 576 thread->thread_handle_data()->thread_ = pthread_self(); 577 ASSERT(thread->IsValid()); 578 thread->Run(); 579 return NULL; 580} 581 582 583void Thread::Start() { 584 pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this); 585 ASSERT(IsValid()); 586} 587 588 589void Thread::Join() { 590 pthread_join(thread_handle_data()->thread_, NULL); 591} 592 593 594Thread::LocalStorageKey Thread::CreateThreadLocalKey() { 595 pthread_key_t key; 596 int result = pthread_key_create(&key, NULL); 597 USE(result); 598 ASSERT(result == 0); 599 return static_cast<LocalStorageKey>(key); 600} 601 602 603void Thread::DeleteThreadLocalKey(LocalStorageKey key) { 604 pthread_key_t pthread_key = static_cast<pthread_key_t>(key); 605 int result = pthread_key_delete(pthread_key); 606 USE(result); 607 ASSERT(result == 0); 608} 609 610 611void* Thread::GetThreadLocal(LocalStorageKey key) { 612 pthread_key_t pthread_key = static_cast<pthread_key_t>(key); 613 return pthread_getspecific(pthread_key); 614} 615 616 617void Thread::SetThreadLocal(LocalStorageKey key, void* value) { 618 pthread_key_t pthread_key = 
static_cast<pthread_key_t>(key); 619 pthread_setspecific(pthread_key, value); 620} 621 622 623void Thread::YieldCPU() { 624 sched_yield(); 625} 626 627 628class LinuxMutex : public Mutex { 629 public: 630 631 LinuxMutex() { 632 pthread_mutexattr_t attrs; 633 int result = pthread_mutexattr_init(&attrs); 634 ASSERT(result == 0); 635 result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE); 636 ASSERT(result == 0); 637 result = pthread_mutex_init(&mutex_, &attrs); 638 ASSERT(result == 0); 639 } 640 641 virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); } 642 643 virtual int Lock() { 644 int result = pthread_mutex_lock(&mutex_); 645 return result; 646 } 647 648 virtual int Unlock() { 649 int result = pthread_mutex_unlock(&mutex_); 650 return result; 651 } 652 653 private: 654 pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms. 655}; 656 657 658Mutex* OS::CreateMutex() { 659 return new LinuxMutex(); 660} 661 662 663class LinuxSemaphore : public Semaphore { 664 public: 665 explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); } 666 virtual ~LinuxSemaphore() { sem_destroy(&sem_); } 667 668 virtual void Wait(); 669 virtual bool Wait(int timeout); 670 virtual void Signal() { sem_post(&sem_); } 671 private: 672 sem_t sem_; 673}; 674 675 676void LinuxSemaphore::Wait() { 677 while (true) { 678 int result = sem_wait(&sem_); 679 if (result == 0) return; // Successfully got semaphore. 680 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. 681 } 682} 683 684 685#ifndef TIMEVAL_TO_TIMESPEC 686#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \ 687 (ts)->tv_sec = (tv)->tv_sec; \ 688 (ts)->tv_nsec = (tv)->tv_usec * 1000; \ 689} while (false) 690#endif 691 692 693bool LinuxSemaphore::Wait(int timeout) { 694 const long kOneSecondMicros = 1000000; // NOLINT 695 696 // Split timeout into second and nanosecond parts. 
697 struct timeval delta; 698 delta.tv_usec = timeout % kOneSecondMicros; 699 delta.tv_sec = timeout / kOneSecondMicros; 700 701 struct timeval current_time; 702 // Get the current time. 703 if (gettimeofday(¤t_time, NULL) == -1) { 704 return false; 705 } 706 707 // Calculate time for end of timeout. 708 struct timeval end_time; 709 timeradd(¤t_time, &delta, &end_time); 710 711 struct timespec ts; 712 TIMEVAL_TO_TIMESPEC(&end_time, &ts); 713 // Wait for semaphore signalled or timeout. 714 while (true) { 715 int result = sem_timedwait(&sem_, &ts); 716 if (result == 0) return true; // Successfully got semaphore. 717 if (result > 0) { 718 // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1. 719 errno = result; 720 result = -1; 721 } 722 if (result == -1 && errno == ETIMEDOUT) return false; // Timeout. 723 CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. 724 } 725} 726 727 728Semaphore* OS::CreateSemaphore(int count) { 729 return new LinuxSemaphore(count); 730} 731 732 733#ifdef ENABLE_LOGGING_AND_PROFILING 734 735static Sampler* active_sampler_ = NULL; 736 737 738#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__)) 739// Android runs a fairly new Linux kernel, so signal info is there, 740// but the C library doesn't have the structs defined. 
// Minimal definitions of the kernel's signal-context structures, mirroring
// the layout glibc would provide; only used on non-glibc ARM (Android).
struct sigcontext {
  uint32_t trap_no;
  uint32_t error_code;
  uint32_t oldmask;
  uint32_t gregs[16];
  uint32_t arm_cpsr;
  uint32_t fault_address;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  __sigset_t uc_sigmask;
} ucontext_t;
// General-purpose register indices used below: pc = r15, sp = r13, fp = r11.
enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};

#endif


// SIGPROF handler: records a tick for the active sampler, extracting
// pc/sp/fp from the interrupted context when profiling.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#ifndef V8_HOST_ARCH_MIPS
  USE(info);
  if (signal != SIGPROF) return;
  if (active_sampler_ == NULL) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent();
  if (sample == NULL) sample = &sample_obj;

  // We always sample the VM state.
  sample->state = VMState::current_state();

  // If profiling, we extract the current pc and sp.
  if (active_sampler_->IsProfiling()) {
    // Extracting the sample from the context is extremely machine dependent.
    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
    mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
    sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
    sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
    sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
    sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
    sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
    sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    // Old glibc (and Bionic, via the structs above) exposes raw gregs.
    sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
    sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
    sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
    // Newer glibc names the ARM registers directly.
    sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
    sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
    sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#elif V8_HOST_ARCH_MIPS
    // Implement this on MIPS.
    UNIMPLEMENTED();
#endif
    active_sampler_->SampleStack(sample);
  }

  active_sampler_->Tick(sample);
#endif
}


// Platform-specific sampler state: the signal handler bookkeeping and the
// background thread that periodically sends SIGPROF to the VM thread.
class Sampler::PlatformData : public Malloced {
 public:
  explicit PlatformData(Sampler* sampler)
      : sampler_(sampler),
        signal_handler_installed_(false),
        vm_tgid_(getpid()),
        // Glibc doesn't provide a wrapper for gettid(2).
        vm_tid_(syscall(SYS_gettid)),
        signal_sender_launched_(false) {
  }

  // Body of the sender thread: delivers SIGPROF to the VM thread at the
  // sampler's interval until the sampler is deactivated.
  void SignalSender() {
    while (sampler_->IsActive()) {
      // Glibc doesn't provide a wrapper for tgkill(2).
      syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
      // Convert ms to us and subtract 100 us to compensate delays
      // occurring during signal delivery.
      const useconds_t interval = sampler_->interval_ * 1000 - 100;
      int result = usleep(interval);
#ifdef DEBUG
      if (result != 0 && errno != EINTR) {
        fprintf(stderr,
                "SignalSender usleep error; interval = %u, errno = %d\n",
                interval,
                errno);
        ASSERT(result == 0 || errno == EINTR);
      }
#endif  // DEBUG
      USE(result);
    }
  }

  Sampler* sampler_;
  bool signal_handler_installed_;
  struct sigaction old_signal_handler_;
  int vm_tgid_;   // Thread group (process) id of the VM.
  int vm_tid_;    // Kernel thread id of the VM thread to signal.
  bool signal_sender_launched_;
  pthread_t signal_sender_thread_;
};


// Trampoline passed to pthread_create for the signal sender thread.
static void* SenderEntry(void* arg) {
  Sampler::PlatformData* data =
      reinterpret_cast<Sampler::PlatformData*>(arg);
  data->SignalSender();
  return 0;
}


Sampler::Sampler(int interval, bool profiling)
    : interval_(interval),
      profiling_(profiling),
      synchronous_(profiling),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData(this);
}


Sampler::~Sampler() {
  // Stop() must have been called (it joins the sender thread).
  ASSERT(!data_->signal_sender_launched_);
  delete data_;
}


// Installs the SIGPROF handler and starts the sender thread.
void Sampler::Start() {
  // There can only be one active sampler at the time on POSIX
  // platforms.
  if (active_sampler_ != NULL) return;

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;

  // Start a thread that sends SIGPROF signal to VM thread.
  // Sending the signal ourselves instead of relying on itimer provides
  // much better accuracy.
  active_ = true;
  if (pthread_create(
          &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
    data_->signal_sender_launched_ = true;
  }

  // Set this sampler as the active sampler.
  active_sampler_ = this;
}


// Stops the sender thread and restores the previous SIGPROF handler.
void Sampler::Stop() {
  active_ = false;

  // Wait for signal sender termination (it will exit after setting
  // active_ to false).
  if (data_->signal_sender_launched_) {
    pthread_join(data_->signal_sender_thread_, NULL);
    data_->signal_sender_launched_ = false;
  }

  // Restore old signal handler
  if (data_->signal_handler_installed_) {
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
}


#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal