platform-linux.cc revision 1e0659c275bb392c045087af4f6b0d7565cb3d77
1// Copyright 2006-2008 the V8 project authors. All rights reserved. 2// Redistribution and use in source and binary forms, with or without 3// modification, are permitted provided that the following conditions are 4// met: 5// 6// * Redistributions of source code must retain the above copyright 7// notice, this list of conditions and the following disclaimer. 8// * Redistributions in binary form must reproduce the above 9// copyright notice, this list of conditions and the following 10// disclaimer in the documentation and/or other materials provided 11// with the distribution. 12// * Neither the name of Google Inc. nor the names of its 13// contributors may be used to endorse or promote products derived 14// from this software without specific prior written permission. 15// 16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 28// Platform specific code for Linux goes here. For the POSIX comaptible parts 29// the implementation is in platform-posix.cc. 
30 31#include <pthread.h> 32#include <semaphore.h> 33#include <signal.h> 34#include <sys/prctl.h> 35#include <sys/time.h> 36#include <sys/resource.h> 37#include <sys/syscall.h> 38#include <sys/types.h> 39#include <stdlib.h> 40 41// Ubuntu Dapper requires memory pages to be marked as 42// executable. Otherwise, OS raises an exception when executing code 43// in that page. 44#include <sys/types.h> // mmap & munmap 45#include <sys/mman.h> // mmap & munmap 46#include <sys/stat.h> // open 47#include <fcntl.h> // open 48#include <unistd.h> // sysconf 49#ifdef __GLIBC__ 50#include <execinfo.h> // backtrace, backtrace_symbols 51#endif // def __GLIBC__ 52#include <strings.h> // index 53#include <errno.h> 54#include <stdarg.h> 55 56#undef MAP_TYPE 57 58#include "v8.h" 59 60#include "platform.h" 61#include "top.h" 62#include "v8threads.h" 63#include "vm-state-inl.h" 64 65 66namespace v8 { 67namespace internal { 68 69// 0 is never a valid thread id on Linux since tids and pids share a 70// name space and pid 0 is reserved (see man 2 kill). 71static const pthread_t kNoThread = (pthread_t) 0; 72 73 74double ceiling(double x) { 75 return ceil(x); 76} 77 78 79void OS::Setup() { 80 // Seed the random number generator. 81 // Convert the current time to a 64-bit integer first, before converting it 82 // to an unsigned. Going directly can cause an overflow and the seed to be 83 // set to all ones. The seed will be identical for different instances that 84 // call this setup code within the same millisecond. 85 uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); 86 srandom(static_cast<unsigned int>(seed)); 87} 88 89 90uint64_t OS::CpuFeaturesImpliedByPlatform() { 91#if (defined(__VFP_FP__) && !defined(__SOFTFP__)) 92 // Here gcc is telling us that we are on an ARM and gcc is assuming that we 93 // have VFP3 instructions. If gcc can assume it then so can we. 
94 return 1u << VFP3; 95#elif CAN_USE_ARMV7_INSTRUCTIONS 96 return 1u << ARMv7; 97#else 98 return 0; // Linux runs on anything. 99#endif 100} 101 102 103#ifdef __arm__ 104static bool CPUInfoContainsString(const char * search_string) { 105 const char* file_name = "/proc/cpuinfo"; 106 // This is written as a straight shot one pass parser 107 // and not using STL string and ifstream because, 108 // on Linux, it's reading from a (non-mmap-able) 109 // character special device. 110 FILE* f = NULL; 111 const char* what = search_string; 112 113 if (NULL == (f = fopen(file_name, "r"))) 114 return false; 115 116 int k; 117 while (EOF != (k = fgetc(f))) { 118 if (k == *what) { 119 ++what; 120 while ((*what != '\0') && (*what == fgetc(f))) { 121 ++what; 122 } 123 if (*what == '\0') { 124 fclose(f); 125 return true; 126 } else { 127 what = search_string; 128 } 129 } 130 } 131 fclose(f); 132 133 // Did not find string in the proc file. 134 return false; 135} 136 137bool OS::ArmCpuHasFeature(CpuFeature feature) { 138 const char* search_string = NULL; 139 // Simple detection of VFP at runtime for Linux. 140 // It is based on /proc/cpuinfo, which reveals hardware configuration 141 // to user-space applications. According to ARM (mid 2009), no similar 142 // facility is universally available on the ARM architectures, 143 // so it's up to individual OSes to provide such. 144 switch (feature) { 145 case VFP3: 146 search_string = "vfpv3"; 147 break; 148 case ARMv7: 149 search_string = "ARMv7"; 150 break; 151 default: 152 UNREACHABLE(); 153 } 154 155 if (CPUInfoContainsString(search_string)) { 156 return true; 157 } 158 159 if (feature == VFP3) { 160 // Some old kernels will report vfp not vfpv3. Here we make a last attempt 161 // to detect vfpv3 by checking for vfp *and* neon, since neon is only 162 // available on architectures with vfpv3. 163 // Checking neon on its own is not enough as it is possible to have neon 164 // without vfp. 
165 if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) { 166 return true; 167 } 168 } 169 170 return false; 171} 172#endif // def __arm__ 173 174 175int OS::ActivationFrameAlignment() { 176#ifdef V8_TARGET_ARCH_ARM 177 // On EABI ARM targets this is required for fp correctness in the 178 // runtime system. 179 return 8; 180#elif V8_TARGET_ARCH_MIPS 181 return 8; 182#endif 183 // With gcc 4.4 the tree vectorization optimizer can generate code 184 // that requires 16 byte alignment such as movdqa on x86. 185 return 16; 186} 187 188 189void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { 190#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__) 191 // Only use on ARM hardware. 192 MemoryBarrier(); 193#else 194 __asm__ __volatile__("" : : : "memory"); 195 // An x86 store acts as a release barrier. 196#endif 197 *ptr = value; 198} 199 200 201const char* OS::LocalTimezone(double time) { 202 if (isnan(time)) return ""; 203 time_t tv = static_cast<time_t>(floor(time/msPerSecond)); 204 struct tm* t = localtime(&tv); 205 if (NULL == t) return ""; 206 return t->tm_zone; 207} 208 209 210double OS::LocalTimeOffset() { 211 time_t tv = time(NULL); 212 struct tm* t = localtime(&tv); 213 // tm_gmtoff includes any daylight savings offset, so subtract it. 214 return static_cast<double>(t->tm_gmtoff * msPerSecond - 215 (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); 216} 217 218 219// We keep the lowest and highest addresses mapped as a quick way of 220// determining that pointers are outside the heap (used mostly in assertions 221// and verification). The estimate is conservative, ie, not all addresses in 222// 'allocated' space are actually allocated to our heap. The range is 223// [lowest, highest), inclusive on the low and and exclusive on the high end. 
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


// Widens the recorded [lowest, highest) range to cover a new allocation.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


// Quick conservative check: true only if address lies outside every range
// this process has ever allocated through OS::Allocate/VirtualMemory.
bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


// Allocation granularity; on Linux this is the VM page size.
size_t OS::AllocateAlignment() {
  return sysconf(_SC_PAGESIZE);
}


// Allocates at least 'requested' bytes of page-aligned memory via mmap.
// On success stores the actual (page-rounded) size in *allocated and
// returns the base address; returns NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  // TODO(805): Port randomization of allocated executable memory to Linux.
  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}


// Releases memory previously obtained from OS::Allocate.
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


#ifdef ENABLE_HEAP_PROTECTION

// Makes the given range read-only.
void OS::Protect(void* address, size_t size) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  mprotect(address, size, PROT_READ);
}


// Makes the given range writable again (and executable if requested).
void OS::Unprotect(void* address, size_t size, bool is_executable) {
  // TODO(1240712): mprotect has a return value which is ignored here.
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  mprotect(address, size, prot);
}

#endif


// Suspends the calling thread for the given number of milliseconds.
void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


// Emits a host-architecture breakpoint instruction.
void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
// which is the architecture of generated code).
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
  asm("bkpt 0");
# endif
#elif defined(__mips__)
  asm("break");
#else
  asm("int $3");
#endif
}


// mmap-backed implementation of OS::MemoryMappedFile.  Owns both the FILE*
// and the mapping; both are released in the destructor.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


// Opens an existing file and maps it read/write.
// NOTE(review): mode "w+" truncates an existing file, so the subsequent
// fseek/ftell yields the size of the now-empty file — confirm whether
// callers only use this on files they are about to (re)write.
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


// Creates a file of the given size, fills it with 'initial', and maps it
// read/write.  Returns NULL if the file cannot be created or written.
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}


// Logs the address ranges of executable mappings of this process by parsing
// /proc/self/maps.  Used by the profiler to attribute ticks to libraries.
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return;

  // Allocate enough room to be able to store a full file name.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  // This loop will terminate once the scanning hits an EOF.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
      // Found a read-only executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/'));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if (c == '/') {
        ungetc(c, fp);  // Push the '/' back into the stream to be read below.

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      LOG(SharedLibraryEvent(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to setup
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
#endif
}


// Marker file name mmapped/munmapped to inject a GC event into the kernel's
// mmap log (consumed by ll_prof.py).
static const char kGCFakeMmap[] = "/tmp/__v8_gc__";


void OS::SignalCodeMovingGC() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  // Support for ll_prof.py.
  //
  // The Linux profiler built into the kernel logs all mmap's with
  // PROT_EXEC so that analysis tools can properly attribute ticks. We
  // do a mmap with a name known by ll_prof.py and immediately munmap
  // it. This injects a GC marker into the stream of events generated
  // by the kernel and allows us to synchronize V8 code log and the
  // kernel log.
  // NOTE(review): fopen's result is not checked before fileno(f) —
  // presumably /tmp is always writable here; confirm.
  int size = sysconf(_SC_PAGESIZE);
  FILE* f = fopen(kGCFakeMmap, "w+");
  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
                    fileno(f), 0);
  ASSERT(addr != MAP_FAILED);
  munmap(addr, size);
  fclose(f);
#endif
}


// Captures the current C++ stack into 'frames' using glibc's backtrace.
// Returns the number of frames captured, 0 on non-glibc systems, or
// kStackWalkError if symbolization fails.
int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // backtrace is a glibc extension.
#ifdef __GLIBC__
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);

  return frames_count;
#else  // ndef __GLIBC__
  return 0;
#endif  // ndef __GLIBC__
}


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


// Reserves (but does not commit) an address range of the given size.
// IsReserved() reports whether the reservation succeeded.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = mmap(NULL, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    // After a successful unmap, mark the reservation as released.
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}


// Commits (backs with accessible memory) part of a reserved range by
// re-mmapping it with MAP_FIXED over the PROT_NONE reservation.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}


// Decommits memory by re-mapping it as inaccessible PROT_NONE pages,
// keeping the address range reserved.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd, kMmapFdOffset) != MAP_FAILED;
}


// Platform-specific thread handle state: wraps a pthread_t.
class ThreadHandle::PlatformData : public Malloced {
 public:
  explicit PlatformData(ThreadHandle::Kind kind) {
    Initialize(kind);
  }

  void Initialize(ThreadHandle::Kind kind) {
    switch (kind) {
      case ThreadHandle::SELF: thread_ = pthread_self(); break;
      case ThreadHandle::INVALID: thread_ = kNoThread; break;
    }
  }

  pthread_t thread_;  // Thread handle for pthread.
};


ThreadHandle::ThreadHandle(Kind kind) {
  data_ = new PlatformData(kind);
}


void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}


ThreadHandle::~ThreadHandle() {
  delete data_;
}


// True if this handle refers to the calling thread.
bool ThreadHandle::IsSelf() const {
  return pthread_equal(data_->thread_, pthread_self());
}


bool ThreadHandle::IsValid() const {
  return data_->thread_ != kNoThread;
}


Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
  set_name("v8:<unknown>");
}


Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
  set_name(name);
}


Thread::~Thread() {
}


// Trampoline passed to pthread_create; sets the OS-visible thread name and
// records the pthread handle before running the Thread's payload.
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
        0, 0, 0);
  thread->thread_handle_data()->thread_ = pthread_self();
  ASSERT(thread->IsValid());
  thread->Run();
  return NULL;
}


// Copies the name into the fixed-size buffer, always NUL-terminating.
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


void Thread::Start() {
  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
  ASSERT(IsValid());
}


void Thread::Join() {
  pthread_join(thread_handle_data()->thread_, NULL);
}


// Thread-local storage, thin wrappers over pthread keys.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


void Thread::YieldCPU() {
  sched_yield();
}


// Recursive pthread mutex implementing the Mutex interface.
class LinuxMutex : public Mutex {
 public:

  LinuxMutex() {
    pthread_mutexattr_t attrs;
    int result = pthread_mutexattr_init(&attrs);
    ASSERT(result == 0);
    // Recursive so the same thread may lock it multiple times.
    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    ASSERT(result == 0);
    result = pthread_mutex_init(&mutex_, &attrs);
    ASSERT(result == 0);
  }

  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() {
    int result = pthread_mutex_lock(&mutex_);
    return result;
  }

  virtual int Unlock() {
    int result = pthread_mutex_unlock(&mutex_);
    return result;
  }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
};


Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}


// POSIX semaphore implementing the Semaphore interface.
class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};


void LinuxSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    (ts)->tv_sec = (tv)->tv_sec;                                    \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
} while (false)
#endif


// Waits up to 'timeout' microseconds for the semaphore; returns true if it
// was acquired, false on timeout or clock failure.
bool LinuxSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and nanosecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result > 0) {
      // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
      errno = result;
      result = -1;
    }
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}


#ifdef ENABLE_LOGGING_AND_PROFILING

// The single active sampler (POSIX allows only one) and the kernel thread
// id of the VM thread being sampled.
static Sampler* active_sampler_ = NULL;
static int vm_tid_ = 0;


#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.

struct sigcontext {
  uint32_t trap_no;
  uint32_t error_code;
  uint32_t oldmask;
  uint32_t gregs[16];
  uint32_t arm_cpsr;
  uint32_t fault_address;
};
typedef uint32_t __sigset_t;
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  __sigset_t uc_sigmask;
} ucontext_t;
enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};

#endif


static int GetThreadID() {
  // Glibc doesn't provide a wrapper for gettid(2).
  return syscall(SYS_gettid);
}


// SIGPROF handler: records a tick sample (pc/sp/fp and VM state) for the
// profiler.  Register extraction is architecture specific.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#ifndef V8_HOST_ARCH_MIPS
  USE(info);
  if (signal != SIGPROF) return;
  if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
  if (vm_tid_ != GetThreadID()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent();
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = Top::current_vm_state();
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
  sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
  sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
  sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#elif V8_HOST_ARCH_MIPS
  // Implement this on MIPS.
  UNIMPLEMENTED();
#endif
  active_sampler_->SampleStack(sample);
  active_sampler_->Tick(sample);
#endif
}


// Platform state for the sampler: owns the signal-sender thread that
// periodically delivers SIGPROF to the VM thread.
class Sampler::PlatformData : public Malloced {
 public:
  enum SleepInterval {
    FULL_INTERVAL,
    HALF_INTERVAL
  };

  explicit PlatformData(Sampler* sampler)
      : sampler_(sampler),
        signal_handler_installed_(false),
        vm_tgid_(getpid()),
        signal_sender_launched_(false) {
  }

  // Main loop of the signal-sender thread; runs until the sampler is
  // deactivated.  When both profiling and the runtime profiler are on,
  // the interval is split in half to interleave the two notifications.
  void SignalSender() {
    while (sampler_->IsActive()) {
      if (rate_limiter_.SuspendIfNecessary()) continue;
      if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
        SendProfilingSignal();
        Sleep(HALF_INTERVAL);
        RuntimeProfiler::NotifyTick();
        Sleep(HALF_INTERVAL);
      } else {
        if (sampler_->IsProfiling()) SendProfilingSignal();
        if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
        Sleep(FULL_INTERVAL);
      }
    }
  }

  void SendProfilingSignal() {
    // Glibc doesn't provide a wrapper for tgkill(2).
    syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
  }

  void Sleep(SleepInterval full_or_half) {
    // Convert ms to us and subtract 100 us to compensate delays
    // occuring during signal delivery.
    useconds_t interval = sampler_->interval_ * 1000 - 100;
    if (full_or_half == HALF_INTERVAL) interval /= 2;
    int result = usleep(interval);
#ifdef DEBUG
    if (result != 0 && errno != EINTR) {
      fprintf(stderr,
              "SignalSender usleep error; interval = %u, errno = %d\n",
              interval,
              errno);
      ASSERT(result == 0 || errno == EINTR);
    }
#endif
    USE(result);
  }

  Sampler* sampler_;
  bool signal_handler_installed_;
  struct sigaction old_signal_handler_;
  int vm_tgid_;
  bool signal_sender_launched_;
  pthread_t signal_sender_thread_;
  RuntimeProfilerRateLimiter rate_limiter_;
};


// pthread entry point for the signal-sender thread.
static void* SenderEntry(void* arg) {
  Sampler::PlatformData* data =
      reinterpret_cast<Sampler::PlatformData*>(arg);
  data->SignalSender();
  return 0;
}


Sampler::Sampler(int interval)
    : interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData(this);
}


Sampler::~Sampler() {
  ASSERT(!data_->signal_sender_launched_);
  delete data_;
}


// Installs the SIGPROF handler and launches the signal-sender thread.
void Sampler::Start() {
  // There can only be one active sampler at the time on POSIX
  // platforms.
  ASSERT(!IsActive());
  vm_tid_ = GetThreadID();

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;

  // Start a thread that sends SIGPROF signal to VM thread.
  // Sending the signal ourselves instead of relying on itimer provides
  // much better accuracy.
  SetActive(true);
  if (pthread_create(
          &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
    data_->signal_sender_launched_ = true;
  }

  // Set this sampler as the active sampler.
  active_sampler_ = this;
}


// Deactivates the sampler, joins the sender thread and restores the old
// SIGPROF handler.
void Sampler::Stop() {
  SetActive(false);

  // Wait for signal sender termination (it will exit after setting
  // active_ to false).
  if (data_->signal_sender_launched_) {
    Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
    pthread_join(data_->signal_sender_thread_, NULL);
    data_->signal_sender_launched_ = false;
  }

  // Restore old signal handler
  if (data_->signal_handler_installed_) {
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
}


#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal