trace.cc revision cf7f19135f0e273f7b0136315633c2abfc715343
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trace.h"

#include <sys/uio.h>

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"

#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
#include "thread_list.h"
#if !defined(ART_USE_PORTABLE_COMPILER)
#include "entrypoints/quick/quick_entrypoints.h"
#endif

namespace art {

// File format:
//     header
//     record 0
//     record 1
//     ...
//
// Header format:
//     u4  magic ('SLOW')
//     u2  version
//     u2  offset to data
//     u8  start date/time in usec
//     u2  record size in bytes (version >= 2 only)
//     ... padding to 32 bytes
//
// Record format v1:
//     u1  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v2:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v3:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//     u4  wall time since start, in usec (when clock == "dual" only)
//
// 32 bits of microseconds is 70 minutes.
//
// All values are stored in little-endian order.
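//
// Worked example (illustrative values, not part of the spec above): under the
// v3 dual-clock format, a record for thread 5 entering (action 0x0) the method
// whose ArtMethod* is 0x12345678, at a thread-CPU delta of 100 usec and a wall
// delta of 150 usec, occupies 14 bytes:
//     05 00        -- thread ID (u2)
//     78 56 34 12  -- method ID | action (0x12345678 | 0x0)
//     64 00 00 00  -- thread-CPU time delta (100)
//     96 00 00 00  -- wall time delta (150)
// The two low-order bits of the method pointer are free to carry the action
// because ArtMethod objects are aligned; the DCHECK in
// EncodeTraceMethodAndAction below verifies that the encoding is lossless.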
enum TraceAction {
  kTraceMethodEnter = 0x00,       // method entry
  kTraceMethodExit = 0x01,        // method exit
  kTraceUnroll = 0x02,            // method exited by exception unrolling
  // 0x03 currently unused
  kTraceMethodActionMask = 0x03,  // two bits
};

class BuildStackTraceVisitor : public StackVisitor {
 public:
  explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, NULL),
      method_trace_(Trace::AllocStackTrace()) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* m = GetMethod();
    // Ignore runtime frames (in particular callee save).
    if (!m->IsRuntimeMethod()) {
      method_trace_->push_back(m);
    }
    return true;
  }

  // Returns a stack trace where the topmost frame corresponds with the first element of the vector.
  std::vector<mirror::ArtMethod*>* GetStackTrace() const {
    return method_trace_;
  }

 private:
  std::vector<mirror::ArtMethod*>* const method_trace_;
};

static const char kTraceTokenChar = '*';
static const uint16_t kTraceHeaderLength = 32;
static const uint32_t kTraceMagicValue = 0x574f4c53;
static const uint16_t kTraceVersionSingleClock = 2;
static const uint16_t kTraceVersionDualClock = 3;
static const uint16_t kTraceRecordSizeSingleClock = 10;  // using v2
static const uint16_t kTraceRecordSizeDualClock = 14;    // using v3 with two timestamps

TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource;

Trace* volatile Trace::the_trace_ = NULL;
pthread_t Trace::sampling_pthread_ = 0U;
std::unique_ptr<std::vector<mirror::ArtMethod*>> Trace::temp_stack_trace_;

static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) {
  return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask);
}

static TraceAction DecodeTraceAction(uint32_t tmid) {
  return static_cast<TraceAction>(tmid & kTraceMethodActionMask);
}

static uint32_t EncodeTraceMethodAndAction(mirror::ArtMethod* method,
                                           TraceAction action) {
  uint32_t tmid = PointerToLowMemUInt32(method) | action;
  DCHECK_EQ(method, DecodeTraceMethodId(tmid));
  return tmid;
}

std::vector<mirror::ArtMethod*>* Trace::AllocStackTrace() {
  if (temp_stack_trace_.get() != NULL) {
    return temp_stack_trace_.release();
  } else {
    return new std::vector<mirror::ArtMethod*>();
  }
}

void Trace::FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace) {
  stack_trace->clear();
  temp_stack_trace_.reset(stack_trace);
}

void Trace::SetDefaultClockSource(TraceClockSource clock_source) {
#if defined(HAVE_POSIX_CLOCKS)
  default_clock_source_ = clock_source;
#else
  if (clock_source != kTraceClockSourceWall) {
    LOG(WARNING) << "Ignoring tracing request to use CPU time.";
  }
#endif
}

static uint16_t GetTraceVersion(TraceClockSource clock_source) {
  return (clock_source == kTraceClockSourceDual) ? kTraceVersionDualClock
                                                 : kTraceVersionSingleClock;
}

static uint16_t GetRecordSize(TraceClockSource clock_source) {
  return (clock_source == kTraceClockSourceDual) ? kTraceRecordSizeDualClock
                                                 : kTraceRecordSizeSingleClock;
}

bool Trace::UseThreadCpuClock() {
  return (clock_source_ == kTraceClockSourceThreadCpu) ||
      (clock_source_ == kTraceClockSourceDual);
}

bool Trace::UseWallClock() {
  return (clock_source_ == kTraceClockSourceWall) ||
      (clock_source_ == kTraceClockSourceDual);
}

void Trace::MeasureClockOverhead() {
  if (UseThreadCpuClock()) {
    Thread::Current()->GetCpuMicroTime();
  }
  if (UseWallClock()) {
    MicroTime();
  }
}

// Compute an average time taken to measure clocks.
uint32_t Trace::GetClockOverheadNanoSeconds() {
  Thread* self = Thread::Current();
  uint64_t start = self->GetCpuMicroTime();

  for (int i = 4000; i > 0; i--) {
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
  }

  uint64_t elapsed_us = self->GetCpuMicroTime() - start;
  return static_cast<uint32_t>(elapsed_us / 32);
}
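// Illustrative arithmetic for GetClockOverheadNanoSeconds() above: the loop
// makes 4000 * 8 = 32000 calls to MeasureClockOverhead(), so the average cost
// per call in nanoseconds is elapsed_us * 1000 / 32000 = elapsed_us / 32.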
// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append2LE(uint8_t* buf, uint16_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append4LE(uint8_t* buf, uint32_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append8LE(uint8_t* buf, uint64_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
  *buf++ = static_cast<uint8_t>(val >> 32);
  *buf++ = static_cast<uint8_t>(val >> 40);
  *buf++ = static_cast<uint8_t>(val >> 48);
  *buf++ = static_cast<uint8_t>(val >> 56);
}
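// For illustration: Append4LE(buf, kTraceMagicValue) writes the bytes
// 0x53 0x4c 0x4f 0x57 ("SLOW" in ASCII), which is why the magic value at the
// start of the header is human-readable in a hex dump of a trace file.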
static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  BuildStackTraceVisitor build_trace_visitor(thread);
  build_trace_visitor.WalkStack();
  std::vector<mirror::ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
  Trace* the_trace = reinterpret_cast<Trace*>(arg);
  the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}

static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg) {
  thread->SetTraceClockBase(0);
  std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
  thread->SetStackTraceSample(NULL);
  delete stack_trace;
}

void Trace::CompareAndUpdateStackTrace(Thread* thread,
                                       std::vector<mirror::ArtMethod*>* stack_trace) {
  CHECK_EQ(pthread_self(), sampling_pthread_);
  std::vector<mirror::ArtMethod*>* old_stack_trace = thread->GetStackTraceSample();
  // Update the thread's stack trace sample.
  thread->SetStackTraceSample(stack_trace);
  // Read timer clocks to use for all events in this trace.
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  if (old_stack_trace == NULL) {
    // If there's no previous stack trace sample for this thread, log an entry event for all
    // methods in the trace.
    for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
         rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
  } else {
    // If there's a previous stack trace for this thread, diff the traces and emit entry and exit
    // events accordingly.
    std::vector<mirror::ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin();
    std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
    // Iterate bottom-up over both traces until there's a difference between them.
    while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) {
      old_rit++;
      rit++;
    }
    // Iterate top-down over the old trace until the point where they differ, emitting exit events.
    for (std::vector<mirror::ArtMethod*>::iterator old_it = old_stack_trace->begin();
         old_it != old_rit.base(); ++old_it) {
      LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited,
                          thread_clock_diff, wall_clock_diff);
    }
    // Iterate bottom-up over the new trace from the point where they differ, emitting entry
    // events.
    for (; rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
    FreeStackTrace(old_stack_trace);
  }
}
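// Worked example of the diff above (hypothetical method names): if the
// previous sample was [main, run, parse] (bottom-up) and the new sample is
// [main, run, emit], the common prefix is [main, run]. One kMethodExited
// event is therefore logged for parse (walking the old tail top-down) and one
// kMethodEntered event for emit (walking the new tail bottom-up), as if parse
// had returned and emit had been called between the two samples.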
void* Trace::RunSamplingThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  intptr_t interval_us = reinterpret_cast<intptr_t>(arg);
  CHECK_GE(interval_us, 0);
  CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsCompiler()));

  while (true) {
    usleep(interval_us);
    ATRACE_BEGIN("Profile sampling");
    Thread* self = Thread::Current();
    Trace* the_trace;
    {
      MutexLock mu(self, *Locks::trace_lock_);
      the_trace = the_trace_;
      if (the_trace == NULL) {
        break;
      }
    }

    runtime->GetThreadList()->SuspendAll();
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(GetSample, the_trace);
    }
    runtime->GetThreadList()->ResumeAll();
    ATRACE_END();
  }

  runtime->DetachCurrentThread();
  return NULL;
}

void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
                  bool direct_to_ddms, bool sampling_enabled, int interval_us) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != NULL) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
      return;
    }
  }

  // Check the interval if sampling is enabled.
  if (sampling_enabled && interval_us <= 0) {
    LOG(ERROR) << "Invalid sampling interval: " << interval_us;
    ScopedObjectAccess soa(self);
    ThrowRuntimeException("Invalid sampling interval: %d", interval_us);
    return;
  }

  // Open trace file if not going directly to ddms.
  std::unique_ptr<File> trace_file;
  if (!direct_to_ddms) {
    if (trace_fd < 0) {
      trace_file.reset(OS::CreateEmptyFile(trace_filename));
    } else {
      trace_file.reset(new File(trace_fd, "tracefile"));
      trace_file->DisableAutoClose();
    }
    if (trace_file.get() == NULL) {
      PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'";
      ScopedObjectAccess soa(self);
      ThrowRuntimeException("Unable to open trace file '%s'", trace_filename);
      return;
    }
  }

  Runtime* runtime = Runtime::Current();

  // Enable count of allocs if specified in the flags.
  bool enable_stats = false;

  runtime->GetThreadList()->SuspendAll();

  // Create Trace object.
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != NULL) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
    } else {
      enable_stats = (flags & kTraceCountAllocs) != 0;
      the_trace_ = new Trace(trace_file.release(), buffer_size, flags, sampling_enabled);
      if (sampling_enabled) {
        CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread,
                                            reinterpret_cast<void*>(interval_us)),
                           "Sampling profiler thread");
      } else {
        runtime->GetInstrumentation()->AddListener(the_trace_,
                                                   instrumentation::Instrumentation::kMethodEntered |
                                                   instrumentation::Instrumentation::kMethodExited |
                                                   instrumentation::Instrumentation::kMethodUnwind);
        runtime->GetInstrumentation()->EnableMethodTracing();
      }
    }
  }

  runtime->GetThreadList()->ResumeAll();

  // Can't call this when holding the mutator lock.
  if (enable_stats) {
    runtime->SetStatsEnabled(true);
  }
}

void Trace::Stop() {
  bool stop_alloc_counting = false;
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Trace* the_trace = NULL;
  pthread_t sampling_pthread = 0U;
  {
    MutexLock mu(Thread::Current(), *Locks::trace_lock_);
    if (the_trace_ == NULL) {
      LOG(ERROR) << "Trace stop requested, but no trace currently running";
    } else {
      the_trace = the_trace_;
      the_trace_ = NULL;
      sampling_pthread = sampling_pthread_;
    }
  }
  if (the_trace != NULL) {
    stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0;
    the_trace->FinishTracing();

    if (the_trace->sampling_enabled_) {
      MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, NULL);
    } else {
      runtime->GetInstrumentation()->DisableMethodTracing();
      runtime->GetInstrumentation()->RemoveListener(the_trace,
                                                    instrumentation::Instrumentation::kMethodEntered |
                                                    instrumentation::Instrumentation::kMethodExited |
                                                    instrumentation::Instrumentation::kMethodUnwind);
    }
    delete the_trace;
  }
  runtime->GetThreadList()->ResumeAll();

  if (stop_alloc_counting) {
    // Can be racy since SetStatsEnabled is not guarded by any locks.
    Runtime::Current()->SetStatsEnabled(false);
  }

  if (sampling_pthread != 0U) {
    CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
    sampling_pthread_ = 0U;
  }
}

void Trace::Shutdown() {
  if (GetMethodTracingMode() != kTracingInactive) {
    Stop();
  }
}

TracingMode Trace::GetMethodTracingMode() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  if (the_trace_ == NULL) {
    return kTracingInactive;
  } else if (the_trace_->sampling_enabled_) {
    return kSampleProfilingActive;
  } else {
    return kMethodTracingActive;
  }
}
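// Illustrative lifecycle sketch (hypothetical caller, not part of this file;
// the file name and parameter values below are example assumptions):
//
//   // Sample-based profiling into a file, polling every 10 ms:
//   Trace::Start("/data/local/tmp/test.trace", -1 /* trace_fd */,
//                8 * 1024 * 1024 /* buffer_size */, 0 /* flags */,
//                false /* direct_to_ddms */, true /* sampling_enabled */,
//                10000 /* interval_us */);
//   CHECK(Trace::GetMethodTracingMode() == kSampleProfilingActive);
//   // ... run the workload of interest ...
//   Trace::Stop();  // Writes header and buffered records, joins the sampling thread.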
Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled)
    : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
      sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_),
      buffer_size_(buffer_size), start_time_(MicroTime()),
      clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) {
  // Set up the beginning of the trace.
  uint16_t trace_version = GetTraceVersion(clock_source_);
  memset(buf_.get(), 0, kTraceHeaderLength);
  Append4LE(buf_.get(), kTraceMagicValue);
  Append2LE(buf_.get() + 4, trace_version);
  Append2LE(buf_.get() + 6, kTraceHeaderLength);
  Append8LE(buf_.get() + 8, start_time_);
  if (trace_version >= kTraceVersionDualClock) {
    uint16_t record_size = GetRecordSize(clock_source_);
    Append2LE(buf_.get() + 16, record_size);
  }

  // Update current offset.
  cur_offset_.StoreRelaxed(kTraceHeaderLength);
}

static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  uint8_t* ptr = buf + kTraceHeaderLength;
  uint8_t* end = buf + buf_size;

  while (ptr < end) {
    uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
    mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
    TraceAction action = DecodeTraceAction(tmid);
    LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action);
    ptr += GetRecordSize(clock_source);
  }
}

void Trace::FinishTracing() {
  // Compute elapsed time.
  uint64_t elapsed = MicroTime() - start_time_;

  size_t final_offset = cur_offset_.LoadRelaxed();

  std::set<mirror::ArtMethod*> visited_methods;
  GetVisitedMethods(final_offset, &visited_methods);

  std::ostringstream os;

  os << StringPrintf("%cversion\n", kTraceTokenChar);
  os << StringPrintf("%d\n", GetTraceVersion(clock_source_));
  os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false");
  if (UseThreadCpuClock()) {
    if (UseWallClock()) {
      os << StringPrintf("clock=dual\n");
    } else {
      os << StringPrintf("clock=thread-cpu\n");
    }
  } else {
    os << StringPrintf("clock=wall\n");
  }
  os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
  size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
  os << StringPrintf("num-method-calls=%zd\n", num_records);
  os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
  os << StringPrintf("vm=art\n");
  if ((flags_ & kTraceCountAllocs) != 0) {
    os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
    os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES));
    os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS));
  }
  os << StringPrintf("%cthreads\n", kTraceTokenChar);
  DumpThreadList(os);
  os << StringPrintf("%cmethods\n", kTraceTokenChar);
  DumpMethodList(os, visited_methods);
  os << StringPrintf("%cend\n", kTraceTokenChar);

  std::string header(os.str());
  if (trace_file_.get() == NULL) {
    iovec iov[2];
    iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str()));
    iov[0].iov_len = header.length();
    iov[1].iov_base = buf_.get();
    iov[1].iov_len = final_offset;
    Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2);
    const bool kDumpTraceInfo = false;
    if (kDumpTraceInfo) {
      LOG(INFO) << "Trace sent:\n" << header;
      DumpBuf(buf_.get(), final_offset, clock_source_);
    }
  } else {
    if (!trace_file_->WriteFully(header.c_str(), header.length()) ||
        !trace_file_->WriteFully(buf_.get(), final_offset)) {
      std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno)));
      PLOG(ERROR) << detail;
      ThrowRuntimeException("%s", detail.c_str());
    }
  }
}
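// Illustrative text section emitted by FinishTracing() above (all values,
// pointers, and names are made up):
//
//   *version
//   3
//   data-file-overflow=false
//   clock=dual
//   elapsed-time-usec=5000000
//   num-method-calls=1203
//   clock-call-overhead-nsec=300
//   vm=art
//   *threads
//   4321	main
//   *methods
//   0x71a2b300	java.lang.String	charAt	(I)C	String.java
//   *end
//
// When writing to a file, this text section is written first, followed by the
// binary buffer (the 32-byte header plus the fixed-size records described at
// the top of this file).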
void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
                       mirror::ArtMethod* method, uint32_t new_dex_pc) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}

void Trace::FieldRead(Thread* /*thread*/, mirror::Object* this_object,
                      mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}

void Trace::FieldWritten(Thread* /*thread*/, mirror::Object* this_object,
                         mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
                         const JValue& field_value)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}

void Trace::MethodEntered(Thread* thread, mirror::Object* this_object,
                          mirror::ArtMethod* method, uint32_t dex_pc) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodExited(Thread* thread, mirror::Object* this_object,
                         mirror::ArtMethod* method, uint32_t dex_pc,
                         const JValue& return_value) {
  UNUSED(return_value);
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object,
                         mirror::ArtMethod* method, uint32_t dex_pc) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                            mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                            mirror::Throwable* exception_object)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected exception caught event in tracing";
}

void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
  if (UseThreadCpuClock()) {
    uint64_t clock_base = thread->GetTraceClockBase();
    if (UNLIKELY(clock_base == 0)) {
      // First event, record the base time in the map.
      uint64_t time = thread->GetCpuMicroTime();
      thread->SetTraceClockBase(time);
    } else {
      *thread_clock_diff = thread->GetCpuMicroTime() - clock_base;
    }
  }
  if (UseWallClock()) {
    *wall_clock_diff = MicroTime() - start_time_;
  }
}
void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
                                instrumentation::Instrumentation::InstrumentationEvent event,
                                uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
  // Advance cur_offset_ atomically to reserve space for one record.
  int32_t new_offset;
  int32_t old_offset;
  do {
    old_offset = cur_offset_.LoadRelaxed();
    new_offset = old_offset + GetRecordSize(clock_source_);
    if (new_offset > buffer_size_) {
      // The buffer is full; note the overflow and drop this event.
      overflow_ = true;
      return;
    }
  } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset));

  TraceAction action = kTraceMethodEnter;
  switch (event) {
    case instrumentation::Instrumentation::kMethodEntered:
      action = kTraceMethodEnter;
      break;
    case instrumentation::Instrumentation::kMethodExited:
      action = kTraceMethodExit;
      break;
    case instrumentation::Instrumentation::kMethodUnwind:
      action = kTraceUnroll;
      break;
    default:
      UNIMPLEMENTED(FATAL) << "Unexpected event: " << event;
  }

  uint32_t method_value = EncodeTraceMethodAndAction(method, action);

  // Write data into the reserved slot.
  uint8_t* ptr = buf_.get() + old_offset;
  Append2LE(ptr, thread->GetTid());
  Append4LE(ptr + 2, method_value);
  ptr += 6;

  if (UseThreadCpuClock()) {
    Append4LE(ptr, thread_clock_diff);
    ptr += 4;
  }
  if (UseWallClock()) {
    Append4LE(ptr, wall_clock_diff);
  }
}

void Trace::GetVisitedMethods(size_t buf_size,
                              std::set<mirror::ArtMethod*>* visited_methods) {
  uint8_t* ptr = buf_.get() + kTraceHeaderLength;
  uint8_t* end = buf_.get() + buf_size;

  while (ptr < end) {
    uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
    mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
    visited_methods->insert(method);
    ptr += GetRecordSize(clock_source_);
  }
}

void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) {
  for (const auto& method : visited_methods) {
    os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
                       PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(),
                       method->GetName(), method->GetSignature().ToString().c_str(),
                       method->GetDeclaringClassSourceFile());
  }
}

static void DumpThread(Thread* t, void* arg) {
  std::ostream& os = *reinterpret_cast<std::ostream*>(arg);
  std::string name;
  t->GetThreadName(name);
  os << t->GetTid() << "\t" << name << "\n";
}

void Trace::DumpThreadList(std::ostream& os) {
  Thread* self = Thread::Current();
  for (auto it : exited_threads_) {
    os << it.first << "\t" << it.second << "\n";
  }
  Locks::thread_list_lock_->AssertNotHeld(self);
  MutexLock mu(self, *Locks::thread_list_lock_);
  Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
}

void Trace::StoreExitingThreadInfo(Thread* thread) {
  MutexLock mu(thread, *Locks::trace_lock_);
  if (the_trace_ != nullptr) {
    std::string name;
    thread->GetThreadName(name);
    the_trace_->exited_threads_.Put(thread->GetTid(), name);
  }
}

}  // namespace art