trace.cc revision 0624a27b9a2951bfcf23321a714543e137836904
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "trace.h" 18 19#include <sys/uio.h> 20#include <unistd.h> 21 22#define ATRACE_TAG ATRACE_TAG_DALVIK 23#include "cutils/trace.h" 24 25#include "base/stl_util.h" 26#include "base/unix_file/fd_file.h" 27#include "class_linker.h" 28#include "common_throws.h" 29#include "debugger.h" 30#include "dex_file-inl.h" 31#include "instrumentation.h" 32#include "mirror/art_method-inl.h" 33#include "mirror/class-inl.h" 34#include "mirror/dex_cache.h" 35#include "mirror/object_array-inl.h" 36#include "mirror/object-inl.h" 37#include "os.h" 38#include "scoped_thread_state_change.h" 39#include "ScopedLocalRef.h" 40#include "thread.h" 41#include "thread_list.h" 42#include "entrypoints/quick/quick_entrypoints.h" 43 44namespace art { 45 46// File format: 47// header 48// record 0 49// record 1 50// ... 51// 52// Header format: 53// u4 magic ('SLOW') 54// u2 version 55// u2 offset to data 56// u8 start date/time in usec 57// u2 record size in bytes (version >= 2 only) 58// ... padding to 32 bytes 59// 60// Record format v1: 61// u1 thread ID 62// u4 method ID | method action 63// u4 time delta since start, in usec 64// 65// Record format v2: 66// u2 thread ID 67// u4 method ID | method action 68// u4 time delta since start, in usec 69// 70// Record format v3: 71// u2 thread ID 72// u4 method ID | method action 73// u4 time delta since start, in usec 74// u4 wall time since start, in usec (when clock == "dual" only) 75// 76// 32 bits of microseconds is 70 minutes. 77// 78// All values are stored in little-endian order. 79 80enum TraceAction { 81 kTraceMethodEnter = 0x00, // method entry 82 kTraceMethodExit = 0x01, // method exit 83 kTraceUnroll = 0x02, // method exited by exception unrolling 84 // 0x03 currently unused 85 kTraceMethodActionMask = 0x03, // two bits 86}; 87 88class BuildStackTraceVisitor : public StackVisitor { 89 public: 90 explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, NULL), 91 method_trace_(Trace::AllocStackTrace()) {} 92 93 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 94 mirror::ArtMethod* m = GetMethod(); 95 // Ignore runtime frames (in particular callee save). 96 if (!m->IsRuntimeMethod()) { 97 method_trace_->push_back(m); 98 } 99 return true; 100 } 101 102 // Returns a stack trace where the topmost frame corresponds with the first element of the vector. 103 std::vector<mirror::ArtMethod*>* GetStackTrace() const { 104 return method_trace_; 105 } 106 107 private: 108 std::vector<mirror::ArtMethod*>* const method_trace_; 109}; 110 111static const char kTraceTokenChar = '*'; 112static const uint16_t kTraceHeaderLength = 32; 113static const uint32_t kTraceMagicValue = 0x574f4c53; 114static const uint16_t kTraceVersionSingleClock = 2; 115static const uint16_t kTraceVersionDualClock = 3; 116static const uint16_t kTraceRecordSizeSingleClock = 10; // using v2 117static const uint16_t kTraceRecordSizeDualClock = 14; // using v3 with two timestamps 118 119TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource; 120 121Trace* volatile Trace::the_trace_ = NULL; 122pthread_t Trace::sampling_pthread_ = 0U; 123std::unique_ptr<std::vector<mirror::ArtMethod*>> Trace::temp_stack_trace_; 124 125static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) { 126 return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask); 127} 128 129static TraceAction DecodeTraceAction(uint32_t tmid) { 130 return static_cast<TraceAction>(tmid & kTraceMethodActionMask); 131} 132 133static uint32_t EncodeTraceMethodAndAction(mirror::ArtMethod* method, 134 TraceAction action) { 135 uint32_t tmid = PointerToLowMemUInt32(method) | action; 136 DCHECK_EQ(method, DecodeTraceMethodId(tmid)); 137 return tmid; 138} 139 140std::vector<mirror::ArtMethod*>* Trace::AllocStackTrace() { 141 if (temp_stack_trace_.get() != NULL) { 142 return temp_stack_trace_.release(); 143 } else { 144 return new std::vector<mirror::ArtMethod*>(); 145 } 146} 147 148void Trace::FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace) { 149 stack_trace->clear(); 150 temp_stack_trace_.reset(stack_trace); 151} 152 153void Trace::SetDefaultClockSource(TraceClockSource clock_source) { 154#if defined(__linux__) 155 default_clock_source_ = clock_source; 156#else 157 if (clock_source != TraceClockSource::kWall) { 158 LOG(WARNING) << "Ignoring tracing request to use CPU time."; 159 } 160#endif 161} 162 163static uint16_t GetTraceVersion(TraceClockSource clock_source) { 164 return (clock_source == TraceClockSource::kDual) ? kTraceVersionDualClock 165 : kTraceVersionSingleClock; 166} 167 168static uint16_t GetRecordSize(TraceClockSource clock_source) { 169 return (clock_source == TraceClockSource::kDual) ? kTraceRecordSizeDualClock 170 : kTraceRecordSizeSingleClock; 171} 172 173bool Trace::UseThreadCpuClock() { 174 return (clock_source_ == TraceClockSource::kThreadCpu) || 175 (clock_source_ == TraceClockSource::kDual); 176} 177 178bool Trace::UseWallClock() { 179 return (clock_source_ == TraceClockSource::kWall) || 180 (clock_source_ == TraceClockSource::kDual); 181} 182 183void Trace::MeasureClockOverhead() { 184 if (UseThreadCpuClock()) { 185 Thread::Current()->GetCpuMicroTime(); 186 } 187 if (UseWallClock()) { 188 MicroTime(); 189 } 190} 191 192// Compute an average time taken to measure clocks. 193uint32_t Trace::GetClockOverheadNanoSeconds() { 194 Thread* self = Thread::Current(); 195 uint64_t start = self->GetCpuMicroTime(); 196 197 for (int i = 4000; i > 0; i--) { 198 MeasureClockOverhead(); 199 MeasureClockOverhead(); 200 MeasureClockOverhead(); 201 MeasureClockOverhead(); 202 MeasureClockOverhead(); 203 MeasureClockOverhead(); 204 MeasureClockOverhead(); 205 MeasureClockOverhead(); 206 } 207 208 uint64_t elapsed_us = self->GetCpuMicroTime() - start; 209 return static_cast<uint32_t>(elapsed_us / 32); 210} 211 212// TODO: put this somewhere with the big-endian equivalent used by JDWP. 213static void Append2LE(uint8_t* buf, uint16_t val) { 214 *buf++ = static_cast<uint8_t>(val); 215 *buf++ = static_cast<uint8_t>(val >> 8); 216} 217 218// TODO: put this somewhere with the big-endian equivalent used by JDWP. 219static void Append4LE(uint8_t* buf, uint32_t val) { 220 *buf++ = static_cast<uint8_t>(val); 221 *buf++ = static_cast<uint8_t>(val >> 8); 222 *buf++ = static_cast<uint8_t>(val >> 16); 223 *buf++ = static_cast<uint8_t>(val >> 24); 224} 225 226// TODO: put this somewhere with the big-endian equivalent used by JDWP. 227static void Append8LE(uint8_t* buf, uint64_t val) { 228 *buf++ = static_cast<uint8_t>(val); 229 *buf++ = static_cast<uint8_t>(val >> 8); 230 *buf++ = static_cast<uint8_t>(val >> 16); 231 *buf++ = static_cast<uint8_t>(val >> 24); 232 *buf++ = static_cast<uint8_t>(val >> 32); 233 *buf++ = static_cast<uint8_t>(val >> 40); 234 *buf++ = static_cast<uint8_t>(val >> 48); 235 *buf++ = static_cast<uint8_t>(val >> 56); 236} 237 238static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 239 BuildStackTraceVisitor build_trace_visitor(thread); 240 build_trace_visitor.WalkStack(); 241 std::vector<mirror::ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace(); 242 Trace* the_trace = reinterpret_cast<Trace*>(arg); 243 the_trace->CompareAndUpdateStackTrace(thread, stack_trace); 244} 245 246static void ClearThreadStackTraceAndClockBase(Thread* thread ATTRIBUTE_UNUSED, 247 void* arg ATTRIBUTE_UNUSED) { 248 thread->SetTraceClockBase(0); 249 std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample(); 250 thread->SetStackTraceSample(NULL); 251 delete stack_trace; 252} 253 254void Trace::CompareAndUpdateStackTrace(Thread* thread, 255 std::vector<mirror::ArtMethod*>* stack_trace) { 256 CHECK_EQ(pthread_self(), sampling_pthread_); 257 std::vector<mirror::ArtMethod*>* old_stack_trace = thread->GetStackTraceSample(); 258 // Update the thread's stack trace sample. 259 thread->SetStackTraceSample(stack_trace); 260 // Read timer clocks to use for all events in this trace. 261 uint32_t thread_clock_diff = 0; 262 uint32_t wall_clock_diff = 0; 263 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 264 if (old_stack_trace == NULL) { 265 // If there's no previous stack trace sample for this thread, log an entry event for all 266 // methods in the trace. 267 for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); 268 rit != stack_trace->rend(); ++rit) { 269 LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered, 270 thread_clock_diff, wall_clock_diff); 271 } 272 } else { 273 // If there's a previous stack trace for this thread, diff the traces and emit entry and exit 274 // events accordingly. 275 std::vector<mirror::ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin(); 276 std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); 277 // Iterate bottom-up over both traces until there's a difference between them. 278 while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) { 279 old_rit++; 280 rit++; 281 } 282 // Iterate top-down over the old trace until the point where they differ, emitting exit events. 283 for (std::vector<mirror::ArtMethod*>::iterator old_it = old_stack_trace->begin(); 284 old_it != old_rit.base(); ++old_it) { 285 LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited, 286 thread_clock_diff, wall_clock_diff); 287 } 288 // Iterate bottom-up over the new trace from the point where they differ, emitting entry events. 289 for (; rit != stack_trace->rend(); ++rit) { 290 LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered, 291 thread_clock_diff, wall_clock_diff); 292 } 293 FreeStackTrace(old_stack_trace); 294 } 295} 296 297void* Trace::RunSamplingThread(void* arg) { 298 Runtime* runtime = Runtime::Current(); 299 intptr_t interval_us = reinterpret_cast<intptr_t>(arg); 300 CHECK_GE(interval_us, 0); 301 CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(), 302 !runtime->IsAotCompiler())); 303 304 while (true) { 305 usleep(interval_us); 306 ATRACE_BEGIN("Profile sampling"); 307 Thread* self = Thread::Current(); 308 Trace* the_trace; 309 { 310 MutexLock mu(self, *Locks::trace_lock_); 311 the_trace = the_trace_; 312 if (the_trace == NULL) { 313 break; 314 } 315 } 316 317 runtime->GetThreadList()->SuspendAll(__FUNCTION__); 318 { 319 MutexLock mu(self, *Locks::thread_list_lock_); 320 runtime->GetThreadList()->ForEach(GetSample, the_trace); 321 } 322 runtime->GetThreadList()->ResumeAll(); 323 ATRACE_END(); 324 } 325 326 runtime->DetachCurrentThread(); 327 return NULL; 328} 329 330void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags, 331 bool direct_to_ddms, bool sampling_enabled, int interval_us) { 332 Thread* self = Thread::Current(); 333 { 334 MutexLock mu(self, *Locks::trace_lock_); 335 if (the_trace_ != NULL) { 336 LOG(ERROR) << "Trace already in progress, ignoring this request"; 337 return; 338 } 339 } 340 341 // Check interval if sampling is enabled 342 if (sampling_enabled && interval_us <= 0) { 343 LOG(ERROR) << "Invalid sampling interval: " << interval_us; 344 ScopedObjectAccess soa(self); 345 ThrowRuntimeException("Invalid sampling interval: %d", interval_us); 346 return; 347 } 348 349 // Open trace file if not going directly to ddms. 350 std::unique_ptr<File> trace_file; 351 if (!direct_to_ddms) { 352 if (trace_fd < 0) { 353 trace_file.reset(OS::CreateEmptyFile(trace_filename)); 354 } else { 355 trace_file.reset(new File(trace_fd, "tracefile")); 356 trace_file->DisableAutoClose(); 357 } 358 if (trace_file.get() == NULL) { 359 PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'"; 360 ScopedObjectAccess soa(self); 361 ThrowRuntimeException("Unable to open trace file '%s'", trace_filename); 362 return; 363 } 364 } 365 366 Runtime* runtime = Runtime::Current(); 367 368 // Enable count of allocs if specified in the flags. 369 bool enable_stats = false; 370 371 runtime->GetThreadList()->SuspendAll(__FUNCTION__); 372 373 // Create Trace object. 374 { 375 MutexLock mu(self, *Locks::trace_lock_); 376 if (the_trace_ != NULL) { 377 LOG(ERROR) << "Trace already in progress, ignoring this request"; 378 } else { 379 enable_stats = (flags && kTraceCountAllocs) != 0; 380 the_trace_ = new Trace(trace_file.release(), buffer_size, flags, sampling_enabled); 381 if (sampling_enabled) { 382 CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread, 383 reinterpret_cast<void*>(interval_us)), 384 "Sampling profiler thread"); 385 } else { 386 runtime->GetInstrumentation()->AddListener(the_trace_, 387 instrumentation::Instrumentation::kMethodEntered | 388 instrumentation::Instrumentation::kMethodExited | 389 instrumentation::Instrumentation::kMethodUnwind); 390 runtime->GetInstrumentation()->EnableMethodTracing(); 391 } 392 } 393 } 394 395 runtime->GetThreadList()->ResumeAll(); 396 397 // Can't call this when holding the mutator lock. 398 if (enable_stats) { 399 runtime->SetStatsEnabled(true); 400 } 401} 402 403void Trace::Stop() { 404 bool stop_alloc_counting = false; 405 Runtime* const runtime = Runtime::Current(); 406 Trace* the_trace = nullptr; 407 pthread_t sampling_pthread = 0U; 408 { 409 MutexLock mu(Thread::Current(), *Locks::trace_lock_); 410 if (the_trace_ == NULL) { 411 LOG(ERROR) << "Trace stop requested, but no trace currently running"; 412 } else { 413 the_trace = the_trace_; 414 the_trace_ = NULL; 415 sampling_pthread = sampling_pthread_; 416 } 417 } 418 // Make sure that we join before we delete the trace since we don't want to have 419 // the sampling thread access a stale pointer. This finishes since the sampling thread exits when 420 // the_trace_ is null. 421 if (sampling_pthread != 0U) { 422 CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown"); 423 sampling_pthread_ = 0U; 424 } 425 runtime->GetThreadList()->SuspendAll(__FUNCTION__); 426 if (the_trace != nullptr) { 427 stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0; 428 the_trace->FinishTracing(); 429 430 if (the_trace->sampling_enabled_) { 431 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); 432 runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr); 433 } else { 434 runtime->GetInstrumentation()->DisableMethodTracing(); 435 runtime->GetInstrumentation()->RemoveListener( 436 the_trace, instrumentation::Instrumentation::kMethodEntered | 437 instrumentation::Instrumentation::kMethodExited | 438 instrumentation::Instrumentation::kMethodUnwind); 439 } 440 if (the_trace->trace_file_.get() != nullptr) { 441 // Do not try to erase, so flush and close explicitly. 442 if (the_trace->trace_file_->Flush() != 0) { 443 PLOG(ERROR) << "Could not flush trace file."; 444 } 445 if (the_trace->trace_file_->Close() != 0) { 446 PLOG(ERROR) << "Could not close trace file."; 447 } 448 } 449 delete the_trace; 450 } 451 runtime->GetThreadList()->ResumeAll(); 452 if (stop_alloc_counting) { 453 // Can be racy since SetStatsEnabled is not guarded by any locks. 454 runtime->SetStatsEnabled(false); 455 } 456} 457 458void Trace::Shutdown() { 459 if (GetMethodTracingMode() != kTracingInactive) { 460 Stop(); 461 } 462} 463 464TracingMode Trace::GetMethodTracingMode() { 465 MutexLock mu(Thread::Current(), *Locks::trace_lock_); 466 if (the_trace_ == NULL) { 467 return kTracingInactive; 468 } else if (the_trace_->sampling_enabled_) { 469 return kSampleProfilingActive; 470 } else { 471 return kMethodTracingActive; 472 } 473} 474 475Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled) 476 : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags), 477 sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_), 478 buffer_size_(buffer_size), start_time_(MicroTime()), 479 clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) { 480 // Set up the beginning of the trace. 481 uint16_t trace_version = GetTraceVersion(clock_source_); 482 memset(buf_.get(), 0, kTraceHeaderLength); 483 Append4LE(buf_.get(), kTraceMagicValue); 484 Append2LE(buf_.get() + 4, trace_version); 485 Append2LE(buf_.get() + 6, kTraceHeaderLength); 486 Append8LE(buf_.get() + 8, start_time_); 487 if (trace_version >= kTraceVersionDualClock) { 488 uint16_t record_size = GetRecordSize(clock_source_); 489 Append2LE(buf_.get() + 16, record_size); 490 } 491 492 // Update current offset. 493 cur_offset_.StoreRelaxed(kTraceHeaderLength); 494} 495 496static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source) 497 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 498 uint8_t* ptr = buf + kTraceHeaderLength; 499 uint8_t* end = buf + buf_size; 500 501 while (ptr < end) { 502 uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); 503 mirror::ArtMethod* method = DecodeTraceMethodId(tmid); 504 TraceAction action = DecodeTraceAction(tmid); 505 LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action); 506 ptr += GetRecordSize(clock_source); 507 } 508} 509 510void Trace::FinishTracing() { 511 // Compute elapsed time. 512 uint64_t elapsed = MicroTime() - start_time_; 513 514 size_t final_offset = cur_offset_.LoadRelaxed(); 515 516 std::set<mirror::ArtMethod*> visited_methods; 517 GetVisitedMethods(final_offset, &visited_methods); 518 519 std::ostringstream os; 520 521 os << StringPrintf("%cversion\n", kTraceTokenChar); 522 os << StringPrintf("%d\n", GetTraceVersion(clock_source_)); 523 os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false"); 524 if (UseThreadCpuClock()) { 525 if (UseWallClock()) { 526 os << StringPrintf("clock=dual\n"); 527 } else { 528 os << StringPrintf("clock=thread-cpu\n"); 529 } 530 } else { 531 os << StringPrintf("clock=wall\n"); 532 } 533 os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed); 534 size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_); 535 os << StringPrintf("num-method-calls=%zd\n", num_records); 536 os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_); 537 os << StringPrintf("vm=art\n"); 538 os << StringPrintf("pid=%d\n", getpid()); 539 if ((flags_ & kTraceCountAllocs) != 0) { 540 os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS)); 541 os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES)); 542 os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS)); 543 } 544 os << StringPrintf("%cthreads\n", kTraceTokenChar); 545 DumpThreadList(os); 546 os << StringPrintf("%cmethods\n", kTraceTokenChar); 547 DumpMethodList(os, visited_methods); 548 os << StringPrintf("%cend\n", kTraceTokenChar); 549 550 std::string header(os.str()); 551 if (trace_file_.get() == NULL) { 552 iovec iov[2]; 553 iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str())); 554 iov[0].iov_len = header.length(); 555 iov[1].iov_base = buf_.get(); 556 iov[1].iov_len = final_offset; 557 Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2); 558 const bool kDumpTraceInfo = false; 559 if (kDumpTraceInfo) { 560 LOG(INFO) << "Trace sent:\n" << header; 561 DumpBuf(buf_.get(), final_offset, clock_source_); 562 } 563 } else { 564 if (!trace_file_->WriteFully(header.c_str(), header.length()) || 565 !trace_file_->WriteFully(buf_.get(), final_offset)) { 566 std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno))); 567 PLOG(ERROR) << detail; 568 ThrowRuntimeException("%s", detail.c_str()); 569 } 570 } 571} 572 573void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object, 574 mirror::ArtMethod* method, uint32_t new_dex_pc) { 575 UNUSED(thread, this_object, method, new_dex_pc); 576 // We're not recorded to listen to this kind of event, so complain. 577 LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc; 578} 579 580void Trace::FieldRead(Thread* thread, mirror::Object* this_object, 581 mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field) 582 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 583 UNUSED(thread, this_object, method, dex_pc, field); 584 // We're not recorded to listen to this kind of event, so complain. 585 LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc; 586} 587 588void Trace::FieldWritten(Thread* thread, mirror::Object* this_object, 589 mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field, 590 const JValue& field_value) 591 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 592 UNUSED(thread, this_object, method, dex_pc, field, field_value); 593 // We're not recorded to listen to this kind of event, so complain. 594 LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc; 595} 596 597void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED, 598 mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) { 599 uint32_t thread_clock_diff = 0; 600 uint32_t wall_clock_diff = 0; 601 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 602 LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered, 603 thread_clock_diff, wall_clock_diff); 604} 605 606void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED, 607 mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED, 608 const JValue& return_value ATTRIBUTE_UNUSED) { 609 uint32_t thread_clock_diff = 0; 610 uint32_t wall_clock_diff = 0; 611 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 612 LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited, 613 thread_clock_diff, wall_clock_diff); 614} 615 616void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED, 617 mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) { 618 uint32_t thread_clock_diff = 0; 619 uint32_t wall_clock_diff = 0; 620 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 621 LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind, 622 thread_clock_diff, wall_clock_diff); 623} 624 625void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) 626 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 627 UNUSED(thread, exception_object); 628 LOG(ERROR) << "Unexpected exception caught event in tracing"; 629} 630 631void Trace::BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method, 632 int32_t /*dex_pc_offset*/) 633 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 634 LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method); 635} 636 637void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) { 638 if (UseThreadCpuClock()) { 639 uint64_t clock_base = thread->GetTraceClockBase(); 640 if (UNLIKELY(clock_base == 0)) { 641 // First event, record the base time in the map. 642 uint64_t time = thread->GetCpuMicroTime(); 643 thread->SetTraceClockBase(time); 644 } else { 645 *thread_clock_diff = thread->GetCpuMicroTime() - clock_base; 646 } 647 } 648 if (UseWallClock()) { 649 *wall_clock_diff = MicroTime() - start_time_; 650 } 651} 652 653void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method, 654 instrumentation::Instrumentation::InstrumentationEvent event, 655 uint32_t thread_clock_diff, uint32_t wall_clock_diff) { 656 // Advance cur_offset_ atomically. 657 int32_t new_offset; 658 int32_t old_offset; 659 do { 660 old_offset = cur_offset_.LoadRelaxed(); 661 new_offset = old_offset + GetRecordSize(clock_source_); 662 if (new_offset > buffer_size_) { 663 overflow_ = true; 664 return; 665 } 666 } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset)); 667 668 TraceAction action = kTraceMethodEnter; 669 switch (event) { 670 case instrumentation::Instrumentation::kMethodEntered: 671 action = kTraceMethodEnter; 672 break; 673 case instrumentation::Instrumentation::kMethodExited: 674 action = kTraceMethodExit; 675 break; 676 case instrumentation::Instrumentation::kMethodUnwind: 677 action = kTraceUnroll; 678 break; 679 default: 680 UNIMPLEMENTED(FATAL) << "Unexpected event: " << event; 681 } 682 683 uint32_t method_value = EncodeTraceMethodAndAction(method, action); 684 685 // Write data 686 uint8_t* ptr = buf_.get() + old_offset; 687 Append2LE(ptr, thread->GetTid()); 688 Append4LE(ptr + 2, method_value); 689 ptr += 6; 690 691 if (UseThreadCpuClock()) { 692 Append4LE(ptr, thread_clock_diff); 693 ptr += 4; 694 } 695 if (UseWallClock()) { 696 Append4LE(ptr, wall_clock_diff); 697 } 698} 699 700void Trace::GetVisitedMethods(size_t buf_size, 701 std::set<mirror::ArtMethod*>* visited_methods) { 702 uint8_t* ptr = buf_.get() + kTraceHeaderLength; 703 uint8_t* end = buf_.get() + buf_size; 704 705 while (ptr < end) { 706 uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); 707 mirror::ArtMethod* method = DecodeTraceMethodId(tmid); 708 visited_methods->insert(method); 709 ptr += GetRecordSize(clock_source_); 710 } 711} 712 713void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) { 714 for (const auto& method : visited_methods) { 715 os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method, 716 PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(), 717 method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile()); 718 } 719} 720 721static void DumpThread(Thread* t, void* arg) { 722 std::ostream& os = *reinterpret_cast<std::ostream*>(arg); 723 std::string name; 724 t->GetThreadName(name); 725 os << t->GetTid() << "\t" << name << "\n"; 726} 727 728void Trace::DumpThreadList(std::ostream& os) { 729 Thread* self = Thread::Current(); 730 for (auto it : exited_threads_) { 731 os << it.first << "\t" << it.second << "\n"; 732 } 733 Locks::thread_list_lock_->AssertNotHeld(self); 734 MutexLock mu(self, *Locks::thread_list_lock_); 735 Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os); 736} 737 738void Trace::StoreExitingThreadInfo(Thread* thread) { 739 MutexLock mu(thread, *Locks::trace_lock_); 740 if (the_trace_ != nullptr) { 741 std::string name; 742 thread->GetThreadName(name); 743 // The same thread/tid may be used multiple times. As SafeMap::Put does not allow to override 744 // a previous mapping, use SafeMap::Overwrite. 745 the_trace_->exited_threads_.Overwrite(thread->GetTid(), name); 746 } 747} 748 749} // namespace art 750