trace.cc revision c785344b87221f5e4e6473e5b762e4e61fe65dcf
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "trace.h" 18 19#include <sys/uio.h> 20#include <unistd.h> 21 22#define ATRACE_TAG ATRACE_TAG_DALVIK 23#include "cutils/trace.h" 24 25#include "base/stl_util.h" 26#include "base/unix_file/fd_file.h" 27#include "class_linker.h" 28#include "common_throws.h" 29#include "debugger.h" 30#include "dex_file-inl.h" 31#include "instrumentation.h" 32#include "mirror/art_method-inl.h" 33#include "mirror/class-inl.h" 34#include "mirror/dex_cache.h" 35#include "mirror/object_array-inl.h" 36#include "mirror/object-inl.h" 37#include "os.h" 38#include "scoped_thread_state_change.h" 39#include "ScopedLocalRef.h" 40#include "thread.h" 41#include "thread_list.h" 42#include "entrypoints/quick/quick_entrypoints.h" 43 44namespace art { 45 46// File format: 47// header 48// record 0 49// record 1 50// ... 51// 52// Header format: 53// u4 magic ('SLOW') 54// u2 version 55// u2 offset to data 56// u8 start date/time in usec 57// u2 record size in bytes (version >= 2 only) 58// ... padding to 32 bytes 59// 60// Record format v1: 61// u1 thread ID 62// u4 method ID | method action 63// u4 time delta since start, in usec 64// 65// Record format v2: 66// u2 thread ID 67// u4 method ID | method action 68// u4 time delta since start, in usec 69// 70// Record format v3: 71// u2 thread ID 72// u4 method ID | method action 73// u4 time delta since start, in usec 74// u4 wall time since start, in usec (when clock == "dual" only) 75// 76// 32 bits of microseconds is 70 minutes. 77// 78// All values are stored in little-endian order. 79 80enum TraceAction { 81 kTraceMethodEnter = 0x00, // method entry 82 kTraceMethodExit = 0x01, // method exit 83 kTraceUnroll = 0x02, // method exited by exception unrolling 84 // 0x03 currently unused 85 kTraceMethodActionMask = 0x03, // two bits 86}; 87 88class BuildStackTraceVisitor : public StackVisitor { 89 public: 90 explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, NULL), 91 method_trace_(Trace::AllocStackTrace()) {} 92 93 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 94 mirror::ArtMethod* m = GetMethod(); 95 // Ignore runtime frames (in particular callee save). 96 if (!m->IsRuntimeMethod()) { 97 method_trace_->push_back(m); 98 } 99 return true; 100 } 101 102 // Returns a stack trace where the topmost frame corresponds with the first element of the vector. 103 std::vector<mirror::ArtMethod*>* GetStackTrace() const { 104 return method_trace_; 105 } 106 107 private: 108 std::vector<mirror::ArtMethod*>* const method_trace_; 109}; 110 111static const char kTraceTokenChar = '*'; 112static const uint16_t kTraceHeaderLength = 32; 113static const uint32_t kTraceMagicValue = 0x574f4c53; 114static const uint16_t kTraceVersionSingleClock = 2; 115static const uint16_t kTraceVersionDualClock = 3; 116static const uint16_t kTraceRecordSizeSingleClock = 10; // using v2 117static const uint16_t kTraceRecordSizeDualClock = 14; // using v3 with two timestamps 118 119TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource; 120 121Trace* volatile Trace::the_trace_ = NULL; 122pthread_t Trace::sampling_pthread_ = 0U; 123std::unique_ptr<std::vector<mirror::ArtMethod*>> Trace::temp_stack_trace_; 124 125static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) { 126 return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask); 127} 128 129static TraceAction DecodeTraceAction(uint32_t tmid) { 130 return static_cast<TraceAction>(tmid & kTraceMethodActionMask); 131} 132 133static uint32_t EncodeTraceMethodAndAction(mirror::ArtMethod* method, 134 TraceAction action) { 135 uint32_t tmid = PointerToLowMemUInt32(method) | action; 136 DCHECK_EQ(method, DecodeTraceMethodId(tmid)); 137 return tmid; 138} 139 140std::vector<mirror::ArtMethod*>* Trace::AllocStackTrace() { 141 if (temp_stack_trace_.get() != NULL) { 142 return temp_stack_trace_.release(); 143 } else { 144 return new std::vector<mirror::ArtMethod*>(); 145 } 146} 147 148void Trace::FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace) { 149 stack_trace->clear(); 150 temp_stack_trace_.reset(stack_trace); 151} 152 153void Trace::SetDefaultClockSource(TraceClockSource clock_source) { 154#if defined(__linux__) 155 default_clock_source_ = clock_source; 156#else 157 if (clock_source != TraceClockSource::kWall) { 158 LOG(WARNING) << "Ignoring tracing request to use CPU time."; 159 } 160#endif 161} 162 163static uint16_t GetTraceVersion(TraceClockSource clock_source) { 164 return (clock_source == TraceClockSource::kDual) ? kTraceVersionDualClock 165 : kTraceVersionSingleClock; 166} 167 168static uint16_t GetRecordSize(TraceClockSource clock_source) { 169 return (clock_source == TraceClockSource::kDual) ? kTraceRecordSizeDualClock 170 : kTraceRecordSizeSingleClock; 171} 172 173bool Trace::UseThreadCpuClock() { 174 return (clock_source_ == TraceClockSource::kThreadCpu) || 175 (clock_source_ == TraceClockSource::kDual); 176} 177 178bool Trace::UseWallClock() { 179 return (clock_source_ == TraceClockSource::kWall) || 180 (clock_source_ == TraceClockSource::kDual); 181} 182 183void Trace::MeasureClockOverhead() { 184 if (UseThreadCpuClock()) { 185 Thread::Current()->GetCpuMicroTime(); 186 } 187 if (UseWallClock()) { 188 MicroTime(); 189 } 190} 191 192// Compute an average time taken to measure clocks. 193uint32_t Trace::GetClockOverheadNanoSeconds() { 194 Thread* self = Thread::Current(); 195 uint64_t start = self->GetCpuMicroTime(); 196 197 for (int i = 4000; i > 0; i--) { 198 MeasureClockOverhead(); 199 MeasureClockOverhead(); 200 MeasureClockOverhead(); 201 MeasureClockOverhead(); 202 MeasureClockOverhead(); 203 MeasureClockOverhead(); 204 MeasureClockOverhead(); 205 MeasureClockOverhead(); 206 } 207 208 uint64_t elapsed_us = self->GetCpuMicroTime() - start; 209 return static_cast<uint32_t>(elapsed_us / 32); 210} 211 212// TODO: put this somewhere with the big-endian equivalent used by JDWP. 213static void Append2LE(uint8_t* buf, uint16_t val) { 214 *buf++ = static_cast<uint8_t>(val); 215 *buf++ = static_cast<uint8_t>(val >> 8); 216} 217 218// TODO: put this somewhere with the big-endian equivalent used by JDWP. 219static void Append4LE(uint8_t* buf, uint32_t val) { 220 *buf++ = static_cast<uint8_t>(val); 221 *buf++ = static_cast<uint8_t>(val >> 8); 222 *buf++ = static_cast<uint8_t>(val >> 16); 223 *buf++ = static_cast<uint8_t>(val >> 24); 224} 225 226// TODO: put this somewhere with the big-endian equivalent used by JDWP. 227static void Append8LE(uint8_t* buf, uint64_t val) { 228 *buf++ = static_cast<uint8_t>(val); 229 *buf++ = static_cast<uint8_t>(val >> 8); 230 *buf++ = static_cast<uint8_t>(val >> 16); 231 *buf++ = static_cast<uint8_t>(val >> 24); 232 *buf++ = static_cast<uint8_t>(val >> 32); 233 *buf++ = static_cast<uint8_t>(val >> 40); 234 *buf++ = static_cast<uint8_t>(val >> 48); 235 *buf++ = static_cast<uint8_t>(val >> 56); 236} 237 238static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 239 BuildStackTraceVisitor build_trace_visitor(thread); 240 build_trace_visitor.WalkStack(); 241 std::vector<mirror::ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace(); 242 Trace* the_trace = reinterpret_cast<Trace*>(arg); 243 the_trace->CompareAndUpdateStackTrace(thread, stack_trace); 244} 245 246static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg ATTRIBUTE_UNUSED) { 247 thread->SetTraceClockBase(0); 248 std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample(); 249 thread->SetStackTraceSample(NULL); 250 delete stack_trace; 251} 252 253void Trace::CompareAndUpdateStackTrace(Thread* thread, 254 std::vector<mirror::ArtMethod*>* stack_trace) { 255 CHECK_EQ(pthread_self(), sampling_pthread_); 256 std::vector<mirror::ArtMethod*>* old_stack_trace = thread->GetStackTraceSample(); 257 // Update the thread's stack trace sample. 258 thread->SetStackTraceSample(stack_trace); 259 // Read timer clocks to use for all events in this trace. 260 uint32_t thread_clock_diff = 0; 261 uint32_t wall_clock_diff = 0; 262 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 263 if (old_stack_trace == NULL) { 264 // If there's no previous stack trace sample for this thread, log an entry event for all 265 // methods in the trace. 266 for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); 267 rit != stack_trace->rend(); ++rit) { 268 LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered, 269 thread_clock_diff, wall_clock_diff); 270 } 271 } else { 272 // If there's a previous stack trace for this thread, diff the traces and emit entry and exit 273 // events accordingly. 274 std::vector<mirror::ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin(); 275 std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin(); 276 // Iterate bottom-up over both traces until there's a difference between them. 277 while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) { 278 old_rit++; 279 rit++; 280 } 281 // Iterate top-down over the old trace until the point where they differ, emitting exit events. 282 for (std::vector<mirror::ArtMethod*>::iterator old_it = old_stack_trace->begin(); 283 old_it != old_rit.base(); ++old_it) { 284 LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited, 285 thread_clock_diff, wall_clock_diff); 286 } 287 // Iterate bottom-up over the new trace from the point where they differ, emitting entry events. 288 for (; rit != stack_trace->rend(); ++rit) { 289 LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered, 290 thread_clock_diff, wall_clock_diff); 291 } 292 FreeStackTrace(old_stack_trace); 293 } 294} 295 296void* Trace::RunSamplingThread(void* arg) { 297 Runtime* runtime = Runtime::Current(); 298 intptr_t interval_us = reinterpret_cast<intptr_t>(arg); 299 CHECK_GE(interval_us, 0); 300 CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(), 301 !runtime->IsAotCompiler())); 302 303 while (true) { 304 usleep(interval_us); 305 ATRACE_BEGIN("Profile sampling"); 306 Thread* self = Thread::Current(); 307 Trace* the_trace; 308 { 309 MutexLock mu(self, *Locks::trace_lock_); 310 the_trace = the_trace_; 311 if (the_trace == NULL) { 312 break; 313 } 314 } 315 316 runtime->GetThreadList()->SuspendAll(__FUNCTION__); 317 { 318 MutexLock mu(self, *Locks::thread_list_lock_); 319 runtime->GetThreadList()->ForEach(GetSample, the_trace); 320 } 321 runtime->GetThreadList()->ResumeAll(); 322 ATRACE_END(); 323 } 324 325 runtime->DetachCurrentThread(); 326 return NULL; 327} 328 329void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags, 330 TraceOutputMode output_mode, TraceMode trace_mode, int interval_us) { 331 Thread* self = Thread::Current(); 332 { 333 MutexLock mu(self, *Locks::trace_lock_); 334 if (the_trace_ != NULL) { 335 LOG(ERROR) << "Trace already in progress, ignoring this request"; 336 return; 337 } 338 } 339 340 // Check interval if sampling is enabled 341 if (trace_mode == TraceMode::kSampling && interval_us <= 0) { 342 LOG(ERROR) << "Invalid sampling interval: " << interval_us; 343 ScopedObjectAccess soa(self); 344 ThrowRuntimeException("Invalid sampling interval: %d", interval_us); 345 return; 346 } 347 348 // Open trace file if not going directly to ddms. 349 std::unique_ptr<File> trace_file; 350 if (output_mode != TraceOutputMode::kDDMS) { 351 if (trace_fd < 0) { 352 trace_file.reset(OS::CreateEmptyFile(trace_filename)); 353 } else { 354 trace_file.reset(new File(trace_fd, "tracefile")); 355 trace_file->DisableAutoClose(); 356 } 357 if (trace_file.get() == NULL) { 358 PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'"; 359 ScopedObjectAccess soa(self); 360 ThrowRuntimeException("Unable to open trace file '%s'", trace_filename); 361 return; 362 } 363 } 364 365 Runtime* runtime = Runtime::Current(); 366 367 // Enable count of allocs if specified in the flags. 368 bool enable_stats = false; 369 370 runtime->GetThreadList()->SuspendAll(__FUNCTION__); 371 372 // Create Trace object. 373 { 374 MutexLock mu(self, *Locks::trace_lock_); 375 if (the_trace_ != NULL) { 376 LOG(ERROR) << "Trace already in progress, ignoring this request"; 377 } else { 378 enable_stats = (flags && kTraceCountAllocs) != 0; 379 the_trace_ = new Trace(trace_file.release(), buffer_size, flags, trace_mode); 380 if (trace_mode == TraceMode::kSampling) { 381 CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread, 382 reinterpret_cast<void*>(interval_us)), 383 "Sampling profiler thread"); 384 } else { 385 runtime->GetInstrumentation()->AddListener(the_trace_, 386 instrumentation::Instrumentation::kMethodEntered | 387 instrumentation::Instrumentation::kMethodExited | 388 instrumentation::Instrumentation::kMethodUnwind); 389 runtime->GetInstrumentation()->EnableMethodTracing(); 390 } 391 } 392 } 393 394 runtime->GetThreadList()->ResumeAll(); 395 396 // Can't call this when holding the mutator lock. 397 if (enable_stats) { 398 runtime->SetStatsEnabled(true); 399 } 400} 401 402void Trace::Stop() { 403 bool stop_alloc_counting = false; 404 Runtime* const runtime = Runtime::Current(); 405 Trace* the_trace = nullptr; 406 pthread_t sampling_pthread = 0U; 407 { 408 MutexLock mu(Thread::Current(), *Locks::trace_lock_); 409 if (the_trace_ == NULL) { 410 LOG(ERROR) << "Trace stop requested, but no trace currently running"; 411 } else { 412 the_trace = the_trace_; 413 the_trace_ = NULL; 414 sampling_pthread = sampling_pthread_; 415 } 416 } 417 // Make sure that we join before we delete the trace since we don't want to have 418 // the sampling thread access a stale pointer. This finishes since the sampling thread exits when 419 // the_trace_ is null. 420 if (sampling_pthread != 0U) { 421 CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown"); 422 sampling_pthread_ = 0U; 423 } 424 runtime->GetThreadList()->SuspendAll(__FUNCTION__); 425 if (the_trace != nullptr) { 426 stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0; 427 the_trace->FinishTracing(); 428 429 if (the_trace->trace_mode_ == TraceMode::kSampling) { 430 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); 431 runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr); 432 } else { 433 runtime->GetInstrumentation()->DisableMethodTracing(); 434 runtime->GetInstrumentation()->RemoveListener( 435 the_trace, instrumentation::Instrumentation::kMethodEntered | 436 instrumentation::Instrumentation::kMethodExited | 437 instrumentation::Instrumentation::kMethodUnwind); 438 } 439 if (the_trace->trace_file_.get() != nullptr) { 440 // Do not try to erase, so flush and close explicitly. 441 if (the_trace->trace_file_->Flush() != 0) { 442 PLOG(ERROR) << "Could not flush trace file."; 443 } 444 if (the_trace->trace_file_->Close() != 0) { 445 PLOG(ERROR) << "Could not close trace file."; 446 } 447 } 448 delete the_trace; 449 } 450 runtime->GetThreadList()->ResumeAll(); 451 if (stop_alloc_counting) { 452 // Can be racy since SetStatsEnabled is not guarded by any locks. 453 runtime->SetStatsEnabled(false); 454 } 455} 456 457void Trace::Shutdown() { 458 if (GetMethodTracingMode() != kTracingInactive) { 459 Stop(); 460 } 461} 462 463TracingMode Trace::GetMethodTracingMode() { 464 MutexLock mu(Thread::Current(), *Locks::trace_lock_); 465 if (the_trace_ == NULL) { 466 return kTracingInactive; 467 } else { 468 switch (the_trace_->trace_mode_) { 469 case TraceMode::kSampling: 470 return kSampleProfilingActive; 471 case TraceMode::kMethodTracing: 472 return kMethodTracingActive; 473 } 474 LOG(FATAL) << "Unreachable"; 475 UNREACHABLE(); 476 } 477} 478 479Trace::Trace(File* trace_file, int buffer_size, int flags, TraceMode trace_mode) 480 : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags), 481 trace_mode_(trace_mode), clock_source_(default_clock_source_), 482 buffer_size_(buffer_size), start_time_(MicroTime()), 483 clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) { 484 // Set up the beginning of the trace. 485 uint16_t trace_version = GetTraceVersion(clock_source_); 486 memset(buf_.get(), 0, kTraceHeaderLength); 487 Append4LE(buf_.get(), kTraceMagicValue); 488 Append2LE(buf_.get() + 4, trace_version); 489 Append2LE(buf_.get() + 6, kTraceHeaderLength); 490 Append8LE(buf_.get() + 8, start_time_); 491 if (trace_version >= kTraceVersionDualClock) { 492 uint16_t record_size = GetRecordSize(clock_source_); 493 Append2LE(buf_.get() + 16, record_size); 494 } 495 496 // Update current offset. 497 cur_offset_.StoreRelaxed(kTraceHeaderLength); 498} 499 500static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source) 501 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 502 uint8_t* ptr = buf + kTraceHeaderLength; 503 uint8_t* end = buf + buf_size; 504 505 while (ptr < end) { 506 uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); 507 mirror::ArtMethod* method = DecodeTraceMethodId(tmid); 508 TraceAction action = DecodeTraceAction(tmid); 509 LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action); 510 ptr += GetRecordSize(clock_source); 511 } 512} 513 514void Trace::FinishTracing() { 515 // Compute elapsed time. 516 uint64_t elapsed = MicroTime() - start_time_; 517 518 size_t final_offset = cur_offset_.LoadRelaxed(); 519 520 std::set<mirror::ArtMethod*> visited_methods; 521 GetVisitedMethods(final_offset, &visited_methods); 522 523 std::ostringstream os; 524 525 os << StringPrintf("%cversion\n", kTraceTokenChar); 526 os << StringPrintf("%d\n", GetTraceVersion(clock_source_)); 527 os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false"); 528 if (UseThreadCpuClock()) { 529 if (UseWallClock()) { 530 os << StringPrintf("clock=dual\n"); 531 } else { 532 os << StringPrintf("clock=thread-cpu\n"); 533 } 534 } else { 535 os << StringPrintf("clock=wall\n"); 536 } 537 os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed); 538 size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_); 539 os << StringPrintf("num-method-calls=%zd\n", num_records); 540 os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_); 541 os << StringPrintf("vm=art\n"); 542 os << StringPrintf("pid=%d\n", getpid()); 543 if ((flags_ & kTraceCountAllocs) != 0) { 544 os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS)); 545 os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES)); 546 os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS)); 547 } 548 os << StringPrintf("%cthreads\n", kTraceTokenChar); 549 DumpThreadList(os); 550 os << StringPrintf("%cmethods\n", kTraceTokenChar); 551 DumpMethodList(os, visited_methods); 552 os << StringPrintf("%cend\n", kTraceTokenChar); 553 554 std::string header(os.str()); 555 if (trace_file_.get() == NULL) { 556 iovec iov[2]; 557 iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str())); 558 iov[0].iov_len = header.length(); 559 iov[1].iov_base = buf_.get(); 560 iov[1].iov_len = final_offset; 561 Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2); 562 const bool kDumpTraceInfo = false; 563 if (kDumpTraceInfo) { 564 LOG(INFO) << "Trace sent:\n" << header; 565 DumpBuf(buf_.get(), final_offset, clock_source_); 566 } 567 } else { 568 if (!trace_file_->WriteFully(header.c_str(), header.length()) || 569 !trace_file_->WriteFully(buf_.get(), final_offset)) { 570 std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno))); 571 PLOG(ERROR) << detail; 572 ThrowRuntimeException("%s", detail.c_str()); 573 } 574 } 575} 576 577void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object, 578 mirror::ArtMethod* method, uint32_t new_dex_pc) { 579 UNUSED(thread, this_object, method, new_dex_pc); 580 // We're not recorded to listen to this kind of event, so complain. 581 LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc; 582} 583 584void Trace::FieldRead(Thread* thread, mirror::Object* this_object, 585 mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field) 586 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 587 UNUSED(thread, this_object, method, dex_pc, field); 588 // We're not recorded to listen to this kind of event, so complain. 589 LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc; 590} 591 592void Trace::FieldWritten(Thread* thread, mirror::Object* this_object, 593 mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field, 594 const JValue& field_value) 595 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 596 UNUSED(thread, this_object, method, dex_pc, field, field_value); 597 // We're not recorded to listen to this kind of event, so complain. 598 LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc; 599} 600 601void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED, 602 mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) { 603 uint32_t thread_clock_diff = 0; 604 uint32_t wall_clock_diff = 0; 605 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 606 LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered, 607 thread_clock_diff, wall_clock_diff); 608} 609 610void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED, 611 mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED, 612 const JValue& return_value ATTRIBUTE_UNUSED) { 613 uint32_t thread_clock_diff = 0; 614 uint32_t wall_clock_diff = 0; 615 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 616 LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited, 617 thread_clock_diff, wall_clock_diff); 618} 619 620void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED, 621 mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) { 622 uint32_t thread_clock_diff = 0; 623 uint32_t wall_clock_diff = 0; 624 ReadClocks(thread, &thread_clock_diff, &wall_clock_diff); 625 LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind, 626 thread_clock_diff, wall_clock_diff); 627} 628 629void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) 630 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 631 UNUSED(thread, exception_object); 632 LOG(ERROR) << "Unexpected exception caught event in tracing"; 633} 634 635void Trace::BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method, 636 int32_t /*dex_pc_offset*/) 637 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 638 LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method); 639} 640 641void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) { 642 if (UseThreadCpuClock()) { 643 uint64_t clock_base = thread->GetTraceClockBase(); 644 if (UNLIKELY(clock_base == 0)) { 645 // First event, record the base time in the map. 646 uint64_t time = thread->GetCpuMicroTime(); 647 thread->SetTraceClockBase(time); 648 } else { 649 *thread_clock_diff = thread->GetCpuMicroTime() - clock_base; 650 } 651 } 652 if (UseWallClock()) { 653 *wall_clock_diff = MicroTime() - start_time_; 654 } 655} 656 657void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method, 658 instrumentation::Instrumentation::InstrumentationEvent event, 659 uint32_t thread_clock_diff, uint32_t wall_clock_diff) { 660 // Advance cur_offset_ atomically. 661 int32_t new_offset; 662 int32_t old_offset; 663 do { 664 old_offset = cur_offset_.LoadRelaxed(); 665 new_offset = old_offset + GetRecordSize(clock_source_); 666 if (new_offset > buffer_size_) { 667 overflow_ = true; 668 return; 669 } 670 } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset)); 671 672 TraceAction action = kTraceMethodEnter; 673 switch (event) { 674 case instrumentation::Instrumentation::kMethodEntered: 675 action = kTraceMethodEnter; 676 break; 677 case instrumentation::Instrumentation::kMethodExited: 678 action = kTraceMethodExit; 679 break; 680 case instrumentation::Instrumentation::kMethodUnwind: 681 action = kTraceUnroll; 682 break; 683 default: 684 UNIMPLEMENTED(FATAL) << "Unexpected event: " << event; 685 } 686 687 uint32_t method_value = EncodeTraceMethodAndAction(method, action); 688 689 // Write data 690 uint8_t* ptr = buf_.get() + old_offset; 691 Append2LE(ptr, thread->GetTid()); 692 Append4LE(ptr + 2, method_value); 693 ptr += 6; 694 695 if (UseThreadCpuClock()) { 696 Append4LE(ptr, thread_clock_diff); 697 ptr += 4; 698 } 699 if (UseWallClock()) { 700 Append4LE(ptr, wall_clock_diff); 701 } 702} 703 704void Trace::GetVisitedMethods(size_t buf_size, 705 std::set<mirror::ArtMethod*>* visited_methods) { 706 uint8_t* ptr = buf_.get() + kTraceHeaderLength; 707 uint8_t* end = buf_.get() + buf_size; 708 709 while (ptr < end) { 710 uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24); 711 mirror::ArtMethod* method = DecodeTraceMethodId(tmid); 712 visited_methods->insert(method); 713 ptr += GetRecordSize(clock_source_); 714 } 715} 716 717void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) { 718 for (const auto& method : visited_methods) { 719 os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method, 720 PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(), 721 method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile()); 722 } 723} 724 725static void DumpThread(Thread* t, void* arg) { 726 std::ostream& os = *reinterpret_cast<std::ostream*>(arg); 727 std::string name; 728 t->GetThreadName(name); 729 os << t->GetTid() << "\t" << name << "\n"; 730} 731 732void Trace::DumpThreadList(std::ostream& os) { 733 Thread* self = Thread::Current(); 734 for (auto it : exited_threads_) { 735 os << it.first << "\t" << it.second << "\n"; 736 } 737 Locks::thread_list_lock_->AssertNotHeld(self); 738 MutexLock mu(self, *Locks::thread_list_lock_); 739 Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os); 740} 741 742void Trace::StoreExitingThreadInfo(Thread* thread) { 743 MutexLock mu(thread, *Locks::trace_lock_); 744 if (the_trace_ != nullptr) { 745 std::string name; 746 thread->GetThreadName(name); 747 // The same thread/tid may be used multiple times. As SafeMap::Put does not allow to override 748 // a previous mapping, use SafeMap::Overwrite. 749 the_trace_->exited_threads_.Overwrite(thread->GetTid(), name); 750 } 751} 752 753} // namespace art 754