trace.cc revision 4303ba97313458491e038d78efa041d41cf7bb43
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trace.h"

#include <sys/uio.h>

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"

#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
#include "thread_list.h"
#if !defined(ART_USE_PORTABLE_COMPILER)
#include "entrypoints/quick/quick_entrypoints.h"
#endif

namespace art {

// File format:
//     header
//     record 0
//     record 1
//     ...
//
// Header format:
//     u4  magic ('SLOW')
//     u2  version
//     u2  offset to data
//     u8  start date/time in usec
//     u2  record size in bytes (version >= 2 only)
//     ... padding to 32 bytes
//
// Record format v1:
//     u1  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v2:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v3:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//     u4  wall time since start, in usec (when clock == "dual" only)
//
// 32 bits of microseconds is 70 minutes.
//
// All values are stored in little-endian order.

enum TraceAction {
  kTraceMethodEnter = 0x00,       // method entry
  kTraceMethodExit = 0x01,        // method exit
  kTraceUnroll = 0x02,            // method exited by exception unrolling
  // 0x03 currently unused
  kTraceMethodActionMask = 0x03,  // two bits
};

class BuildStackTraceVisitor : public StackVisitor {
 public:
  explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, NULL),
      method_trace_(Trace::AllocStackTrace()) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* m = GetMethod();
    // Ignore runtime frames (in particular callee save).
    if (!m->IsRuntimeMethod()) {
      method_trace_->push_back(m);
    }
    return true;
  }

  // Returns a stack trace where the topmost frame corresponds with the first element of the vector.
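  // (WalkStack visits frames innermost-first and VisitFrame appends each one, so the vector runs
  // from the currently executing method down to the outermost caller.)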
  std::vector<mirror::ArtMethod*>* GetStackTrace() const {
    return method_trace_;
  }

 private:
  std::vector<mirror::ArtMethod*>* const method_trace_;
};

static const char kTraceTokenChar = '*';
static const uint16_t kTraceHeaderLength = 32;
static const uint32_t kTraceMagicValue = 0x574f4c53;
static const uint16_t kTraceVersionSingleClock = 2;
static const uint16_t kTraceVersionDualClock = 3;
static const uint16_t kTraceRecordSizeSingleClock = 10;  // using v2
static const uint16_t kTraceRecordSizeDualClock = 14;    // using v3 with two timestamps

TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource;

Trace* volatile Trace::the_trace_ = NULL;
pthread_t Trace::sampling_pthread_ = 0U;
std::unique_ptr<std::vector<mirror::ArtMethod*>> Trace::temp_stack_trace_;

static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) {
  return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask);
}

static TraceAction DecodeTraceAction(uint32_t tmid) {
  return static_cast<TraceAction>(tmid & kTraceMethodActionMask);
}

static uint32_t EncodeTraceMethodAndAction(mirror::ArtMethod* method,
                                           TraceAction action) {
  uint32_t tmid = PointerToLowMemUInt32(method) | action;
  DCHECK_EQ(method, DecodeTraceMethodId(tmid));
  return tmid;
}

std::vector<mirror::ArtMethod*>* Trace::AllocStackTrace() {
  if (temp_stack_trace_.get() != NULL) {
    return temp_stack_trace_.release();
  } else {
    return new std::vector<mirror::ArtMethod*>();
  }
}

void Trace::FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace) {
  stack_trace->clear();
  temp_stack_trace_.reset(stack_trace);
}

void Trace::SetDefaultClockSource(TraceClockSource clock_source) {
#if defined(HAVE_POSIX_CLOCKS)
  default_clock_source_ = clock_source;
#else
  if (clock_source != kTraceClockSourceWall) {
    LOG(WARNING) << "Ignoring tracing request to use CPU time.";
  }
#endif
}

static uint16_t GetTraceVersion(TraceClockSource clock_source) {
  return (clock_source == kTraceClockSourceDual) ? kTraceVersionDualClock
                                                 : kTraceVersionSingleClock;
}

static uint16_t GetRecordSize(TraceClockSource clock_source) {
  return (clock_source == kTraceClockSourceDual) ? kTraceRecordSizeDualClock
                                                 : kTraceRecordSizeSingleClock;
}

bool Trace::UseThreadCpuClock() {
  return (clock_source_ == kTraceClockSourceThreadCpu) ||
      (clock_source_ == kTraceClockSourceDual);
}

bool Trace::UseWallClock() {
  return (clock_source_ == kTraceClockSourceWall) ||
      (clock_source_ == kTraceClockSourceDual);
}

void Trace::MeasureClockOverhead() {
  if (UseThreadCpuClock()) {
    Thread::Current()->GetCpuMicroTime();
  }
  if (UseWallClock()) {
    MicroTime();
  }
}

// Compute an average time taken to measure clocks.
uint32_t Trace::GetClockOverheadNanoSeconds() {
  Thread* self = Thread::Current();
  uint64_t start = self->GetCpuMicroTime();

  for (int i = 4000; i > 0; i--) {
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
  }

  uint64_t elapsed_us = self->GetCpuMicroTime() - start;
  return static_cast<uint32_t>(elapsed_us / 32);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
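// The Append*LE helpers below emit values byte-by-byte in little-endian order, matching the file
// format described at the top of this file regardless of host endianness.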
static void Append2LE(uint8_t* buf, uint16_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append4LE(uint8_t* buf, uint32_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append8LE(uint8_t* buf, uint64_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
  *buf++ = static_cast<uint8_t>(val >> 32);
  *buf++ = static_cast<uint8_t>(val >> 40);
  *buf++ = static_cast<uint8_t>(val >> 48);
  *buf++ = static_cast<uint8_t>(val >> 56);
}

static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  BuildStackTraceVisitor build_trace_visitor(thread);
  build_trace_visitor.WalkStack();
  std::vector<mirror::ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
  Trace* the_trace = reinterpret_cast<Trace*>(arg);
  the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}

static void ClearThreadStackTraceAndClockBase(Thread* thread ATTRIBUTE_UNUSED,
                                              void* arg ATTRIBUTE_UNUSED) {
  thread->SetTraceClockBase(0);
  std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
  thread->SetStackTraceSample(NULL);
  delete stack_trace;
}

void Trace::CompareAndUpdateStackTrace(Thread* thread,
                                       std::vector<mirror::ArtMethod*>* stack_trace) {
  CHECK_EQ(pthread_self(), sampling_pthread_);
  std::vector<mirror::ArtMethod*>* old_stack_trace = thread->GetStackTraceSample();
  // Update the thread's stack trace sample.
  thread->SetStackTraceSample(stack_trace);
  // Read timer clocks to use for all events in this trace.
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  if (old_stack_trace == NULL) {
    // If there's no previous stack trace sample for this thread, log an entry event for all
    // methods in the trace.
    for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
         rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
  } else {
    // If there's a previous stack trace for this thread, diff the traces and emit entry and exit
    // events accordingly.
    std::vector<mirror::ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin();
    std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
    // Iterate bottom-up over both traces until there's a difference between them.
    while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) {
      old_rit++;
      rit++;
    }
    // Iterate top-down over the old trace until the point where they differ, emitting exit events.
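    // old_rit.base() points one past the deepest differing frame, so this loop covers every old
    // frame that is no longer on the stack, innermost first.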
    for (std::vector<mirror::ArtMethod*>::iterator old_it = old_stack_trace->begin();
         old_it != old_rit.base(); ++old_it) {
      LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited,
                          thread_clock_diff, wall_clock_diff);
    }
    // Iterate bottom-up over the new trace from the point where they differ, emitting entry events.
    for (; rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
    FreeStackTrace(old_stack_trace);
  }
}

void* Trace::RunSamplingThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  intptr_t interval_us = reinterpret_cast<intptr_t>(arg);
  CHECK_GE(interval_us, 0);
  CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsCompiler()));

  while (true) {
    usleep(interval_us);
    ATRACE_BEGIN("Profile sampling");
    Thread* self = Thread::Current();
    Trace* the_trace;
    {
      MutexLock mu(self, *Locks::trace_lock_);
      the_trace = the_trace_;
      if (the_trace == NULL) {
        break;
      }
    }

    runtime->GetThreadList()->SuspendAll();
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(GetSample, the_trace);
    }
    runtime->GetThreadList()->ResumeAll();
    ATRACE_END();
  }

  runtime->DetachCurrentThread();
  return NULL;
}

void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
                  bool direct_to_ddms, bool sampling_enabled, int interval_us) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != NULL) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
      return;
    }
  }

  // Check interval if sampling is enabled.
  if (sampling_enabled && interval_us <= 0) {
    LOG(ERROR) << "Invalid sampling interval: " << interval_us;
    ScopedObjectAccess soa(self);
    ThrowRuntimeException("Invalid sampling interval: %d", interval_us);
    return;
  }

  // Open trace file if not going directly to ddms.
  std::unique_ptr<File> trace_file;
  if (!direct_to_ddms) {
    if (trace_fd < 0) {
      trace_file.reset(OS::CreateEmptyFile(trace_filename));
    } else {
      trace_file.reset(new File(trace_fd, "tracefile"));
      trace_file->DisableAutoClose();
    }
    if (trace_file.get() == NULL) {
      PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'";
      ScopedObjectAccess soa(self);
      ThrowRuntimeException("Unable to open trace file '%s'", trace_filename);
      return;
    }
  }

  Runtime* runtime = Runtime::Current();

  // Enable count of allocs if specified in the flags.
  bool enable_stats = false;

  runtime->GetThreadList()->SuspendAll();

  // Create Trace object.
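  // Re-check the_trace_ now that trace_lock_ is held again: another trace may have been started
  // between the early-out check at the top of this method and SuspendAll().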
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != NULL) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
    } else {
      enable_stats = (flags & kTraceCountAllocs) != 0;
      the_trace_ = new Trace(trace_file.release(), buffer_size, flags, sampling_enabled);
      if (sampling_enabled) {
        CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread,
                                            reinterpret_cast<void*>(interval_us)),
                           "Sampling profiler thread");
      } else {
        runtime->GetInstrumentation()->AddListener(the_trace_,
                                                   instrumentation::Instrumentation::kMethodEntered |
                                                   instrumentation::Instrumentation::kMethodExited |
                                                   instrumentation::Instrumentation::kMethodUnwind);
        runtime->GetInstrumentation()->EnableMethodTracing();
      }
    }
  }

  runtime->GetThreadList()->ResumeAll();

  // Can't call this when holding the mutator lock.
  if (enable_stats) {
    runtime->SetStatsEnabled(true);
  }
}

void Trace::Stop() {
  bool stop_alloc_counting = false;
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Trace* the_trace = NULL;
  pthread_t sampling_pthread = 0U;
  {
    MutexLock mu(Thread::Current(), *Locks::trace_lock_);
    if (the_trace_ == NULL) {
      LOG(ERROR) << "Trace stop requested, but no trace currently running";
    } else {
      the_trace = the_trace_;
      the_trace_ = NULL;
      sampling_pthread = sampling_pthread_;
    }
  }
  if (the_trace != NULL) {
    stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0;
    the_trace->FinishTracing();

    if (the_trace->sampling_enabled_) {
      MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, NULL);
    } else {
      runtime->GetInstrumentation()->DisableMethodTracing();
      runtime->GetInstrumentation()->RemoveListener(the_trace,
                                                    instrumentation::Instrumentation::kMethodEntered |
                                                    instrumentation::Instrumentation::kMethodExited |
                                                    instrumentation::Instrumentation::kMethodUnwind);
    }
    if (the_trace->trace_file_.get() != nullptr) {
      // Do not try to erase, so flush and close explicitly.
      if (the_trace->trace_file_->Flush() != 0) {
        PLOG(ERROR) << "Could not flush trace file.";
      }
      if (the_trace->trace_file_->Close() != 0) {
        PLOG(ERROR) << "Could not close trace file.";
      }
    }
    delete the_trace;
  }
  runtime->GetThreadList()->ResumeAll();

  if (stop_alloc_counting) {
    // Can be racy since SetStatsEnabled is not guarded by any locks.
    Runtime::Current()->SetStatsEnabled(false);
  }

  if (sampling_pthread != 0U) {
    CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
    sampling_pthread_ = 0U;
  }
}

void Trace::Shutdown() {
  if (GetMethodTracingMode() != kTracingInactive) {
    Stop();
  }
}

TracingMode Trace::GetMethodTracingMode() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  if (the_trace_ == NULL) {
    return kTracingInactive;
  } else if (the_trace_->sampling_enabled_) {
    return kSampleProfilingActive;
  } else {
    return kMethodTracingActive;
  }
}

Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled)
    : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
      sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_),
      buffer_size_(buffer_size), start_time_(MicroTime()),
      clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) {
  // Set up the beginning of the trace.
  uint16_t trace_version = GetTraceVersion(clock_source_);
  memset(buf_.get(), 0, kTraceHeaderLength);
  Append4LE(buf_.get(), kTraceMagicValue);
  Append2LE(buf_.get() + 4, trace_version);
  Append2LE(buf_.get() + 6, kTraceHeaderLength);
  Append8LE(buf_.get() + 8, start_time_);
  if (trace_version >= kTraceVersionDualClock) {
    uint16_t record_size = GetRecordSize(clock_source_);
    Append2LE(buf_.get() + 16, record_size);
  }

  // Update current offset.
  cur_offset_.StoreRelaxed(kTraceHeaderLength);
}

static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  uint8_t* ptr = buf + kTraceHeaderLength;
  uint8_t* end = buf + buf_size;

  while (ptr < end) {
    uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
    mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
    TraceAction action = DecodeTraceAction(tmid);
    LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action);
    ptr += GetRecordSize(clock_source);
  }
}

void Trace::FinishTracing() {
  // Compute elapsed time.
  uint64_t elapsed = MicroTime() - start_time_;

  size_t final_offset = cur_offset_.LoadRelaxed();

  std::set<mirror::ArtMethod*> visited_methods;
  GetVisitedMethods(final_offset, &visited_methods);

  std::ostringstream os;

  os << StringPrintf("%cversion\n", kTraceTokenChar);
  os << StringPrintf("%d\n", GetTraceVersion(clock_source_));
  os << StringPrintf("data-file-overflow=%s\n", overflow_ ?
"true" : "false"); 524 if (UseThreadCpuClock()) { 525 if (UseWallClock()) { 526 os << StringPrintf("clock=dual\n"); 527 } else { 528 os << StringPrintf("clock=thread-cpu\n"); 529 } 530 } else { 531 os << StringPrintf("clock=wall\n"); 532 } 533 os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed); 534 size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_); 535 os << StringPrintf("num-method-calls=%zd\n", num_records); 536 os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_); 537 os << StringPrintf("vm=art\n"); 538 if ((flags_ & kTraceCountAllocs) != 0) { 539 os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS)); 540 os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES)); 541 os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS)); 542 } 543 os << StringPrintf("%cthreads\n", kTraceTokenChar); 544 DumpThreadList(os); 545 os << StringPrintf("%cmethods\n", kTraceTokenChar); 546 DumpMethodList(os, visited_methods); 547 os << StringPrintf("%cend\n", kTraceTokenChar); 548 549 std::string header(os.str()); 550 if (trace_file_.get() == NULL) { 551 iovec iov[2]; 552 iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str())); 553 iov[0].iov_len = header.length(); 554 iov[1].iov_base = buf_.get(); 555 iov[1].iov_len = final_offset; 556 Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2); 557 const bool kDumpTraceInfo = false; 558 if (kDumpTraceInfo) { 559 LOG(INFO) << "Trace sent:\n" << header; 560 DumpBuf(buf_.get(), final_offset, clock_source_); 561 } 562 } else { 563 if (!trace_file_->WriteFully(header.c_str(), header.length()) || 564 !trace_file_->WriteFully(buf_.get(), final_offset)) { 565 std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno))); 566 PLOG(ERROR) << detail; 567 ThrowRuntimeException("%s", detail.c_str()); 568 } 569 } 570} 571 572void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object, 573 mirror::ArtMethod* method, uint32_t new_dex_pc) { 574 UNUSED(thread, this_object, method, new_dex_pc); 575 // We're not recorded to listen to this kind of event, so complain. 576 LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc; 577} 578 579void Trace::FieldRead(Thread* thread, mirror::Object* this_object, 580 mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field) 581 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 582 UNUSED(thread, this_object, method, dex_pc, field); 583 // We're not recorded to listen to this kind of event, so complain. 584 LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc; 585} 586 587void Trace::FieldWritten(Thread* thread, mirror::Object* this_object, 588 mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field, 589 const JValue& field_value) 590 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 591 UNUSED(thread, this_object, method, dex_pc, field, field_value); 592 // We're not recorded to listen to this kind of event, so complain. 
  LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}

void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
                          mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
                         mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED,
                         const JValue& return_value ATTRIBUTE_UNUSED) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
                         mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                            mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                            mirror::Throwable* exception_object)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  UNUSED(thread, throw_location, catch_method, catch_dex_pc, exception_object);
  LOG(ERROR) << "Unexpected exception caught event in tracing";
}

void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
  if (UseThreadCpuClock()) {
    uint64_t clock_base = thread->GetTraceClockBase();
    if (UNLIKELY(clock_base == 0)) {
      // First event, record the base time in the map.
      uint64_t time = thread->GetCpuMicroTime();
      thread->SetTraceClockBase(time);
    } else {
      *thread_clock_diff = thread->GetCpuMicroTime() - clock_base;
    }
  }
  if (UseWallClock()) {
    *wall_clock_diff = MicroTime() - start_time_;
  }
}

void Trace::LogMethodTraceEvent(Thread* thread, mirror::ArtMethod* method,
                                instrumentation::Instrumentation::InstrumentationEvent event,
                                uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
  // Advance cur_offset_ atomically.
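  // Each event reserves its record slot in buf_ with a weak compare-and-swap; if the buffer is
  // full the event is dropped and overflow_ is set so FinishTracing() reports data-file-overflow.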
  int32_t new_offset;
  int32_t old_offset;
  do {
    old_offset = cur_offset_.LoadRelaxed();
    new_offset = old_offset + GetRecordSize(clock_source_);
    if (new_offset > buffer_size_) {
      overflow_ = true;
      return;
    }
  } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset));

  TraceAction action = kTraceMethodEnter;
  switch (event) {
    case instrumentation::Instrumentation::kMethodEntered:
      action = kTraceMethodEnter;
      break;
    case instrumentation::Instrumentation::kMethodExited:
      action = kTraceMethodExit;
      break;
    case instrumentation::Instrumentation::kMethodUnwind:
      action = kTraceUnroll;
      break;
    default:
      UNIMPLEMENTED(FATAL) << "Unexpected event: " << event;
  }

  uint32_t method_value = EncodeTraceMethodAndAction(method, action);

  // Write data.
  uint8_t* ptr = buf_.get() + old_offset;
  Append2LE(ptr, thread->GetTid());
  Append4LE(ptr + 2, method_value);
  ptr += 6;

  if (UseThreadCpuClock()) {
    Append4LE(ptr, thread_clock_diff);
    ptr += 4;
  }
  if (UseWallClock()) {
    Append4LE(ptr, wall_clock_diff);
  }
}

void Trace::GetVisitedMethods(size_t buf_size,
                              std::set<mirror::ArtMethod*>* visited_methods) {
  uint8_t* ptr = buf_.get() + kTraceHeaderLength;
  uint8_t* end = buf_.get() + buf_size;

  while (ptr < end) {
    uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
    mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
    visited_methods->insert(method);
    ptr += GetRecordSize(clock_source_);
  }
}

void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) {
  for (const auto& method : visited_methods) {
    os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
        PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(),
        method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile());
  }
}

static void DumpThread(Thread* t, void* arg) {
  std::ostream& os = *reinterpret_cast<std::ostream*>(arg);
  std::string name;
  t->GetThreadName(name);
  os << t->GetTid() << "\t" << name << "\n";
}

void Trace::DumpThreadList(std::ostream& os) {
  Thread* self = Thread::Current();
  for (auto it : exited_threads_) {
    os << it.first << "\t" << it.second << "\n";
  }
  Locks::thread_list_lock_->AssertNotHeld(self);
  MutexLock mu(self, *Locks::thread_list_lock_);
  Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
}

void Trace::StoreExitingThreadInfo(Thread* thread) {
  MutexLock mu(thread, *Locks::trace_lock_);
  if (the_trace_ != nullptr) {
    std::string name;
    thread->GetThreadName(name);
    the_trace_->exited_threads_.Put(thread->GetTid(), name);
  }
}

}  // namespace art