trace.cc revision 414af10d719603fb4d8d972f5a022c17957b44e1
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "trace.h"
18
19#include <sys/uio.h>
20
21#include "base/stl_util.h"
22#include "base/unix_file/fd_file.h"
23#include "class_linker.h"
24#include "common_throws.h"
25#include "debugger.h"
26#include "dex_file-inl.h"
27#include "instrumentation.h"
28#include "mirror/art_method-inl.h"
29#include "mirror/class-inl.h"
30#include "mirror/dex_cache.h"
31#include "mirror/object_array-inl.h"
32#include "mirror/object-inl.h"
33#include "object_utils.h"
34#include "os.h"
35#include "scoped_thread_state_change.h"
36#include "ScopedLocalRef.h"
37#include "thread.h"
38#include "thread_list.h"
39#if !defined(ART_USE_PORTABLE_COMPILER)
40#include "entrypoints/quick/quick_entrypoints.h"
41#endif
42
43namespace art {
44
45// File format:
46//     header
47//     record 0
48//     record 1
49//     ...
50//
51// Header format:
52//     u4  magic ('SLOW')
53//     u2  version
54//     u2  offset to data
55//     u8  start date/time in usec
56//     u2  record size in bytes (version >= 2 only)
57//     ... padding to 32 bytes
58//
59// Record format v1:
60//     u1  thread ID
61//     u4  method ID | method action
62//     u4  time delta since start, in usec
63//
64// Record format v2:
65//     u2  thread ID
66//     u4  method ID | method action
67//     u4  time delta since start, in usec
68//
69// Record format v3:
70//     u2  thread ID
71//     u4  method ID | method action
72//     u4  time delta since start, in usec
73//     u4  wall time since start, in usec (when clock == "dual" only)
74//
75// 32 bits of microseconds is 70 minutes.
76//
77// All values are stored in little-endian order.
78
// Per-record action code. Packed into the low two bits of the method pointer
// (see EncodeTraceMethodAndAction) to form the "method ID | method action"
// word stored in each trace record.
enum TraceAction {
    kTraceMethodEnter = 0x00,       // method entry
    kTraceMethodExit = 0x01,        // method exit
    kTraceUnroll = 0x02,            // method exited by exception unrolling
    // 0x03 currently unused
    kTraceMethodActionMask = 0x03,  // two bits
};
86
87class BuildStackTraceVisitor : public StackVisitor {
88 public:
89  explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, NULL),
90      method_trace_(new std::vector<mirror::ArtMethod*>) {}
91
92  bool VisitFrame() {
93    mirror::ArtMethod* m = GetMethod();
94    // Ignore runtime frames (in particular callee save).
95    if (!m->IsRuntimeMethod()) {
96      method_trace_->push_back(m);
97    }
98    return true;
99  }
100
101  // Returns a stack trace where the topmost frame corresponds with the first element of the vector.
102  std::vector<mirror::ArtMethod*>* GetStackTrace() const {
103    return method_trace_;
104  }
105
106 private:
107  std::vector<mirror::ArtMethod*>* const method_trace_;
108};
109
static const char     kTraceTokenChar             = '*';
static const uint16_t kTraceHeaderLength          = 32;
// 'SLOW' as little-endian bytes ('S','L','O','W'), the magic written first
// into the header.
static const uint32_t kTraceMagicValue            = 0x574f4c53;
static const uint16_t kTraceVersionSingleClock    = 2;
static const uint16_t kTraceVersionDualClock      = 3;
static const uint16_t kTraceRecordSizeSingleClock = 10;  // using v2
static const uint16_t kTraceRecordSizeDualClock   = 14;  // using v3 with two timestamps
117
#if defined(HAVE_POSIX_CLOCKS)
ProfilerClockSource Trace::default_clock_source_ = kProfilerClockSourceDual;
#else
// Without POSIX clocks only the wall clock can be sampled.
ProfilerClockSource Trace::default_clock_source_ = kProfilerClockSourceWall;
#endif

// The active trace, or NULL when not tracing; accessed under Locks::trace_lock_.
Trace* volatile Trace::the_trace_ = NULL;
// TODO: Add way to enable sampling and set interval through gui.
bool Trace::sampling_enabled_ = true;
uint32_t Trace::sampling_interval_us_ = 10000;
// Handle of the sampling thread; 0 when no sampling thread is running.
pthread_t Trace::sampling_pthread_ = 0U;
129
130static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) {
131  return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask);
132}
133
134static TraceAction DecodeTraceAction(uint32_t tmid) {
135  return static_cast<TraceAction>(tmid & kTraceMethodActionMask);
136}
137
138static uint32_t EncodeTraceMethodAndAction(const mirror::ArtMethod* method,
139                                           TraceAction action) {
140  uint32_t tmid = reinterpret_cast<uint32_t>(method) | action;
141  DCHECK_EQ(method, DecodeTraceMethodId(tmid));
142  return tmid;
143}
144
145void Trace::SetDefaultClockSource(ProfilerClockSource clock_source) {
146#if defined(HAVE_POSIX_CLOCKS)
147  default_clock_source_ = clock_source;
148#else
149  if (clock_source != kProfilerClockSourceWall) {
150    LOG(WARNING) << "Ignoring tracing request to use ";
151  }
152#endif
153}
154
155static uint16_t GetTraceVersion(ProfilerClockSource clock_source) {
156  return (clock_source == kProfilerClockSourceDual) ? kTraceVersionDualClock
157                                                    : kTraceVersionSingleClock;
158}
159
160static uint16_t GetRecordSize(ProfilerClockSource clock_source) {
161  return (clock_source == kProfilerClockSourceDual) ? kTraceRecordSizeDualClock
162                                                    : kTraceRecordSizeSingleClock;
163}
164
165bool Trace::UseThreadCpuClock() {
166  return (clock_source_ == kProfilerClockSourceThreadCpu) ||
167      (clock_source_ == kProfilerClockSourceDual);
168}
169
170bool Trace::UseWallClock() {
171  return (clock_source_ == kProfilerClockSourceWall) ||
172      (clock_source_ == kProfilerClockSourceDual);
173}
174
// Reads each enabled clock once, discarding the value; only the cost of the
// calls matters (used by GetClockOverhead to time clock reads).
static void MeasureClockOverhead(Trace* trace) {
  if (trace->UseThreadCpuClock()) {
    ThreadCpuMicroTime();
  }
  if (trace->UseWallClock()) {
    MicroTime();
  }
}
183
184static uint32_t GetClockOverhead(Trace* trace) {
185  uint64_t start = ThreadCpuMicroTime();
186
187  for (int i = 4000; i > 0; i--) {
188    MeasureClockOverhead(trace);
189    MeasureClockOverhead(trace);
190    MeasureClockOverhead(trace);
191    MeasureClockOverhead(trace);
192    MeasureClockOverhead(trace);
193    MeasureClockOverhead(trace);
194    MeasureClockOverhead(trace);
195    MeasureClockOverhead(trace);
196  }
197
198  uint64_t elapsed = ThreadCpuMicroTime() - start;
199  return uint32_t (elapsed / 32);
200}
201
// Writes |val| into |buf| as two little-endian bytes.
// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append2LE(uint8_t* buf, uint16_t val) {
  buf[0] = static_cast<uint8_t>(val);
  buf[1] = static_cast<uint8_t>(val >> 8);
}
207
// Writes |val| into |buf| as four little-endian bytes.
// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append4LE(uint8_t* buf, uint32_t val) {
  for (int i = 0; i < 4; ++i) {
    buf[i] = static_cast<uint8_t>(val >> (8 * i));
  }
}
215
// Writes |val| into |buf| as eight little-endian bytes.
// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append8LE(uint8_t* buf, uint64_t val) {
  for (int i = 0; i < 8; ++i) {
    buf[i] = static_cast<uint8_t>(val >> (8 * i));
  }
}
227
228static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
229  BuildStackTraceVisitor build_trace_visitor(thread);
230  build_trace_visitor.WalkStack();
231  std::vector<mirror::ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
232  Trace* the_trace = reinterpret_cast<Trace*>(arg);
233  the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
234}
235
// Diffs |thread|'s freshly captured |stack_trace| against the previous sample
// and logs synthetic entry/exit events for the frames that changed. Takes
// ownership of |stack_trace| (stored in thread_stack_trace_map_; the previous
// trace is deleted). Only ever called on the sampling thread.
void Trace::CompareAndUpdateStackTrace(Thread* thread,
                                       std::vector<mirror::ArtMethod*>* stack_trace) {
  CHECK_EQ(pthread_self(), sampling_pthread_);
  SafeMap<Thread*, std::vector<mirror::ArtMethod*>*>::iterator map_it = thread_stack_trace_map_.find(thread);
  if (map_it == thread_stack_trace_map_.end()) {
    // If there's no existing stack trace in the map for this thread, log an entry event for all
    // methods in the trace.
    thread_stack_trace_map_.Put(thread, stack_trace);
    // Reverse iteration: traces are topmost-first, so rbegin() is the
    // outermost frame, and entries must be emitted outermost-in.
    for (std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
         rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered);
    }
  } else {
    // If there's a previous stack trace for this thread, diff the traces and emit entry and exit
    // events accordingly.
    std::vector<mirror::ArtMethod*>* old_stack_trace = map_it->second;
    thread_stack_trace_map_.Overwrite(thread, stack_trace);
    std::vector<mirror::ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin();
    std::vector<mirror::ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();

    // Iterate bottom-up over both traces until there's a difference between them.
    while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) {
      old_rit++;
      rit++;
    }
    // Iterate top-down over the old trace until the point where they differ, emitting exit events.
    // old_rit.base() is the forward iterator just past the first mismatching
    // frame, so [begin, base()) covers every old frame that is no longer live.
    for (std::vector<mirror::ArtMethod*>::iterator old_it = old_stack_trace->begin();
         old_it != old_rit.base(); ++old_it) {
      LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited);
    }
    // Iterate bottom-up over the new trace from the point where they differ, emitting entry events.
    for (; rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered);
    }
    delete old_stack_trace;
  }
}
273
// Entry point of the sampling profiler thread: attaches to the runtime, then
// periodically suspends all threads and samples every thread's stack, until
// Stop() clears the_trace_.
void* Trace::RunSamplingThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsCompiler()));

  while (true) {
    usleep(sampling_interval_us_);

    Thread* self = Thread::Current();
    Trace* the_trace;
    {
      MutexLock mu(self, *Locks::trace_lock_);
      the_trace = the_trace_;
      if (the_trace == NULL) {
        // Stop() detached the trace; exit the sampling loop.
        break;
      }
    }

    // Sample all stacks with the world stopped so they are consistent.
    runtime->GetThreadList()->SuspendAll();
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(GetSample, the_trace);
    }
    runtime->GetThreadList()->ResumeAll();
  }

  runtime->DetachCurrentThread();
  return NULL;
}
303
304void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
305                  bool direct_to_ddms) {
306  Thread* self = Thread::Current();
307  {
308    MutexLock mu(self, *Locks::trace_lock_);
309    if (the_trace_ != NULL) {
310      LOG(ERROR) << "Trace already in progress, ignoring this request";
311      return;
312    }
313  }
314  Runtime* runtime = Runtime::Current();
315  runtime->GetThreadList()->SuspendAll();
316
317  // Open trace file if not going directly to ddms.
318  UniquePtr<File> trace_file;
319  if (!direct_to_ddms) {
320    if (trace_fd < 0) {
321      trace_file.reset(OS::CreateEmptyFile(trace_filename));
322    } else {
323      trace_file.reset(new File(trace_fd, "tracefile"));
324      trace_file->DisableAutoClose();
325    }
326    if (trace_file.get() == NULL) {
327      PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'";
328      runtime->GetThreadList()->ResumeAll();
329      ScopedObjectAccess soa(self);
330      ThrowRuntimeException("Unable to open trace file '%s'", trace_filename);
331      return;
332    }
333  }
334
335  // Create Trace object.
336  {
337    MutexLock mu(self, *Locks::trace_lock_);
338    if (the_trace_ != NULL) {
339      LOG(ERROR) << "Trace already in progress, ignoring this request";
340    } else {
341      the_trace_ = new Trace(trace_file.release(), buffer_size, flags);
342
343      // Enable count of allocs if specified in the flags.
344      if ((flags && kTraceCountAllocs) != 0) {
345        runtime->SetStatsEnabled(true);
346      }
347
348      if (sampling_enabled_) {
349        CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread, NULL),
350                           "Sampling profiler thread");
351      } else {
352        runtime->GetInstrumentation()->AddListener(the_trace_,
353                                                   instrumentation::Instrumentation::kMethodEntered |
354                                                   instrumentation::Instrumentation::kMethodExited |
355                                                   instrumentation::Instrumentation::kMethodUnwind);
356      }
357    }
358  }
359  runtime->GetThreadList()->ResumeAll();
360}
361
// Stops the active trace: detaches it under trace_lock_, writes out the trace
// data, removes instrumentation listeners (non-sampling mode), and joins the
// sampling thread last — outside the suspend-all region, since that thread
// suspends/resumes threads itself and exits only once it sees the_trace_ == NULL.
void Trace::Stop() {
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Trace* the_trace = NULL;
  pthread_t sampling_pthread = 0U;
  {
    MutexLock mu(Thread::Current(), *Locks::trace_lock_);
    if (the_trace_ == NULL) {
      LOG(ERROR) << "Trace stop requested, but no trace currently running";
    } else {
      the_trace = the_trace_;
      the_trace_ = NULL;
      sampling_pthread = sampling_pthread_;
      sampling_pthread_ = 0U;
    }
  }
  if (the_trace != NULL) {
    the_trace->FinishTracing();

    if (!sampling_enabled_) {
      runtime->GetInstrumentation()->RemoveListener(the_trace,
                                                    instrumentation::Instrumentation::kMethodEntered |
                                                    instrumentation::Instrumentation::kMethodExited |
                                                    instrumentation::Instrumentation::kMethodUnwind);
    }
    // NOTE(review): the_trace is deleted before the sampling thread is joined.
    // This looks safe because that thread re-reads the_trace_ under trace_lock_
    // each iteration (and it is now NULL), but verify there is no in-flight use.
    delete the_trace;
  }
  runtime->GetThreadList()->ResumeAll();

  if (sampling_enabled_ && sampling_pthread != 0U) {
    CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
  }
}
395
396void Trace::Shutdown() {
397  if (IsMethodTracingActive()) {
398    Stop();
399  }
400}
401
// Returns whether a trace is currently running; the_trace_ is non-NULL exactly
// while tracing is active, and is read under trace_lock_.
bool Trace::IsMethodTracingActive() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  return the_trace_ != NULL;
}
406
// Takes ownership of |trace_file| (may be NULL when streaming to DDMS),
// allocates the (zero-initialized) record buffer, and writes the binary
// header: magic, version, header length, start time, and — for dual-clock
// (v3) traces — the per-record size at offset 16.
Trace::Trace(File* trace_file, int buffer_size, int flags)
    : trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
      clock_source_(default_clock_source_), buffer_size_(buffer_size), start_time_(MicroTime()),
      cur_offset_(0),  overflow_(false) {
  // Set up the beginning of the trace.
  uint16_t trace_version = GetTraceVersion(clock_source_);
  memset(buf_.get(), 0, kTraceHeaderLength);
  Append4LE(buf_.get(), kTraceMagicValue);
  Append2LE(buf_.get() + 4, trace_version);
  Append2LE(buf_.get() + 6, kTraceHeaderLength);
  Append8LE(buf_.get() + 8, start_time_);
  if (trace_version >= kTraceVersionDualClock) {
    uint16_t record_size = GetRecordSize(clock_source_);
    Append2LE(buf_.get() + 16, record_size);
  }

  // Update current offset.
  cur_offset_ = kTraceHeaderLength;
}
426
Trace::~Trace() {
  // The sampling thread must have been joined already (Stop() zeroes the
  // handle before deleting the trace).
  CHECK_EQ(sampling_pthread_, static_cast<pthread_t>(0U));
  // Free the per-thread stack-trace vectors owned by the map.
  STLDeleteValues(&thread_stack_trace_map_);
}
431
432static void DumpBuf(uint8_t* buf, size_t buf_size, ProfilerClockSource clock_source)
433    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
434  uint8_t* ptr = buf + kTraceHeaderLength;
435  uint8_t* end = buf + buf_size;
436
437  while (ptr < end) {
438    uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
439    mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
440    TraceAction action = DecodeTraceAction(tmid);
441    LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action);
442    ptr += GetRecordSize(clock_source);
443  }
444}
445
// Builds the textual trace summary (version, clock mode, counts, thread and
// method tables) and emits it together with the binary record buffer, either
// to DDMS as an MPSE chunk or to trace_file_. Throws a RuntimeException on a
// file write failure.
void Trace::FinishTracing() {
  // Compute elapsed time.
  uint64_t elapsed = MicroTime() - start_time_;

  size_t final_offset = cur_offset_;
  uint32_t clock_overhead = GetClockOverhead(this);

  if ((flags_ & kTraceCountAllocs) != 0) {
    // Turn alloc-stat collection back off now that the trace is done.
    Runtime::Current()->SetStatsEnabled(false);
  }

  std::set<mirror::ArtMethod*> visited_methods;
  GetVisitedMethods(final_offset, &visited_methods);

  std::ostringstream os;

  os << StringPrintf("%cversion\n", kTraceTokenChar);
  os << StringPrintf("%d\n", GetTraceVersion(clock_source_));
  os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false");
  if (UseThreadCpuClock()) {
    if (UseWallClock()) {
      os << StringPrintf("clock=dual\n");
    } else {
      os << StringPrintf("clock=thread-cpu\n");
    }
  } else {
    os << StringPrintf("clock=wall\n");
  }
  os << StringPrintf("elapsed-time-usec=%llu\n", elapsed);
  size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
  os << StringPrintf("num-method-calls=%zd\n", num_records);
  os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead);
  os << StringPrintf("vm=art\n");
  if ((flags_ & kTraceCountAllocs) != 0) {
    os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
    os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES));
    os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS));
  }
  os << StringPrintf("%cthreads\n", kTraceTokenChar);
  DumpThreadList(os);
  os << StringPrintf("%cmethods\n", kTraceTokenChar);
  DumpMethodList(os, visited_methods);
  os << StringPrintf("%cend\n", kTraceTokenChar);

  std::string header(os.str());
  if (trace_file_.get() == NULL) {
    // No file: ship header + records to DDMS in one two-part chunk.
    iovec iov[2];
    iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str()));
    iov[0].iov_len = header.length();
    iov[1].iov_base = buf_.get();
    iov[1].iov_len = final_offset;
    Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2);
    const bool kDumpTraceInfo = false;  // Flip on for local debugging.
    if (kDumpTraceInfo) {
      LOG(INFO) << "Trace sent:\n" << header;
      DumpBuf(buf_.get(), final_offset, clock_source_);
    }
  } else {
    if (!trace_file_->WriteFully(header.c_str(), header.length()) ||
        !trace_file_->WriteFully(buf_.get(), final_offset)) {
      std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno)));
      PLOG(ERROR) << detail;
      ThrowRuntimeException("%s", detail.c_str());
    }
  }
}
512
513void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
514                       const mirror::ArtMethod* method, uint32_t new_dex_pc) {
515  // We're not recorded to listen to this kind of event, so complain.
516  LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
517};
518
// Instrumentation callback: record a method-entry trace record.
void Trace::MethodEntered(Thread* thread, mirror::Object* this_object,
                          const mirror::ArtMethod* method, uint32_t dex_pc) {
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered);
}
523
// Instrumentation callback: record a method-exit trace record. The return
// value is not part of the trace format, so it is deliberately ignored.
void Trace::MethodExited(Thread* thread, mirror::Object* this_object,
                         const mirror::ArtMethod* method, uint32_t dex_pc,
                         const JValue& return_value) {
  UNUSED(return_value);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited);
}
530
// Instrumentation callback: record an exit-by-exception-unwind trace record.
void Trace::MethodUnwind(Thread* thread, const mirror::ArtMethod* method, uint32_t dex_pc) {
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind);
}
534
// Instrumentation callback the tracer never registers for; reaching it is a
// listener-wiring bug, so complain.
void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                            mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                            mirror::Throwable* exception_object)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  LOG(ERROR) << "Unexpected exception caught event in tracing";
}
541
// Appends one record (thread id, method|action word, timestamp(s)) to the
// trace buffer. The record slot is claimed with a CAS on cur_offset_, so
// concurrent callers get disjoint slots; on a full buffer the record is
// dropped and overflow_ is set for the header.
void Trace::LogMethodTraceEvent(Thread* thread, const mirror::ArtMethod* method,
                                instrumentation::Instrumentation::InstrumentationEvent event) {
  // Advance cur_offset_ atomically.
  int32_t new_offset;
  int32_t old_offset;
  do {
    old_offset = cur_offset_;
    new_offset = old_offset + GetRecordSize(clock_source_);
    if (new_offset > buffer_size_) {
      overflow_ = true;
      return;
    }
  } while (android_atomic_release_cas(old_offset, new_offset, &cur_offset_) != 0);

  // Map the instrumentation event onto the two-bit trace action.
  TraceAction action = kTraceMethodEnter;
  switch (event) {
    case instrumentation::Instrumentation::kMethodEntered:
      action = kTraceMethodEnter;
      break;
    case instrumentation::Instrumentation::kMethodExited:
      action = kTraceMethodExit;
      break;
    case instrumentation::Instrumentation::kMethodUnwind:
      action = kTraceUnroll;
      break;
    default:
      UNIMPLEMENTED(FATAL) << "Unexpected event: " << event;
  }

  uint32_t method_value = EncodeTraceMethodAndAction(method, action);

  // Write data
  uint8_t* ptr = buf_.get() + old_offset;
  Append2LE(ptr, thread->GetTid());
  Append4LE(ptr + 2, method_value);
  ptr += 6;

  if (UseThreadCpuClock()) {
    // TODO: this isn't vaguely thread safe.
    SafeMap<Thread*, uint64_t>::iterator it = thread_clock_base_map_.find(thread);
    uint32_t thread_clock_diff = 0;
    if (UNLIKELY(it == thread_clock_base_map_.end())) {
      // First event, the diff is 0, record the base time in the map.
      uint64_t time = ThreadCpuMicroTime();
      thread_clock_base_map_.Put(thread, time);
    } else {
      uint64_t thread_clock_base = it->second;
      thread_clock_diff = ThreadCpuMicroTime() - thread_clock_base;
    }
    Append4LE(ptr, thread_clock_diff);
    ptr += 4;
  }
  if (UseWallClock()) {
    uint32_t wall_clock_diff = MicroTime() - start_time_;
    Append4LE(ptr, wall_clock_diff);
  }
}
599
600void Trace::GetVisitedMethods(size_t buf_size,
601                              std::set<mirror::ArtMethod*>* visited_methods) {
602  uint8_t* ptr = buf_.get() + kTraceHeaderLength;
603  uint8_t* end = buf_.get() + buf_size;
604
605  while (ptr < end) {
606    uint32_t tmid = ptr[2] | (ptr[3] << 8) | (ptr[4] << 16) | (ptr[5] << 24);
607    mirror::ArtMethod* method = DecodeTraceMethodId(tmid);
608    visited_methods->insert(method);
609    ptr += GetRecordSize(clock_source_);
610  }
611}
612
613void Trace::DumpMethodList(std::ostream& os,
614                           const std::set<mirror::ArtMethod*>& visited_methods) {
615  typedef std::set<mirror::ArtMethod*>::const_iterator It;  // TODO: C++0x auto
616  MethodHelper mh;
617  for (It it = visited_methods.begin(); it != visited_methods.end(); ++it) {
618    mirror::ArtMethod* method = *it;
619    mh.ChangeMethod(method);
620    os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
621        PrettyDescriptor(mh.GetDeclaringClassDescriptor()).c_str(), mh.GetName(),
622        mh.GetSignature().c_str(), mh.GetDeclaringClassSourceFile());
623  }
624}
625
626static void DumpThread(Thread* t, void* arg) {
627  std::ostream& os = *reinterpret_cast<std::ostream*>(arg);
628  std::string name;
629  t->GetThreadName(name);
630  os << t->GetTid() << "\t" << name << "\n";
631}
632
// Writes one "tid\tname" line per live thread, holding the thread list lock
// for the duration of the walk.
void Trace::DumpThreadList(std::ostream& os) {
  Thread* self = Thread::Current();
  Locks::thread_list_lock_->AssertNotHeld(self);
  MutexLock mu(self, *Locks::thread_list_lock_);
  Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
}
638}
639
640}  // namespace art
641