profiler.cc revision b9c3888380666a7b44718f04f787693787cd57c6
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "profiler.h"

#include <fstream>
#include <sys/uio.h>
#include <sys/file.h>

#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
#include "thread_list.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

#if !defined(ART_USE_PORTABLE_COMPILER)
#include "entrypoints/quick/quick_entrypoints.h"
#endif

namespace art {

BackgroundMethodSamplingProfiler* BackgroundMethodSamplingProfiler::profiler_ = nullptr;
pthread_t BackgroundMethodSamplingProfiler::profiler_pthread_ = 0U;
volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;

// TODO: this profiler runs regardless of the state of the machine.  Maybe we should use the
// wakelock or something to modify the run characteristics.  This can be done when we
// have some performance data after it's been used for a while.

// Walk the Java stack, visiting at most max_depth_ frames.
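// Each visited frame is recorded as a (mirror::ArtMethod*, dex pc) pair; runtime
// (trampoline) frames are skipped and do not count toward the depth limit.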
class BoundedStackVisitor : public StackVisitor {
 public:
  BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
      Thread* thread, uint32_t max_depth)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr), stack_(stack), max_depth_(max_depth), depth_(0) {
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    uint32_t dex_pc = GetDexPc();
    stack_->push_back(std::make_pair(m, dex_pc));
    ++depth_;
    if (depth_ < max_depth_) {
      return true;
    } else {
      return false;
    }
  }

 private:
  std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack_;
  const uint32_t max_depth_;
  uint32_t depth_;
};

// This is called from either a thread list traversal or from a checkpoint.  Regardless
// of which caller, the mutator lock must be held.
static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
  const ProfilerOptions profile_options = profiler->GetProfilerOptions();
  switch (profile_options.GetProfileType()) {
    case kProfilerMethod: {
      mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
      if (false && method == nullptr) {
        LOG(INFO) << "No current method available";
        std::ostringstream os;
        thread->Dump(os);
        std::string data(os.str());
        LOG(INFO) << data;
      }
      profiler->RecordMethod(method);
      break;
    }
    case kProfilerBoundedStack: {
      std::vector<InstructionLocation> stack;
      uint32_t max_depth = profile_options.GetMaxStackDepth();
      BoundedStackVisitor bounded_stack_visitor(&stack, thread, max_depth);
      bounded_stack_visitor.WalkStack();
      profiler->RecordStack(stack);
      break;
    }
    default:
      LOG(INFO) << "This profile type is not implemented.";
  }
}

// A closure that is called by the thread checkpoint code.
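// Each runnable thread runs the closure on itself: it grabs the mutator lock (shared),
// records a sample via GetSample(), and then passes the profiler's barrier so that the
// profiler thread, which waits on the same barrier, knows the checkpoint has completed.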
class SampleCheckpoint : public Closure {
 public:
  explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
    profiler_(profiler) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    Thread* self = Thread::Current();
    if (thread == nullptr) {
      LOG(ERROR) << "Checkpoint with nullptr thread";
      return;
    }

    // Grab the mutator lock (shared access).
    ScopedObjectAccess soa(self);

    // Grab a sample.
    GetSample(thread, this->profiler_);

    // And finally tell the barrier that we're done.
    this->profiler_->GetBarrier().Pass(self);
  }

 private:
  BackgroundMethodSamplingProfiler* const profiler_;
};

bool BackgroundMethodSamplingProfiler::ShuttingDown(Thread* self) {
  MutexLock mu(self, *Locks::profiler_lock_);
  return shutting_down_;
}

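// Body of the background sampling thread.  It sleeps for a (backed-off) period, then samples
// all runnable threads every options_.GetIntervalUs() microseconds for options_.GetDurationS()
// seconds, and finally writes the accumulated profile to disk.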
void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  BackgroundMethodSamplingProfiler* profiler =
      reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);

  // Add a random delay the first time we run so that we don't hammer the CPU
  // with all profiles running at the same time.
  const int kRandomDelayMaxSecs = 30;
  const double kMaxBackoffSecs = 24*60*60;   // Max backoff time.

  srand(MicroTime() * getpid());
  int startup_delay = rand() % kRandomDelayMaxSecs;   // Random delay for startup.

  CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsCompiler()));

  Thread* self = Thread::Current();

  double backoff = 1.0;
  while (true) {
    if (ShuttingDown(self)) {
      break;
    }

    {
      // Wait until we need to run another profile.
      uint64_t delay_secs = profiler->options_.GetPeriodS() * backoff;

      // Add a startup delay to prevent all the profiles running at once.
      delay_secs += startup_delay;

      // Immediate startup for benchmarking?
      if (profiler->options_.GetStartImmediately() && startup_delay > 0) {
        delay_secs = 0;
      }

      startup_delay = 0;

      VLOG(profiler) << "Delaying profile start for " << delay_secs << " secs";
      MutexLock mu(self, profiler->wait_lock_);
      profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);

      // Expand the backoff by its coefficient, but don't go beyond the max.
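      // With a period of 10 seconds and a coefficient of 2.0 (illustrative numbers only),
      // successive delays are roughly 10, 20, 40, ... seconds; the multiplier itself is
      // capped at kMaxBackoffSecs.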
      backoff = std::min(backoff * profiler->options_.GetBackoffCoefficient(), kMaxBackoffSecs);
    }

    if (ShuttingDown(self)) {
      break;
    }

    uint64_t start_us = MicroTime();
    uint64_t end_us = start_us + profiler->options_.GetDurationS() * UINT64_C(1000000);
    uint64_t now_us = start_us;

    VLOG(profiler) << "Starting profiling run now for "
                   << PrettyDuration((end_us - start_us) * 1000);

    SampleCheckpoint check_point(profiler);

    size_t valid_samples = 0;
    while (now_us < end_us) {
      if (ShuttingDown(self)) {
        break;
      }

      usleep(profiler->options_.GetIntervalUs());    // Non-interruptible sleep.

      ThreadList* thread_list = runtime->GetThreadList();

      profiler->profiler_barrier_->Init(self, 0);
      size_t barrier_count = thread_list->RunCheckpointOnRunnableThreads(&check_point);

      // All threads are suspended, nothing to do.
      if (barrier_count == 0) {
        now_us = MicroTime();
        continue;
      }

      valid_samples += barrier_count;

      ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);

      // Wait for the barrier to be crossed by all runnable threads.  This wait
      // is done with a timeout so that we can detect problems with the checkpoint
      // running code.  We should never see this.
      const uint32_t kWaitTimeoutMs = 10000;
      const uint32_t kWaitTimeoutUs = kWaitTimeoutMs * 1000;

      uint64_t waitstart_us = MicroTime();
      // Wait for all threads to pass the barrier.
      profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
      uint64_t waitend_us = MicroTime();
      uint64_t waitdiff_us = waitend_us - waitstart_us;

      // We should never get a timeout.  If we do, it suggests a problem with the checkpoint
      // code.  Crash the process in this case.
      CHECK_LT(waitdiff_us, kWaitTimeoutUs);

      // Update the current time.
      now_us = MicroTime();
    }

    if (valid_samples > 0) {
      // After the profile has been taken, write it out.
      ScopedObjectAccess soa(self);   // Acquire the mutator lock.
      uint32_t size = profiler->WriteProfile();
      VLOG(profiler) << "Profile size: " << size;
    }
  }

  LOG(INFO) << "Profiler shutdown";
  runtime->DetachCurrentThread();
  return nullptr;
}

// Write out the profile file if we are generating a profile.
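// The file is plain text: a summary line "total_samples/null_method_samples/boot_method_samples"
// followed by one line per method as produced by DumpProfile().  For kProfilerMethod each line
// is "signature/count/method_size", e.g. (illustrative values only):
//   void com.example.Foo.run()/42/130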
uint32_t BackgroundMethodSamplingProfiler::WriteProfile() {
  std::string full_name = output_filename_;
  VLOG(profiler) << "Saving profile to " << full_name;

  int fd = open(full_name.c_str(), O_RDWR);
  if (fd < 0) {
    // Open failed.
    LOG(ERROR) << "Failed to open profile file " << full_name;
    return 0;
  }

  // Lock the file for exclusive access.  This will block if another process is using
  // the file.
  int err = flock(fd, LOCK_EX);
  if (err < 0) {
    LOG(ERROR) << "Failed to lock profile file " << full_name;
    ::close(fd);
    return 0;
  }

  // Read the previous profile.
  profile_table_.ReadPrevious(fd, options_.GetProfileType());

  // Move back to the start of the file.
  lseek(fd, 0, SEEK_SET);

  // Format the profile output and write to the file.
  std::ostringstream os;
  uint32_t num_methods = DumpProfile(os);
  std::string data(os.str());
  const char *p = data.c_str();
  size_t length = data.length();
  size_t full_length = length;
  do {
    ssize_t n = ::write(fd, p, length);
    if (n < 0) {
      // Stop on a write error rather than looping on a negative return value.
      LOG(ERROR) << "Failed to write profile file " << full_name;
      break;
    }
    p += n;
    length -= n;
  } while (length > 0);

  // Truncate the file to the new length.
  ftruncate(fd, full_length);

  // Now unlock the file, allowing another process in.
  err = flock(fd, LOCK_UN);
  if (err < 0) {
    LOG(ERROR) << "Failed to unlock profile file " << full_name;
  }

  // Done, close the file.
  ::close(fd);

  // Clean the profile for the next time.
  CleanProfile();

  return num_methods;
}

bool BackgroundMethodSamplingProfiler::Start(
    const std::string& output_filename, const ProfilerOptions& options) {
  if (!options.IsEnabled()) {
    return false;
  }

  CHECK(!output_filename.empty());

  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    // Don't start two profiler threads.
    if (profiler_ != nullptr) {
      return true;
    }
  }

  LOG(INFO) << "Starting profiler using output file: " << output_filename
            << " and options: " << options;
  {
    MutexLock mu(self, *Locks::profiler_lock_);
    profiler_ = new BackgroundMethodSamplingProfiler(output_filename, options);

    CHECK_PTHREAD_CALL(pthread_create, (&profiler_pthread_, nullptr, &RunProfilerThread,
        reinterpret_cast<void*>(profiler_)),
                       "Profiler thread");
  }
  return true;
}

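// Stop the background profiler: mark it as shutting down under profiler_lock_, wake it if it
// is waiting on period_condition_, join the pthread, and then delete the instance.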
void BackgroundMethodSamplingProfiler::Stop() {
  BackgroundMethodSamplingProfiler* profiler = nullptr;
  pthread_t profiler_pthread = 0U;
  {
    MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
    CHECK(!shutting_down_);
    profiler = profiler_;
    shutting_down_ = true;
    profiler_pthread = profiler_pthread_;
  }

  // Now wake up the sampler thread if it is sleeping.
  {
    MutexLock profile_mu(Thread::Current(), profiler->wait_lock_);
    profiler->period_condition_.Signal(Thread::Current());
  }
  // Wait for the sampler thread to stop.
  CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profiler thread shutdown");

  {
    MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
    profiler_ = nullptr;
  }
  delete profiler;
}

void BackgroundMethodSamplingProfiler::Shutdown() {
  Stop();
}

BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(
  const std::string& output_filename, const ProfilerOptions& options)
    : output_filename_(output_filename),
      options_(options),
      wait_lock_("Profile wait lock"),
      period_condition_("Profile condition", wait_lock_),
      profile_table_(wait_lock_),
      profiler_barrier_(new Barrier(0)) {
  // Populate the filtered_methods set.
  // This is empty right now, but to add a method, do this:
  //
  // filtered_methods_.insert("void java.lang.Object.wait(long, int)");
}

// Filter out methods the profiler doesn't want to record.
// We require the mutator lock since some statistics will be updated here.
bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
  if (method == nullptr) {
    profile_table_.NullMethod();
    // Don't record a nullptr method.
    return false;
  }

  mirror::Class* cls = method->GetDeclaringClass();
  if (cls != nullptr) {
    if (cls->GetClassLoader() == nullptr) {
      // Don't include methods from the boot class path.
      profile_table_.BootMethod();
      return false;
    }
  }

  bool is_filtered = false;

  if (strcmp(method->GetName(), "<clinit>") == 0) {
    // Always filter out class initializers.
    is_filtered = true;
  }

  // Filter out methods by name if any filters are configured.
  if (!is_filtered && filtered_methods_.size() > 0) {
    std::string method_full_name = PrettyMethod(method);

    // Don't include specific filtered methods.
    is_filtered = filtered_methods_.count(method_full_name) != 0;
  }
  return !is_filtered;
}

// A method has been hit; record its invocation in the method map.
// The mutator_lock must be held (shared) when this is called.
void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) {
  // Add to the profile table unless it is filtered out.
  if (ProcessMethod(method)) {
    profile_table_.Put(method);
  }
}

// Record the current bounded stack into sampling results.
void BackgroundMethodSamplingProfiler::RecordStack(const std::vector<InstructionLocation>& stack) {
  if (stack.size() == 0) {
    return;
  }
  // Get the method on top of the stack. We use this method to perform filtering.
  mirror::ArtMethod* method = stack.front().first;
  if (ProcessMethod(method)) {
    profile_table_.PutStack(stack);
  }
}

// Clean out any recordings for the method traces.
void BackgroundMethodSamplingProfiler::CleanProfile() {
  profile_table_.Clear();
}

uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
  return profile_table_.Write(os, options_.GetProfileType());
}

// Profile Table.
// This holds a mapping of mirror::ArtMethod* to a count of how many times a sample
// hit it at the top of the stack.
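// For kProfilerBoundedStack runs it additionally owns a trie of sampled call contexts
// (stack_trie_root_) and a map from each sampled method to its trie nodes
// (method_context_table), which Write() uses to emit per-context counts.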
ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0),
    num_null_methods_(0),
    num_boot_methods_(0) {
  for (int i = 0; i < kHashSize; i++) {
    table[i] = nullptr;
  }
  method_context_table = nullptr;
  stack_trie_root_ = nullptr;
}

ProfileSampleResults::~ProfileSampleResults() {
  Clear();
}

// Add a method to the profile table.  If it's the first time the method
// has been seen, add it with count=1, otherwise increment the count.
void ProfileSampleResults::Put(mirror::ArtMethod* method) {
  MutexLock mu(Thread::Current(), lock_);
  uint32_t index = Hash(method);
  if (table[index] == nullptr) {
    table[index] = new Map();
  }
  Map::iterator i = table[index]->find(method);
  if (i == table[index]->end()) {
    (*table[index])[method] = 1;
  } else {
    i->second++;
  }
  num_samples_++;
}

// Add a bounded stack to the profile table. Only the count of the method on
// top of the stack will be increased.
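// The stack is walked from the outermost frame (stack.rbegin()) to the innermost sampled
// frame, creating one trie node per (method, dex pc) step as needed; only the final node's
// count is incremented, and a node counted for the first time is also registered in
// method_context_table.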
void ProfileSampleResults::PutStack(const std::vector<InstructionLocation>& stack) {
  MutexLock mu(Thread::Current(), lock_);
  ScopedObjectAccess soa(Thread::Current());
  if (stack_trie_root_ == nullptr) {
    // The root of the stack trie is a dummy node so that we don't have to maintain
    // a collection of tries.
    stack_trie_root_ = new StackTrieNode();
  }

  StackTrieNode* current = stack_trie_root_;
  if (stack.size() == 0) {
    current->IncreaseCount();
    return;
  }

  for (std::vector<InstructionLocation>::const_reverse_iterator iter = stack.rbegin();
       iter != stack.rend(); ++iter) {
    InstructionLocation inst_loc = *iter;
    mirror::ArtMethod* method = inst_loc.first;
    if (method == nullptr) {
      // Skip null methods.
      continue;
    }
    uint32_t dex_pc = inst_loc.second;
    uint32_t method_idx = method->GetDexMethodIndex();
    const DexFile* dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
    MethodReference method_ref(dex_file, method_idx);
    StackTrieNode* child = current->FindChild(method_ref, dex_pc);
    if (child != nullptr) {
      current = child;
    } else {
      uint32_t method_size = 0;
      const DexFile::CodeItem* codeitem = method->GetCodeItem();
      if (codeitem != nullptr) {
        method_size = codeitem->insns_size_in_code_units_;
      }
      StackTrieNode* new_node = new StackTrieNode(method_ref, dex_pc, method_size, current);
      current->AppendChild(new_node);
      current = new_node;
    }
  }

  if (current != stack_trie_root_ && current->GetCount() == 0) {
    // Insert into the method_context table.
    if (method_context_table == nullptr) {
      method_context_table = new MethodContextMap();
    }
    MethodReference method = current->GetMethod();
    MethodContextMap::iterator i = method_context_table->find(method);
    if (i == method_context_table->end()) {
      TrieNodeSet* node_set = new TrieNodeSet();
      node_set->insert(current);
      (*method_context_table)[method] = node_set;
    } else {
      TrieNodeSet* node_set = i->second;
      node_set->insert(current);
    }
  }
  current->IncreaseCount();
  num_samples_++;
}

// Write the profile table to the output stream.  Also merge with the previous profile.
uint32_t ProfileSampleResults::Write(std::ostream& os, ProfileDataType type) {
  ScopedObjectAccess soa(Thread::Current());
  num_samples_ += previous_num_samples_;
  num_null_methods_ += previous_num_null_methods_;
  num_boot_methods_ += previous_num_boot_methods_;

  VLOG(profiler) << "Profile: "
                 << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
  os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
  uint32_t num_methods = 0;
  if (type == kProfilerMethod) {
    for (int i = 0; i < kHashSize; i++) {
      Map *map = table[i];
      if (map != nullptr) {
        for (const auto &meth_iter : *map) {
          mirror::ArtMethod *method = meth_iter.first;
          std::string method_name = PrettyMethod(method);

          const DexFile::CodeItem* codeitem = method->GetCodeItem();
          uint32_t method_size = 0;
          if (codeitem != nullptr) {
            method_size = codeitem->insns_size_in_code_units_;
          }
          uint32_t count = meth_iter.second;

          // Merge this profile entry with one from a previous run (if present).  Also
          // remove the previous entry.
          PreviousProfile::iterator pi = previous_.find(method_name);
          if (pi != previous_.end()) {
            count += pi->second.count_;
            previous_.erase(pi);
          }
          os << StringPrintf("%s/%u/%u\n", method_name.c_str(), count, method_size);
          ++num_methods;
        }
      }
    }
  } else if (type == kProfilerBoundedStack) {
    if (method_context_table != nullptr) {
      for (const auto &method_iter : *method_context_table) {
        MethodReference method = method_iter.first;
        TrieNodeSet* node_set = method_iter.second;
        std::string method_name = PrettyMethod(method.dex_method_index, *(method.dex_file));
        uint32_t method_size = 0;
        uint32_t total_count = 0;
        PreviousContextMap new_context_map;
        for (const auto &trie_node_i : *node_set) {
          StackTrieNode* node = trie_node_i;
          method_size = node->GetMethodSize();
          uint32_t count = node->GetCount();
          uint32_t dexpc = node->GetDexPC();
          total_count += count;

          StackTrieNode* current = node->GetParent();
          // We go backward on the trie to retrieve context and dex_pc until the dummy root.
          // The format of the context is "method_1@pc_1@method_2@pc_2@..."
          std::vector<std::string> context_vector;
          while (current != nullptr && current->GetParent() != nullptr) {
            context_vector.push_back(StringPrintf("%s@%u",
                PrettyMethod(current->GetMethod().dex_method_index, *(current->GetMethod().dex_file)).c_str(),
                current->GetDexPC()));
            current = current->GetParent();
          }
          std::string context_sig = Join(context_vector, '@');
          new_context_map[std::make_pair(dexpc, context_sig)] = count;
        }

        PreviousProfile::iterator pi = previous_.find(method_name);
        if (pi != previous_.end()) {
          total_count += pi->second.count_;
          PreviousContextMap* previous_context_map = pi->second.context_map_;
          if (previous_context_map != nullptr) {
            for (const auto &context_i : *previous_context_map) {
              uint32_t count = context_i.second;
              PreviousContextMap::iterator ci = new_context_map.find(context_i.first);
              if (ci == new_context_map.end()) {
                new_context_map[context_i.first] = count;
              } else {
                ci->second += count;
              }
            }
          }
          delete previous_context_map;
          previous_.erase(pi);
        }
        // We write out profile data with dex pc and context information in the following format:
        // "method/total_count/size/[pc_1:count_1:context_1#pc_2:count_2:context_2#...]".
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : new_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << StringPrintf("%s/%u/%u/[%s]\n", method_name.c_str(), total_count,
            method_size, Join(context_count_vector, '#').c_str());
        ++num_methods;
      }
    }
  }

  // Now we write out the remaining previous methods.
  for (const auto &pi : previous_) {
    if (type == kProfilerMethod) {
      os << StringPrintf("%s/%u/%u\n", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
    } else if (type == kProfilerBoundedStack) {
      os << StringPrintf("%s/%u/%u/[", pi.first.c_str(), pi.second.count_, pi.second.method_size_);
      PreviousContextMap* previous_context_map = pi.second.context_map_;
      if (previous_context_map != nullptr) {
        std::vector<std::string> context_count_vector;
        for (const auto &context_i : *previous_context_map) {
          context_count_vector.push_back(StringPrintf("%u:%u:%s", context_i.first.first,
              context_i.second, context_i.first.second.c_str()));
        }
        os << Join(context_count_vector, '#');
      }
      os << "]\n";
    }
    ++num_methods;
  }
  return num_methods;
}

void ProfileSampleResults::Clear() {
  num_samples_ = 0;
  num_null_methods_ = 0;
  num_boot_methods_ = 0;
  for (int i = 0; i < kHashSize; i++) {
    delete table[i];
    table[i] = nullptr;
  }
  if (stack_trie_root_ != nullptr) {
    stack_trie_root_->DeleteChildren();
    delete stack_trie_root_;
    stack_trie_root_ = nullptr;
    if (method_context_table != nullptr) {
      // Delete the owned TrieNodeSet values before the map itself so they are not leaked.
      for (auto &context_i : *method_context_table) {
        delete context_i.second;
      }
      delete method_context_table;
      method_context_table = nullptr;
    }
  }
  for (auto &pi : previous_) {
    if (pi.second.context_map_ != nullptr) {
      delete pi.second.context_map_;
      pi.second.context_map_ = nullptr;
    }
  }
  previous_.clear();
}

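// Hash an ArtMethod pointer into the fixed-size bucket table; the low bits are shifted away
// since, with aligned objects, they presumably carry little entropy.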
uint32_t ProfileSampleResults::Hash(mirror::ArtMethod* method) {
  return (PointerToLowMemUInt32(method) >> 3) % kHashSize;
}

// Read a single line into the given string.  Returns true if everything OK, false
// on EOF or error.
static bool ReadProfileLine(int fd, std::string& line) {
  char buf[4];
  line.clear();
  while (true) {
    int n = read(fd, buf, 1);     // TODO: could speed this up but is it worth it?
    if (n != 1) {
      return false;
    }
    if (buf[0] == '\n') {
      break;
    }
    line += buf[0];
  }
  return true;
}

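// Parse a previously written profile (see WriteProfile) so that its counts can be merged into
// the current run.  The layout mirrors Write(): a "samples/null/boot" summary line followed by
// "method/count/size" or "method/count/size/[pc:count:context#...]" entries.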
void ProfileSampleResults::ReadPrevious(int fd, ProfileDataType type) {
  // Reset counters.
  previous_num_samples_ = previous_num_null_methods_ = previous_num_boot_methods_ = 0;

  std::string line;

  // The first line contains summary information.
  if (!ReadProfileLine(fd, line)) {
    return;
  }
  std::vector<std::string> summary_info;
  Split(line, '/', summary_info);
  if (summary_info.size() != 3) {
    // Bad summary info.  It should be count/nullcount/bootcount.
    return;
  }
  previous_num_samples_ = strtoul(summary_info[0].c_str(), nullptr, 10);
  previous_num_null_methods_ = strtoul(summary_info[1].c_str(), nullptr, 10);
  previous_num_boot_methods_ = strtoul(summary_info[2].c_str(), nullptr, 10);

  // Now read each line until the end of file.  Each line consists of 3 or 4 fields separated by /.
  while (true) {
    if (!ReadProfileLine(fd, line)) {
      break;
    }
    std::vector<std::string> info;
    Split(line, '/', info);
    if (info.size() != 3 && info.size() != 4) {
      // Malformed.
      break;
    }
    std::string methodname = info[0];
    uint32_t total_count = strtoul(info[1].c_str(), nullptr, 10);
    uint32_t size = strtoul(info[2].c_str(), nullptr, 10);
    PreviousContextMap* context_map = nullptr;
    if (type == kProfilerBoundedStack && info.size() == 4) {
      context_map = new PreviousContextMap();
      std::string context_counts_str = info[3].substr(1, info[3].size() - 2);
      std::vector<std::string> context_count_pairs;
      Split(context_counts_str, '#', context_count_pairs);
      for (uint32_t i = 0; i < context_count_pairs.size(); ++i) {
        std::vector<std::string> context_count;
        Split(context_count_pairs[i], ':', context_count);
        if (context_count.size() == 2) {
          // Handle the case where this entry has no context information.
          uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
          uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
          (*context_map)[std::make_pair(dexpc, "")] = count;
        } else {
          // Handle the case where this entry includes context information.
          uint32_t dexpc = strtoul(context_count[0].c_str(), nullptr, 10);
          uint32_t count = strtoul(context_count[1].c_str(), nullptr, 10);
          std::string context = context_count[2];
          (*context_map)[std::make_pair(dexpc, context)] = count;
        }
      }
    }
    previous_[methodname] = PreviousValue(total_count, size, context_map);
  }
}

bool ProfileFile::LoadFile(const std::string& fileName) {
  LOG(VERBOSE) << "reading profile file " << fileName;
  struct stat st;
  int err = stat(fileName.c_str(), &st);
  if (err == -1) {
    LOG(VERBOSE) << "not found";
    return false;
  }
  if (st.st_size == 0) {
    return false;  // Empty profiles are invalid.
  }
  std::ifstream in(fileName.c_str());
  if (!in) {
    LOG(VERBOSE) << "profile file " << fileName << " exists but can't be opened";
    LOG(VERBOSE) << "file owner: " << st.st_uid << ":" << st.st_gid;
    LOG(VERBOSE) << "me: " << getuid() << ":" << getgid();
    LOG(VERBOSE) << "file permissions: " << std::oct << st.st_mode;
    LOG(VERBOSE) << "errno: " << errno;
    return false;
  }
  // The first line contains summary information.
  std::string line;
  std::getline(in, line);
  if (in.eof()) {
    return false;
  }
  std::vector<std::string> summary_info;
  Split(line, '/', summary_info);
  if (summary_info.size() != 3) {
    // Bad summary info.  It should be total/null/boot.
    return false;
  }
  // This is the number of hits in all profiled methods (without nullptr or boot methods).
  uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);

  // Now read each line until the end of file.  Each line consists of 3 or 4 fields
  // separated by '/'.  Store the entries in descending order of use.
  typedef std::set<std::pair<int, std::vector<std::string>>> ProfileSet;
  ProfileSet countSet;
  while (!in.eof()) {
    std::getline(in, line);
    if (in.eof()) {
      break;
    }
    std::vector<std::string> info;
    Split(line, '/', info);
    if (info.size() != 3 && info.size() != 4) {
      // Malformed.
      return false;
    }
    int count = atoi(info[1].c_str());
    countSet.insert(std::make_pair(-count, info));
  }

  uint32_t curTotalCount = 0;
  ProfileSet::iterator end = countSet.end();
  const ProfileData* prevData = nullptr;
  for (ProfileSet::iterator it = countSet.begin(); it != end; it++) {
    const std::string& methodname = it->second[0];
    uint32_t count = -it->first;
    uint32_t size = strtoul(it->second[2].c_str(), nullptr, 10);
    double usedPercent = (count * 100.0) / total_count;

    curTotalCount += count;
    // Methods with the same count should be part of the same top K percentage bucket.
    double topKPercentage = (prevData != nullptr) && (prevData->GetCount() == count)
      ? prevData->GetTopKUsedPercentage()
      : 100 * static_cast<double>(curTotalCount) / static_cast<double>(total_count);

    // Add it to the profile map.
    ProfileData curData = ProfileData(methodname, count, size, usedPercent, topKPercentage);
    profile_map_[methodname] = curData;
    // Point at the copy stored in the map; taking the address of the loop-local curData
    // would leave prevData dangling on the next iteration.
    prevData = &profile_map_[methodname];
  }
  return true;
}

bool ProfileFile::GetProfileData(ProfileFile::ProfileData* data, const std::string& method_name) {
  ProfileMap::iterator i = profile_map_.find(method_name);
  if (i == profile_map_.end()) {
    return false;
  }
  *data = i->second;
  return true;
}

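// Collect the methods whose cumulative top-K percentage is below the given threshold.  For
// example (illustrative numbers), if three methods were sampled 60, 30 and 10 times out of
// 100, their top-K percentages are 60, 90 and 100, so a threshold of 95.0 selects the first two.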
bool ProfileFile::GetTopKSamples(std::set<std::string>& topKSamples, double topKPercentage) {
  ProfileMap::iterator end = profile_map_.end();
  for (ProfileMap::iterator it = profile_map_.begin(); it != end; it++) {
    if (it->second.GetTopKUsedPercentage() < topKPercentage) {
      topKSamples.insert(it->first);
    }
  }
  return true;
}

StackTrieNode* StackTrieNode::FindChild(MethodReference method, uint32_t dex_pc) {
  if (children_.size() == 0) {
    return nullptr;
  }
  // Create a dummy node for searching.
  StackTrieNode* node = new StackTrieNode(method, dex_pc, 0, nullptr);
  std::set<StackTrieNode*, StackTrieNodeComparator>::iterator i = children_.find(node);
  delete node;
  return (i == children_.end()) ? nullptr : *i;
}

void StackTrieNode::DeleteChildren() {
  for (auto &child : children_) {
    if (child != nullptr) {
      child->DeleteChildren();
      delete child;
    }
  }
}

}  // namespace art