// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/profile-generator.h"

#include "src/base/adapters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
#include "src/unicode.h"

namespace v8 {
namespace internal {


JITLineInfoTable::JITLineInfoTable() {}


JITLineInfoTable::~JITLineInfoTable() {}


void JITLineInfoTable::SetPosition(int pc_offset, int line) {
  DCHECK(pc_offset >= 0);
  DCHECK(line > 0);  // The 1-based number of the source line.
  if (GetSourceLineNumber(pc_offset) != line) {
    pc_offset_map_.insert(std::make_pair(pc_offset, line));
  }
}


int JITLineInfoTable::GetSourceLineNumber(int pc_offset) const {
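  // lower_bound() returns the first recorded pc offset at or after
  // |pc_offset|; if there is none, fall back to the line of the last
  // recorded offset.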
  PcOffsetMap::const_iterator it = pc_offset_map_.lower_bound(pc_offset);
  if (it == pc_offset_map_.end()) {
    if (pc_offset_map_.empty()) return v8::CpuProfileNode::kNoLineNumberInfo;
    return (--pc_offset_map_.end())->second;
  }
  return it->second;
}


const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";

const char* const CodeEntry::kProgramEntryName = "(program)";
const char* const CodeEntry::kIdleEntryName = "(idle)";
const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";

base::LazyDynamicInstance<CodeEntry, CodeEntry::ProgramEntryCreateTrait>::type
    CodeEntry::kProgramEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::IdleEntryCreateTrait>::type
    CodeEntry::kIdleEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::GCEntryCreateTrait>::type
    CodeEntry::kGCEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry,
                          CodeEntry::UnresolvedEntryCreateTrait>::type
    CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kProgramEntryName);
}

CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kIdleEntryName);
}

CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
  return new CodeEntry(Logger::BUILTIN_TAG,
                       CodeEntry::kGarbageCollectorEntryName);
}

CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG,
                       CodeEntry::kUnresolvedFunctionName);
}

CodeEntry::~CodeEntry() {
  delete line_info_;
  for (auto location : inline_locations_) {
    for (auto entry : location.second) {
      delete entry;
    }
  }
}

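// Entries that have a script attached hash by (script_id, position); all
// other entries hash by their name/resource pointers and line number,
// mirroring the equality check in IsSameFunctionAs() below.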
uint32_t CodeEntry::GetHash() const {
  uint32_t hash = ComputeIntegerHash(tag(), v8::internal::kZeroHashSeed);
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_),
                               v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_),
                               v8::internal::kZeroHashSeed);
  } else {
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}


bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
  if (this == entry) return true;
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    return script_id_ == entry->script_id_ && position_ == entry->position_;
  }
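  // Comparing the name pointers is sufficient here because the strings are
  // expected to come from a deduplicating string storage, so equal names
  // share the same pointer.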
  return name_prefix_ == entry->name_prefix_ && name_ == entry->name_ &&
         resource_name_ == entry->resource_name_ &&
         line_number_ == entry->line_number_;
}


void CodeEntry::SetBuiltinId(Builtins::Name id) {
  bit_field_ = TagField::update(bit_field_, CodeEventListener::BUILTIN_TAG);
  bit_field_ = BuiltinIdField::update(bit_field_, id);
}


int CodeEntry::GetSourceLine(int pc_offset) const {
  if (line_info_ && !line_info_->empty()) {
    return line_info_->GetSourceLineNumber(pc_offset);
  }
  return v8::CpuProfileNode::kNoLineNumberInfo;
}

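// Associates a pc offset in the generated code with the stack of CodeEntry
// objects that were inlined at that point.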
void CodeEntry::AddInlineStack(int pc_offset,
                               std::vector<CodeEntry*> inline_stack) {
  inline_locations_.insert(std::make_pair(pc_offset, std::move(inline_stack)));
}

const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
  auto it = inline_locations_.find(pc_offset);
  return it != inline_locations_.end() ? &it->second : NULL;
}

void CodeEntry::AddDeoptInlinedFrames(
    int deopt_id, std::vector<CpuProfileDeoptFrame> inlined_frames) {
  deopt_inlined_frames_.insert(
      std::make_pair(deopt_id, std::move(inlined_frames)));
}

bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
  return deopt_inlined_frames_.find(deopt_id) != deopt_inlined_frames_.end();
}

void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
  if (!shared->script()->IsScript()) return;
  Script* script = Script::cast(shared->script());
  set_script_id(script->id());
  set_position(shared->start_position());
  set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}

CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = deopt_reason_;
  DCHECK_NE(kNoDeoptimizationId, deopt_id_);
  if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, static_cast<size_t>(std::max(0, position()))}));
  } else {
    info.stack = deopt_inlined_frames_[deopt_id_];
  }
  return info;
}


void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}


ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  base::HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry));
  return map_entry != NULL ?
      reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}


ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  base::HashMap::Entry* map_entry =
      children_.LookupOrInsert(entry, CodeEntryHash(entry));
  ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
  if (!node) {
    node = new ProfileNode(tree_, entry, this);
    map_entry->value = node;
    children_list_.Add(node);
  }
  return node;
}


void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment a hit counter of a certain source line.
  // Add a new source line if not found.
  base::HashMap::Entry* e =
      line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
  DCHECK(e);
  e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
}


bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                               unsigned int length) const {
  if (entries == NULL || length == 0) return false;

  unsigned line_count = line_ticks_.occupancy();

  if (line_count == 0) return true;
  if (length < line_count) return false;

  v8::CpuProfileNode::LineTick* entry = entries;

  for (base::HashMap::Entry *p = line_ticks_.Start(); p != NULL;
       p = line_ticks_.Next(p), entry++) {
    entry->line =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
    entry->hit_count =
        static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->value));
  }

  return true;
}


void ProfileNode::Print(int indent) {
  base::OS::Print("%5u %*s %s%s %d #%d", self_ticks_, indent, "",
                  entry_->name_prefix(), entry_->name(), entry_->script_id(),
                  id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (size_t i = 0; i < deopt_infos_.size(); ++i) {
    CpuProfileDeoptInfo& info = deopt_infos_[i];
    base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
                    " with reason '%s'.\n",
                    indent + 10, "", info.stack[0].script_id,
                    info.stack[0].position, info.deopt_reason);
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;;     Inline point: script_id %d position: %" PRIuS
                      ".\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  const char* bailout_reason = entry_->bailout_reason();
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (base::HashMap::Entry* p = children_.Start(); p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}


class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};

ProfileTree::ProfileTree(Isolate* isolate)
    : root_entry_(CodeEventListener::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_, nullptr)),
      isolate_(isolate),
      next_function_id_(1),
      function_ids_(ProfileNode::CodeEntriesMatch) {}

ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}

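// Returns a numeric id shared by all nodes whose CodeEntry describes the
// same function, assigning a fresh id the first time such an entry is seen.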
unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
  CodeEntry* code_entry = node->entry();
  base::HashMap::Entry* entry =
      function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
  if (!entry->value) {
    entry->value = reinterpret_cast<void*>(next_function_id_++);
  }
  return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
}

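// Walks |path| from its last element towards the first (i.e. from the
// outermost frame to the topmost one), creating missing children along the
// way, and returns the node for the final entry processed.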
ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
                                         int src_line, bool update_stats) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = NULL;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (*it == NULL) continue;
    last_entry = *it;
    node = node->FindOrAddChild(*it);
  }
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}


struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};


class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};


// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}

using v8::tracing::TracedValue;

CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
                       bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(base::TimeTicks::HighResolutionNow()),
      top_down_(profiler->isolate()),
      profiler_(profiler),
      streaming_next_sample_(0) {
  auto value = TracedValue::Create();
  value->SetDouble("startTime",
                   (start_time_ - base::TimeTicks()).InMicroseconds());
  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "Profile", this, "data", std::move(value));
}

void CpuProfile::AddPath(base::TimeTicks timestamp,
                         const std::vector<CodeEntry*>& path, int src_line,
                         bool update_stats) {
  ProfileNode* top_frame_node =
      top_down_.AddPathFromEnd(path, src_line, update_stats);
  if (record_samples_ && !timestamp.IsNull()) {
    timestamps_.Add(timestamp);
    samples_.Add(top_frame_node);
  }
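  // Periodically flush accumulated nodes and samples as trace events so that
  // long-running profiles can be consumed incrementally.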
  const int kSamplesFlushCount = 100;
  const int kNodesFlushCount = 10;
  if (samples_.length() - streaming_next_sample_ >= kSamplesFlushCount ||
      top_down_.pending_nodes_count() >= kNodesFlushCount) {
    StreamPendingTraceEvents();
  }
}

namespace {

void BuildNodeValue(const ProfileNode* node, TracedValue* value) {
  const CodeEntry* entry = node->entry();
  value->BeginDictionary("callFrame");
  value->SetString("functionName", entry->name());
  if (*entry->resource_name()) {
    value->SetString("url", entry->resource_name());
  }
  value->SetInteger("scriptId", entry->script_id());
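  // The profile trace format appears to expect 0-based line and column
  // numbers, while CodeEntry stores 1-based ones, hence the adjustment below.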
  if (entry->line_number()) {
    value->SetInteger("lineNumber", entry->line_number() - 1);
  }
  if (entry->column_number()) {
    value->SetInteger("columnNumber", entry->column_number() - 1);
  }
  value->EndDictionary();
  value->SetInteger("id", node->id());
  if (node->parent()) {
    value->SetInteger("parent", node->parent()->id());
  }
  const char* deopt_reason = entry->bailout_reason();
  if (deopt_reason && deopt_reason[0] && strcmp(deopt_reason, "no reason")) {
    value->SetString("deoptReason", deopt_reason);
  }
}

}  // namespace

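// Emits a "ProfileChunk" trace event carrying any nodes added since the last
// flush, the ids of the new samples, and the time deltas between them.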
void CpuProfile::StreamPendingTraceEvents() {
  std::vector<const ProfileNode*> pending_nodes = top_down_.TakePendingNodes();
  if (pending_nodes.empty() && !samples_.length()) return;
  auto value = TracedValue::Create();

  if (!pending_nodes.empty() || streaming_next_sample_ != samples_.length()) {
    value->BeginDictionary("cpuProfile");
    if (!pending_nodes.empty()) {
      value->BeginArray("nodes");
      for (auto node : pending_nodes) {
        value->BeginDictionary();
        BuildNodeValue(node, value.get());
        value->EndDictionary();
      }
      value->EndArray();
    }
    if (streaming_next_sample_ != samples_.length()) {
      value->BeginArray("samples");
      for (int i = streaming_next_sample_; i < samples_.length(); ++i) {
        value->AppendInteger(samples_[i]->id());
      }
      value->EndArray();
    }
    value->EndDictionary();
  }
  if (streaming_next_sample_ != samples_.length()) {
    value->BeginArray("timeDeltas");
    base::TimeTicks lastTimestamp =
        streaming_next_sample_ ? timestamps_[streaming_next_sample_ - 1]
                               : start_time();
    for (int i = streaming_next_sample_; i < timestamps_.length(); ++i) {
      value->AppendInteger(
          static_cast<int>((timestamps_[i] - lastTimestamp).InMicroseconds()));
      lastTimestamp = timestamps_[i];
    }
    value->EndArray();
    DCHECK(samples_.length() == timestamps_.length());
    streaming_next_sample_ = samples_.length();
  }

  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "ProfileChunk", this, "data", std::move(value));
}

void CpuProfile::FinishProfile() {
  end_time_ = base::TimeTicks::HighResolutionNow();
  StreamPendingTraceEvents();
  auto value = TracedValue::Create();
  value->SetDouble("endTime", (end_time_ - base::TimeTicks()).InMicroseconds());
  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "ProfileChunk", this, "data", std::move(value));
}

void CpuProfile::Print() {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
}

void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  code_map_.insert({addr, CodeEntryInfo(entry, size)});
}

void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
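  // Remove every entry that overlaps the range [start, end). upper_bound()
  // may skip an entry that starts before |start| but still covers it, so
  // step one entry back and re-check.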
  auto left = code_map_.upper_bound(start);
  if (left != code_map_.begin()) {
    --left;
    if (left->first + left->second.size <= start) ++left;
  }
  auto right = left;
  while (right != code_map_.end() && right->first < end) ++right;
  code_map_.erase(left, right);
}

CodeEntry* CodeMap::FindEntry(Address addr) {
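  // Take the last entry starting at or before |addr| and check that |addr|
  // actually falls inside its code range.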
  auto it = code_map_.upper_bound(addr);
  if (it == code_map_.begin()) return nullptr;
  --it;
  Address end_address = it->first + it->second.size;
  return addr < end_address ? it->second.entry : nullptr;
}

void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  auto it = code_map_.find(from);
  if (it == code_map_.end()) return;
  CodeEntryInfo info = it->second;
  code_map_.erase(it);
  AddCode(to, info.entry, info.size);
}

void CodeMap::Print() {
  for (auto it = code_map_.begin(); it != code_map_.end(); ++it) {
    base::OS::Print("%p %5d %s\n", static_cast<void*>(it->first),
                    it->second.size, it->second.entry->name());
  }
}

CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
    : resource_names_(isolate->heap()),
      profiler_(nullptr),
      current_profiles_semaphore_(1) {}

static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}


CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
}


bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples) {
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start a profile with the same title...
      current_profiles_semaphore_.Signal();
      // ... though return true to force it to collect a sample.
      return true;
    }
  }
  current_profiles_.Add(new CpuProfile(profiler_, title, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}


CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
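  // An empty title matches any profile; since the list is scanned from the
  // back, this stops the most recently started one.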
  const int title_len = StrLength(title);
  CpuProfile* profile = nullptr;
  current_profiles_semaphore_.Wait();
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (!profile) return nullptr;
  profile->FinishProfile();
  finished_profiles_.Add(profile);
  return profile;
}


bool CpuProfilesCollection::IsLastProfile(const char* title) {
  // Called from the VM thread, and only it can mutate the list,
  // so no locking is needed here.
  if (current_profiles_.length() != 1) return false;
  return StrLength(title) == 0
      || strcmp(current_profiles_[0]->title(), title) == 0;
}


void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from the VM thread for a completed profile.
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (profile == finished_profiles_[i]) {
      finished_profiles_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}

void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
    int src_line, bool update_stats) {
  // As starting / stopping profiles is rare relative to calls to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. by copying the contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(timestamp, path, src_line, update_stats);
  }
  current_profiles_semaphore_.Signal();
}

ProfileGenerator::ProfileGenerator(Isolate* isolate,
                                   CpuProfilesCollection* profiles)
    : isolate_(isolate), profiles_(profiles) {}

void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  std::vector<CodeEntry*> entries;
  // Conservatively reserve space for stack frames + pc + function + vm-state.
  // There could in fact be more of them because of inlined entries.
  entries.reserve(sample.frames_count + 3);

  // The ProfileNode knows nothing about all versions of generated code for
  // the same JS function. The line number information associated with
  // the latest version of generated code is used to find a source line number
  // for a JS function. Then, the detected source line is passed to
  // ProfileNode to increase the tick count for this source line.
  int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
  bool src_line_not_found = true;

  if (sample.pc != nullptr) {
    if (sample.has_external_callback && sample.state == EXTERNAL) {
      // Don't use the PC when in external callback code, as it can point
      // inside the callback's code, and we will erroneously report that a
      // callback calls itself.
      entries.push_back(FindEntry(sample.external_callback_entry));
    } else {
      CodeEntry* pc_entry = FindEntry(sample.pc);
      // If there is no pc_entry we're likely in native code. Find out if the
      // top of the stack was pointing inside a JS function, meaning that we
      // have encountered a frameless invocation.
      if (!pc_entry && !sample.has_external_callback) {
        pc_entry = FindEntry(sample.tos);
      }
      // If the PC is in the function code before it set up its stack frame or
      // after the frame was destroyed, SafeStackFrameIterator incorrectly
      // thinks that ebp contains the return address of the current function
      // and skips the caller's frame. Check for this case and just skip such
      // samples.
      if (pc_entry) {
        int pc_offset = static_cast<int>(reinterpret_cast<Address>(sample.pc) -
                                         pc_entry->instruction_start());
        src_line = pc_entry->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = pc_entry->line_number();
        }
        src_line_not_found = false;
        entries.push_back(pc_entry);

        if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
            pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
          // When the current function is either the Function.prototype.apply
          // or the Function.prototype.call builtin, the top frame is either
          // the frame of the calling JS function or an internal frame.
          // In the latter case we know the caller for sure, but in the former
          // case we don't, so we simply replace the frame with an
          // 'unresolved' entry.
          if (!sample.has_external_callback) {
            entries.push_back(CodeEntry::unresolved_entry());
          }
        }
      }
    }

    for (unsigned i = 0; i < sample.frames_count; ++i) {
      Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
      CodeEntry* entry = FindEntry(stack_pos);
      if (entry) {
        // Find out if the entry has an associated inlining stack.
        int pc_offset =
            static_cast<int>(stack_pos - entry->instruction_start());
        const std::vector<CodeEntry*>* inline_stack =
            entry->GetInlineStack(pc_offset);
        if (inline_stack) {
          entries.insert(entries.end(), inline_stack->rbegin(),
                         inline_stack->rend());
        }
        // Skip unresolved frames (e.g. internal frames) and get the source
        // line of the first JS caller.
        if (src_line_not_found) {
          src_line = entry->GetSourceLine(pc_offset);
          if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
            src_line = entry->line_number();
          }
          src_line_not_found = false;
        }
      }
      entries.push_back(entry);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (auto e : entries) {
      if (e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      entries.push_back(EntryForVMState(sample.state));
    }
  }

  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line,
                                      sample.update_stats);
}

CodeEntry* ProfileGenerator::FindEntry(void* address) {
  CodeEntry* entry = code_map_.FindEntry(reinterpret_cast<Address>(address));
  if (!entry) {
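    // The address may point into the isolate's RuntimeCallStats table rather
    // than into generated code; if so, synthesize an entry named after the
    // counter so the tick is attributed to that runtime call.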
    RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
    void* start = reinterpret_cast<void*>(rcs);
    void* end = reinterpret_cast<void*>(rcs + 1);
    if (start <= address && address < end) {
      RuntimeCallCounter* counter =
          reinterpret_cast<RuntimeCallCounter*>(address);
      entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name,
                            CodeEntry::kEmptyNamePrefix, "native V8Runtime");
      code_map_.AddCode(reinterpret_cast<Address>(address), entry, 1);
    }
  }
  return entry;
}

CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return CodeEntry::gc_entry();
    case JS:
    case COMPILER:
    // DOM event handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return CodeEntry::program_entry();
    case IDLE:
      return CodeEntry::idle_entry();
    default: return NULL;
  }
}

}  // namespace internal
}  // namespace v8