1// Copyright 2010 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "cpu-profiler-inl.h"
31
32#ifdef ENABLE_LOGGING_AND_PROFILING
33
34#include "frames-inl.h"
35#include "hashmap.h"
36#include "log-inl.h"
37#include "vm-state-inl.h"
38
39#include "../include/v8-profiler.h"
40
41namespace v8 {
42namespace internal {
43
// Size of the queue carrying code events from the VM thread to the
// profiler processor thread.
static const int kEventsBufferSize = 256*KB;
// The tick-sample buffer is chunked; these bound its total footprint
// (chunk size times chunk count).
static const int kTickSamplesBufferChunkSize = 64*KB;
static const int kTickSamplesBufferChunksCount = 16;
47
48
// Sets up the processor thread's state; the thread itself is not
// started here — the owner calls Start() explicitly (see
// CpuProfiler::StartProcessorIfNotStarted).
ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
                                                 ProfileGenerator* generator)
    : Thread(isolate, "v8:ProfEvntProc"),
      generator_(generator),
      running_(true),  // Cleared externally to make Run() exit its loop.
      ticks_buffer_(sizeof(TickSampleEventRecord),
                    kTickSamplesBufferChunkSize,
                    kTickSamplesBufferChunksCount),
      enqueue_order_(0) {
}
59
60
61void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
62                                                  const char* prefix,
63                                                  String* name,
64                                                  Address start) {
65  if (FilterOutCodeCreateEvent(tag)) return;
66  CodeEventsContainer evt_rec;
67  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
68  rec->type = CodeEventRecord::CODE_CREATION;
69  rec->order = ++enqueue_order_;
70  rec->start = start;
71  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
72  rec->size = 1;
73  rec->shared = NULL;
74  events_buffer_.Enqueue(evt_rec);
75}
76
77
78void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
79                                              String* name,
80                                              String* resource_name,
81                                              int line_number,
82                                              Address start,
83                                              unsigned size,
84                                              Address shared) {
85  if (FilterOutCodeCreateEvent(tag)) return;
86  CodeEventsContainer evt_rec;
87  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
88  rec->type = CodeEventRecord::CODE_CREATION;
89  rec->order = ++enqueue_order_;
90  rec->start = start;
91  rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
92  rec->size = size;
93  rec->shared = shared;
94  events_buffer_.Enqueue(evt_rec);
95}
96
97
98void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
99                                              const char* name,
100                                              Address start,
101                                              unsigned size) {
102  if (FilterOutCodeCreateEvent(tag)) return;
103  CodeEventsContainer evt_rec;
104  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
105  rec->type = CodeEventRecord::CODE_CREATION;
106  rec->order = ++enqueue_order_;
107  rec->start = start;
108  rec->entry = generator_->NewCodeEntry(tag, name);
109  rec->size = size;
110  rec->shared = NULL;
111  events_buffer_.Enqueue(evt_rec);
112}
113
114
115void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
116                                              int args_count,
117                                              Address start,
118                                              unsigned size) {
119  if (FilterOutCodeCreateEvent(tag)) return;
120  CodeEventsContainer evt_rec;
121  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
122  rec->type = CodeEventRecord::CODE_CREATION;
123  rec->order = ++enqueue_order_;
124  rec->start = start;
125  rec->entry = generator_->NewCodeEntry(tag, args_count);
126  rec->size = size;
127  rec->shared = NULL;
128  events_buffer_.Enqueue(evt_rec);
129}
130
131
132void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
133  CodeEventsContainer evt_rec;
134  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
135  rec->type = CodeEventRecord::CODE_MOVE;
136  rec->order = ++enqueue_order_;
137  rec->from = from;
138  rec->to = to;
139  events_buffer_.Enqueue(evt_rec);
140}
141
142
143void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
144  CodeEventsContainer evt_rec;
145  CodeDeleteEventRecord* rec = &evt_rec.CodeDeleteEventRecord_;
146  rec->type = CodeEventRecord::CODE_DELETE;
147  rec->order = ++enqueue_order_;
148  rec->start = from;
149  events_buffer_.Enqueue(evt_rec);
150}
151
152
153void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
154                                                          Address to) {
155  CodeEventsContainer evt_rec;
156  SharedFunctionInfoMoveEventRecord* rec =
157      &evt_rec.SharedFunctionInfoMoveEventRecord_;
158  rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
159  rec->order = ++enqueue_order_;
160  rec->from = from;
161  rec->to = to;
162  events_buffer_.Enqueue(evt_rec);
163}
164
165
166void ProfilerEventsProcessor::RegExpCodeCreateEvent(
167    Logger::LogEventsAndTags tag,
168    const char* prefix,
169    String* name,
170    Address start,
171    unsigned size) {
172  if (FilterOutCodeCreateEvent(tag)) return;
173  CodeEventsContainer evt_rec;
174  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
175  rec->type = CodeEventRecord::CODE_CREATION;
176  rec->order = ++enqueue_order_;
177  rec->start = start;
178  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
179  rec->size = size;
180  events_buffer_.Enqueue(evt_rec);
181}
182
183
// Captures the current VM stack as a synthetic tick sample and pushes
// it into the VM-thread sample queue (drained by ProcessTicks). Called
// on the VM thread, e.g. when profiling starts.
void ProfilerEventsProcessor::AddCurrentStack() {
  TickSampleEventRecord record;
  TickSample* sample = &record.sample;
  Isolate* isolate = Isolate::Current();
  sample->state = isolate->current_vm_state();
  sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
  sample->tos = NULL;
  sample->has_external_callback = false;
  sample->frames_count = 0;
  // Walk the JS stack, capping at the sample's fixed-size frame array.
  for (StackTraceFrameIterator it(isolate);
       !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
       it.Advance()) {
    sample->stack[sample->frames_count++] = it.frame()->pc();
  }
  // Stamp with the current (not incremented) order so the sample is
  // matched against the code events enqueued so far.
  record.order = enqueue_order_;
  ticks_from_vm_buffer_.Enqueue(record);
}
201
202
// Dequeues a single code event, if available, and applies it to the
// generator's code map. On a recognized event, stores its order stamp
// into |dequeue_order|. Returns true if a record was consumed (even
// an unrecognized, skipped one); false when the queue was empty.
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
  if (!events_buffer_.IsEmpty()) {
    CodeEventsContainer record;
    events_buffer_.Dequeue(&record);
    // Expands to one case per record type, each dispatching to the
    // corresponding record struct's UpdateCodeMap.
    switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    *dequeue_order = record.generic.order;
    return true;
  }
  return false;
}
223
224
// Consumes tick samples stamped with |dequeue_order| from both sample
// queues. Returns false only when both queues are exhausted; true
// means a sample gated on a later code event (or a VM-thread sample)
// is still pending.
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
  while (true) {
    // Samples recorded synchronously on the VM thread
    // (see AddCurrentStack).
    if (!ticks_from_vm_buffer_.IsEmpty()
        && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
      TickSampleEventRecord record;
      ticks_from_vm_buffer_.Dequeue(&record);
      generator_->RecordTickSample(record.sample);
    }

    // Samples written by the asynchronous sampler.
    const TickSampleEventRecord* rec =
        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
    if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
    // Make a local copy of tick sample record to ensure that it won't
    // be modified as we are processing it. This is possible as the
    // sampler writes w/o any sync to the queue, so if the processor
    // will get far behind, a record may be modified right under its
    // feet.
    TickSampleEventRecord record = *rec;
    if (record.order == dequeue_order) {
      // A paranoid check to make sure that we don't get a memory overrun
      // in case of frames_count having a wild value.
      if (record.sample.frames_count < 0
          || record.sample.frames_count > TickSample::kMaxFramesCount)
        record.sample.frames_count = 0;
      generator_->RecordTickSample(record.sample);
      ticks_buffer_.FinishDequeue();
    } else {
      // The sample belongs to a newer code event; let the caller
      // process that event before we resume.
      return true;
    }
  }
}
256
257
// Processor thread entry point. Interleaves tick samples with code
// events via their shared order stamps, so each sample is resolved
// against the code-map state current at the time it was taken.
void ProfilerEventsProcessor::Run() {
  unsigned dequeue_order = 0;

  while (running_) {
    // Drain ticks belonging to the current order.
    if (ProcessTicks(dequeue_order)) {
      // All ticks of the current dequeue_order are processed,
      // proceed to the next code event.
      ProcessCodeEvent(&dequeue_order);
    }
    YieldCPU();
  }

  // Process remaining tick events.
  ticks_buffer_.FlushResidualRecords();
  // Keep going while there are tick events left, skipping any
  // remaining code events once ticks run out.
  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}
276
277
278void CpuProfiler::StartProfiling(const char* title) {
279  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
280  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
281}
282
283
284void CpuProfiler::StartProfiling(String* title) {
285  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
286  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
287}
288
289
290CpuProfile* CpuProfiler::StopProfiling(const char* title) {
291  return is_profiling() ?
292      Isolate::Current()->cpu_profiler()->StopCollectingProfile(title) : NULL;
293}
294
295
296CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
297  return is_profiling() ?
298      Isolate::Current()->cpu_profiler()->StopCollectingProfile(
299          security_token, title) : NULL;
300}
301
302
303int CpuProfiler::GetProfilesCount() {
304  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
305  // The count of profiles doesn't depend on a security token.
306  return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
307      TokenEnumerator::kNoSecurityToken)->length();
308}
309
310
311CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
312  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
313  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
314  const int token = profiler->token_enumerator_->GetTokenId(security_token);
315  return profiler->profiles_->Profiles(token)->at(index);
316}
317
318
319CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
320  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
321  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
322  const int token = profiler->token_enumerator_->GetTokenId(security_token);
323  return profiler->profiles_->GetProfile(token, uid);
324}
325
326
327TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
328  if (CpuProfiler::is_profiling(isolate)) {
329    return isolate->cpu_profiler()->processor_->TickSampleEvent();
330  } else {
331    return NULL;
332  }
333}
334
335
336void CpuProfiler::DeleteAllProfiles() {
337  Isolate* isolate = Isolate::Current();
338  ASSERT(isolate->cpu_profiler() != NULL);
339  if (is_profiling())
340    isolate->cpu_profiler()->StopProcessor();
341  isolate->cpu_profiler()->ResetProfiles();
342}
343
344
345void CpuProfiler::DeleteProfile(CpuProfile* profile) {
346  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
347  Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
348  delete profile;
349}
350
351
352bool CpuProfiler::HasDetachedProfiles() {
353  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
354  return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
355}
356
357
358void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
359  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
360      Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
361}
362
363
364void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
365                           Code* code, const char* comment) {
366  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
367      tag, comment, code->address(), code->ExecutableSize());
368}
369
370
371void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
372                           Code* code, String* name) {
373  Isolate* isolate = Isolate::Current();
374  isolate->cpu_profiler()->processor_->CodeCreateEvent(
375      tag,
376      name,
377      isolate->heap()->empty_string(),
378      v8::CpuProfileNode::kNoLineNumberInfo,
379      code->address(),
380      code->ExecutableSize(),
381      NULL);
382}
383
384
385void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
386                                  Code* code,
387                                  SharedFunctionInfo* shared,
388                                  String* name) {
389  Isolate* isolate = Isolate::Current();
390  isolate->cpu_profiler()->processor_->CodeCreateEvent(
391      tag,
392      name,
393      isolate->heap()->empty_string(),
394      v8::CpuProfileNode::kNoLineNumberInfo,
395      code->address(),
396      code->ExecutableSize(),
397      shared->address());
398}
399
400
401void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
402                                  Code* code,
403                                  SharedFunctionInfo* shared,
404                                  String* source, int line) {
405  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
406      tag,
407      shared->DebugName(),
408      source,
409      line,
410      code->address(),
411      code->ExecutableSize(),
412      shared->address());
413}
414
415
416void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
417                           Code* code, int args_count) {
418  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
419      tag,
420      args_count,
421      code->address(),
422      code->ExecutableSize());
423}
424
425
426void CpuProfiler::CodeMoveEvent(Address from, Address to) {
427  Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
428}
429
430
431void CpuProfiler::CodeDeleteEvent(Address from) {
432  Isolate::Current()->cpu_profiler()->processor_->CodeDeleteEvent(from);
433}
434
435
436void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
437  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
438  profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
439}
440
441
442void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
443  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
444      Logger::CALLBACK_TAG, "get ", name, entry_point);
445}
446
447
448void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
449  Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
450      Logger::REG_EXP_TAG,
451      "RegExp: ",
452      source,
453      code->address(),
454      code->ExecutableSize());
455}
456
457
458void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
459  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
460      Logger::CALLBACK_TAG, "set ", name, entry_point);
461}
462
463
// Constructs an idle profiler: the generator and processor thread are
// created lazily by StartProcessorIfNotStarted.
CpuProfiler::CpuProfiler()
    : profiles_(new CpuProfilesCollection()),
      next_profile_uid_(1),
      token_enumerator_(new TokenEnumerator()),
      generator_(NULL),
      processor_(NULL),
      need_to_stop_sampler_(false),
      is_profiling_(false) {
}
473
474
// Releases unconditionally-owned objects. generator_ and processor_
// are not deleted here — they are torn down by StopProcessor().
CpuProfiler::~CpuProfiler() {
  delete token_enumerator_;
  delete profiles_;
}
479
480
// Drops all collected profiles by replacing the collection with a
// fresh, empty one.
void CpuProfiler::ResetProfiles() {
  delete profiles_;
  profiles_ = new CpuProfilesCollection();
}
485
486void CpuProfiler::StartCollectingProfile(const char* title) {
487  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
488    StartProcessorIfNotStarted();
489  }
490  processor_->AddCurrentStack();
491}
492
493
494void CpuProfiler::StartCollectingProfile(String* title) {
495  StartCollectingProfile(profiles_->GetName(title));
496}
497
498
// Lazily spins up the profiling machinery — generator, processor
// thread, and stack sampler. Idempotent: does nothing if a processor
// already exists.
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_ == NULL) {
    Isolate* isolate = Isolate::Current();

    // Disable logging when using the new implementation.
    // (Restored by StopProcessor.)
    saved_logging_nesting_ = isolate->logger()->logging_nesting_;
    isolate->logger()->logging_nesting_ = 0;
    generator_ = new ProfileGenerator(profiles_);
    processor_ = new ProfilerEventsProcessor(isolate, generator_);
    NoBarrier_Store(&is_profiling_, true);
    processor_->Start();
    // Enumerate stuff we already have in the heap.
    if (isolate->heap()->HasBeenSetup()) {
      if (!FLAG_prof_browser_mode) {
        // Temporarily force code logging so pre-existing code objects
        // are reported, then restore the user's flag value.
        bool saved_log_code_flag = FLAG_log_code;
        FLAG_log_code = true;
        isolate->logger()->LogCodeObjects();
        FLAG_log_code = saved_log_code_flag;
      }
      isolate->logger()->LogCompiledFunctions();
      isolate->logger()->LogAccessorCallbacks();
    }
    // Enable stack sampling.
    Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
    if (!sampler->IsActive()) {
      sampler->Start();
      // Remember that we own the sampler, so StopProcessor stops it.
      need_to_stop_sampler_ = true;
    }
    sampler->IncreaseProfilingDepth();
  }
}
530
531
// Finishes the profile named |title| and returns it (NULL if there is
// no such live profile).
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
  // Read the sampling rate BEFORE StopProcessorIfLastProfile, which
  // may delete generator_ (see StopProcessor).
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  StopProcessorIfLastProfile(title);
  CpuProfile* result =
      profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
                               title,
                               actual_sampling_rate);
  if (result != NULL) {
    result->Print();
  }
  return result;
}
544
545
// Token-filtered variant of StopCollectingProfile.
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
                                               String* title) {
  // Read the sampling rate BEFORE StopProcessorIfLastProfile, which
  // may delete generator_ (see StopProcessor).
  const double actual_sampling_rate = generator_->actual_sampling_rate();
  const char* profile_title = profiles_->GetName(title);
  StopProcessorIfLastProfile(profile_title);
  int token = token_enumerator_->GetTokenId(security_token);
  return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
}
554
555
556void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
557  if (profiles_->IsLastProfile(title)) StopProcessor();
558}
559
560
// Tears down sampling and the processor thread. Statement order
// matters: the sampler must stop feeding samples before the processor
// thread is joined and the generator it writes into is deleted.
void CpuProfiler::StopProcessor() {
  Logger* logger = Isolate::Current()->logger();
  Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
  sampler->DecreaseProfilingDepth();
  if (need_to_stop_sampler_) {
    // We started the sampler ourselves, so we also stop it.
    sampler->Stop();
    need_to_stop_sampler_ = false;
  }
  processor_->Stop();  // Ask the thread loop to exit...
  processor_->Join();  // ...and wait until it does.
  delete processor_;
  delete generator_;
  processor_ = NULL;
  NoBarrier_Store(&is_profiling_, false);
  generator_ = NULL;
  // Restore the logging level saved in StartProcessorIfNotStarted.
  logger->logging_nesting_ = saved_logging_nesting_;
}
578
579} }  // namespace v8::internal
580
581#endif  // ENABLE_LOGGING_AND_PROFILING
582
583namespace v8 {
584namespace internal {
585
586void CpuProfiler::Setup() {
587#ifdef ENABLE_LOGGING_AND_PROFILING
588  Isolate* isolate = Isolate::Current();
589  if (isolate->cpu_profiler() == NULL) {
590    isolate->set_cpu_profiler(new CpuProfiler());
591  }
592#endif
593}
594
595
596void CpuProfiler::TearDown() {
597#ifdef ENABLE_LOGGING_AND_PROFILING
598  Isolate* isolate = Isolate::Current();
599  if (isolate->cpu_profiler() != NULL) {
600    delete isolate->cpu_profiler();
601  }
602  isolate->set_cpu_profiler(NULL);
603#endif
604}
605
606} }  // namespace v8::internal
607