log.cc revision 257744e915dfc84d6d07a6b2accf8402d9ffc708
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include <stdarg.h>
29
30#include "v8.h"
31
32#include "bootstrapper.h"
33#include "code-stubs.h"
34#include "deoptimizer.h"
35#include "global-handles.h"
36#include "log.h"
37#include "macro-assembler.h"
38#include "runtime-profiler.h"
39#include "serialize.h"
40#include "string-stream.h"
41#include "vm-state-inl.h"
42
43namespace v8 {
44namespace internal {
45
46#ifdef ENABLE_LOGGING_AND_PROFILING
47
48//
49// Sliding state window.  Updates counters to keep track of the last
50// window of kBufferSize states.  This is useful to track where we
51// spent our time.
52//
// Keeps per-state tick counts for the most recent kBufferSize samples.
// Each AddState() call stores the new state in a circular buffer and,
// once the buffer has filled, retires the oldest state so the counters
// always describe exactly the last kBufferSize ticks.
class SlidingStateWindow {
 public:
  explicit SlidingStateWindow(Isolate* isolate);
  ~SlidingStateWindow();
  void AddState(StateTag state);

 private:
  static const int kBufferSize = 256;
  Counters* counters_;      // Source of the per-state counters to update.
  int current_index_;       // Next slot to overwrite in buffer_.
  bool is_full_;            // True once the buffer has wrapped around.
  byte buffer_[kBufferSize];  // States stored as bytes (narrowed StateTag).


  void IncrementStateCounter(StateTag state) {
    counters_->state_counters(state)->Increment();
  }


  void DecrementStateCounter(StateTag state) {
    counters_->state_counters(state)->Decrement();
  }
};
76
77
78//
79// The Profiler samples pc and sp values for the main thread.
80// Each sample is appended to a circular buffer.
81// An independent thread removes data and writes it to the log.
82// This design minimizes the time spent in the sampler.
83//
//
// The Profiler samples pc and sp values for the main thread.
// Each sample is appended to a circular buffer.
// An independent thread removes data and writes it to the log.
// This design minimizes the time spent in the sampler.
//
class Profiler: public Thread {
 public:
  explicit Profiler(Isolate* isolate);
  void Engage();
  void Disengage();

  // Inserts collected profiling data into buffer.  Called from the
  // sampler side; drops the sample (and records overflow) when the
  // consumer thread has fallen behind and the buffer is full.
  void Insert(TickSample* sample) {
    if (paused_)
      return;

    if (Succ(head_) == tail_) {
      // Buffer full: one slot is deliberately kept free so that
      // head_ == tail_ unambiguously means "empty".
      overflow_ = true;
    } else {
      buffer_[head_] = *sample;
      head_ = Succ(head_);
      buffer_semaphore_->Signal();  // Tell we have an element.
    }
  }

  // Waits for a signal and removes profiling data.  Returns whether an
  // overflow occurred since the previous removal.
  bool Remove(TickSample* sample) {
    buffer_semaphore_->Wait();  // Wait for an element.
    *sample = buffer_[tail_];
    bool result = overflow_;
    tail_ = Succ(tail_);
    overflow_ = false;
    return result;
  }

  void Run();

  // Pause and Resume TickSample data collection.
  bool paused() const { return paused_; }
  void pause() { paused_ = true; }
  void resume() { paused_ = false; }

 private:
  // Returns the next index in the cyclic buffer.
  int Succ(int index) { return (index + 1) % kBufferSize; }

  // Cyclic buffer for communicating profiling samples
  // between the signal handler and the worker thread.
  static const int kBufferSize = 128;
  TickSample buffer_[kBufferSize];  // Buffer storage.
  int head_;  // Index to the buffer head.
  int tail_;  // Index to the buffer tail.
  bool overflow_;  // Tell whether a buffer overflow has occurred.
  Semaphore* buffer_semaphore_;  // Semaphore used for buffer synchronization.

  // Tells whether profiler is engaged, that is, processing thread is started.
  bool engaged_;

  // Tells whether worker thread should continue running.
  bool running_;

  // Tells whether we are currently recording tick samples.
  // NOTE(review): plain bool shared between producer and consumer; the
  // surrounding design appears to rely on single-writer semantics — confirm.
  bool paused_;
};
143
144
145//
146// StackTracer implementation
147//
// Fills |sample| with a stack trace of the currently executing JS code.
// Runs in sampler (interrupt) context, so it must stay allocation-free.
void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
  ASSERT(isolate->IsInitialized());

  // Start from a clean slate so stale data cannot leak through when we
  // bail out early below.
  sample->tos = NULL;
  sample->frames_count = 0;
  sample->has_external_callback = false;

  // Avoid collecting traces while doing GC.
  if (sample->state == GC) return;

  const Address js_entry_sp =
      Isolate::js_entry_sp(isolate->thread_local_top());
  if (js_entry_sp == 0) {
    // Not executing JS now.
    return;
  }

  const Address callback = isolate->external_callback();
  if (callback != NULL) {
    sample->external_callback = callback;
    sample->has_external_callback = true;
  } else {
    // Sample potential return address value for frameless invocation of
    // stubs (we'll figure out later, if this value makes sense).
    sample->tos = Memory::Address_at(sample->sp);
    sample->has_external_callback = false;  // Redundant (set above).
  }

  // Walk the JS stack and record up to kMaxFramesCount frame pcs.
  SafeStackTraceFrameIterator it(isolate,
                                 sample->fp, sample->sp,
                                 sample->sp, js_entry_sp);
  int i = 0;
  while (!it.done() && i < TickSample::kMaxFramesCount) {
    sample->stack[i++] = it.frame()->pc();
    it.Advance();
  }
  sample->frames_count = i;
}
186
187
188//
189// Ticker used to provide ticks to the profiler and the sliding state
190// window.
191//
// Sampler subclass that fans each tick out to the Profiler (for the tick
// log) and/or the SlidingStateWindow (for state counters).  The sampler
// is kept active only while at least one consumer is registered or the
// runtime profiler needs it.
class Ticker: public Sampler {
 public:
  Ticker(Isolate* isolate, int interval):
      Sampler(isolate, interval),
      window_(NULL),
      profiler_(NULL) {}

  ~Ticker() { if (IsActive()) Stop(); }

  // Dispatches one sample to whichever consumers are attached.
  virtual void Tick(TickSample* sample) {
    if (profiler_) profiler_->Insert(sample);
    if (window_) window_->AddState(sample->state);
  }

  void SetWindow(SlidingStateWindow* window) {
    window_ = window;
    if (!IsActive()) Start();
  }

  void ClearWindow() {
    window_ = NULL;
    // Keep sampling if the profiler or the runtime profiler still needs it.
    if (!profiler_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
  }

  void SetProfiler(Profiler* profiler) {
    ASSERT(profiler_ == NULL);
    profiler_ = profiler;
    IncreaseProfilingDepth();
    // With --prof-lazy, sampling starts later on demand.
    if (!FLAG_prof_lazy && !IsActive()) Start();
  }

  void ClearProfiler() {
    DecreaseProfilingDepth();
    profiler_ = NULL;
    if (!window_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
  }

 protected:
  virtual void DoSampleStack(TickSample* sample) {
    StackTracer::Trace(isolate(), sample);
  }

 private:
  SlidingStateWindow* window_;   // Not owned; may be NULL.
  Profiler* profiler_;           // Not owned; may be NULL.
};
238
239
240//
241// SlidingStateWindow implementation.
242//
// Pre-fills the window with OTHER so counters start from a known state,
// then registers with the ticker to begin receiving state updates.
SlidingStateWindow::SlidingStateWindow(Isolate* isolate)
    : counters_(isolate->counters()), current_index_(0), is_full_(false) {
  for (int i = 0; i < kBufferSize; i++) {
    buffer_[i] = static_cast<byte>(OTHER);
  }
  isolate->logger()->ticker_->SetWindow(this);
}
250
251
// Unregisters from the ticker so no further AddState() calls arrive.
// NOTE(review): uses the LOGGER macro while the constructor used the
// explicit isolate — presumably equivalent here; confirm for multi-isolate.
SlidingStateWindow::~SlidingStateWindow() {
  LOGGER->ticker_->ClearWindow();
}
255
256
// Records |state| in the circular buffer.  Once the window is full, the
// state being overwritten is decremented first so the counters always
// sum to exactly kBufferSize.
void SlidingStateWindow::AddState(StateTag state) {
  if (is_full_) {
    DecrementStateCounter(static_cast<StateTag>(buffer_[current_index_]));
  } else if (current_index_ == kBufferSize - 1) {
    // Writing the last free slot below makes the window full.
    is_full_ = true;
  }
  buffer_[current_index_] = static_cast<byte>(state);
  IncrementStateCounter(state);
  // The mask below only works for power-of-two buffer sizes.
  ASSERT(IsPowerOf2(kBufferSize));
  current_index_ = (current_index_ + 1) & (kBufferSize - 1);
}
268
269
270//
271// Profiler implementation.
272//
// Constructs an idle profiler; no thread is started and no ticks are
// received until Engage() is called.  The semaphore starts at 0 so the
// consumer blocks until the first sample arrives.
Profiler::Profiler(Isolate* isolate)
    : Thread(isolate, "v8:Profiler"),
      head_(0),
      tail_(0),
      overflow_(false),
      buffer_semaphore_(OS::CreateSemaphore(0)),
      engaged_(false),
      running_(false),
      paused_(false) {
}
283
284
// Starts the consumer thread and registers with the ticker to begin
// receiving samples.  Idempotent: subsequent calls are no-ops.
void Profiler::Engage() {
  if (engaged_) return;
  engaged_ = true;

  // TODO(mnaganov): This is actually "Chromium" mode. Flags need to be revised.
  // http://code.google.com/p/v8/issues/detail?id=487
  if (!FLAG_prof_lazy) {
    OS::LogSharedLibraryAddresses();
  }

  // Start thread processing the profiler buffer.
  running_ = true;
  Start();

  // Register to get ticks.
  LOGGER->ticker_->SetProfiler(this);

  LOGGER->ProfilerBeginEvent();
}
304
305
// Stops sample delivery and shuts down the consumer thread.  Safe to
// call when not engaged (no-op).
void Profiler::Disengage() {
  if (!engaged_) return;

  // Stop receiving ticks.
  LOGGER->ticker_->ClearProfiler();

  // Terminate the worker thread by setting running_ to false,
  // inserting a fake element in the queue and then wait for
  // the thread to terminate.
  running_ = false;
  TickSample sample;
  // Reset 'paused_' flag, otherwise semaphore may not be signalled.
  resume();
  Insert(&sample);
  Join();

  LOG(ISOLATE, UncheckedStringEvent("profiler", "end"));
}
324
325
// Consumer thread body: blocks on the ring buffer and logs each tick
// until Disengage() clears running_ and unblocks us with a fake sample.
void Profiler::Run() {
  TickSample sample;
  bool overflow = Remove(&sample);  // Blocks until a sample is available.
  i::Isolate* isolate = ISOLATE;
  while (running_) {
    LOG(isolate, TickEvent(&sample, overflow));
    overflow = Remove(&sample);
  }
}
335
336
337// Low-level profiling event structures.
338
// Binary record for a code-creation event in the low-level (--ll-prof)
// log; the name bytes follow the struct in the stream.
struct LowLevelCodeCreateStruct {
  static const char kTag = 'C';

  int32_t name_size;
  Address code_address;
  int32_t code_size;
};
346
347
// Binary record for a code-move event in the low-level log.
struct LowLevelCodeMoveStruct {
  static const char kTag = 'M';

  Address from_address;
  Address to_address;
};
354
355
// Binary record for a code-deletion event in the low-level log.
struct LowLevelCodeDeleteStruct {
  static const char kTag = 'D';

  Address address;
};
361
362
// Binary record mapping a code address to its snapshot position.
struct LowLevelSnapshotPositionStruct {
  static const char kTag = 'P';

  Address address;
  int32_t position;
};
369
370
// Tag emitted (with no payload) when a code-moving GC happens.
static const char kCodeMovingGCTag = 'G';
372
373
374//
375// Logger class implementation.
376//
377
378class Logger::NameMap {
379 public:
380  NameMap() : impl_(&PointerEquals) {}
381
382  ~NameMap() {
383    for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
384      DeleteArray(static_cast<const char*>(p->value));
385    }
386  }
387
388  void Insert(Address code_address, const char* name, int name_size) {
389    HashMap::Entry* entry = FindOrCreateEntry(code_address);
390    if (entry->value == NULL) {
391      entry->value = CopyName(name, name_size);
392    }
393  }
394
395  const char* Lookup(Address code_address) {
396    HashMap::Entry* entry = FindEntry(code_address);
397    return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
398  }
399
400  void Remove(Address code_address) {
401    HashMap::Entry* entry = FindEntry(code_address);
402    if (entry != NULL) DeleteArray(static_cast<const char*>(entry->value));
403    RemoveEntry(entry);
404  }
405
406  void Move(Address from, Address to) {
407    if (from == to) return;
408    HashMap::Entry* from_entry = FindEntry(from);
409    ASSERT(from_entry != NULL);
410    void* value = from_entry->value;
411    RemoveEntry(from_entry);
412    HashMap::Entry* to_entry = FindOrCreateEntry(to);
413    ASSERT(to_entry->value == NULL);
414    to_entry->value = value;
415  }
416
417 private:
418  static bool PointerEquals(void* lhs, void* rhs) {
419    return lhs == rhs;
420  }
421
422  static char* CopyName(const char* name, int name_size) {
423    char* result = NewArray<char>(name_size + 1);
424    for (int i = 0; i < name_size; ++i) {
425      char c = name[i];
426      if (c == '\0') c = ' ';
427      result[i] = c;
428    }
429    result[name_size] = '\0';
430    return result;
431  }
432
433  HashMap::Entry* FindOrCreateEntry(Address code_address) {
434    return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
435  }
436
437  HashMap::Entry* FindEntry(Address code_address) {
438    return impl_.Lookup(code_address, ComputePointerHash(code_address), false);
439  }
440
441  void RemoveEntry(HashMap::Entry* entry) {
442    impl_.Remove(entry->key, entry->hash);
443  }
444
445  HashMap impl_;
446
447  DISALLOW_COPY_AND_ASSIGN(NameMap);
448};
449
450
// Accumulates a code object's name as UTF-8 into a fixed-size buffer.
// All append operations silently truncate once kUtf8BufferSize bytes
// have been written; the buffer is reused via Reset() between events.
class Logger::NameBuffer {
 public:
  NameBuffer() { Reset(); }

  void Reset() {
    utf8_pos_ = 0;
  }

  // Appends |str|, converting two-byte strings to UTF-8.
  void AppendString(String* str) {
    if (str == NULL) return;
    if (str->HasOnlyAsciiChars()) {
      // ASCII fast path: flatten directly, clipped to the space left.
      int utf8_length = Min(str->length(), kUtf8BufferSize - utf8_pos_);
      String::WriteToFlat(str, utf8_buffer_ + utf8_pos_, 0, utf8_length);
      utf8_pos_ += utf8_length;
      return;
    }
    // Two-byte path: flatten at most kUc16BufferSize characters into a
    // scratch buffer, then encode each character as UTF-8.
    int uc16_length = Min(str->length(), kUc16BufferSize);
    String::WriteToFlat(str, uc16_buffer_, 0, uc16_length);
    for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
      uc16 c = uc16_buffer_[i];
      if (c <= String::kMaxAsciiCharCodeU) {
        utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
      } else {
        int char_length = unibrow::Utf8::Length(c);
        // Never emit a partial multi-byte sequence.
        if (utf8_pos_ + char_length > kUtf8BufferSize) break;
        unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c);
        utf8_pos_ += char_length;
      }
    }
  }

  // Appends |size| raw bytes, clipped to the remaining space.
  void AppendBytes(const char* bytes, int size) {
    size = Min(size, kUtf8BufferSize - utf8_pos_);
    memcpy(utf8_buffer_ + utf8_pos_, bytes, size);
    utf8_pos_ += size;
  }

  // Appends a NUL-terminated C string.
  void AppendBytes(const char* bytes) {
    AppendBytes(bytes, StrLength(bytes));
  }

  void AppendByte(char c) {
    if (utf8_pos_ >= kUtf8BufferSize) return;
    utf8_buffer_[utf8_pos_++] = c;
  }

  // Appends the decimal representation of |n| only if it fits entirely.
  void AppendInt(int n) {
    Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_);
    int size = OS::SNPrintF(buffer, "%d", n);
    if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
      utf8_pos_ += size;
    }
  }

  // NOTE: the returned buffer is not NUL-terminated; pair with size().
  const char* get() { return utf8_buffer_; }
  int size() const { return utf8_pos_; }

 private:
  static const int kUtf8BufferSize = 512;
  static const int kUc16BufferSize = 128;

  int utf8_pos_;  // Number of valid bytes in utf8_buffer_.
  char utf8_buffer_[kUtf8BufferSize];
  uc16 uc16_buffer_[kUc16BufferSize];
};
516
517
// Constructs an uninitialized logger.  Owned helpers (log_, name_buffer_)
// are created here; ticker_/profiler_/sliding_state_window_ and
// address_to_name_map_ are created later during setup as needed.
Logger::Logger()
  : ticker_(NULL),
    profiler_(NULL),
    sliding_state_window_(NULL),
    log_events_(NULL),
    logging_nesting_(0),
    cpu_profiler_nesting_(0),
    heap_profiler_nesting_(0),
    log_(new Log(this)),
    name_buffer_(new NameBuffer),
    address_to_name_map_(NULL),
    is_initialized_(false),
    last_address_(NULL),
    prev_sp_(NULL),
    prev_function_(NULL),
    prev_to_(NULL),
    prev_code_(NULL) {
}
536
537
// Releases the logger-owned helpers created in the constructor.
Logger::~Logger() {
  delete address_to_name_map_;
  delete name_buffer_;
  delete log_;
}
543
544
// Table of textual names for log events/tags, indexed by the
// LogEventsAndTags enum (expanded from the same macro list, so the
// order matches by construction).
#define DECLARE_EVENT(ignore1, name) name,
static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
  LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
};
#undef DECLARE_EVENT
550
551
// Writes the 'profiler,"begin"' marker with the sampling interval so the
// tick processor knows profiling has started.
void Logger::ProfilerBeginEvent() {
  if (!log_->IsEnabled()) return;
  LogMessageBuilder msg(this);
  msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
  msg.WriteToLogFile();
}
558
559#endif  // ENABLE_LOGGING_AND_PROFILING
560
561
// Logs a 'name,"value"' line when --log is enabled.
void Logger::StringEvent(const char* name, const char* value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log) UncheckedStringEvent(name, value);
#endif
}
567
568
#ifdef ENABLE_LOGGING_AND_PROFILING
// Like StringEvent but skips the FLAG_log check; used internally when
// the caller has already decided the event must be written.
void Logger::UncheckedStringEvent(const char* name, const char* value) {
  if (!log_->IsEnabled()) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,\"%s\"\n", name, value);
  msg.WriteToLogFile();
}
#endif
577
578
// Logs a 'name,value' line for an int when --log is enabled.
void Logger::IntEvent(const char* name, int value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log) UncheckedIntEvent(name, value);
#endif
}
584
585
// Logs a 'name,value' line for an intptr_t when --log is enabled.
void Logger::IntPtrTEvent(const char* name, intptr_t value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log) UncheckedIntPtrTEvent(name, value);
#endif
}
591
592
#ifdef ENABLE_LOGGING_AND_PROFILING
// Unchecked variant of IntEvent (no FLAG_log test).
void Logger::UncheckedIntEvent(const char* name, int value) {
  if (!log_->IsEnabled()) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,%d\n", name, value);
  msg.WriteToLogFile();
}
#endif
601
602
#ifdef ENABLE_LOGGING_AND_PROFILING
// Unchecked variant of IntPtrTEvent (no FLAG_log test).
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
  if (!log_->IsEnabled()) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
  msg.WriteToLogFile();
}
#endif
611
612
// Logs a handle event ('name,0x<location>') when --log-handles is set.
void Logger::HandleEvent(const char* name, Object** location) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_handles) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
  msg.WriteToLogFile();
#endif
}
621
622
#ifdef ENABLE_LOGGING_AND_PROFILING
// ApiEvent is private so all the calls come from the Logger class.  It is the
// caller's responsibility to ensure that log is enabled and that
// FLAG_log_api is true.
//
// Formats a printf-style message and writes it as one log line.
void Logger::ApiEvent(const char* format, ...) {
  ASSERT(log_->IsEnabled() && FLAG_log_api);
  LogMessageBuilder msg(this);
  va_list ap;
  va_start(ap, format);
  msg.AppendVA(format, ap);
  va_end(ap);
  msg.WriteToLogFile();
}
#endif
637
638
// Logs a named security check against |key|; string keys are printed
// verbatim, undefined and other keys get fixed placeholders.
void Logger::ApiNamedSecurityCheck(Object* key) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_api) return;
  if (key->IsString()) {
    SmartPointer<char> str =
        String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
    ApiEvent("api,check-security,\"%s\"\n", *str);
  } else if (key->IsUndefined()) {
    ApiEvent("api,check-security,undefined\n");
  } else {
    ApiEvent("api,check-security,['no-name']\n");
  }
#endif
}
653
654
// Logs a shared library's path and address range so the tick processor
// can attribute C++ ticks to it.  Narrow-string variant.
void Logger::SharedLibraryEvent(const char* library_path,
                                uintptr_t start,
                                uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_prof) return;
  LogMessageBuilder msg(this);
  msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
             library_path,
             start,
             end);
  msg.WriteToLogFile();
#endif
}
668
669
// Wide-string variant of SharedLibraryEvent (Windows paths).
void Logger::SharedLibraryEvent(const wchar_t* library_path,
                                uintptr_t start,
                                uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_prof) return;
  LogMessageBuilder msg(this);
  msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
             library_path,
             start,
             end);
  msg.WriteToLogFile();
#endif
}
683
684
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
  // Prints "/" + re.source + "/" +
  //      (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
  LogMessageBuilder msg(this);

  Handle<Object> source = GetProperty(regexp, "source");
  if (!source->IsString()) {
    // NOTE(review): this returns without WriteToLogFile(), so the
    // "no source" text is apparently dropped — confirm whether the
    // builder flushes on destruction or this is intentional.
    msg.Append("no source");
    return;
  }

  // 'a' prefix marks atom (literal-string) regexps.
  switch (regexp->TypeTag()) {
    case JSRegExp::ATOM:
      msg.Append('a');
      break;
    default:
      break;
  }
  msg.Append('/');
  msg.AppendDetailed(*Handle<String>::cast(source), false);
  msg.Append('/');

  // global flag
  Handle<Object> global = GetProperty(regexp, "global");
  if (global->IsTrue()) {
    msg.Append('g');
  }
  // ignorecase flag
  Handle<Object> ignorecase = GetProperty(regexp, "ignoreCase");
  if (ignorecase->IsTrue()) {
    msg.Append('i');
  }
  // multiline flag
  Handle<Object> multiline = GetProperty(regexp, "multiline");
  if (multiline->IsTrue()) {
    msg.Append('m');
  }

  msg.WriteToLogFile();
}
#endif  // ENABLE_LOGGING_AND_PROFILING
727
728
// Logs a regexp compilation and whether it was served from the cache.
void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_regexp) return;
  LogMessageBuilder msg(this);
  msg.Append("regexp-compile,");
  LogRegExpSource(regexp);
  msg.Append(in_cache ? ",hit\n" : ",miss\n");
  msg.WriteToLogFile();
#endif
}
739
740
// Logs a runtime-call event.  |format| is a template where "%<digit><type>"
// directives are substituted with elements of |args|; <type> is one of
// s/S (string), r (regexp source), x (hex smi), i (decimal smi).
void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_runtime) return;
  HandleScope scope;
  LogMessageBuilder msg(this);
  for (int i = 0; i < format.length(); i++) {
    char c = format[i];
    if (c == '%' && i <= format.length() - 2) {
      // Consume the argument-index digit following '%'.
      i++;
      ASSERT('0' <= format[i] && format[i] <= '9');
      MaybeObject* maybe = args->GetElement(format[i] - '0');
      Object* obj;
      if (!maybe->ToObject(&obj)) {
        msg.Append("<exception>");
        continue;
      }
      // Consume the type character.
      // NOTE(review): assumes a type char always follows the digit; a
      // format ending in "%<digit>" would read past the vector — the
      // format strings are compile-time constants, presumably well-formed.
      i++;
      switch (format[i]) {
        case 's':
          msg.AppendDetailed(String::cast(obj), false);
          break;
        case 'S':
          msg.AppendDetailed(String::cast(obj), true);
          break;
        case 'r':
          Logger::LogRegExpSource(Handle<JSRegExp>(JSRegExp::cast(obj)));
          break;
        case 'x':
          msg.Append("0x%x", Smi::cast(obj)->value());
          break;
        case 'i':
          msg.Append("%i", Smi::cast(obj)->value());
          break;
        default:
          UNREACHABLE();
      }
    } else {
      msg.Append(c);
    }
  }
  msg.Append('\n');
  msg.WriteToLogFile();
#endif
}
785
786
// Logs a security check against a numeric property index.
void Logger::ApiIndexedSecurityCheck(uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_api) return;
  ApiEvent("api,check-security,%u\n", index);
#endif
}
793
794
// Logs an API access to the named property |name| of |holder|:
// 'api,<tag>,"<class>","<property>"'.
void Logger::ApiNamedPropertyAccess(const char* tag,
                                    JSObject* holder,
                                    Object* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  ASSERT(name->IsString());
  if (!log_->IsEnabled() || !FLAG_log_api) return;
  String* class_name_obj = holder->class_name();
  SmartPointer<char> class_name =
      class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  SmartPointer<char> property_name =
      String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
#endif
}
809
// Logs an API access to element |index| of |holder|:
// 'api,<tag>,"<class>",<index>'.
void Logger::ApiIndexedPropertyAccess(const char* tag,
                                      JSObject* holder,
                                      uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_api) return;
  String* class_name_obj = holder->class_name();
  SmartPointer<char> class_name =
      class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
#endif
}
821
// Logs an API access to |object| itself: 'api,<tag>,"<class>"'.
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_api) return;
  String* class_name_obj = object->class_name();
  SmartPointer<char> class_name =
      class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
#endif
}
831
832
// Logs entry into the named API function: 'api,<name>'.
void Logger::ApiEntryCall(const char* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_api) return;
  ApiEvent("api,%s\n", name);
#endif
}
839
840
// Logs an allocation: 'new,<name>,0x<address>,<size>'.
void Logger::NewEvent(const char* name, void* object, size_t size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log) return;
  LogMessageBuilder msg(this);
  msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
             static_cast<unsigned int>(size));
  msg.WriteToLogFile();
#endif
}
850
851
// Logs a deallocation: 'delete,<name>,0x<address>'.
void Logger::DeleteEvent(const char* name, void* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log) return;
  LogMessageBuilder msg(this);
  msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
  msg.WriteToLogFile();
#endif
}
860
861
// Static convenience wrapper forwarding to the current isolate's logger.
void Logger::NewEventStatic(const char* name, void* object, size_t size) {
  LOGGER->NewEvent(name, object, size);
}
865
866
// Static convenience wrapper forwarding to the current isolate's logger.
void Logger::DeleteEventStatic(const char* name, void* object) {
  LOGGER->DeleteEvent(name, object);
}
870
#ifdef ENABLE_LOGGING_AND_PROFILING
// Shared body for the three callback-event variants below; |prefix|
// distinguishes plain callbacks from getters ("get ") and setters ("set ").
void Logger::CallbackEventInternal(const char* prefix, const char* name,
                                   Address entry_point) {
  if (!log_->IsEnabled() || !FLAG_log_code) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,%s,",
             kLogEventsNames[CODE_CREATION_EVENT],
             kLogEventsNames[CALLBACK_TAG]);
  msg.AppendAddress(entry_point);
  msg.Append(",1,\"%s%s\"", prefix, name);
  msg.Append('\n');
  msg.WriteToLogFile();
}
#endif
885
886
// Logs creation of a native callback registered under |name|.
void Logger::CallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_code) return;
  SmartPointer<char> str =
      name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  CallbackEventInternal("", *str, entry_point);
#endif
}
895
896
// Logs creation of a native getter callback for property |name|.
void Logger::GetterCallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_code) return;
  SmartPointer<char> str =
      name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  CallbackEventInternal("get ", *str, entry_point);
#endif
}
905
906
// Logs creation of a native setter callback for property |name|.
void Logger::SetterCallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_code) return;
  SmartPointer<char> str =
      name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  CallbackEventInternal("set ", *str, entry_point);
#endif
}
915
916
// Logs creation of |code| described by a free-form |comment|.  Feeds the
// low-level log and the snapshot name map when those consumers are
// active, then the regular code-creation line when --log-code is set.
void Logger::CodeCreateEvent(LogEventsAndTags tag,
                             Code* code,
                             const char* comment) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled()) return;
  // Build the "<tag>:<comment>" name only if some consumer needs it.
  if (FLAG_ll_prof || Serializer::enabled()) {
    name_buffer_->Reset();
    name_buffer_->AppendBytes(kLogEventsNames[tag]);
    name_buffer_->AppendByte(':');
    name_buffer_->AppendBytes(comment);
  }
  if (FLAG_ll_prof) {
    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
  }
  if (Serializer::enabled()) {
    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
  }
  if (!FLAG_log_code) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,%s,",
             kLogEventsNames[CODE_CREATION_EVENT],
             kLogEventsNames[tag]);
  msg.AppendAddress(code->address());
  msg.Append(",%d,\"", code->ExecutableSize());
  // Escape embedded quotes so the comment stays one quoted field.
  for (const char* p = comment; *p != '\0'; p++) {
    if (*p == '"') {
      msg.Append('\\');
    }
    msg.Append(*p);
  }
  msg.Append('"');
  msg.Append('\n');
  msg.WriteToLogFile();
#endif
}
952
953
// Logs creation of |code| named by the V8 string |name|.
void Logger::CodeCreateEvent(LogEventsAndTags tag,
                             Code* code,
                             String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled()) return;
  // Build the "<tag>:<name>" low-level name only if a consumer needs it.
  if (FLAG_ll_prof || Serializer::enabled()) {
    name_buffer_->Reset();
    name_buffer_->AppendBytes(kLogEventsNames[tag]);
    name_buffer_->AppendByte(':');
    name_buffer_->AppendString(name);
  }
  if (FLAG_ll_prof) {
    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
  }
  if (Serializer::enabled()) {
    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
  }
  if (!FLAG_log_code) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,%s,",
             kLogEventsNames[CODE_CREATION_EVENT],
             kLogEventsNames[tag]);
  msg.AppendAddress(code->address());
  msg.Append(",%d,\"", code->ExecutableSize());
  msg.AppendDetailed(name, false);
  msg.Append('"');
  msg.Append('\n');
  msg.WriteToLogFile();
#endif
}
984
985
#ifdef ENABLE_LOGGING_AND_PROFILING
// ComputeMarker must only be used when SharedFunctionInfo is known.
// Yields "*" for optimized code, "~" for not-yet-optimized code that is
// still optimizable, and the empty string for everything else.
static const char* ComputeMarker(Code* code) {
  if (code->kind() == Code::OPTIMIZED_FUNCTION) return "*";
  if (code->kind() != Code::FUNCTION) return "";
  return code->optimizable() ? "~" : "";
}
#endif
996
997
// Logs creation of |code| for function |shared| named |name|; the line
// also carries the SharedFunctionInfo address and an optimization marker.
void Logger::CodeCreateEvent(LogEventsAndTags tag,
                             Code* code,
                             SharedFunctionInfo* shared,
                             String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled()) return;
  // Build "<tag>:<marker><name>" only if a low-level consumer needs it.
  if (FLAG_ll_prof || Serializer::enabled()) {
    name_buffer_->Reset();
    name_buffer_->AppendBytes(kLogEventsNames[tag]);
    name_buffer_->AppendByte(':');
    name_buffer_->AppendBytes(ComputeMarker(code));
    name_buffer_->AppendString(name);
  }
  if (FLAG_ll_prof) {
    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
  }
  if (Serializer::enabled()) {
    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
  }
  if (!FLAG_log_code) return;
  // Skip the shared lazy-compile stub; logging it for every function
  // would be noise.
  if (code == Isolate::Current()->builtins()->builtin(
      Builtins::kLazyCompile))
    return;

  LogMessageBuilder msg(this);
  SmartPointer<char> str =
      name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  msg.Append("%s,%s,",
             kLogEventsNames[CODE_CREATION_EVENT],
             kLogEventsNames[tag]);
  msg.AppendAddress(code->address());
  msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
  msg.AppendAddress(shared->address());
  msg.Append(",%s", ComputeMarker(code));
  msg.Append('\n');
  msg.WriteToLogFile();
#endif
}
1036
1037
// Although, it is possible to extract source and line from
// the SharedFunctionInfo object, we left it to caller
// to leave logging functions free from heap allocations.
//
// Logs creation of |code| for |shared|, annotated with the script
// |source| name and |line| number ("<name> <source>:<line>").
void Logger::CodeCreateEvent(LogEventsAndTags tag,
                             Code* code,
                             SharedFunctionInfo* shared,
                             String* source, int line) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled()) return;
  // Build "<tag>:<marker><name> <source>:<line>" only when needed.
  if (FLAG_ll_prof || Serializer::enabled()) {
    name_buffer_->Reset();
    name_buffer_->AppendBytes(kLogEventsNames[tag]);
    name_buffer_->AppendByte(':');
    name_buffer_->AppendBytes(ComputeMarker(code));
    name_buffer_->AppendString(shared->DebugName());
    name_buffer_->AppendByte(' ');
    name_buffer_->AppendString(source);
    name_buffer_->AppendByte(':');
    name_buffer_->AppendInt(line);
  }
  if (FLAG_ll_prof) {
    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
  }
  if (Serializer::enabled()) {
    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
  }
  if (!FLAG_log_code) return;
  LogMessageBuilder msg(this);
  SmartPointer<char> name =
      shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  SmartPointer<char> sourcestr =
      source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
  msg.Append("%s,%s,",
             kLogEventsNames[CODE_CREATION_EVENT],
             kLogEventsNames[tag]);
  msg.AppendAddress(code->address());
  msg.Append(",%d,\"%s %s:%d\",",
             code->ExecutableSize(),
             *name,
             *sourcestr,
             line);
  msg.AppendAddress(shared->address());
  msg.Append(",%s", ComputeMarker(code));
  msg.Append('\n');
  msg.WriteToLogFile();
#endif
}
1085
1086
// Logs creation of anonymous |code| identified only by its argument count
// (e.g. argument-count-specialized stubs).
void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled()) return;
  // "<tag>:<args_count>" serves as the name for the low-level log and the
  // snapshot name map.
  if (FLAG_ll_prof || Serializer::enabled()) {
    name_buffer_->Reset();
    name_buffer_->AppendBytes(kLogEventsNames[tag]);
    name_buffer_->AppendByte(':');
    name_buffer_->AppendInt(args_count);
  }
  if (FLAG_ll_prof) {
    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
  }
  if (Serializer::enabled()) {
    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
  }
  if (!FLAG_log_code) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,%s,",
             kLogEventsNames[CODE_CREATION_EVENT],
             kLogEventsNames[tag]);
  msg.AppendAddress(code->address());
  msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
  msg.Append('\n');
  msg.WriteToLogFile();
#endif
}
1113
1114
// Records in the low-level log that a moving GC is about to touch code
// objects, and pokes the OS-level profiler hook.
// NOTE(review): unlike the other LowLevel* emitters this does not check
// log_->ll_output_handle_ before writing — presumably the handle is always
// open when FLAG_ll_prof is set; verify against Log::Initialize.
void Logger::CodeMovingGCEvent() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_ll_prof) return;
  LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag));
  OS::SignalCodeMovingGC();
#endif
}
1122
1123
// Logs creation of compiled regexp |code|, using the regexp |source| text
// as the name. AppendDetailed escapes the pattern for the quoted field.
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled()) return;
  // Build "RegExp:<source>" once for the low-level log and snapshot map.
  if (FLAG_ll_prof || Serializer::enabled()) {
    name_buffer_->Reset();
    name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
    name_buffer_->AppendByte(':');
    name_buffer_->AppendString(source);
  }
  if (FLAG_ll_prof) {
    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
  }
  if (Serializer::enabled()) {
    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
  }
  if (!FLAG_log_code) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,%s,",
             kLogEventsNames[CODE_CREATION_EVENT],
             kLogEventsNames[REG_EXP_TAG]);
  msg.AppendAddress(code->address());
  msg.Append(",%d,\"", code->ExecutableSize());
  msg.AppendDetailed(source, false);
  msg.Append('\"');
  msg.Append('\n');
  msg.WriteToLogFile();
#endif
}
1152
1153
// Logs that the code object at |from| was relocated to |to|. Keeps the
// low-level log and the snapshot name map in sync before emitting the
// textual "code-move" event.
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled()) return;
  if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
  // The map is created lazily by RegisterSnapshotCodeName, so it may be
  // NULL even while serializing.
  if (Serializer::enabled() && address_to_name_map_ != NULL) {
    address_to_name_map_->Move(from, to);
  }
  MoveEventInternal(CODE_MOVE_EVENT, from, to);
#endif
}
1164
1165
// Logs that the code object at |from| was collected/deleted. Mirrors
// CodeMoveEvent: low-level log and snapshot map first, then the textual
// "code-delete" event.
void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled()) return;
  if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
  // Lazily-created map may be NULL even while serializing.
  if (Serializer::enabled() && address_to_name_map_ != NULL) {
    address_to_name_map_->Remove(from);
  }
  DeleteEventInternal(CODE_DELETE_EVENT, from);
#endif
}
1176
1177
// Logs the position |pos| of the object at |addr| within the snapshot.
// While serializing, additionally emits a "snapshot-code-name" line that
// ties the snapshot position to the previously registered code name.
void Logger::SnapshotPositionEvent(Address addr, int pos) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled()) return;
  if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos);
  if (Serializer::enabled() && address_to_name_map_ != NULL) {
    const char* code_name = address_to_name_map_->Lookup(addr);
    if (code_name == NULL) return;  // Not a code object.
    LogMessageBuilder msg(this);
    msg.Append("%s,%d,\"", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
    // Escape embedded quotes so the name stays a single quoted CSV field.
    for (const char* p = code_name; *p != '\0'; ++p) {
      if (*p == '"') msg.Append('\\');
      msg.Append(*p);
    }
    msg.Append("\"\n");
    msg.WriteToLogFile();
  }
  if (!FLAG_log_snapshot_positions) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
  msg.AppendAddress(addr);
  msg.Append(",%d", pos);
  msg.Append('\n');
  msg.WriteToLogFile();
#endif
}
1203
1204
// Logs relocation of a SharedFunctionInfo object from |from| to |to|.
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
#endif
}
1210
1211
1212#ifdef ENABLE_LOGGING_AND_PROFILING
1213void Logger::MoveEventInternal(LogEventsAndTags event,
1214                               Address from,
1215                               Address to) {
1216  if (!log_->IsEnabled() || !FLAG_log_code) return;
1217  LogMessageBuilder msg(this);
1218  msg.Append("%s,", kLogEventsNames[event]);
1219  msg.AppendAddress(from);
1220  msg.Append(',');
1221  msg.AppendAddress(to);
1222  msg.Append('\n');
1223  msg.WriteToLogFile();
1224}
1225#endif
1226
1227
1228#ifdef ENABLE_LOGGING_AND_PROFILING
1229void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
1230  if (!log_->IsEnabled() || !FLAG_log_code) return;
1231  LogMessageBuilder msg(this);
1232  msg.Append("%s,", kLogEventsNames[event]);
1233  msg.AppendAddress(from);
1234  msg.Append('\n');
1235  msg.WriteToLogFile();
1236}
1237#endif
1238
1239
1240void Logger::ResourceEvent(const char* name, const char* tag) {
1241#ifdef ENABLE_LOGGING_AND_PROFILING
1242  if (!log_->IsEnabled() || !FLAG_log) return;
1243  LogMessageBuilder msg(this);
1244  msg.Append("%s,%s,", name, tag);
1245
1246  uint32_t sec, usec;
1247  if (OS::GetUserTime(&sec, &usec) != -1) {
1248    msg.Append("%d,%d,", sec, usec);
1249  }
1250  msg.Append("%.0f", OS::TimeCurrentMillis());
1251
1252  msg.Append('\n');
1253  msg.WriteToLogFile();
1254#endif
1255}
1256
1257
1258void Logger::SuspectReadEvent(String* name, Object* obj) {
1259#ifdef ENABLE_LOGGING_AND_PROFILING
1260  if (!log_->IsEnabled() || !FLAG_log_suspect) return;
1261  LogMessageBuilder msg(this);
1262  String* class_name = obj->IsJSObject()
1263                       ? JSObject::cast(obj)->class_name()
1264                       : HEAP->empty_string();
1265  msg.Append("suspect-read,");
1266  msg.Append(class_name);
1267  msg.Append(',');
1268  msg.Append('"');
1269  msg.Append(name);
1270  msg.Append('"');
1271  msg.Append('\n');
1272  msg.WriteToLogFile();
1273#endif
1274}
1275
1276
// Marks the start of a heap sample for |space| of the given |kind|.
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_gc) return;
  LogMessageBuilder msg(this);
  // Using non-relative system time in order to be able to synchronize with
  // external memory profiling events (e.g. DOM memory size).
  msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
             space, kind, OS::TimeCurrentMillis());
  msg.WriteToLogFile();
#endif
}
1288
1289
// Logs capacity/used statistics for |space| as part of a heap sample.
// V8_PTR_PREFIX selects the correct printf length modifier for intptr_t.
void Logger::HeapSampleStats(const char* space, const char* kind,
                             intptr_t capacity, intptr_t used) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_gc) return;
  LogMessageBuilder msg(this);
  msg.Append("heap-sample-stats,\"%s\",\"%s\","
                 "%" V8_PTR_PREFIX "d,%" V8_PTR_PREFIX "d\n",
             space, kind, capacity, used);
  msg.WriteToLogFile();
#endif
}
1301
1302
// Marks the end of the heap sample started by HeapSampleBeginEvent.
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_gc) return;
  LogMessageBuilder msg(this);
  msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
  msg.WriteToLogFile();
#endif
}
1311
1312
// Logs one per-type heap sample item: instance count and total bytes.
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_gc) return;
  LogMessageBuilder msg(this);
  msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
  msg.WriteToLogFile();
#endif
}
1321
1322
// Logs one per-constructor heap sample item: instance count and bytes
// attributed to JS objects created by |constructor|.
void Logger::HeapSampleJSConstructorEvent(const char* constructor,
                                          int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_gc) return;
  LogMessageBuilder msg(this);
  msg.Append("heap-js-cons-item,%s,%d,%d\n", constructor, number, bytes);
  msg.WriteToLogFile();
#endif
}
1332
// Event starts with comma, so we don't have it in the format string.
static const char kEventText[] = "heap-js-ret-item,%s";
// Upper bound on the formatted prefix length used when splitting long
// retainer lists below. It counts the "%s" placeholder and the trailing
// NUL, so it slightly overestimates — that errs on the safe (conservative)
// side when computing how much of the event fits in the message buffer.
static const int kEventTextLen = sizeof(kEventText)/sizeof(kEventText[0]);
1337
// Logs the retainer list |event| for |constructor|. Because retainer lists
// can exceed Log::kMessageBufferSize, the list is split into multiple
// "heap-js-ret-item" lines; splits happen only at ',' boundaries so each
// emitted piece stays well-formed.
void Logger::HeapSampleJSRetainersEvent(
    const char* constructor, const char* event) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_gc) return;
  const int cons_len = StrLength(constructor);
  const int event_len = StrLength(event);
  int pos = 0;  // Index into |event| of the first not-yet-written char.
  // Retainer lists can be long. We may need to split them into multiple events.
  do {
    LogMessageBuilder msg(this);
    msg.Append(kEventText, constructor);
    int to_write = event_len - pos;
    if (to_write > Log::kMessageBufferSize - (cons_len + kEventTextLen)) {
      // Remainder doesn't fit: back up from the budget limit to the
      // nearest ',' so we never cut an item in half.
      int cut_pos = pos + Log::kMessageBufferSize - (cons_len + kEventTextLen);
      ASSERT(cut_pos < event_len);
      while (cut_pos > pos && event[cut_pos] != ',') --cut_pos;
      if (event[cut_pos] != ',') {
        // A single item longer than the whole buffer — cannot split.
        // Crash in debug mode, skip in release mode.
        ASSERT(false);
        return;
      }
      // Append a piece of event that fits, without trailing comma.
      msg.AppendStringPart(event + pos, cut_pos - pos);
      // Start next piece with comma.
      pos = cut_pos;
    } else {
      msg.Append("%s", event + pos);
      pos += event_len;
    }
    msg.Append('\n');
    msg.WriteToLogFile();
  } while (pos < event_len);
#endif
}
1372
1373
// Logs the allocation stack for objects produced by |constructor|.
// |stack| is a NULL-terminated array of return addresses.
void Logger::HeapSampleJSProducerEvent(const char* constructor,
                                       Address* stack) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log_gc) return;
  LogMessageBuilder msg(this);
  msg.Append("heap-js-prod-item,%s", constructor);
  while (*stack != NULL) {
    // NOTE(review): an Address is passed through varargs for a pointer-
    // sized integer conversion (V8PRIxPTR); presumably same-size on all
    // supported targets — verify if a new ABI is added.
    msg.Append(",0x%" V8PRIxPTR, *stack++);
  }
  msg.Append("\n");
  msg.WriteToLogFile();
#endif
}
1387
1388
// Logs a debugger call-site tag: "debug-tag,<tag>".
void Logger::DebugTag(const char* call_site_tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log) return;
  LogMessageBuilder msg(this);
  msg.Append("debug-tag,%s\n", call_site_tag);
  msg.WriteToLogFile();
#endif
}
1397
1398
// Logs a queued debugger event with a timestamp. |parameter| is UTF-16;
// it is narrowed to 8-bit by truncating each code unit, so non-Latin-1
// characters are mangled in the log.
void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_log) return;
  StringBuilder s(parameter.length() + 1);
  for (int i = 0; i < parameter.length(); ++i) {
    s.AddCharacter(static_cast<char>(parameter[i]));
  }
  char* parameter_string = s.Finalize();
  LogMessageBuilder msg(this);
  msg.Append("debug-queue-event,%s,%15.3f,%s\n",
             event_type,
             OS::TimeCurrentMillis(),
             parameter_string);
  // Safe to free before WriteToLogFile: Append already copied the text
  // into the message buffer.
  DeleteArray(parameter_string);
  msg.WriteToLogFile();
#endif
}
1416
1417
1418#ifdef ENABLE_LOGGING_AND_PROFILING
// Logs one profiler tick:
// "tick,<pc>,<sp>,<is_external>,<callback-or-tos>,<vm-state>[,overflow][,<stack>...]".
// |overflow| indicates the sampler queue dropped samples before this one.
void Logger::TickEvent(TickSample* sample, bool overflow) {
  if (!log_->IsEnabled() || !FLAG_prof) return;
  LogMessageBuilder msg(this);
  msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
  msg.AppendAddress(sample->pc);
  msg.Append(',');
  msg.AppendAddress(sample->sp);
  // Third field flags whether the fourth is an external callback address
  // (1) or the top-of-stack value (0).
  if (sample->has_external_callback) {
    msg.Append(",1,");
    msg.AppendAddress(sample->external_callback);
  } else {
    msg.Append(",0,");
    msg.AppendAddress(sample->tos);
  }
  msg.Append(",%d", static_cast<int>(sample->state));
  if (overflow) {
    msg.Append(",overflow");
  }
  // Captured JS stack frames, innermost first.
  for (int i = 0; i < sample->frames_count; ++i) {
    msg.Append(',');
    msg.AppendAddress(sample->stack[i]);
  }
  msg.Append('\n');
  msg.WriteToLogFile();
}
1444
1445
1446int Logger::GetActiveProfilerModules() {
1447  int result = PROFILER_MODULE_NONE;
1448  if (profiler_ != NULL && !profiler_->paused()) {
1449    result |= PROFILER_MODULE_CPU;
1450  }
1451  if (FLAG_log_gc) {
1452    result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
1453  }
1454  return result;
1455}
1456
1457
// Decrements the nesting counters for the modules selected by |flags| and
// actually pauses a module when its counter reaches zero. |tag| != 0
// additionally emits a "close-tag" event.
void Logger::PauseProfiler(int flags, int tag) {
  if (!log_->IsEnabled()) return;
  if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
    // It is OK to have negative nesting.
    if (--cpu_profiler_nesting_ == 0) {
      profiler_->pause();
      if (FLAG_prof_lazy) {
        // Keep ticking when a sliding state window or the runtime profiler
        // still needs samples.
        if (!FLAG_sliding_state_window && !RuntimeProfiler::IsEnabled()) {
          ticker_->Stop();
        }
        FLAG_log_code = false;
        // Must be the same message as Log::kDynamicBufferSeal.
        LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
      }
      --logging_nesting_;
    }
  }
  // Heap stats and JS constructors pause together.
  if (flags &
      (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
    if (--heap_profiler_nesting_ == 0) {
      FLAG_log_gc = false;
      --logging_nesting_;
    }
  }
  if (tag != 0) {
    UncheckedIntEvent("close-tag", tag);
  }
}
1486
1487
// Increments the nesting counters for the modules selected by |flags| and
// actually resumes a module on its first activation. |tag| != 0 emits an
// "open-tag" event first (mirroring PauseProfiler's trailing "close-tag").
void Logger::ResumeProfiler(int flags, int tag) {
  if (!log_->IsEnabled()) return;
  if (tag != 0) {
    UncheckedIntEvent("open-tag", tag);
  }
  if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
    if (cpu_profiler_nesting_++ == 0) {
      ++logging_nesting_;
      if (FLAG_prof_lazy) {
        profiler_->Engage();
        LOG(ISOLATE, UncheckedStringEvent("profiler", "resume"));
        // Re-enable code logging and backfill events for code that was
        // created while logging was off.
        FLAG_log_code = true;
        LogCompiledFunctions();
        LogAccessorCallbacks();
        if (!FLAG_sliding_state_window && !ticker_->IsActive()) {
          ticker_->Start();
        }
      }
      profiler_->resume();
    }
  }
  // Heap stats and JS constructors resume together.
  if (flags &
      (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
    if (heap_profiler_nesting_++ == 0) {
      ++logging_nesting_;
      FLAG_log_gc = true;
    }
  }
}
1517
1518
1519// This function can be called when Log's mutex is acquired,
1520// either from main or Profiler's thread.
// Called on log-write failure; pauses CPU profiling without emitting a tag.
void Logger::LogFailure() {
  PauseProfiler(PROFILER_MODULE_CPU, 0);
}
1524
1525
// Returns whether the tick sampler is currently running.
bool Logger::IsProfilerSamplerActive() {
  return ticker_->IsActive();
}
1529
1530
// Copies up to |max_size| bytes of log content starting at |from_pos| into
// |dest_buf|; delegates to the underlying Log object.
int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
  return log_->GetLogLines(from_pos, dest_buf, max_size);
}
1534
1535
// Visitor used by EnumerateCompiledFunctions to collect optimized
// functions from all contexts. Either output array may be NULL, in which
// case only *count_ is advanced (the counting pass).
class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
 public:
  EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
                                     Handle<Code>* code_objects,
                                     int* count)
      : sfis_(sfis), code_objects_(code_objects), count_(count) { }

  virtual void EnterContext(Context* context) {}
  virtual void LeaveContext(Context* context) {}

  virtual void VisitFunction(JSFunction* function) {
    if (sfis_ != NULL) {
      sfis_[*count_] = Handle<SharedFunctionInfo>(function->shared());
    }
    if (code_objects_ != NULL) {
      ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
      code_objects_[*count_] = Handle<Code>(function->code());
    }
    // Count every visited function, even during the counting-only pass.
    *count_ = *count_ + 1;
  }

 private:
  Handle<SharedFunctionInfo>* sfis_;       // May be NULL (count-only pass).
  Handle<Code>* code_objects_;             // May be NULL (count-only pass).
  int* count_;                             // Shared output index/counter.
};
1562
1563
// Collects all compiled functions (unoptimized code found via heap
// iteration plus optimized code from all contexts) into |sfis| and
// |code_objects|, and returns their count. Call once with NULL arrays to
// get the count, then again with arrays of that size to fill them.
static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
                                      Handle<Code>* code_objects) {
  AssertNoAllocation no_alloc;
  int compiled_funcs_count = 0;

  // Iterate the heap to find shared function info objects and record
  // the unoptimized code for them.
  HeapIterator iterator;
  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
    if (!obj->IsSharedFunctionInfo()) continue;
    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
    // Skip functions whose script source is gone; they cannot be
    // meaningfully reported.
    if (sfi->is_compiled()
        && (!sfi->script()->IsScript()
            || Script::cast(sfi->script())->HasValidSource())) {
      if (sfis != NULL) {
        sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
      }
      if (code_objects != NULL) {
        code_objects[compiled_funcs_count] = Handle<Code>(sfi->code());
      }
      ++compiled_funcs_count;
    }
  }

  // Iterate all optimized functions in all contexts.
  EnumerateOptimizedFunctionsVisitor visitor(sfis,
                                             code_objects,
                                             &compiled_funcs_count);
  Deoptimizer::VisitAllOptimizedFunctions(&visitor);

  return compiled_funcs_count;
}
1596
1597
// Logs a single code object found in the heap/snapshot, classifying it by
// Code::kind() into a log tag and a human-readable description. JS
// function code is skipped here and reported by LogCompiledFunctions.
void Logger::LogCodeObject(Object* object) {
  if (FLAG_log_code || FLAG_ll_prof) {
    Code* code_object = Code::cast(object);
    LogEventsAndTags tag = Logger::STUB_TAG;
    const char* description = "Unknown code from the snapshot";
    switch (code_object->kind()) {
      case Code::FUNCTION:
      case Code::OPTIMIZED_FUNCTION:
        return;  // We log this later using LogCompiledFunctions.
      case Code::UNARY_OP_IC:   // fall through
      case Code::BINARY_OP_IC:   // fall through
      case Code::COMPARE_IC:  // fall through
      case Code::STUB:
        description =
            CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
        // MajorName can return NULL for stubs it does not know.
        if (description == NULL)
          description = "A stub from the snapshot";
        tag = Logger::STUB_TAG;
        break;
      case Code::BUILTIN:
        description = "A builtin from the snapshot";
        tag = Logger::BUILTIN_TAG;
        break;
      case Code::KEYED_LOAD_IC:
        description = "A keyed load IC from the snapshot";
        tag = Logger::KEYED_LOAD_IC_TAG;
        break;
      case Code::LOAD_IC:
        description = "A load IC from the snapshot";
        tag = Logger::LOAD_IC_TAG;
        break;
      case Code::STORE_IC:
        description = "A store IC from the snapshot";
        tag = Logger::STORE_IC_TAG;
        break;
      case Code::KEYED_STORE_IC:
        description = "A keyed store IC from the snapshot";
        tag = Logger::KEYED_STORE_IC_TAG;
        break;
      case Code::CALL_IC:
        description = "A call IC from the snapshot";
        tag = Logger::CALL_IC_TAG;
        break;
      case Code::KEYED_CALL_IC:
        description = "A keyed call IC from the snapshot";
        tag = Logger::KEYED_CALL_IC_TAG;
        break;
    }
    PROFILE(ISOLATE, CodeCreateEvent(tag, code_object, description));
  }
}
1649
1650
// Writes the target architecture marker at the start of the low-level log
// so offline tools can decode subsequent binary records.
// NOTE(review): like CodeMovingGCEvent, this writes without checking
// ll_output_handle_; presumably the handle is open when FLAG_ll_prof is
// set — verify against Log::Initialize.
void Logger::LogCodeInfo() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!log_->IsEnabled() || !FLAG_ll_prof) return;
#if V8_TARGET_ARCH_IA32
  const char arch[] = "ia32";
#elif V8_TARGET_ARCH_X64
  const char arch[] = "x64";
#elif V8_TARGET_ARCH_ARM
  const char arch[] = "arm";
#else
  const char arch[] = "unknown";
#endif
  // sizeof(arch) includes the terminating NUL, which acts as the record
  // separator in the binary stream.
  LowLevelLogWriteBytes(arch, sizeof(arch));
#endif  // ENABLE_LOGGING_AND_PROFILING
}
1666
1667
// Remembers |name| for the code object at code->address() so that
// SnapshotPositionEvent can emit it later. The map is created lazily on
// first use and only while serializing.
void Logger::RegisterSnapshotCodeName(Code* code,
                                      const char* name,
                                      int name_size) {
  ASSERT(Serializer::enabled());
  if (address_to_name_map_ == NULL) {
    address_to_name_map_ = new NameMap;
  }
  address_to_name_map_->Insert(code->address(), name, name_size);
}
1677
1678
// Writes a binary code-creation record to the low-level log: a fixed
// header struct, the name bytes, then a copy of the machine code itself.
void Logger::LowLevelCodeCreateEvent(Code* code,
                                     const char* name,
                                     int name_size) {
  if (log_->ll_output_handle_ == NULL) return;
  LowLevelCodeCreateStruct event;
  event.name_size = name_size;
  // Record instruction_start, not the object address; the offline decoder
  // maps samples against instruction addresses.
  event.code_address = code->instruction_start();
  ASSERT(event.code_address == code->address() + Code::kHeaderSize);
  event.code_size = code->instruction_size();
  LowLevelLogWriteStruct(event);
  LowLevelLogWriteBytes(name, name_size);
  LowLevelLogWriteBytes(
      reinterpret_cast<const char*>(code->instruction_start()),
      code->instruction_size());
}
1694
1695
// Writes a binary code-move record. Object addresses are shifted by
// Code::kHeaderSize to match the instruction addresses recorded by
// LowLevelCodeCreateEvent.
void Logger::LowLevelCodeMoveEvent(Address from, Address to) {
  if (log_->ll_output_handle_ == NULL) return;
  LowLevelCodeMoveStruct event;
  event.from_address = from + Code::kHeaderSize;
  event.to_address = to + Code::kHeaderSize;
  LowLevelLogWriteStruct(event);
}
1703
1704
// Writes a binary code-delete record (address shifted to instruction
// start, matching LowLevelCodeCreateEvent).
void Logger::LowLevelCodeDeleteEvent(Address from) {
  if (log_->ll_output_handle_ == NULL) return;
  LowLevelCodeDeleteStruct event;
  event.address = from + Code::kHeaderSize;
  LowLevelLogWriteStruct(event);
}
1711
1712
// Writes a binary snapshot-position record tying the code object at
// |addr| (shifted to instruction start) to snapshot offset |pos|.
void Logger::LowLevelSnapshotPositionEvent(Address addr, int pos) {
  if (log_->ll_output_handle_ == NULL) return;
  LowLevelSnapshotPositionStruct event;
  event.address = addr + Code::kHeaderSize;
  event.position = pos;
  LowLevelLogWriteStruct(event);
}
1720
1721
1722void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
1723  size_t rv = fwrite(bytes, 1, size, log_->ll_output_handle_);
1724  ASSERT(static_cast<size_t>(size) == rv);
1725  USE(rv);
1726}
1727
1728
1729void Logger::LogCodeObjects() {
1730  AssertNoAllocation no_alloc;
1731  HeapIterator iterator;
1732  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
1733    if (obj->IsCode()) LogCodeObject(obj);
1734  }
1735}
1736
1737
// Logs a code-creation event for every currently compiled function.
// Used to backfill the log when code logging is (re-)enabled, e.g. from
// ResumeProfiler. Collects handles first because the per-function work
// below can allocate and thus trigger GC.
void Logger::LogCompiledFunctions() {
  HandleScope scope;
  // Two-pass protocol: first count, then fill arrays of exactly that size.
  const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
  ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
  ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
  EnumerateCompiledFunctions(sfis.start(), code_objects.start());

  // During iteration, there can be heap allocation due to
  // GetScriptLineNumber call.
  for (int i = 0; i < compiled_funcs_count; ++i) {
    // The shared lazy-compile stub is not a real compiled function.
    if (*code_objects[i] == Isolate::Current()->builtins()->builtin(
        Builtins::kLazyCompile))
      continue;
    Handle<SharedFunctionInfo> shared = sfis[i];
    Handle<String> func_name(shared->DebugName());
    if (shared->script()->IsScript()) {
      Handle<Script> script(Script::cast(shared->script()));
      if (script->name()->IsString()) {
        Handle<String> script_name(String::cast(script->name()));
        int line_num = GetScriptLineNumber(script, shared->start_position());
        if (line_num > 0) {
          // Line numbers in the log are 1-based.
          PROFILE(ISOLATE,
                  CodeCreateEvent(
                    Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
                    *code_objects[i], *shared,
                    *script_name, line_num + 1));
        } else {
          // Can't distinguish eval and script here, so always use Script.
          PROFILE(ISOLATE,
                  CodeCreateEvent(
                      Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
                      *code_objects[i], *shared, *script_name));
        }
      } else {
        // Script without a name: fall back to the function's debug name.
        PROFILE(ISOLATE,
                CodeCreateEvent(
                    Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
                    *code_objects[i], *shared, *func_name));
      }
    } else if (shared->IsApiFunction()) {
      // API function.
      FunctionTemplateInfo* fun_data = shared->get_api_func_data();
      Object* raw_call_data = fun_data->call_code();
      if (!raw_call_data->IsUndefined()) {
        CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
        Object* callback_obj = call_data->callback();
        Address entry_point = v8::ToCData<Address>(callback_obj);
        PROFILE(ISOLATE, CallbackEvent(*func_name, entry_point));
      }
    } else {
      PROFILE(ISOLATE,
              CodeCreateEvent(
                  Logger::LAZY_COMPILE_TAG, *code_objects[i],
                  *shared, *func_name));
    }
  }
}
1795
1796
// Logs getter/setter callback events for every AccessorInfo object in the
// heap that has a String name. Used to backfill the log like
// LogCompiledFunctions.
void Logger::LogAccessorCallbacks() {
  AssertNoAllocation no_alloc;
  HeapIterator iterator;
  i::Isolate* isolate = ISOLATE;
  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
    if (!obj->IsAccessorInfo()) continue;
    AccessorInfo* ai = AccessorInfo::cast(obj);
    if (!ai->name()->IsString()) continue;
    String* name = String::cast(ai->name());
    Address getter_entry = v8::ToCData<Address>(ai->getter());
    // A zero entry point means no native getter/setter is installed.
    if (getter_entry != 0) {
      PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
    }
    Address setter_entry = v8::ToCData<Address>(ai->setter());
    if (setter_entry != 0) {
      PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
    }
  }
}
1816
1817#endif
1818
1819
// One-time logger initialization: processes flag interactions, opens the
// log, creates the ticker/profiler, and decides the initial logging
// nesting. Returns true when logging support is compiled in.
bool Logger::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  // Tests and EnsureInitialize() can call this twice in a row. It's harmless.
  if (is_initialized_) return true;
  is_initialized_ = true;

  // --ll-prof implies --log-snapshot-positions. (Code logging under
  // --ll-prof is handled at the emit sites — see LogCodeObject, which
  // checks FLAG_ll_prof directly — rather than by forcing --log-code.)
  if (FLAG_ll_prof) {
    FLAG_log_snapshot_positions = true;
  }

  // --prof_lazy controls --log-code, implies --noprof_auto.
  if (FLAG_prof_lazy) {
    FLAG_log_code = false;
    FLAG_prof_auto = false;
  }

  // TODO(isolates): this assert introduces cyclic dependency (logger
  // -> thread local top -> heap -> logger).
  // ASSERT(VMState::is_outermost_external());

  log_->Initialize();

  // Write the architecture marker first so decoders can parse the stream.
  if (FLAG_ll_prof) LogCodeInfo();

  ticker_ = new Ticker(Isolate::Current(), kSamplingIntervalMs);

  Isolate* isolate = Isolate::Current();
  if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
    sliding_state_window_ = new SlidingStateWindow(isolate);
  }

  // Any textual logging flag turns logging on from the start.
  bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
    || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
    || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;

  if (start_logging) {
    logging_nesting_ = 1;
  }

  if (FLAG_prof) {
    profiler_ = new Profiler(isolate);
    if (!FLAG_prof_auto) {
      // Wait for an explicit ResumeProfiler call.
      profiler_->pause();
    } else {
      logging_nesting_ = 1;
    }
    if (!FLAG_prof_lazy) {
      profiler_->Engage();
    }
  }

  return true;

#else
  return false;
#endif
}
1878
1879
// Returns the tick sampler owned by this logger (NULL before Setup).
Sampler* Logger::sampler() {
  return ticker_;
}
1883
1884
// Starts the tick sampler if it is not already running. Requires Setup to
// have created the ticker.
void Logger::EnsureTickerStarted() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  ASSERT(ticker_ != NULL);
  if (!ticker_->IsActive()) ticker_->Start();
#endif
}
1891
1892
// Stops the tick sampler if it exists and is running; safe before Setup.
void Logger::EnsureTickerStopped() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
#endif
}
1898
1899
// Shuts the logger down: disengages and destroys the profiler, releases
// the sliding window and ticker, and closes the log file. Idempotent via
// the is_initialized_ flag.
void Logger::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!is_initialized_) return;
  is_initialized_ = false;

  // Stop the profiler before closing the file.
  if (profiler_ != NULL) {
    profiler_->Disengage();
    delete profiler_;
    profiler_ = NULL;
  }

  delete sliding_state_window_;
  sliding_state_window_ = NULL;

  delete ticker_;
  ticker_ = NULL;

  log_->Close();
#endif
}
1921
1922
// Turns on the sliding state window, either immediately or (if called
// before Setup) by setting the flag Setup consults later.
void Logger::EnableSlidingStateWindow() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  // If the ticker is NULL, Logger::Setup has not been called yet.  In
  // that case, we set the sliding_state_window flag so that the
  // sliding window computation will be started when Logger::Setup is
  // called.
  if (ticker_ == NULL) {
    FLAG_sliding_state_window = true;
    return;
  }
  // Otherwise, if the sliding state window computation has not been
  // started we do it now.
  if (sliding_state_window_ == NULL) {
    sliding_state_window_ = new SlidingStateWindow(Isolate::Current());
  }
#endif
}
1940
1941
// Process-wide sampler registry state. The mutex is created during static
// initialization; the sampler list is allocated lazily by AddActiveSampler.
Mutex* SamplerRegistry::mutex_ = OS::CreateMutex();
List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
1944
1945
// Calls |func| for each registered sampler, holding the registry mutex.
// Returns whether any samplers remain registered afterwards. The loop
// re-checks ActiveSamplersExist() every iteration because |func| may
// indirectly alter the list.
bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
  ScopedLock lock(mutex_);
  for (int i = 0;
       ActiveSamplersExist() && i < active_samplers_->length();
       ++i) {
    func(active_samplers_->at(i), param);
  }
  return ActiveSamplersExist();
}
1955
1956
1957static void ComputeCpuProfiling(Sampler* sampler, void* flag_ptr) {
1958  bool* flag = reinterpret_cast<bool*>(flag_ptr);
1959  *flag |= sampler->IsProfiling();
1960}
1961
1962
1963SamplerRegistry::State SamplerRegistry::GetState() {
1964  bool flag = false;
1965  if (!IterateActiveSamplers(&ComputeCpuProfiling, &flag)) {
1966    return HAS_NO_SAMPLERS;
1967  }
1968  return flag ? HAS_CPU_PROFILING_SAMPLERS : HAS_SAMPLERS;
1969}
1970
1971
1972void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
1973  ASSERT(sampler->IsActive());
1974  ScopedLock lock(mutex_);
1975  if (active_samplers_ == NULL) {
1976    active_samplers_ = new List<Sampler*>;
1977  } else {
1978    ASSERT(!active_samplers_->Contains(sampler));
1979  }
1980  active_samplers_->Add(sampler);
1981}
1982
1983
// Unregisters a sampler. The sampler must currently be registered
// (checked in debug builds; silently a no-op in release if it is not).
void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
  ASSERT(sampler->IsActive());
  ScopedLock lock(mutex_);
  ASSERT(active_samplers_ != NULL);
  bool removed = active_samplers_->RemoveElement(sampler);
  ASSERT(removed);
  USE(removed);
}
1992
1993} }  // namespace v8::internal
1994