// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_

#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "sampler.h"
#include "unbound-queue.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;

#define CODE_EVENTS_TYPE_LIST(V)                                   \
  V(CODE_CREATION,    CodeCreateEventRecord)                       \
  V(CODE_MOVE,        CodeMoveEventRecord)                         \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)           \
  V(REPORT_BUILTIN,   ReportBuiltinEventRecord)


class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  mutable unsigned order;
};
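
// Note (illustrative only, derived from the list and macro above): expanding
// CODE_EVENTS_TYPE_LIST with DECLARE_TYPE produces an enum roughly
// equivalent to
//   enum Type { NONE = 0, CODE_CREATION, CODE_MOVE, SHARED_FUNC_MOVE,
//               REPORT_BUILTIN, NUMBER_OF_TYPES };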


class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;
  Address shared;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class ReportBuiltinEventRecord : public CodeEventRecord {
 public:
  Address start;
  Builtins::Name builtin_id;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order) : order(order) { }

  unsigned order;
  TickSample sample;

  static TickSampleEventRecord* cast(void* value) {
    return reinterpret_cast<TickSampleEventRecord*>(value);
  }
};


class CodeEventsContainer {
 public:
  explicit CodeEventsContainer(
      CodeEventRecord::Type type = CodeEventRecord::NONE) {
    generic.type = type;
  }
  union {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
  };
};
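
// Note (illustrative only): DECLARE_CLASS gives the union one member per
// record type, named after the record class itself, roughly:
//   CodeCreateEventRecord CodeCreateEventRecord_;
//   CodeMoveEventRecord CodeMoveEventRecord_;
//   SharedFunctionInfoMoveEventRecord SharedFunctionInfoMoveEventRecord_;
//   ReportBuiltinEventRecord ReportBuiltinEventRecord_;
// Every member derives from CodeEventRecord, so generic.type can be read
// first to decide which view of the container is the active one.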


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
 public:
  explicit ProfilerEventsProcessor(ProfileGenerator* generator);
  virtual ~ProfilerEventsProcessor() {}

  // Thread control.
  virtual void Run();
  void StopSynchronously();
  INLINE(bool running()) { return running_; }
  void Enqueue(const CodeEventsContainer& event);

  // Puts current stack into tick sample events buffer.
  void AddCurrentStack(Isolate* isolate);

  // Tick sample events are filled directly in the buffer of the circular
  // queue (because the structure has a fixed width, but usually not all
  // stack frame entries are filled). This method returns a pointer to the
  // next record of the buffer.
  INLINE(TickSample* TickSampleEvent());

 private:
  // Called from the events processing thread (Run() method).
  bool ProcessCodeEvent();
  bool ProcessTicks();

  ProfileGenerator* generator_;
  bool running_;
  UnboundQueue<CodeEventsContainer> events_buffer_;
  SamplingCircularQueue ticks_buffer_;
  UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  unsigned last_code_event_id_;
  unsigned last_processed_code_event_id_;
};
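
// Illustrative producer-side sketch (not code from this file; 'entry',
// 'code_start_address' and 'code_size' are placeholders): a code creation
// event is typically packaged and enqueued roughly like this:
//   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
//   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
//   rec->start = code_start_address;
//   rec->entry = entry;       // a CodeEntry* from the profiles collection
//   rec->size = code_size;
//   processor->Enqueue(evt_rec);
// The processor thread drains events_buffer_ in ProcessCodeEvent() and lets
// each record apply itself to the CodeMap via its UpdateCodeMap() method,
// while tick samples arrive via ticks_buffer_ and ticks_from_vm_buffer_.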


#define PROFILE(IsolateGetter, Call)                                        \
  do {                                                                      \
    Isolate* cpu_profiler_isolate = (IsolateGetter);                        \
    v8::internal::Logger* logger = cpu_profiler_isolate->logger();          \
    CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler();       \
    if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
      logger->Call;                                                         \
    }                                                                       \
  } while (false)
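
// Illustrative use (isolate, code and name are placeholders, and the tag is
// just one example of a Logger::LogEventsAndTags value):
//   PROFILE(isolate, CodeCreateEvent(Logger::FUNCTION_TAG, code, name));
// The event is forwarded to the isolate's Logger only when code-event
// logging or CPU profiling is active, so call sites stay cheap otherwise.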


class CpuProfiler : public CodeEventListener {
 public:
  explicit CpuProfiler(Isolate* isolate);

  CpuProfiler(Isolate* isolate,
              CpuProfilesCollection* test_collection,
              ProfileGenerator* test_generator,
              ProfilerEventsProcessor* test_processor);

  virtual ~CpuProfiler();

  void StartProfiling(const char* title, bool record_samples = false);
  void StartProfiling(String* title, bool record_samples);
  CpuProfile* StopProfiling(const char* title);
  CpuProfile* StopProfiling(String* title);
  int GetProfilesCount();
  CpuProfile* GetProfile(int index);
  void DeleteAllProfiles();
  void DeleteProfile(CpuProfile* profile);

  // Invoked from the stack sampler (thread or signal handler).
  TickSample* TickSampleEvent();

  // Must be called via the PROFILE macro; otherwise these methods will
  // crash when profiling is not enabled.
  virtual void CallbackEvent(Name* name, Address entry_point);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, const char* comment);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, Name* name);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
                               SharedFunctionInfo* shared,
                               CompilationInfo* info,
                               Name* name);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
                               SharedFunctionInfo* shared,
                               CompilationInfo* info,
                               Name* source, int line);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, int args_count);
  virtual void CodeMovingGCEvent() {}
  virtual void CodeMoveEvent(Address from, Address to);
  virtual void CodeDeleteEvent(Address from);
  virtual void GetterCallbackEvent(Name* name, Address entry_point);
  virtual void RegExpCodeCreateEvent(Code* code, String* source);
  virtual void SetterCallbackEvent(Name* name, Address entry_point);
  virtual void SharedFunctionInfoMoveEvent(Address from, Address to);

  INLINE(bool is_profiling() const) { return is_profiling_; }
  bool* is_profiling_address() {
    return &is_profiling_;
  }

  ProfileGenerator* generator() const { return generator_; }
  ProfilerEventsProcessor* processor() const { return processor_; }
  Isolate* isolate() const { return isolate_; }

 private:
  void StartProcessorIfNotStarted();
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();
  void LogBuiltins();

  Isolate* isolate_;
  CpuProfilesCollection* profiles_;
  unsigned next_profile_uid_;
  ProfileGenerator* generator_;
  ProfilerEventsProcessor* processor_;
  int saved_logging_nesting_;
  bool need_to_stop_sampler_;
  bool is_profiling_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};
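
// Typical embedder-side flow (illustrative sketch; the title string and the
// record_samples flag are arbitrary):
//   CpuProfiler* profiler = isolate->cpu_profiler();
//   profiler->StartProfiling("startup", true);
//   ... run the code of interest ...
//   CpuProfile* profile = profiler->StopProfiling("startup");
//   // Inspect or serialize the profile, then release it.
//   profiler->DeleteProfile(profile);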

} }  // namespace v8::internal


#endif  // V8_CPU_PROFILER_H_