// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_

#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "platform/time.h"
#include "sampler.h"
#include "unbound-queue.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;

#define CODE_EVENTS_TYPE_LIST(V)                                   \
  V(CODE_CREATION,    CodeCreateEventRecord)                       \
  V(CODE_MOVE,        CodeMoveEventRecord)                         \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)           \
  V(REPORT_BUILTIN,   ReportBuiltinEventRecord)
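
// CODE_EVENTS_TYPE_LIST is an "X macro" list: each consumer defines a
// two-argument macro and applies the list to it. As a rough sketch of the
// expansions used below (illustrative only), DECLARE_TYPE turns the list
// into the enum values CODE_CREATION, CODE_MOVE, SHARED_FUNC_MOVE and
// REPORT_BUILTIN, while DECLARE_CLASS in CodeEventsContainer turns it into
// union members such as "CodeCreateEventRecord CodeCreateEventRecord_;".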


class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  mutable unsigned order;
};


class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;
  Address shared;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class ReportBuiltinEventRecord : public CodeEventRecord {
 public:
  Address start;
  Builtins::Name builtin_id;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order) : order(order) { }

  unsigned order;
  TickSample sample;
};


class CodeEventsContainer {
 public:
  explicit CodeEventsContainer(
      CodeEventRecord::Type type = CodeEventRecord::NONE) {
    generic.type = type;
  }
  union {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
  };
};
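
// Enqueueing sketch (illustrative only; "processor" stands for a
// ProfilerEventsProcessor instance owned by the caller):
//   CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
//   CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
//   rec->from = from;
//   rec->to = to;
//   processor->Enqueue(evt_rec);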


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
 public:
  ProfilerEventsProcessor(ProfileGenerator* generator,
                          Sampler* sampler,
                          TimeDelta period);
  virtual ~ProfilerEventsProcessor() {}

  // Thread control.
  virtual void Run();
  void StopSynchronously();
  INLINE(bool running()) { return running_; }
  void Enqueue(const CodeEventsContainer& event);

  // Puts the current stack into the tick sample events buffer.
  void AddCurrentStack(Isolate* isolate);

  // Tick sample events are filled directly in the buffer of the circular
  // queue (the record has a fixed width, but usually not all stack frame
  // entries are filled). StartTickSample() returns a pointer to the next
  // free record of the buffer; FinishTickSample() signals that the record
  // has been filled.
  inline TickSample* StartTickSample();
  inline void FinishTickSample();
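
  // Two-phase usage sketch from the sampler side (illustrative only):
  //   TickSample* sample = processor->StartTickSample();
  //   if (sample != NULL) {
  //     // ... fill *sample in place ...
  //     processor->FinishTickSample();
  //   }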

 private:
  // Called from the events processing thread (Run() method).
  bool ProcessCodeEvent();

  enum SampleProcessingResult {
    OneSampleProcessed,
    FoundSampleForNextCodeEvent,
    NoSamplesInQueue
  };
  SampleProcessingResult ProcessOneSample();

  ProfileGenerator* generator_;
  Sampler* sampler_;
  bool running_;
  // Sampling period between tick samples.
  const TimeDelta period_;
  UnboundQueue<CodeEventsContainer> events_buffer_;
  static const size_t kTickSampleBufferSize = 1 * MB;
  static const size_t kTickSampleQueueLength =
      kTickSampleBufferSize / sizeof(TickSampleEventRecord);
  SamplingCircularQueue<TickSampleEventRecord,
                        kTickSampleQueueLength> ticks_buffer_;
  UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  unsigned last_code_event_id_;
  unsigned last_processed_code_event_id_;
};


#define PROFILE(IsolateGetter, Call)                                        \
  do {                                                                      \
    Isolate* cpu_profiler_isolate = (IsolateGetter);                        \
    v8::internal::Logger* logger = cpu_profiler_isolate->logger();          \
    CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler();       \
    if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
      logger->Call;                                                         \
    }                                                                       \
  } while (false)
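
// Usage sketch (illustrative only; assumes a Code* object named "code" is in
// scope and picks an arbitrary Logger::LogEventsAndTags value):
//   PROFILE(isolate,
//           CodeCreateEvent(Logger::LAZY_COMPILE_TAG, code, "comment"));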


class CpuProfiler : public CodeEventListener {
 public:
  explicit CpuProfiler(Isolate* isolate);

  CpuProfiler(Isolate* isolate,
              CpuProfilesCollection* test_collection,
              ProfileGenerator* test_generator,
              ProfilerEventsProcessor* test_processor);

  virtual ~CpuProfiler();

  void set_sampling_interval(TimeDelta value);
  void StartProfiling(const char* title, bool record_samples = false);
  void StartProfiling(String* title, bool record_samples);
  CpuProfile* StopProfiling(const char* title);
  CpuProfile* StopProfiling(String* title);
  int GetProfilesCount();
  CpuProfile* GetProfile(int index);
  void DeleteAllProfiles();
  void DeleteProfile(CpuProfile* profile);
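
  // Typical flow (sketch only; the title is an arbitrary string chosen by
  // the caller and must match between the Start and Stop calls):
  //   profiler->StartProfiling("my-profile", true);
  //   // ... run the code to be profiled ...
  //   CpuProfile* profile = profiler->StopProfiling("my-profile");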

  // Invoked from the stack sampler (thread or signal handler).
  inline TickSample* StartTickSample();
  inline void FinishTickSample();

  // These must be called via the PROFILE macro, otherwise they will crash
  // when profiling is not enabled.
  virtual void CallbackEvent(Name* name, Address entry_point);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, const char* comment);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, Name* name);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
                               SharedFunctionInfo* shared,
                               CompilationInfo* info,
                               Name* name);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
                               SharedFunctionInfo* shared,
                               CompilationInfo* info,
                               Name* source, int line, int column);
  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, int args_count);
  virtual void CodeMovingGCEvent() {}
  virtual void CodeMoveEvent(Address from, Address to);
  virtual void CodeDeleteEvent(Address from);
  virtual void GetterCallbackEvent(Name* name, Address entry_point);
  virtual void RegExpCodeCreateEvent(Code* code, String* source);
  virtual void SetterCallbackEvent(Name* name, Address entry_point);
  virtual void SharedFunctionInfoMoveEvent(Address from, Address to);

  INLINE(bool is_profiling() const) { return is_profiling_; }
  bool* is_profiling_address() {
    return &is_profiling_;
  }

  ProfileGenerator* generator() const { return generator_; }
  ProfilerEventsProcessor* processor() const { return processor_; }
  Isolate* isolate() const { return isolate_; }

 private:
  void StartProcessorIfNotStarted();
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();
  void LogBuiltins();

  Isolate* isolate_;
  TimeDelta sampling_interval_;
  CpuProfilesCollection* profiles_;
  unsigned next_profile_uid_;
  ProfileGenerator* generator_;
  ProfilerEventsProcessor* processor_;
  bool saved_is_logging_;
  bool is_profiling_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

} }  // namespace v8::internal


#endif  // V8_CPU_PROFILER_H_