// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_

#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "unbound-queue.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
class TokenEnumerator;

#define CODE_EVENTS_TYPE_LIST(V)                                   \
  V(CODE_CREATION,    CodeCreateEventRecord)                       \
  V(CODE_MOVE,        CodeMoveEventRecord)                         \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
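
// CODE_EVENTS_TYPE_LIST is an X-macro: a client defines a two-argument
// macro V and the list expands it once per code event kind. With
// DECLARE_TYPE below it yields the enumerators
//   CODE_CREATION, CODE_MOVE, SHARED_FUNC_MOVE,
// and with DECLARE_CLASS in ProfilerEventsProcessor it yields one
// union member per record type.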


class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  unsigned order;
};


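// Describes a freshly created code object: its start address and size,
// the CodeEntry holding its metadata, and the address of the associated
// SharedFunctionInfo, if any.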
class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;
  Address shared;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


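// Describes a code object that has been relocated in memory, e.g. by the
// garbage collector.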
class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


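// Describes a SharedFunctionInfo object that has been relocated in
// memory, so that addresses recorded in earlier events can be updated.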
class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order)
      : filler(1),
        order(order) {
    ASSERT(filler != SamplingCircularQueue::kClear);
  }

  // The first machine word of a TickSampleEventRecord must never
  // become equal to SamplingCircularQueue::kClear. As both order and
  // TickSample's first field are unreliable in this sense (order can
  // overflow, TickSample can have all fields reset), we are forced to
  // use an artificial filler field.
  int filler;
  unsigned order;
  TickSample sample;

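  // Reinterprets a raw slot pointer obtained from the sampling circular
  // queue as a TickSampleEventRecord.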
  static TickSampleEventRecord* cast(void* value) {
    return reinterpret_cast<TickSampleEventRecord*>(value);
  }
};


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
 public:
  explicit ProfilerEventsProcessor(ProfileGenerator* generator);
  virtual ~ProfilerEventsProcessor() {}

  // Thread control.
  virtual void Run();
  inline void Stop() { running_ = false; }
  INLINE(bool running()) { return running_; }

  // Methods for adding events. Called by VM threads.
  void CallbackCreateEvent(Logger::LogEventsAndTags tag,
                           const char* prefix, String* name,
                           Address start);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       String* name,
                       String* resource_name, int line_number,
                       Address start, unsigned size,
                       Address shared);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       const char* name,
                       Address start, unsigned size);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       int args_count,
                       Address start, unsigned size);
  void CodeMoveEvent(Address from, Address to);
  void CodeDeleteEvent(Address from);
  void SharedFunctionInfoMoveEvent(Address from, Address to);
  void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
                             const char* prefix, String* name,
                             Address start, unsigned size);
  // Puts the current stack into the tick sample events buffer.
  void AddCurrentStack();

  // Tick sample events are filled directly in the buffer of the circular
  // queue (because the structure is of fixed width, but usually not all
  // stack frame entries are filled). This method returns a pointer to the
  // next record of the buffer.
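  // A hypothetical caller-side sketch (the actual fill step belongs to
  // the sampler, which is not declared in this header):
  //   TickSample* sample = processor->TickSampleEvent();
  //   // ... write pc, sp and the stack frames into *sample in place ...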
  INLINE(TickSample* TickSampleEvent());

 private:
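  // A union large enough to hold any of the code event records, so that
  // heterogeneous events can travel through a single queue.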
  union CodeEventsContainer {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
  };

  // Called from the events processing thread (Run() method).
  bool ProcessCodeEvent(unsigned* dequeue_order);
  bool ProcessTicks(unsigned dequeue_order);

  INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));

  ProfileGenerator* generator_;
  bool running_;
  UnboundQueue<CodeEventsContainer> events_buffer_;
  SamplingCircularQueue ticks_buffer_;
  UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  unsigned enqueue_order_;
};

} }  // namespace v8::internal


#define PROFILE(isolate, Call)                                \
  LOG(isolate, Call);                                         \
  do {                                                        \
    if (v8::internal::CpuProfiler::is_profiling(isolate)) {   \
      v8::internal::CpuProfiler::Call;                        \
    }                                                         \
  } while (false)
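
// For example, using a declaration from CpuProfiler below (from and to
// are placeholders for call-site variables),
//   PROFILE(isolate, CodeMoveEvent(from, to));
// expands to a LOG call followed by
//   v8::internal::CpuProfiler::CodeMoveEvent(from, to);
// which is executed only while profiling is active.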


namespace v8 {
namespace internal {


// TODO(isolates): isolatify this class.
class CpuProfiler {
 public:
  static void SetUp();
  static void TearDown();

  static void StartProfiling(const char* title);
  static void StartProfiling(String* title);
  static CpuProfile* StopProfiling(const char* title);
  static CpuProfile* StopProfiling(Object* security_token, String* title);
  static int GetProfilesCount();
  static CpuProfile* GetProfile(Object* security_token, int index);
  static CpuProfile* FindProfile(Object* security_token, unsigned uid);
  static void DeleteAllProfiles();
  static void DeleteProfile(CpuProfile* profile);
  static bool HasDetachedProfiles();
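
  // A minimal usage sketch of the static interface above (the title
  // string is arbitrary and purely illustrative):
  //   CpuProfiler::StartProfiling("my-profile");
  //   ...  // run the code to be profiled
  //   CpuProfile* profile = CpuProfiler::StopProfiling("my-profile");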

  // Invoked from the stack sampler (thread or signal handler).
  static TickSample* TickSampleEvent(Isolate* isolate);

  // Must be called via the PROFILE macro, otherwise it will crash when
  // profiling is not enabled.
  static void CallbackEvent(String* name, Address entry_point);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, const char* comment);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* source, int line);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, int args_count);
  static void CodeMovingGCEvent() {}
  static void CodeMoveEvent(Address from, Address to);
  static void CodeDeleteEvent(Address from);
  static void GetterCallbackEvent(String* name, Address entry_point);
  static void RegExpCodeCreateEvent(Code* code, String* source);
  static void SetterCallbackEvent(String* name, Address entry_point);
  static void SharedFunctionInfoMoveEvent(Address from, Address to);

  // TODO(isolates): this doesn't have to use atomics anymore.

  static INLINE(bool is_profiling(Isolate* isolate)) {
    CpuProfiler* profiler = isolate->cpu_profiler();
    return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
  }

 private:
  CpuProfiler();
  ~CpuProfiler();
  void StartCollectingProfile(const char* title);
  void StartCollectingProfile(String* title);
  void StartProcessorIfNotStarted();
  CpuProfile* StopCollectingProfile(const char* title);
  CpuProfile* StopCollectingProfile(Object* security_token, String* title);
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();

  CpuProfilesCollection* profiles_;
  unsigned next_profile_uid_;
  TokenEnumerator* token_enumerator_;
  ProfileGenerator* generator_;
  ProfilerEventsProcessor* processor_;
  int saved_logging_nesting_;
  bool need_to_stop_sampler_;
  Atomic32 is_profiling_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

} }  // namespace v8::internal


#endif  // V8_CPU_PROFILER_H_