1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef BASE_TRACKED_OBJECTS_H_
6#define BASE_TRACKED_OBJECTS_H_
7
8#include <map>
9#include <set>
10#include <stack>
11#include <string>
12#include <utility>
13#include <vector>
14
15#include "base/base_export.h"
16#include "base/gtest_prod_util.h"
17#include "base/lazy_instance.h"
18#include "base/location.h"
19#include "base/profiler/alternate_timer.h"
20#include "base/profiler/tracked_time.h"
21#include "base/synchronization/lock.h"
22#include "base/threading/thread_local_storage.h"
23#include "base/tracking_info.h"
24
25// TrackedObjects provides a database of stats about objects (generally Tasks)
26// that are tracked.  Tracking means their birth, death, duration, birth thread,
27// death thread, and birth place are recorded.  This data is carefully spread
28// across a series of objects so that the counts and times can be rapidly
29// updated without (usually) having to lock the data, and hence there is usually
30// very little contention caused by the tracking.  The data can be viewed via
31// the about:profiler URL, with a variety of sorting and filtering choices.
32//
33// These classes serve as the basis of a profiler of sorts for the Tasks system.
34// As a result, design decisions were made to maximize speed, by minimizing
35// recurring allocation/deallocation, lock contention and data copying.  In the
36// "stable" state, which is reached relatively quickly, there is no separate
37// marginal allocation cost associated with construction or destruction of
38// tracked objects, no locks are generally employed, and probably the largest
39// computational cost is associated with obtaining start and stop times for
40// instances as they are created and destroyed.
41//
42// The following describes the lifecycle of tracking an instance.
43//
44// First off, when the instance is created, the FROM_HERE macro is expanded
45// to specify the birth place (file, line, function) where the instance was
46// created.  That data is used to create a transient Location instance
47// encapsulating the above triple of information.  The strings (like __FILE__)
48// are passed around by reference, with the assumption that they are static, and
49// will never go away.  This ensures that the strings can be dealt with as atoms
50// with great efficiency (i.e., copying of strings is never needed, and
51// comparisons for equality can be based on pointer comparisons).
52//
53// Next, a Births instance is created for use ONLY on the thread where this
54// instance was created.  That Births instance records (in a base class
55// BirthOnThread) references to the static data provided in a Location instance,
56// as well as a pointer specifying the thread on which the birth takes place.
57// Hence there is at most one Births instance for each Location on each thread.
58// The derived Births class contains slots for recording statistics about all
59// instances born at the same location.  Statistics currently include only the
60// count of instances constructed.
61//
62// Since the base class BirthOnThread contains only constant data, it can be
63// freely accessed by any thread at any time (i.e., only the statistic needs to
64// be handled carefully, and stats are updated exclusively on the birth thread).
65//
66// For Tasks, having now either constructed or found the Births instance
67// described above, a pointer to the Births instance is then recorded into the
68// PendingTask structure in MessageLoop.  This fact alone is very useful in
69// debugging, when there is a question of where an instance came from.  In
70// addition, the birth time is also recorded and used to later evaluate the
71// lifetime duration of the whole Task.  As a result of the above embedding, we
72// can find out a Task's location of birth, and thread of birth, without using
73// any locks, as all that data is constant across the life of the process.
74//
75// The above work *could* also be done for any other object as well by calling
76// TallyABirthIfActive() and TallyRunOnNamedThreadIfTracking() as appropriate.
77//
78// The amount of memory used in the above data structures depends on how many
79// threads there are, and how many Locations of construction there are.
80// Fortunately, we don't use memory that is the product of those two counts, but
81// rather we only need one Births instance for each thread that constructs an
82// instance at a Location. In many cases, instances are only created on one
83// thread, so the memory utilization is actually fairly restrained.
84//
85// Lastly, when an instance is deleted, the final tallies of statistics are
86// carefully accumulated.  That tallying writes into slots (members) in a
87// collection of DeathData instances.  For each birth place Location that is
88// destroyed on a thread, there is a DeathData instance to record the additional
89// death count, as well as accumulate the run-time and queue-time durations for
90// the instance as it is destroyed (dies).  By maintaining a single place to
91// aggregate this running sum *only* for the given thread, we avoid the need to
92// lock such DeathData instances. (i.e., these accumulated stats in a DeathData
93// instance are exclusively updated by the singular owning thread).
94//
95// With the above lifecycle description complete, the major remaining detail is
96// explaining how each thread maintains a list of DeathData instances, and of
97// Births instances, and is able to avoid additional (redundant/unnecessary)
98// allocations.
99//
100// Each thread maintains a list of data items specific to that thread in a
101// ThreadData instance (for that specific thread only).  The two critical items
102// are lists of DeathData and Births instances.  These lists are maintained in
103// STL maps, which are indexed by Location. As noted earlier, we can compare
104// locations very efficiently as we consider the underlying data (file,
105// function, line) to be atoms, and hence pointer comparison is used rather than
106// (slow) string comparisons.
107//
108// To provide a mechanism for iterating over all "known threads," which means
109// threads that have recorded a birth or a death, we create a singly linked list
110// of ThreadData instances. Each such instance maintains a pointer to the next
111// one.  A static member of ThreadData provides a pointer to the first item on
112// this global list, and access via that all_thread_data_list_head_ item
113// requires the use of the list_lock_.
// When a new ThreadData instance is added to the global list, it is
// pre-pended, which ensures that any prior acquisition of the list is valid
// (i.e., the holder can iterate over it without fear of it changing, or the
// necessity of using an additional lock).  Iterations are actually pretty
// rare (used primarily for cleanup, or snapshotting data for display), so
// this lock has
119// very little global performance impact.
120//
121// The above description tries to define the high performance (run time)
122// portions of these classes.  After gathering statistics, calls instigated
123// by visiting about:profiler will assemble and aggregate data for display.  The
124// following data structures are used for producing such displays.  They are
125// not performance critical, and their only major constraint is that they should
126// be able to run concurrently with ongoing augmentation of the birth and death
127// data.
128//
129// This header also exports collection of classes that provide "snapshotted"
130// representations of the core tracked_objects:: classes.  These snapshotted
131// representations are designed for safe transmission of the tracked_objects::
132// data across process boundaries.  Each consists of:
133// (1) a default constructor, to support the IPC serialization macros,
134// (2) a constructor that extracts data from the type being snapshotted, and
135// (3) the snapshotted data.
136//
137// For a given birth location, information about births is spread across data
138// structures that are asynchronously changing on various threads.  For
139// serialization and display purposes, we need to construct TaskSnapshot
140// instances for each combination of birth thread, death thread, and location,
// along with the count of such lifetimes.  We gather such data into
// TaskSnapshot instances, so that such instances can be sorted and
143// aggregated (and remain frozen during our processing).
144//
145// The ProcessDataSnapshot struct is a serialized representation of the list
146// of ThreadData objects for a process.  It holds a set of TaskSnapshots
147// and tracks parent/child relationships for the executed tasks.  The statistics
// in a snapshot are gathered asynchronously relative to their ongoing updates.
149// It is possible, though highly unlikely, that stats could be incorrectly
150// recorded by this process (all data is held in 32 bit ints, but we are not
// atomically collecting all data, so we could have a count that does not, for
152// example, match with the number of durations we accumulated).  The advantage
153// to having fast (non-atomic) updates of the data outweighs the minimal risk of
154// a singular corrupt statistic snapshot (only the snapshot could be corrupt,
// not the underlying and ongoing statistic).  In contrast, pointer data that
156// is accessed during snapshotting is completely invariant, and hence is
157// perfectly acquired (i.e., no potential corruption, and no risk of a bad
158// memory reference).
159//
160// TODO(jar): We can implement a Snapshot system that *tries* to grab the
161// snapshots on the source threads *when* they have MessageLoops available
162// (worker threads don't have message loops generally, and hence gathering from
163// them will continue to be asynchronous).  We had an implementation of this in
164// the past, but the difficulty is dealing with message loops being terminated.
165// We can *try* to spam the available threads via some message loop proxy to
// achieve this feat, and it *might* be valuable when we are collecting data for
167// upload via UMA (where correctness of data may be more significant than for a
168// single screen of about:profiler).
169//
170// TODO(jar): We should support (optionally) the recording of parent-child
171// relationships for tasks.  This should be done by detecting what tasks are
172// Born during the running of a parent task.  The resulting data can be used by
173// a smarter profiler to aggregate the cost of a series of child tasks into
174// the ancestor task.  It can also be used to illuminate what child or parent is
175// related to each task.
176//
177// TODO(jar): We need to store DataCollections, and provide facilities for
178// taking the difference between two gathered DataCollections.  For now, we're
179// just adding a hack that Reset()s to zero all counts and stats.  This is also
// done in a slightly thread-unsafe fashion, as the resetting is done
181// asynchronously relative to ongoing updates (but all data is 32 bit in size).
182// For basic profiling, this will work "most of the time," and should be
183// sufficient... but storing away DataCollections is the "right way" to do this.
184// We'll accomplish this via JavaScript storage of snapshots, and then we'll
185// remove the Reset() methods.  We may also need a short-term-max value in
186// DeathData that is reset (as synchronously as possible) during each snapshot.
187// This will facilitate displaying a max value for each snapshot period.
188
189namespace tracked_objects {
190
191//------------------------------------------------------------------------------
192// For a specific thread, and a specific birth place, the collection of all
193// death info (with tallies for each death thread, to prevent access conflicts).
194class ThreadData;
195class BASE_EXPORT BirthOnThread {
196 public:
197  BirthOnThread(const Location& location, const ThreadData& current);
198
199  const Location location() const { return location_; }
200  const ThreadData* birth_thread() const { return birth_thread_; }
201
202 private:
203  // File/lineno of birth.  This defines the essence of the task, as the context
204  // of the birth (construction) often tell what the item is for.  This field
205  // is const, and hence safe to access from any thread.
206  const Location location_;
207
208  // The thread that records births into this object.  Only this thread is
209  // allowed to update birth_count_ (which changes over time).
210  const ThreadData* const birth_thread_;
211
212  DISALLOW_COPY_AND_ASSIGN(BirthOnThread);
213};
214
215//------------------------------------------------------------------------------
216// A "snapshotted" representation of the BirthOnThread class.
217
struct BASE_EXPORT BirthOnThreadSnapshot {
  // Default constructor supports the IPC serialization macros.
  BirthOnThreadSnapshot();
  // Extracts the frozen data from a live BirthOnThread instance.
  explicit BirthOnThreadSnapshot(const BirthOnThread& birth);
  ~BirthOnThreadSnapshot();

  // Snapshotted file/function/line identifying the birth place.
  LocationSnapshot location;
  // Name of the thread on which the births were recorded.
  std::string thread_name;
};
226
227//------------------------------------------------------------------------------
228// A class for accumulating counts of births (without bothering with a map<>).
229
class BASE_EXPORT Births: public BirthOnThread {
 public:
  Births(const Location& location, const ThreadData& current);

  // Returns the current (net) tally of births recorded at this location on
  // this thread.
  int birth_count() const;

  // When we have a birth we update the count for this birthplace.
  void RecordBirth();

  // When a birthplace is changed (updated), we need to decrement the counter
  // for the old instance.
  void ForgetBirth();

  // Hack to quickly reset all counts to zero.
  void Clear();

 private:
  // The number of births on this thread for our location_.  Per the
  // BirthOnThread contract, only the owning birth thread updates this value.
  int birth_count_;

  DISALLOW_COPY_AND_ASSIGN(Births);
};
252
253//------------------------------------------------------------------------------
254// Basic info summarizing multiple destructions of a tracked object with a
255// single birthplace (fixed Location).  Used both on specific threads, and also
256// in snapshots when integrating assembled data.
257
// NOTE: DeathData is intentionally copyable (no DISALLOW_COPY_AND_ASSIGN):
// instances are stored by value in ThreadData::DeathMap and copied into
// DeathDataSnapshot instances.
class BASE_EXPORT DeathData {
 public:
  // Default initializer.
  DeathData();

  // When deaths have not yet taken place, and we gather data from all the
  // threads, we create DeathData stats that tally the number of births without
  // a corresponding death.
  explicit DeathData(int count);

  // Update stats for a task destruction (death) that had a Run() time of
  // |duration|, and has had a queueing delay of |queue_duration|.
  // |random_number| presumably drives the sampling of the *_sample_ members --
  // TODO confirm against the .cc implementation.
  void RecordDeath(const int32 queue_duration,
                   const int32 run_duration,
                   int random_number);

  // Metrics accessors, used only for serialization and in tests.
  int count() const;
  int32 run_duration_sum() const;
  int32 run_duration_max() const;
  int32 run_duration_sample() const;
  int32 queue_duration_sum() const;
  int32 queue_duration_max() const;
  int32 queue_duration_sample() const;

  // Reset the max values to zero.
  void ResetMax();

  // Reset all tallies to zero. This is used as a hack on realtime data.
  void Clear();

 private:
  // Members are ordered from most regularly read and updated, to least
  // frequently used.  This might help a bit with cache lines.
  // Number of runs seen (divisor for calculating averages).
  int count_;
  // Basic tallies, used to compute averages.
  int32 run_duration_sum_;
  int32 queue_duration_sum_;
  // Max values, used by local visualization routines.  These are often read,
  // but rarely updated.
  int32 run_duration_max_;
  int32 queue_duration_max_;
  // Samples, used by crowd sourcing gatherers.  These are almost never read,
  // and rarely updated.
  int32 run_duration_sample_;
  int32 queue_duration_sample_;
};
306
307//------------------------------------------------------------------------------
308// A "snapshotted" representation of the DeathData class.
309
struct BASE_EXPORT DeathDataSnapshot {
  // Default constructor supports the IPC serialization macros.
  DeathDataSnapshot();
  // Copies the current tallies out of a live DeathData instance.
  explicit DeathDataSnapshot(const DeathData& death_data);
  ~DeathDataSnapshot();

  // Each field mirrors the DeathData accessor of the same name.
  int count;
  int32 run_duration_sum;
  int32 run_duration_max;
  int32 run_duration_sample;
  int32 queue_duration_sum;
  int32 queue_duration_max;
  int32 queue_duration_sample;
};
323
324//------------------------------------------------------------------------------
325// A temporary collection of data that can be sorted and summarized.  It is
326// gathered (carefully) from many threads.  Instances are held in arrays and
327// processed, filtered, and rendered.
328// The source of this data was collected on many threads, and is asynchronously
329// changing.  The data in this instance is not asynchronously changing.
330
struct BASE_EXPORT TaskSnapshot {
  // Default constructor supports the IPC serialization macros.
  TaskSnapshot();
  // Builds a frozen record combining a birth place, its accumulated death
  // statistics, and the name of the thread the deaths were recorded on.
  TaskSnapshot(const BirthOnThread& birth,
               const DeathData& death_data,
               const std::string& death_thread_name);
  ~TaskSnapshot();

  // Where (location and thread) the task instances were born.
  BirthOnThreadSnapshot birth;
  // Accumulated death statistics for this (birth, death-thread) combination.
  DeathDataSnapshot death_data;
  // Name of the thread on which the deaths were tallied.
  std::string death_thread_name;
};
342
343//------------------------------------------------------------------------------
344// For each thread, we have a ThreadData that stores all tracking info generated
345// on this thread.  This prevents the need for locking as data accumulates.
// We use ThreadLocalStorage to quickly identify the current ThreadData context.
347// We also have a linked list of ThreadData instances, and that list is used to
348// harvest data from all existing instances.
349
350struct ProcessDataSnapshot;
351class BASE_EXPORT ThreadData {
352 public:
353  // Current allowable states of the tracking system.  The states can vary
354  // between ACTIVE and DEACTIVATED, but can never go back to UNINITIALIZED.
355  enum Status {
    UNINITIALIZED,              // Pristine, link-time state before running.
357    DORMANT_DURING_TESTS,       // Only used during testing.
    DEACTIVATED,                // No longer recording profiling.
359    PROFILING_ACTIVE,           // Recording profiles (no parent-child links).
360    PROFILING_CHILDREN_ACTIVE,  // Fully active, recording parent-child links.
361  };
362
363  typedef std::map<Location, Births*> BirthMap;
364  typedef std::map<const Births*, DeathData> DeathMap;
365  typedef std::pair<const Births*, const Births*> ParentChildPair;
366  typedef std::set<ParentChildPair> ParentChildSet;
367  typedef std::stack<const Births*> ParentStack;
368
369  // Initialize the current thread context with a new instance of ThreadData.
370  // This is used by all threads that have names, and should be explicitly
371  // set *before* any births on the threads have taken place.  It is generally
372  // only used by the message loop, which has a well defined thread name.
373  static void InitializeThreadContext(const std::string& suggested_name);
374
375  // Using Thread Local Store, find the current instance for collecting data.
376  // If an instance does not exist, construct one (and remember it for use on
  // this thread).
378  // This may return NULL if the system is disabled for any reason.
379  static ThreadData* Get();
380
381  // Fills |process_data| with all the recursive results in our process.
382  // During the scavenging, if |reset_max| is true, then the DeathData instances
383  // max-values are reset to zero during this scan.
384  static void Snapshot(bool reset_max, ProcessDataSnapshot* process_data);
385
386  // Finds (or creates) a place to count births from the given location in this
387  // thread, and increment that tally.
  // TallyABirthIfActive will return NULL if the birth cannot be tallied.
389  static Births* TallyABirthIfActive(const Location& location);
390
391  // Records the end of a timed run of an object.  The |completed_task| contains
392  // a pointer to a Births, the time_posted, and a delayed_start_time if any.
393  // The |start_of_run| indicates when we started to perform the run of the
394  // task.  The delayed_start_time is non-null for tasks that were posted as
395  // delayed tasks, and it indicates when the task should have run (i.e., when
  // it should have posted out of the timer queue, and into the work queue).
397  // The |end_of_run| was just obtained by a call to Now() (just after the task
398  // finished). It is provided as an argument to help with testing.
399  static void TallyRunOnNamedThreadIfTracking(
400      const base::TrackingInfo& completed_task,
401      const TrackedTime& start_of_run,
402      const TrackedTime& end_of_run);
403
404  // Record the end of a timed run of an object.  The |birth| is the record for
405  // the instance, the |time_posted| records that instant, which is presumed to
406  // be when the task was posted into a queue to run on a worker thread.
407  // The |start_of_run| is when the worker thread started to perform the run of
408  // the task.
409  // The |end_of_run| was just obtained by a call to Now() (just after the task
410  // finished).
411  static void TallyRunOnWorkerThreadIfTracking(
412      const Births* birth,
413      const TrackedTime& time_posted,
414      const TrackedTime& start_of_run,
415      const TrackedTime& end_of_run);
416
417  // Record the end of execution in region, generally corresponding to a scope
418  // being exited.
419  static void TallyRunInAScopedRegionIfTracking(
420      const Births* birth,
421      const TrackedTime& start_of_run,
422      const TrackedTime& end_of_run);
423
424  const std::string& thread_name() const { return thread_name_; }
425
426  // Hack: asynchronously clear all birth counts and death tallies data values
427  // in all ThreadData instances.  The numerical (zeroing) part is done without
  // use of locks or atomic exchanges, and may (for int64 values) produce
429  // bogus counts VERY rarely.
430  static void ResetAllThreadData();
431
432  // Initializes all statics if needed (this initialization call should be made
433  // while we are single threaded). Returns false if unable to initialize.
434  static bool Initialize();
435
436  // Sets internal status_.
437  // If |status| is false, then status_ is set to DEACTIVATED.
  // If |status| is true, then status_ is set to PROFILING_ACTIVE, or
439  // PROFILING_CHILDREN_ACTIVE.
440  // If tracking is not compiled in, this function will return false.
441  // If parent-child tracking is not compiled in, then an attempt to set the
442  // status to PROFILING_CHILDREN_ACTIVE will only result in a status of
443  // PROFILING_ACTIVE (i.e., it can't be set to a higher level than what is
444  // compiled into the binary, and parent-child tracking at the
445  // PROFILING_CHILDREN_ACTIVE level might not be compiled in).
446  static bool InitializeAndSetTrackingStatus(Status status);
447
448  static Status status();
449
450  // Indicate if any sort of profiling is being done (i.e., we are more than
451  // DEACTIVATED).
452  static bool TrackingStatus();
453
454  // For testing only, indicate if the status of parent-child tracking is turned
455  // on.  This is currently a compiled option, atop TrackingStatus().
456  static bool TrackingParentChildStatus();
457
458  // Special versions of Now() for getting times at start and end of a tracked
459  // run.  They are super fast when tracking is disabled, and have some internal
460  // side effects when we are tracking, so that we can deduce the amount of time
461  // accumulated outside of execution of tracked runs.
462  // The task that will be tracked is passed in as |parent| so that parent-child
463  // relationships can be (optionally) calculated.
464  static TrackedTime NowForStartOfRun(const Births* parent);
465  static TrackedTime NowForEndOfRun();
466
467  // Provide a time function that does nothing (runs fast) when we don't have
468  // the profiler enabled.  It will generally be optimized away when it is
469  // ifdef'ed to be small enough (allowing the profiler to be "compiled out" of
470  // the code).
471  static TrackedTime Now();
472
473  // Use the function |now| to provide current times, instead of calling the
474  // TrackedTime::Now() function.  Since this alternate function is being used,
475  // the other time arguments (used for calculating queueing delay) will be
476  // ignored.
477  static void SetAlternateTimeSource(NowFunction* now);
478
479  // This function can be called at process termination to validate that thread
480  // cleanup routines have been called for at least some number of named
481  // threads.
482  static void EnsureCleanupWasCalled(int major_threads_shutdown_count);
483
484 private:
485  // Allow only tests to call ShutdownSingleThreadedCleanup.  We NEVER call it
486  // in production code.
487  // TODO(jar): Make this a friend in DEBUG only, so that the optimizer has a
  // better chance of optimizing (inlining? etc.) private methods (knowing that
489  // there will be no need for an external entry point).
490  friend class TrackedObjectsTest;
491  FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown);
492  FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown);
493  FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, ParentChildTest);
494
495  typedef std::map<const BirthOnThread*, int> BirthCountMap;
496
497  // Worker thread construction creates a name since there is none.
498  explicit ThreadData(int thread_number);
499
500  // Message loop based construction should provide a name.
501  explicit ThreadData(const std::string& suggested_name);
502
503  ~ThreadData();
504
505  // Push this instance to the head of all_thread_data_list_head_, linking it to
506  // the previous head.  This is performed after each construction, and leaves
507  // the instance permanently on that list.
508  void PushToHeadOfList();
509
510  // (Thread safe) Get start of list of all ThreadData instances using the lock.
511  static ThreadData* first();
512
513  // Iterate through the null terminated list of ThreadData instances.
514  ThreadData* next() const;
515
516
517  // In this thread's data, record a new birth.
518  Births* TallyABirth(const Location& location);
519
520  // Find a place to record a death on this thread.
521  void TallyADeath(const Births& birth, int32 queue_duration, int32 duration);
522
523  // Snapshot (under a lock) the profiled data for the tasks in each ThreadData
524  // instance.  Also updates the |birth_counts| tally for each task to keep
525  // track of the number of living instances of the task.  If |reset_max| is
526  // true, then the max values in each DeathData instance are reset during the
527  // scan.
528  static void SnapshotAllExecutedTasks(bool reset_max,
529                                       ProcessDataSnapshot* process_data,
530                                       BirthCountMap* birth_counts);
531
532  // Snapshots (under a lock) the profiled data for the tasks for this thread
533  // and writes all of the executed tasks' data -- i.e. the data for the tasks
  // with entries in the death_map_ -- into |process_data|.  Also updates
535  // the |birth_counts| tally for each task to keep track of the number of
536  // living instances of the task -- that is, each task maps to the number of
537  // births for the task that have not yet been balanced by a death.  If
538  // |reset_max| is true, then the max values in each DeathData instance are
539  // reset during the scan.
540  void SnapshotExecutedTasks(bool reset_max,
541                             ProcessDataSnapshot* process_data,
542                             BirthCountMap* birth_counts);
543
544  // Using our lock, make a copy of the specified maps.  This call may be made
  // on non-local threads, which necessitates the use of the lock to prevent
  // the map(s) from being reallocated while they are copied. If |reset_max| is
547  // true, then, just after we copy the DeathMap, we will set the max values to
548  // zero in the active DeathMap (not the snapshot).
549  void SnapshotMaps(bool reset_max,
550                    BirthMap* birth_map,
551                    DeathMap* death_map,
552                    ParentChildSet* parent_child_set);
553
554  // Using our lock to protect the iteration, Clear all birth and death data.
555  void Reset();
556
557  // This method is called by the TLS system when a thread terminates.
558  // The argument may be NULL if this thread has never tracked a birth or death.
559  static void OnThreadTermination(void* thread_data);
560
561  // This method should be called when a worker thread terminates, so that we
562  // can save all the thread data into a cache of reusable ThreadData instances.
563  void OnThreadTerminationCleanup();
564
565  // Cleans up data structures, and returns statics to near pristine (mostly
566  // uninitialized) state.  If there is any chance that other threads are still
567  // using the data structures, then the |leak| argument should be passed in as
568  // true, and the data structures (birth maps, death maps, ThreadData
  // instances, etc.) will be leaked and not deleted.  If you have joined all
570  // threads since the time that InitializeAndSetTrackingStatus() was called,
571  // then you can pass in a |leak| value of false, and this function will
572  // delete recursively all data structures, starting with the list of
573  // ThreadData instances.
574  static void ShutdownSingleThreadedCleanup(bool leak);
575
  // When non-null, this specifies an external function that supplies
  // monotonically increasing time values.
578  static NowFunction* now_function_;
579
580  // We use thread local store to identify which ThreadData to interact with.
581  static base::ThreadLocalStorage::StaticSlot tls_index_;
582
583  // List of ThreadData instances for use with worker threads. When a worker
  // thread is done (terminated), we push it onto this list.  When a new worker
585  // thread is created, we first try to re-use a ThreadData instance from the
586  // list, and if none are available, construct a new one.
587  // This is only accessed while list_lock_ is held.
588  static ThreadData* first_retired_worker_;
589
590  // Link to the most recently created instance (starts a null terminated list).
591  // The list is traversed by about:profiler when it needs to snapshot data.
592  // This is only accessed while list_lock_ is held.
593  static ThreadData* all_thread_data_list_head_;
594
595  // The next available worker thread number.  This should only be accessed when
596  // the list_lock_ is held.
597  static int worker_thread_data_creation_count_;
598
599  // The number of times TLS has called us back to cleanup a ThreadData
600  // instance. This is only accessed while list_lock_ is held.
601  static int cleanup_count_;
602
603  // Incarnation sequence number, indicating how many times (during unittests)
604  // we've either transitioned out of UNINITIALIZED, or into that state.  This
605  // value is only accessed while the list_lock_ is held.
606  static int incarnation_counter_;
607
608  // Protection for access to all_thread_data_list_head_, and to
609  // unregistered_thread_data_pool_.  This lock is leaked at shutdown.
610  // The lock is very infrequently used, so we can afford to just make a lazy
611  // instance and be safe.
612  static base::LazyInstance<base::Lock>::Leaky list_lock_;
613
  // We set status_ to DEACTIVATED when we shut down the tracking service.
615  static Status status_;
616
617  // Link to next instance (null terminated list). Used to globally track all
618  // registered instances (corresponds to all registered threads where we keep
619  // data).
620  ThreadData* next_;
621
622  // Pointer to another ThreadData instance for a Worker-Thread that has been
623  // retired (its thread was terminated).  This value is non-NULL only for a
624  // retired ThreadData associated with a Worker-Thread.
625  ThreadData* next_retired_worker_;
626
627  // The name of the thread that is being recorded.  If this thread has no
628  // message_loop, then this is a worker thread, with a sequence number postfix.
629  std::string thread_name_;
630
  // Indicates if this is a worker thread, and hence whether this ThreadData
  // instance should be retired (pushed onto first_retired_worker_) when not
  // in use.
633  // Value is zero when it is not a worker thread.  Value is a positive integer
634  // corresponding to the created thread name if it is a worker thread.
635  int worker_thread_number_;
636
637  // A map used on each thread to keep track of Births on this thread.
638  // This map should only be accessed on the thread it was constructed on.
639  // When a snapshot is needed, this structure can be locked in place for the
640  // duration of the snapshotting activity.
641  BirthMap birth_map_;
642
  // Similar to birth_map_, this records information about deaths of tracked
644  // instances (i.e., when a tracked instance was destroyed on this thread).
645  // It is locked before changing, and hence other threads may access it by
646  // locking before reading it.
647  DeathMap death_map_;
648
649  // A set of parents that created children tasks on this thread. Each pair
650  // corresponds to potentially non-local Births (location and thread), and a
651  // local Births (that took place on this thread).
652  ParentChildSet parent_child_set_;
653
654  // Lock to protect *some* access to BirthMap and DeathMap.  The maps are
655  // regularly read and written on this thread, but may only be read from other
656  // threads.  To support this, we acquire this lock if we are writing from this
657  // thread, or reading from another thread.  For reading from this thread we
658  // don't need a lock, as there is no potential for a conflict since the
659  // writing is only done from this thread.
660  mutable base::Lock map_lock_;
661
662  // The stack of parents that are currently being profiled. This includes only
663  // tasks that have started a timer recently via NowForStartOfRun(), but not
664  // yet concluded with a NowForEndOfRun().  Usually this stack is one deep, but
665  // if a scoped region is profiled, or <sigh> a task runs a nested-message
666  // loop, then the stack can grow larger.  Note that we don't try to deduct
  // time in nested profiles, as our current timer is based on wall-clock time,
668  // and not CPU time (and we're hopeful that nested timing won't be a
669  // significant additional cost).
670  ParentStack parent_stack_;
671
  // A random number that we use to decide which sample to keep as a
673  // representative sample in each DeathData instance.  We can't start off with
674  // much randomness (because we can't call RandInt() on all our threads), so
675  // we stir in more and more as we go.
676  int32 random_number_;
677
678  // Record of what the incarnation_counter_ was when this instance was created.
679  // If the incarnation_counter_ has changed, then we avoid pushing into the
680  // pool (this is only critical in tests which go through multiple
681  // incarnations).
682  int incarnation_count_for_pool_;
683
684  DISALLOW_COPY_AND_ASSIGN(ThreadData);
685};
686
687//------------------------------------------------------------------------------
688// A snapshotted representation of a (parent, child) task pair, for tracking
689// hierarchical profiles.
690
struct BASE_EXPORT ParentChildPairSnapshot {
 public:
  // Constructs an empty snapshot (members default-constructed).
  ParentChildPairSnapshot();
  // Snapshots a live (parent, child) Births pair taken from a ThreadData's
  // parent_child_set_.
  explicit ParentChildPairSnapshot(
      const ThreadData::ParentChildPair& parent_child);
  ~ParentChildPairSnapshot();

  // Birth (location and thread) of the task that created the child; this may
  // be a non-local birth (a different thread than where the child was born).
  BirthOnThreadSnapshot parent;
  // Birth of the task that was created by the parent.
  BirthOnThreadSnapshot child;
};
701
702//------------------------------------------------------------------------------
703// A snapshotted representation of the list of ThreadData objects for a process.
704
struct BASE_EXPORT ProcessDataSnapshot {
 public:
  // Constructs an empty snapshot; fields are filled in by the snapshotting
  // code (e.g. for display via about:profiler).
  ProcessDataSnapshot();
  ~ProcessDataSnapshot();

  // Per-task birth/death statistics gathered from all tracked threads.
  std::vector<TaskSnapshot> tasks;
  // (parent, child) task pairs, used to reconstruct hierarchical profiles.
  std::vector<ParentChildPairSnapshot> descendants;
  // ID of the process this data was collected from.
  int process_id;
};
714
715}  // namespace tracked_objects
716
717#endif  // BASE_TRACKED_OBJECTS_H_
718