// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#ifndef BASE_HEAP_PROFILE_TABLE_H_
#define BASE_HEAP_PROFILE_TABLE_H_

#include "addressmap-inl.h"
#include "base/basictypes.h"
#include "base/logging.h"   // for RawFD
// Table to maintain heap profile data, i.e. the set of currently
// active heap memory allocations.  The code is thread-unsafe and
// non-reentrant: each instance must be used by only one thread at a
// time, without self-recursion.
//
// TODO(maxim): add a unittest for this class.
class HeapProfileTable {
 public:

  // Extension to be used for heap profile files.
  static const char kFileExt[];

  // Longest stack trace we record.
  static const int kMaxStackDepth = 32;

  // data types ----------------------------

  // Profile stats.
  struct Stats {
    int32 allocs;      // Number of allocation calls
    int32 frees;       // Number of free calls
    int64 alloc_size;  // Total size of all allocated objects so far
    int64 free_size;   // Total size of all freed objects so far

    // Semantic equality: two Stats are equivalent iff they describe the
    // same net number of live objects and the same net live byte count.
    bool Equivalent(const Stats& x) const {
      return allocs - frees == x.allocs - x.frees  &&
             alloc_size - free_size == x.alloc_size - x.free_size;
    }
  };

  // Info we can return about an allocation.
  struct AllocInfo {
    size_t object_size;  // size of the allocation
    const void* const* call_stack;  // call stack that made the allocation call
    int stack_depth;  // depth of call_stack
    bool live;
    bool ignored;
  };

  // Info we return about an allocation context.
  // An allocation context is a unique caller stack trace
  // of an allocation operation.
  struct AllocContextInfo : public Stats {
    int stack_depth;                // Depth of stack trace
    const void* const* call_stack;  // Stack trace
  };

  // Memory (de)allocator interface we'll use.
  typedef void* (*Allocator)(size_t size);
  typedef void  (*DeAllocator)(void* ptr);

  // interface ---------------------------

  HeapProfileTable(Allocator alloc, DeAllocator dealloc);
  ~HeapProfileTable();
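
  // A minimal construction sketch.  'MyAlloc'/'MyFree' are hypothetical
  // allocators; they must not themselves go through the profiled malloc
  // (the heap profiler uses LowLevelAlloc for this purpose):
  //
  //   static void* MyAlloc(size_t size) { return LowLevelAlloc::Alloc(size); }
  //   static void  MyFree(void* ptr)    { LowLevelAlloc::Free(ptr); }
  //
  //   HeapProfileTable table(MyAlloc, MyFree);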

  // Collect the stack trace of the function that requested the
  // allocation, for passing to RecordAlloc() below.
  //
  // The stack trace is stored in 'stack'.  The stack depth is returned.
  //
  // 'skip_count' gives the number of stack frames between this call
  // and the memory allocation function.
  static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]);

  // Record an allocation at 'ptr' of 'bytes' bytes.  'stack_depth'
  // and 'call_stack' identify the function that requested the
  // allocation; they can be generated using GetCallerStackTrace() above.
  void RecordAlloc(const void* ptr, size_t bytes,
                   int stack_depth, const void* const call_stack[]);

  // Record the deallocation of memory at 'ptr'.
  void RecordFree(const void* ptr);
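
  // A sketch of the intended calling pattern from inside a hypothetical
  // malloc/free wrapper ('RealMalloc'/'RealFree' stand in for the
  // underlying allocator):
  //
  //   void* ptr = RealMalloc(size);
  //   void* stack[kMaxStackDepth];
  //   int depth = GetCallerStackTrace(0, stack);
  //   table->RecordAlloc(ptr, size, depth, stack);
  //   ...
  //   table->RecordFree(ptr);
  //   RealFree(ptr);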

  // Return true iff we have recorded an allocation at 'ptr'.
  // If yes, fill *object_size with the allocation byte size.
  bool FindAlloc(const void* ptr, size_t* object_size) const;
  // Same as FindAlloc, but fills all of *info.
  bool FindAllocDetails(const void* ptr, AllocInfo* info) const;

  // Return true iff "ptr" points into a recorded allocation.
  // If yes, fill *object_ptr with the actual allocation address
  // and *object_size with the allocation byte size.
  // 'max_size' specifies the largest currently possible allocation size.
  bool FindInsideAlloc(const void* ptr, size_t max_size,
                       const void** object_ptr, size_t* object_size) const;
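
  // For example (sketch; 'p' may point anywhere inside a recorded object,
  // and 'kMaxPossibleAlloc' is a hypothetical upper bound on object size):
  //
  //   const void* start;
  //   size_t size;
  //   if (table->FindInsideAlloc(p, kMaxPossibleAlloc, &start, &size)) {
  //     // 'start' is the object's first byte and 'size' its byte size;
  //     // FindAlloc(p, &size) would succeed only if p == start.
  //   }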

  // If "ptr" points to a recorded allocation that is not marked as live,
  // mark it as live and return true.  Else return false.
  // All allocations start as non-live.
  bool MarkAsLive(const void* ptr);

  // If "ptr" points to a recorded allocation, mark it as "ignored".
  // Ignored objects are treated like other objects, except that they
  // are skipped in heap checking reports.
  void MarkAsIgnored(const void* ptr);

  // Return current total (de)allocation statistics.  The result does
  // not include mmap'ed regions.
  const Stats& total() const { return total_; }

  // Allocation data iteration callback: gets passed object pointer and
  // fully-filled AllocInfo.
  typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info);

  // Iterate over the allocation profile data calling "callback"
  // for every allocation.
  void IterateAllocs(AllocIterator callback) const {
    alloc_address_map_->Iterate(MapArgsAllocIterator, callback);
  }
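
  // Example callback (sketch): tally the bytes held by live objects.
  //
  //   static size_t live_bytes = 0;
  //   static void CountLive(const void* ptr, const AllocInfo& info) {
  //     if (info.live) live_bytes += info.object_size;
  //   }
  //
  //   table->IterateAllocs(CountLive);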

  // Allocation context profile data iteration callback
  typedef void (*AllocContextIterator)(const AllocContextInfo& info);

  // Iterate over the allocation context profile data calling "callback"
  // for every allocation context.  Allocation contexts are ordered by the
  // size of allocated space.
  void IterateOrderedAllocContexts(AllocContextIterator callback) const;

  // Fill profile data into buffer 'buf' of size 'size'
  // and return the actual size occupied by the dump in 'buf'.
  // The profile buckets are dumped in decreasing order
  // of currently allocated bytes.
  // We do not provision for 0-terminating 'buf'.
  int FillOrderedProfile(char buf[], int size) const;
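
  // For example (sketch; the buffer size is an arbitrary choice, and a
  // length-taking raw writer is assumed since 'buf' is not 0-terminated):
  //
  //   static char buf[1 << 20];
  //   int len = table->FillOrderedProfile(buf, sizeof(buf));
  //   RawWrite(fd, buf, len);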

  // Cleanup any old profile files matching prefix + ".*" + kFileExt.
  static void CleanupOldProfiles(const char* prefix);

  // Return a snapshot of the current contents of *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  // The result is only valid while this table exists and until
  // the snapshot is discarded by calling ReleaseSnapshot().
  class Snapshot;
  Snapshot* TakeSnapshot();

  // Release a previously taken snapshot.  snapshot must not
  // be used after this call.
  void ReleaseSnapshot(Snapshot* snapshot);

  // Return a snapshot of every non-live, non-ignored object in *this.
  // If "base" is non-NULL, skip any objects present in "base".
  // As a side-effect, clears the "live" bit on every live object in *this.
  // Caller must call ReleaseSnapshot() on result when no longer needed.
  Snapshot* NonLiveSnapshot(Snapshot* base);
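
  // Typical leak-check flow (a sketch):
  //
  //   Snapshot* base = table->TakeSnapshot();   // pre-existing objects
  //   ... run the code under test,
  //       then MarkAsLive() every object still reachable ...
  //   Snapshot* leaks = table->NonLiveSnapshot(base);
  //   if (!leaks->Empty()) { /* report the leaks */ }
  //   table->ReleaseSnapshot(leaks);
  //   table->ReleaseSnapshot(base);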

  // Refresh the internal mmap information from MemoryRegionMap.  Results of
  // FillOrderedProfile and IterateOrderedAllocContexts will contain mmap'ed
  // memory regions as of the time RefreshMMapData was called.
  void RefreshMMapData();

  // Clear the internal mmap information.  Results of FillOrderedProfile and
  // IterateOrderedAllocContexts won't contain mmap'ed memory regions after
  // calling ClearMMapData.
  void ClearMMapData();

 private:

  // data types ----------------------------

  // Hash table bucket to hold (de)allocation stats
  // for a given allocation call stack trace.
  struct Bucket : public Stats {
    uintptr_t    hash;   // Hash value of the stack trace
    int          depth;  // Depth of stack trace
    const void** stack;  // Stack trace
    Bucket*      next;   // Next entry in hash-table
  };

  // Info stored in the address map
  struct AllocValue {
    // Access to the stack-trace bucket
    Bucket* bucket() const {
      return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask));
    }
    // This also does set_live(false).
    void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); }
    size_t  bytes;   // Number of bytes in this allocation

    // Access to the allocation liveness flag (for leak checking)
    bool live() const { return bucket_rep & kLive; }
    void set_live(bool l) {
      bucket_rep = (bucket_rep & ~uintptr_t(kLive)) | (l ? kLive : 0);
    }

    // Should this allocation be ignored if it looks like a leak?
    bool ignore() const { return bucket_rep & kIgnore; }
    void set_ignore(bool r) {
      bucket_rep = (bucket_rep & ~uintptr_t(kIgnore)) | (r ? kIgnore : 0);
    }

   private:
    // We store a few bits in the bottom bits of bucket_rep.
    // (Alignment is at least four, so we have at least two bits.)
    static const int kLive = 1;
    static const int kIgnore = 2;
    static const int kMask = kLive | kIgnore;

    uintptr_t bucket_rep;
  };
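
  // The scheme above is standard pointer tagging: Bucket objects are
  // aligned to at least 4 bytes, so the two low bits of a Bucket* are
  // always zero and can carry the flags.  Round-trip sketch:
  //
  //   AllocValue v;
  //   v.set_bucket(b);           // bucket_rep = (uintptr_t)b, flags cleared
  //   v.set_live(true);          // bucket_rep |= kLive
  //   assert(v.bucket() == b);   // masking with ~kMask strips the flags
  //   assert(v.live() && !v.ignore());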

  // helper for FindInsideAlloc
  static size_t AllocValueSize(const AllocValue& v) { return v.bytes; }

  typedef AddressMap<AllocValue> AllocationMap;

  // Arguments that need to be passed to the DumpNonLiveIterator callback below.
  struct DumpArgs {
    RawFD fd;              // file to write to
    Stats* profile_stats;  // stats to update (may be NULL)

    DumpArgs(RawFD a, Stats* d)
      : fd(a), profile_stats(d) { }
  };

  // helpers ----------------------------

  // Unparse bucket b and print its portion of profile dump into buf.
  // We return the amount of space in buf that we use.  We start printing
  // at buf + buflen, and promise not to go beyond buf + bufsize.
  // We do not provision for 0-terminating 'buf'.
  //
  // If profile_stats is non-NULL, we update *profile_stats by
  // counting bucket b.
  //
  // "extra" is appended to the unparsed bucket.  Typically it is empty,
  // but may be set to something like " heapprofile" for the total
  // bucket to indicate the type of the profile.
  static int UnparseBucket(const Bucket& b,
                           char* buf, int buflen, int bufsize,
                           const char* extra,
                           Stats* profile_stats);
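
  // For example (sketch; called with buflen == 0 so the return value is
  // simply the space used for this one bucket):
  //
  //   Stats stats;
  //   memset(&stats, 0, sizeof(stats));
  //   int len = UnparseBucket(total_, buf, 0, bufsize, " heapprofile", &stats);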

  // Deallocate a given allocation map.
  void DeallocateAllocationMap(AllocationMap* allocation);

  // Deallocate a given bucket table.
  void DeallocateBucketTable(Bucket** table);

  // Get the bucket for the caller stack trace 'key' of depth 'depth' from a
  // bucket hash map 'table', creating the bucket if needed.  '*bucket_count'
  // is incremented when 'bucket_count' is not NULL and a new bucket
  // object is created.
  Bucket* GetBucket(int depth, const void* const key[], Bucket** table,
                    int* bucket_count);

  // Helper for IterateAllocs to do callback signature conversion
  // from AllocationMap::Iterate to AllocIterator.
  static void MapArgsAllocIterator(const void* ptr, AllocValue* v,
                                   AllocIterator callback) {
    AllocInfo info;
    info.object_size = v->bytes;
    info.call_stack = v->bucket()->stack;
    info.stack_depth = v->bucket()->depth;
    info.live = v->live();
    info.ignored = v->ignore();
    callback(ptr, info);
  }

  // Helper for DumpNonLiveProfile to do object-granularity
  // heap profile dumping. It gets passed to AllocationMap::Iterate.
  inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                         const DumpArgs& args);

  // Helper for zeroing out the size fields in buckets.
  inline static void ZeroBucketCountsIterator(
      const void* ptr, AllocValue* v, HeapProfileTable* heap_profile);

  // Helper for IterateOrderedAllocContexts and FillOrderedProfile.
  // Creates a sorted list of Buckets whose length is num_alloc_buckets_ +
  // num_available_mmap_buckets_.
  // The caller is responsible for deallocating the returned list.
  Bucket** MakeSortedBucketList() const;

  // Helper for TakeSnapshot.  Saves object to snapshot.
  static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s);

  // Arguments passed to AddIfNonLive
  struct AddNonLiveArgs {
    Snapshot* dest;
    Snapshot* base;
  };

  // Helper for NonLiveSnapshot.  Adds the object to the destination
  // snapshot if it is non-live.
  static void AddIfNonLive(const void* ptr, AllocValue* v,
                           AddNonLiveArgs* arg);

  // Write contents of "*allocations" as a heap profile to
  // "file_name".  "total" must contain the total of all entries in
  // "*allocations".
  static bool WriteProfile(const char* file_name,
                           const Bucket& total,
                           AllocationMap* allocations);

  // data ----------------------------

  // Memory (de)allocator that we use.
  Allocator alloc_;
  DeAllocator dealloc_;

  // Overall profile stats; we use only the Stats part,
  // but make it a Bucket to pass to UnparseBucket.
  // It doesn't contain mmap'ed regions.
  Bucket total_;

  // Bucket hash table for malloc.
  // We hand-craft one instead of using one of the pre-written
  // ones because we do not want to use malloc when operating on the table.
  // It is only a few lines of code, so no big deal.
  Bucket** alloc_table_;
  int num_alloc_buckets_;

  // Bucket hash table for mmap.
  // This table is filled with the information from MemoryRegionMap by calling
  // RefreshMMapData.
  Bucket** mmap_table_;
  int num_available_mmap_buckets_;

  // Map of all currently allocated objects and mapped regions we know about.
  AllocationMap* alloc_address_map_;
  AllocationMap* mmap_address_map_;

  DISALLOW_COPY_AND_ASSIGN(HeapProfileTable);
};

class HeapProfileTable::Snapshot {
 public:
  const Stats& total() const { return total_; }

  // Report anything in this snapshot as a leak.
  // May use new/delete for temporary storage.
  // If should_symbolize is true, will fork (which is not threadsafe)
  // to turn addresses into symbol names.  Set to false for maximum safety.
  // Also writes a heap profile to "filename" that contains
  // all of the objects in this snapshot.
  void ReportLeaks(const char* checker_name, const char* filename,
                   bool should_symbolize);
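
  // For example (sketch; 'snap' comes from NonLiveSnapshot() above):
  //
  //   if (!snap->Empty()) {
  //     snap->ReportLeaks("my-checker", "/tmp/leaks.heap",
  //                       false /* should_symbolize: avoid fork() */);
  //   }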

  // Report the addresses of all leaked objects.
  // May use new/delete for temporary storage.
  void ReportIndividualObjects();

  bool Empty() const {
    return (total_.allocs == 0) && (total_.alloc_size == 0);
  }

 private:
  friend class HeapProfileTable;

  // Total count/size are stored in a Bucket so we can reuse UnparseBucket
  Bucket total_;

  // We share the Buckets managed by the parent table, but have our
  // own object->bucket map.
  AllocationMap map_;

  Snapshot(Allocator alloc, DeAllocator dealloc) : map_(alloc, dealloc) {
    memset(&total_, 0, sizeof(total_));
  }

  // Callback used to populate a Snapshot object with entries found
  // in another allocation map.
  inline void Add(const void* ptr, const AllocValue& v) {
    map_.Insert(ptr, v);
    total_.allocs++;
    total_.alloc_size += v.bytes;
  }

  // Helpers for sorting and generating leak reports
  struct Entry;
  struct ReportState;
  static void ReportCallback(const void* ptr, AllocValue* v, ReportState*);
  static void ReportObject(const void* ptr, AllocValue* v, char*);

  DISALLOW_COPY_AND_ASSIGN(Snapshot);
};

#endif  // BASE_HEAP_PROFILE_TABLE_H_