1/* Copyright (c) 2006, Google Inc.
2 * All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 *     * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 *     * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 *     * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * ---
31 * Author: Maxim Lifantsev
32 */
33
34#ifndef BASE_MEMORY_REGION_MAP_H_
35#define BASE_MEMORY_REGION_MAP_H_
36
37#include <config.h>
38
39#ifdef HAVE_PTHREAD
40#include <pthread.h>
41#endif
42#include <stddef.h>
43#include <set>
44#include "base/stl_allocator.h"
45#include "base/spinlock.h"
46#include "base/thread_annotations.h"
47#include "base/low_level_alloc.h"
48
// TODO(maxim): add a unittest:
//  execute a bunch of mmaps and compare the resulting memory map
//  with what strace logs;
//  execute a bunch of mmap/munmap calls and compare the memory map with our
//  own accounting of what those mmaps generated
53
// Thread-safe class to collect and query the map of all memory regions
// in a process that have been created with mmap, munmap, mremap, sbrk.
// For each memory region, we keep track of (and provide to users)
// the stack trace that allocated that memory region.
// The recorded stack trace depth is bounded by
// a user-supplied max_stack_depth parameter of Init().
// After initialization with Init()
// (which can happen even before global object constructor execution)
// we collect the map by installing and monitoring MallocHook-s
// to mmap, munmap, mremap, sbrk.
// At any time one can query this map via the provided interface.
// For more details on the design of MemoryRegionMap
// see the comment at the top of our .cc file.
class MemoryRegionMap {
 private:
  // Max call stack recording depth supported by Init().  Set it to be
  // high enough for all our clients.  Note: we do not define storage
  // for this (doing that requires special handling in windows), so
  // don't take the address of it!
  static const int kMaxStackDepth = 32;

 public:
  // interface ================================================================

  // Every client of MemoryRegionMap must call Init() before first use,
  // and Shutdown() after last use.  This allows us to reference count
  // this (singleton) class properly.  MemoryRegionMap assumes it's the
  // only client of MallocHooks, so a client can only register other
  // MallocHooks after calling Init() and must unregister them before
  // calling Shutdown().

  // Initialize this module to record memory allocation stack traces.
  // Stack traces that have more than "max_stack_depth" frames
  // are automatically shrunk to "max_stack_depth" when they are recorded.
  // Init() can be called more than once w/o harm, largest max_stack_depth
  // will be the effective one.
  // It will install mmap, munmap, mremap, sbrk hooks
  // and initialize arena_ and our hook and locks, hence one can use
  // MemoryRegionMap::Lock()/Unlock() to manage the locks.
  // Uses Lock/Unlock inside.
  static void Init(int max_stack_depth);

  // Try to shutdown this module undoing what Init() did.
  // Returns true iff could do full shutdown (or it was not attempted).
  // Full shutdown is attempted when the number of Shutdown() calls equals
  // the number of Init() calls.
  static bool Shutdown();

  // Locks to protect our internal data structures.
  // These also protect use of arena_ if our Init() has been done.
  // The lock is recursive: the same thread may acquire it multiple times
  // (see recursion_count_ / lock_owner_tid_ below).
  static void Lock() EXCLUSIVE_LOCK_FUNCTION(lock_);
  static void Unlock() UNLOCK_FUNCTION(lock_);

  // Returns true when the lock is held by this thread (for use in RAW_CHECK-s).
  static bool LockIsHeld();

  // Locker object that acquires the MemoryRegionMap::Lock
  // for the duration of its lifetime (a C++ scope), i.e. an RAII guard
  // around the static Lock()/Unlock() pair above.
  class LockHolder {
   public:
    LockHolder() { Lock(); }
    ~LockHolder() { Unlock(); }
   private:
    DISALLOW_COPY_AND_ASSIGN(LockHolder);
  };

  // A memory region that we know about through malloc_hook-s.
  // This is essentially an interface through which MemoryRegionMap
  // exports the collected data to its clients.  Thread-compatible.
  struct Region {
    uintptr_t start_addr;  // region start address
    uintptr_t end_addr;  // region end address (one past the last byte;
                         // also the sort key in RegionSet, see RegionCmp)
    int call_stack_depth;  // number of caller stack frames that we saved
    const void* call_stack[kMaxStackDepth];  // caller address stack array
                                             // filled to call_stack_depth size
    bool is_stack;  // does this region contain a thread's stack:
                    // a user of MemoryRegionMap supplies this info

    // Convenience accessor for call_stack[0],
    // i.e. (the program counter of) the immediate caller
    // of this region's allocation function,
    // but it also returns NULL when call_stack_depth is 0,
    // i.e. when we weren't able to get the call stack.
    // This usually happens in recursive calls, when the stack-unwinder
    // calls mmap() which in turn calls the stack-unwinder.
    uintptr_t caller() const {
      return reinterpret_cast<uintptr_t>(call_stack_depth >= 1
                                         ? call_stack[0] : NULL);
    }

    // Return true iff this region overlaps region x.
    bool Overlaps(const Region& x) const {
      return start_addr < x.end_addr  &&  end_addr > x.start_addr;
    }

   private:  // helpers for MemoryRegionMap
    friend class MemoryRegionMap;

    // The ways we create Region-s:
    void Create(const void* start, size_t size) {
      start_addr = reinterpret_cast<uintptr_t>(start);
      end_addr = start_addr + size;
      is_stack = false;  // not a stack till marked such
      call_stack_depth = 0;
      AssertIsConsistent();
    }
    void set_call_stack_depth(int depth) {
      RAW_DCHECK(call_stack_depth == 0, "");  // only one such set is allowed
      call_stack_depth = depth;
      AssertIsConsistent();
    }

    // The ways we modify Region-s:
    void set_is_stack() { is_stack = true; }
    void set_start_addr(uintptr_t addr) {
      start_addr = addr;
      AssertIsConsistent();
    }
    void set_end_addr(uintptr_t addr) {
      end_addr = addr;
      AssertIsConsistent();
    }

    // Verifies that *this contains consistent data, crashes if not the case.
    void AssertIsConsistent() const {
      RAW_DCHECK(start_addr < end_addr, "");
      RAW_DCHECK(call_stack_depth >= 0  &&
                 call_stack_depth <= kMaxStackDepth, "");
    }

    // Post-default construction helper to make a Region suitable
    // for searching in RegionSet regions_: only end_addr (the set's
    // sort key) is meaningful afterwards.
    void SetRegionSetKey(uintptr_t addr) {
      // make sure *this has no usable data:
      if (DEBUG_MODE) memset(this, 0xFF, sizeof(*this));
      end_addr = addr;
    }

    // Note: call_stack[kMaxStackDepth] as a member lets us make Region
    // a simple self-contained struct with correctly behaving bit-wise copying.
    // This simplifies the code of this module but wastes some memory:
    // in most-often use case of this module (leak checking)
    // only one call_stack element out of kMaxStackDepth is actually needed.
    // Making the storage for call_stack variable-sized,
    // substantially complicates memory management for the Region-s:
    // as they need to be created and manipulated for some time
    // w/o any memory allocations, yet are also given out to the users.
  };

  // Find the region that covers addr and write its data into *result if found,
  // in which case *result gets filled so that it stays fully functional
  // even when the underlying region gets removed from MemoryRegionMap.
  // Returns success. Uses Lock/Unlock inside.
  static bool FindRegion(uintptr_t addr, Region* result);

  // Find the region that contains stack_top, mark that region as
  // a stack region, and write its data into *result if found,
  // in which case *result gets filled so that it stays fully functional
  // even when the underlying region gets removed from MemoryRegionMap.
  // Returns success. Uses Lock/Unlock inside.
  static bool FindAndMarkStackRegion(uintptr_t stack_top, Region* result);

 private:  // our internal types ==============================================

  // Region comparator for sorting with STL:
  // orders regions by their end address, which thus acts as the set key.
  struct RegionCmp {
    bool operator()(const Region& x, const Region& y) const {
      return x.end_addr < y.end_addr;
    }
  };

  // We allocate STL objects in our own arena
  // (see arena_ below), never on the normal heap,
  // so that our hooks do not recurse through the allocator they monitor.
  struct MyAllocator {
    static void *Allocate(size_t n) {
      return LowLevelAlloc::AllocWithArena(n, arena_);
    }
    static void Free(const void *p, size_t /* n */) {
      LowLevelAlloc::Free(const_cast<void*>(p));
    }
  };

  // Set of the memory regions, keyed (via RegionCmp) by end_addr
  typedef std::set<Region, RegionCmp,
              STL_Allocator<Region, MyAllocator> > RegionSet;

 public:  // more in-depth interface ==========================================

  // STL iterator with values of Region
  typedef RegionSet::const_iterator RegionIterator;

  // Return the begin/end iterators to all the regions.
  // These need Lock/Unlock protection around their whole usage (loop).
  // Even when the same thread causes modifications during such a loop
  // (which are permitted due to recursive locking)
  // the loop iterator will still be valid as long as its region
  // has not been deleted, but EndRegionLocked should be
  // re-evaluated whenever the set of regions has changed.
  static RegionIterator BeginRegionLocked();
  static RegionIterator EndRegionLocked();

  // Return the accumulated sizes of mapped and unmapped regions.
  static int64 MapSize() { return map_size_; }
  static int64 UnmapSize() { return unmap_size_; }

  // Effectively private type from our .cc =================================
  // public to let us declare global objects:
  union RegionSetRep;

 private:
  // representation ===========================================================

  // Counter of clients of this module that have called Init()
  // (decremented by Shutdown(); see the reference-counting contract above).
  static int client_count_;

  // Maximal number of caller stack frames to save (>= 0).
  static int max_stack_depth_;

  // Arena used for our allocations in regions_.
  static LowLevelAlloc::Arena* arena_;

  // Set of the mmap/sbrk/mremap-ed memory regions
  // To be accessed *only* when Lock() is held.
  // Hence we protect the non-recursive lock used inside of arena_
  // with our recursive Lock(). This lets a user prevent deadlocks
  // when threads are stopped by ListAllProcessThreads at random spots
  // simply by acquiring our recursive Lock() before that.
  static RegionSet* regions_;

  // Lock to protect regions_ variable and the data behind.
  static SpinLock lock_;
  // Lock to protect the recursive-lock bookkeeping itself
  // (recursion_count_ and lock_owner_tid_ below).
  static SpinLock owner_lock_;

  // Recursion count for the recursive lock.
  static int recursion_count_;
  // The thread id of the thread that's inside the recursive lock.
  static pthread_t lock_owner_tid_;

  // Total size of all mapped pages so far
  static int64 map_size_;
  // Total size of all unmapped pages so far
  static int64 unmap_size_;

  // helpers ==================================================================

  // Helper for FindRegion and FindAndMarkStackRegion:
  // returns the region covering 'addr' or NULL; assumes our lock_ is held.
  static const Region* DoFindRegionLocked(uintptr_t addr);

  // Verifying wrapper around regions_->insert(region)
  // To be called to do InsertRegionLocked's work only!
  inline static void DoInsertRegionLocked(const Region& region);
  // Handle regions saved by InsertRegionLocked into a tmp static array
  // by calling insert_func on them.
  inline static void HandleSavedRegionsLocked(
                       void (*insert_func)(const Region& region));
  // Wrapper around DoInsertRegionLocked
  // that handles the case of recursive allocator calls.
  inline static void InsertRegionLocked(const Region& region);

  // Record addition of a memory region at address "start" of size "size"
  // (called from our mmap/mremap/sbrk hooks).
  static void RecordRegionAddition(const void* start, size_t size);
  // Record deletion of a memory region at address "start" of size "size"
  // (called from our munmap/mremap/sbrk hooks).
  static void RecordRegionRemoval(const void* start, size_t size);

  // Hooks for MallocHook
  static void MmapHook(const void* result,
                       const void* start, size_t size,
                       int prot, int flags,
                       int fd, off_t offset);
  static void MunmapHook(const void* ptr, size_t size);
  static void MremapHook(const void* result, const void* old_addr,
                         size_t old_size, size_t new_size, int flags,
                         const void* new_addr);
  static void SbrkHook(const void* result, ptrdiff_t increment);

  // Log all memory regions; Useful for debugging only.
  // Assumes Lock() is held
  static void LogAllLocked();

  DISALLOW_COPY_AND_ASSIGN(MemoryRegionMap);
};
339
340#endif  // BASE_MEMORY_REGION_MAP_H_
341