// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Maxim Lifantsev
//
// Running:
// ./heap-checker_unittest
//
// If the unittest crashes because it can't find pprof, try:
// PPROF_PATH=/usr/local/someplace/bin/pprof ./heap-checker_unittest
//
// To test that the whole-program heap checker will actually cause a leak, try:
// HEAPCHECK_TEST_LEAK= ./heap-checker_unittest
// HEAPCHECK_TEST_LOOP_LEAK= ./heap-checker_unittest
//
// Note: Both of the above commands *should* abort with an error message.

// CAVEAT: Do not use vector<> and string on-heap objects in this test,
// otherwise the test can sometimes fail for tricky leak checks
// when we want some allocated object not to be found live by the heap checker.
// This can happen with memory allocators like tcmalloc that can allocate
// heap objects back to back without any book-keeping data in between.
// What happens is that end-of-storage pointers of a live vector
// (or a string depending on the STL implementation used)
// can happen to point to that other heap-allocated
// object that is not reachable otherwise and that
// we don't want to be reachable.
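//
// A rough illustration of that failure mode (addresses and sizes made up):
//
//   [ live vector's buffer .......... ][ otherwise-unreachable object ]
//   ^begin                            ^end-of-storage
//
// The vector's end-of-storage pointer happens to equal the address of the
// neighboring heap object, so the liveness flood treats that object as
// reachable even though nothing else points to it.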
//
// The implication of this for real leak checking
// is just one more chance for the liveness flood to be inexact
// (see the comment in our .h file).

#include "config_for_unittests.h"
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#if defined HAVE_STDINT_H
#include <stdint.h>             // to get uint16_t (ISO naming madness)
#elif defined HAVE_INTTYPES_H
#include <inttypes.h>           // another place uint16_t might be defined
#endif
#include <sys/types.h>
#include <stdlib.h>
#include <errno.h>              // errno
#ifdef HAVE_UNISTD_H
#include <unistd.h>             // for sleep(), geteuid()
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <fcntl.h>              // for open(), close()
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>           // backtrace
#endif
#ifdef HAVE_GRP_H
#include <grp.h>                // getgrent, getgrnam
#endif
#ifdef HAVE_PWD_H
#include <pwd.h>
#endif

#include <algorithm>
#include <iostream>             // for cout
#include <iomanip>              // for hex
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>

#include "base/commandlineflags.h"
#include "base/googleinit.h"
#include "base/logging.h"
#include "base/thread_lister.h"
#include <gperftools/heap-checker.h>
#include "memory_region_map.h"
#include <gperftools/malloc_extension.h>
#include <gperftools/stacktrace.h>

// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif

using namespace std;

// ========================================================================= //

// TODO(maxim): write a shell script to test that these indeed crash us
//              (i.e. we do detect leaks)
//              Maybe add more such crash tests.

DEFINE_bool(test_leak,
            EnvToBool("HEAP_CHECKER_TEST_TEST_LEAK", false),
            "If should cause a leak crash");
DEFINE_bool(test_loop_leak,
            EnvToBool("HEAP_CHECKER_TEST_TEST_LOOP_LEAK", false),
            "If should cause a looped leak crash");
DEFINE_bool(test_register_leak,
            EnvToBool("HEAP_CHECKER_TEST_TEST_REGISTER_LEAK", false),
            "If should cause a leak crash by hiding a pointer "
            "that is only in a register");
DEFINE_bool(test_cancel_global_check,
            EnvToBool("HEAP_CHECKER_TEST_TEST_CANCEL_GLOBAL_CHECK", false),
            "If should test HeapLeakChecker::CancelGlobalCheck "
            "when --test_leak or --test_loop_leak are given; "
            "the test should not fail then");
DEFINE_bool(maybe_stripped,
            EnvToBool("HEAP_CHECKER_TEST_MAYBE_STRIPPED", true),
            "If we think we can be a stripped binary");
DEFINE_bool(interfering_threads,
            EnvToBool("HEAP_CHECKER_TEST_INTERFERING_THREADS", true),
            "If we should use threads trying "
            "to interfere with leak checking");
DEFINE_bool(hoarding_threads,
            EnvToBool("HEAP_CHECKER_TEST_HOARDING_THREADS", true),
            "If threads (usually the manager thread) are known "
            "to retain some old state in their global buffers, "
            "so that it's hard to force leaks when threads are around");
            // TODO(maxim): Change the default to false
            // when the standard environment uses NPTL threads:
            // they do not seem to have this problem.
DEFINE_bool(no_threads,
            EnvToBool("HEAP_CHECKER_TEST_NO_THREADS", false),
            "If we should not use any threads");
            // This is used so we can make can_create_leaks_reliably true
            // for any pthread implementation and test with that.

DECLARE_int64(heap_check_max_pointer_offset);   // heap-checker.cc
DECLARE_string(heap_check);  // in heap-checker.cc

#define WARN_IF(cond, msg)   LOG_IF(WARNING, cond, msg)

// This is an evil macro!  Be very careful using it...
#undef VLOG          // and we start by evilly overriding logging.h VLOG
#define VLOG(lvl)    if (FLAGS_verbose >= (lvl))  cout << "\n"
// This is, likewise, evil
#define LOGF         VLOG(INFO)
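// Note: unlike the logging.h VLOG, the override above simply writes to
// cout whenever FLAGS_verbose >= lvl, so "VLOG(2) << x;" below means
// "print x when FLAGS_verbose is at least 2".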

static void RunHeapBusyThreads();  // below


class Closure {
 public:
  virtual ~Closure() { }
  virtual void Run() = 0;
};

class Callback0 : public Closure {
 public:
  typedef void (*FunctionSignature)();

  inline Callback0(FunctionSignature f) : f_(f) {}
  virtual void Run() { (*f_)(); delete this; }

 private:
  FunctionSignature f_;
};

template <class P1> class Callback1 : public Closure {
 public:
  typedef void (*FunctionSignature)(P1);

  inline Callback1(FunctionSignature f, P1 p1) : f_(f), p1_(p1) {}
  virtual void Run() { (*f_)(p1_); delete this; }

 private:
  FunctionSignature f_;
  P1 p1_;
};

template <class P1, class P2> class Callback2 : public Closure {
 public:
  typedef void (*FunctionSignature)(P1,P2);

  inline Callback2(FunctionSignature f, P1 p1, P2 p2) : f_(f), p1_(p1), p2_(p2) {}
  virtual void Run() { (*f_)(p1_, p2_); delete this; }

 private:
  FunctionSignature f_;
  P1 p1_;
  P2 p2_;
};

inline Callback0* NewCallback(void (*function)()) {
  return new Callback0(function);
}

template <class P1>
inline Callback1<P1>* NewCallback(void (*function)(P1), P1 p1) {
  return new Callback1<P1>(function, p1);
}

template <class P1, class P2>
inline Callback2<P1,P2>* NewCallback(void (*function)(P1,P2), P1 p1, P2 p2) {
  return new Callback2<P1,P2>(function, p1, p2);
}
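
// These callbacks own themselves: Run() ends with "delete this", so a
// typical use in this test is fire-and-forget, e.g. (from AllocHidden below):
//   RunHidden(NewCallback(DoAllocHidden, size, &r));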


// Set to true at end of main, so threads know.  Not entirely thread-safe,
// but probably good enough.
static bool g_have_exited_main = false;

// If we can reliably create leaks (i.e. make leaked object
// really unreachable from any global data).
static bool can_create_leaks_reliably = false;

// We use a simple allocation wrapper
// to make sure we wipe out the newly allocated objects
// in case they still happened to contain some pointer data
// accidentally left by the memory allocator.
struct Initialized { };
static Initialized initialized;
void* operator new(size_t size, const Initialized&) {
  // Below we use "p = new(initialized) Foo[1];" and  "delete[] p;"
  // instead of "p = new(initialized) Foo;"
  // when we need to delete an allocated object.
  void* p = malloc(size);
  memset(p, 0, size);
  return p;
}
void* operator new[](size_t size, const Initialized&) {
  char* p = new char[size];
  memset(p, 0, size);
  return p;
}
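
// The pattern used throughout this test (see the comment in the first
// overload above) is therefore:
//   Foo* p = new(initialized) Foo[1];   // zero-filled allocation
//   ...
//   delete [] p;                        // ordinary array delete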

static void DoWipeStack(int n);  // defined below
static void WipeStack() { DoWipeStack(20); }

static void Pause() {
  poll(NULL, 0, 77);  // time for thread activity in HeapBusyThreadBody

  // Indirectly test malloc_extension.*:
  CHECK(MallocExtension::instance()->VerifyAllMemory());
  int blocks;
  size_t total;
  int histogram[kMallocHistogramSize];
  if (MallocExtension::instance()
       ->MallocMemoryStats(&blocks, &total, histogram)  &&  total != 0) {
    VLOG(3) << "Malloc stats: " << blocks << " blocks of "
            << total << " bytes";
    for (int i = 0; i < kMallocHistogramSize; ++i) {
      if (histogram[i]) {
        VLOG(3) << "  Malloc histogram at " << i << " : " << histogram[i];
      }
    }
  }
  WipeStack();  // e.g. MallocExtension::VerifyAllMemory
                // can leave pointers to heap objects on stack
}

// Make gcc think a pointer is "used"
template <class T>
static void Use(T** foo) {
  VLOG(2) << "Dummy-using " << static_cast<void*>(*foo) << " at " << foo;
}

// Arbitrary value, but not such that xor'ing with it is likely
// to map one valid pointer to another valid pointer:
static const uintptr_t kHideMask =
  static_cast<uintptr_t>(0xF03A5F7BF03A5F7BLL);
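// (On 32-bit platforms the cast keeps only the low 32 bits, 0xF03A5F7B;
// repeating the same 32-bit pattern twice presumably keeps the mask equally
// effective on both 32-bit and 64-bit builds.)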

// Helpers to hide a pointer from live data traversal.
// We just xor the pointer so that (with high probability)
// it's not a valid address of a heap object anymore.
// Both Hide and UnHide must be executed within RunHidden() below
// to prevent leaving stale data on active stack that can be a pointer
// to a heap object that is not actually reachable via live variables.
// (UnHide might leave heap pointer value for an object
//  that will be deallocated but later another object
//  can be allocated at the same heap address.)
template <class T>
static void Hide(T** ptr) {
  // we cast values, not dereferenced pointers, so no aliasing issues:
  *ptr = reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(*ptr) ^ kHideMask);
  VLOG(2) << "hid: " << static_cast<void*>(*ptr);
}

template <class T>
static void UnHide(T** ptr) {
  VLOG(2) << "unhiding: " << static_cast<void*>(*ptr);
  // we cast values, not dereferenced pointers, so no aliasing issues:
  *ptr = reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(*ptr) ^ kHideMask);
}
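
// Typical round trip (see DoAllocHidden/DoDeAllocHidden below):
//   void* p = new(initialized) char[size];
//   Hide(&p);     // p no longer looks like a heap address
//   ...           // leak checks may run here without seeing p
//   UnHide(&p);   // restore the real address
//   delete [] reinterpret_cast<char*>(p);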

static void LogHidden(const char* message, const void* ptr) {
  LOGF << message << " : "
       << ptr << " ^ " << reinterpret_cast<void*>(kHideMask) << endl;
}

// volatile to prevent the compiler from inlining the calls to these
void (*volatile run_hidden_ptr)(Closure* c, int n);
void (*volatile wipe_stack_ptr)(int n);

static void DoRunHidden(Closure* c, int n) {
  if (n) {
    VLOG(10) << "Level " << n << " at " << &n;
    (*run_hidden_ptr)(c, n-1);
    (*wipe_stack_ptr)(n);
    sleep(0);  // undo -foptimize-sibling-calls
  } else {
    c->Run();
  }
}

/*static*/ void DoWipeStack(int n) {
  VLOG(10) << "Wipe level " << n << " at " << &n;
  if (n) {
    const int sz = 30;
    volatile int arr[sz];
    for (int i = 0; i < sz; ++i) arr[i] = 0;
    (*wipe_stack_ptr)(n-1);
    sleep(0);  // undo -foptimize-sibling-calls
  }
}

// This executes closure c several stack frames down from the current one
// and then makes an effort to also wipe out the stack data that was used by
// the closure.
// This way we prevent the leak checker from finding any temporary pointers
// of the closure execution on the stack and deciding that
// these pointers (and the pointed objects) are still live.
static void RunHidden(Closure* c) {
  DoRunHidden(c, 15);
  DoWipeStack(20);
}

static void DoAllocHidden(size_t size, void** ptr) {
  void* p = new(initialized) char[size];
  Hide(&p);
  Use(&p);  // use only hidden versions
  VLOG(2) << "Allocated hidden " << p << " at " << &p;
  *ptr = p;  // assign the hidden versions
}

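// Returns a hidden (xor-ed) pointer to a fresh allocation of 'size' bytes;
// DeAllocHidden() below unhides such a pointer and frees the memory.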
static void* AllocHidden(size_t size) {
  void* r;
  RunHidden(NewCallback(DoAllocHidden, size, &r));
  return r;
}

static void DoDeAllocHidden(void** ptr) {
  Use(ptr);  // use only hidden versions
  void* p = *ptr;
  VLOG(2) << "Deallocating hidden " << p;
  UnHide(&p);
  delete [] reinterpret_cast<char*>(p);
}

static void DeAllocHidden(void** ptr) {
  RunHidden(NewCallback(DoDeAllocHidden, ptr));
  *ptr = NULL;
  Use(ptr);
}

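// Makes sure an object we are about to leak on purpose does not get
// allocated at an address that stale pointers (left over from some earlier,
// already-freed object) still reference, which would make it look reachable.
// Only does anything in opt-mode (NDEBUG) builds; see the comment inside.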
void PreventHeapReclaiming(size_t size) {
#ifdef NDEBUG
  if (true) {
    static void** no_reclaim_list = NULL;
    CHECK(size >= sizeof(void*));
    // We can't use malloc_reclaim_memory flag in opt mode as debugallocation.cc
    // is not used. Instead we allocate a bunch of heap objects that are
    // of the same size as what we are going to leak to ensure that the object
    // we are about to leak is not at the same address as some old allocated
    // and freed object that might still have pointers leading to it.
    for (int i = 0; i < 100; ++i) {
      void** p = reinterpret_cast<void**>(new(initialized) char[size]);
      p[0] = no_reclaim_list;
      no_reclaim_list = p;
    }
  }
#endif
}

static bool RunSilent(HeapLeakChecker* check,
                      bool (HeapLeakChecker::* func)()) {
  // By default, don't print the 'we detected a leak' message in the
  // cases we're expecting a leak (we still print when --v is >= 1).
  // This way, the logging output is less confusing: we only print
  // "we detected a leak", and how to diagnose it, for *unexpected* leaks.
  int32 old_FLAGS_verbose = FLAGS_verbose;
  if (!VLOG_IS_ON(1))             // not on a verbose setting
    FLAGS_verbose = FATAL;        // only log fatal errors
  const bool retval = (check->*func)();
  FLAGS_verbose = old_FLAGS_verbose;
  return retval;
}

#define RUN_SILENT(check, func)  RunSilent(&(check), &HeapLeakChecker::func)
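// Example: RUN_SILENT(*check, BriefNoLeaks) calls check->BriefNoLeaks()
// with logging temporarily reduced to FATAL, as done in VerifyLeaks() below.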

enum CheckType { SAME_HEAP, NO_LEAKS };

static void VerifyLeaks(HeapLeakChecker* check, CheckType type,
                        int leaked_bytes, int leaked_objects) {
  WipeStack();  // to help with can_create_leaks_reliably
  const bool no_leaks =
    type == NO_LEAKS ? RUN_SILENT(*check, BriefNoLeaks)
                     : RUN_SILENT(*check, BriefSameHeap);
  if (can_create_leaks_reliably) {
    // these might still fail occasionally, but it should be very rare
    CHECK_EQ(no_leaks, false);
    CHECK_EQ(check->BytesLeaked(), leaked_bytes);
    CHECK_EQ(check->ObjectsLeaked(), leaked_objects);
  } else {
    WARN_IF(no_leaks != false,
            "Expected leaks not found: "
            "Some liveness flood must be too optimistic");
  }
}
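
// The Death* tests below leak known numbers of bytes and objects on purpose
// and check that the heap checker reports exactly those leaks (death_noleaks
// is the exception: it leaks nothing and expects a clean report).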

// allocates and does not deallocate (i.e. leaks) two objects
static void TestHeapLeakCheckerDeathSimple() {
  HeapLeakChecker check("death_simple");
  void* foo = AllocHidden(100 * sizeof(int));
  Use(&foo);
  void* bar = AllocHidden(300);
  Use(&bar);
  LogHidden("Leaking", foo);
  LogHidden("Leaking", bar);
  Pause();
  VerifyLeaks(&check, NO_LEAKS, 300 + 100 * sizeof(int), 2);
  DeAllocHidden(&foo);
  DeAllocHidden(&bar);
}

static void MakeDeathLoop(void** arr1, void** arr2) {
  PreventHeapReclaiming(2 * sizeof(void*));
  void** a1 = new(initialized) void*[2];
  void** a2 = new(initialized) void*[2];
  a1[1] = reinterpret_cast<void*>(a2);
  a2[1] = reinterpret_cast<void*>(a1);
  Hide(&a1);
  Hide(&a2);
  Use(&a1);
  Use(&a2);
  VLOG(2) << "Made hidden loop at " << &a1 << " to " << arr1;
  *arr1 = a1;
  *arr2 = a2;
}

// allocates and does not deallocate two objects linked into a loop
static void TestHeapLeakCheckerDeathLoop() {
  HeapLeakChecker check("death_loop");
  void* arr1;
  void* arr2;
  RunHidden(NewCallback(MakeDeathLoop, &arr1, &arr2));
  Use(&arr1);
  Use(&arr2);
  LogHidden("Leaking", arr1);
  LogHidden("Leaking", arr2);
  Pause();
  VerifyLeaks(&check, NO_LEAKS, 4 * sizeof(void*), 2);
  DeAllocHidden(&arr1);
  DeAllocHidden(&arr2);
}

// deallocates more than it allocates during the check
static void TestHeapLeakCheckerDeathInverse() {
  void* bar = AllocHidden(250 * sizeof(int));
  Use(&bar);
  LogHidden("Pre leaking", bar);
  Pause();
  HeapLeakChecker check("death_inverse");
  void* foo = AllocHidden(100 * sizeof(int));
  Use(&foo);
  LogHidden("Leaking", foo);
  DeAllocHidden(&bar);
  Pause();
  VerifyLeaks(&check, SAME_HEAP,
              100 * static_cast<int64>(sizeof(int)),
              1);
  DeAllocHidden(&foo);
}

// deallocates more than it allocates during the check, but leaks nothing
static void TestHeapLeakCheckerDeathNoLeaks() {
  void* foo = AllocHidden(100 * sizeof(int));
  Use(&foo);
  void* bar = AllocHidden(250 * sizeof(int));
  Use(&bar);
  HeapLeakChecker check("death_noleaks");
  DeAllocHidden(&bar);
  CHECK_EQ(check.BriefNoLeaks(), true);
  DeAllocHidden(&foo);
}

// have fewer objects
static void TestHeapLeakCheckerDeathCountLess() {
  void* bar1 = AllocHidden(50 * sizeof(int));
  Use(&bar1);
  void* bar2 = AllocHidden(50 * sizeof(int));
  Use(&bar2);
  LogHidden("Pre leaking", bar1);
  LogHidden("Pre leaking", bar2);
  Pause();
  HeapLeakChecker check("death_count_less");
  void* foo = AllocHidden(100 * sizeof(int));
  Use(&foo);
  LogHidden("Leaking", foo);
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
  Pause();
  VerifyLeaks(&check, SAME_HEAP,
              100 * sizeof(int),
              1);
  DeAllocHidden(&foo);
}

// have more objects
static void TestHeapLeakCheckerDeathCountMore() {
  void* foo = AllocHidden(100 * sizeof(int));
  Use(&foo);
  LogHidden("Pre leaking", foo);
  Pause();
  HeapLeakChecker check("death_count_more");
  void* bar1 = AllocHidden(50 * sizeof(int));
  Use(&bar1);
  void* bar2 = AllocHidden(50 * sizeof(int));
  Use(&bar2);
  LogHidden("Leaking", bar1);
  LogHidden("Leaking", bar2);
  DeAllocHidden(&foo);
  Pause();
  VerifyLeaks(&check, SAME_HEAP,
              100 * sizeof(int),
              2);
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
}

static void TestHiddenPointer() {
  int i;
  void* foo = &i;
  HiddenPointer<void> p(foo);
  CHECK_EQ(foo, p.get());

  // Confirm pointer doesn't appear to contain a byte sequence
  // that == the pointer.  We don't really need to test that
  // the xor trick itself works, as without it nothing in this
  // test suite would work.  See the Hide/Unhide/*Hidden* set
  // of helper methods.
  CHECK_NE(foo, *reinterpret_cast<void**>(&p));
}

// simple tests that deallocate what they allocated
static void TestHeapLeakChecker() {
  { HeapLeakChecker check("trivial");
    int foo = 5;
    int* p = &foo;
    Use(&p);
    Pause();
    CHECK(check.BriefSameHeap());
  }
  Pause();
  { HeapLeakChecker check("simple");
    void* foo = AllocHidden(100 * sizeof(int));
    Use(&foo);
    void* bar = AllocHidden(200 * sizeof(int));
    Use(&bar);
    DeAllocHidden(&foo);
    DeAllocHidden(&bar);
    Pause();
    CHECK(check.BriefSameHeap());
  }
}

// no false positives
static void TestHeapLeakCheckerNoFalsePositives() {
  { HeapLeakChecker check("trivial_p");
    int foo = 5;
    int* p = &foo;
    Use(&p);
    Pause();
    CHECK(check.BriefSameHeap());
  }
  Pause();
  { HeapLeakChecker check("simple_p");
    void* foo = AllocHidden(100 * sizeof(int));
    Use(&foo);
    void* bar = AllocHidden(200 * sizeof(int));
    Use(&bar);
    DeAllocHidden(&foo);
    DeAllocHidden(&bar);
    Pause();
    CHECK(check.SameHeap());
  }
}

// test that we detect leaks when we have same total # of bytes and
// objects, but different individual object sizes
static void TestLeakButTotalsMatch() {
  void* bar1 = AllocHidden(240 * sizeof(int));
  Use(&bar1);
  void* bar2 = AllocHidden(160 * sizeof(int));
  Use(&bar2);
  LogHidden("Pre leaking", bar1);
  LogHidden("Pre leaking", bar2);
  Pause();
  HeapLeakChecker check("trick");
  void* foo1 = AllocHidden(280 * sizeof(int));
  Use(&foo1);
  void* foo2 = AllocHidden(120 * sizeof(int));
  Use(&foo2);
  LogHidden("Leaking", foo1);
  LogHidden("Leaking", foo2);
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
  Pause();

  // foo1 and foo2 leaked
  VerifyLeaks(&check, NO_LEAKS, (280+120)*sizeof(int), 2);

  DeAllocHidden(&foo1);
  DeAllocHidden(&foo2);
}

// no false negatives from pprof
static void TestHeapLeakCheckerDeathTrick() {
  void* bar1 = AllocHidden(240 * sizeof(int));
  Use(&bar1);
  void* bar2 = AllocHidden(160 * sizeof(int));
  Use(&bar2);
  HeapLeakChecker check("death_trick");
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
  void* foo1 = AllocHidden(280 * sizeof(int));
  Use(&foo1);
  void* foo2 = AllocHidden(120 * sizeof(int));
  Use(&foo2);
  // TODO(maxim): use the above if we make pprof work in automated test runs
  if (!FLAGS_maybe_stripped) {
    CHECK_EQ(RUN_SILENT(check, SameHeap), false);
      // pprof checking should catch the leak
  } else {
    WARN_IF(RUN_SILENT(check, SameHeap) != false,
            "death_trick leak is not caught; "
            "we must be using a stripped binary");
  }
  DeAllocHidden(&foo1);
  DeAllocHidden(&foo2);
}

// simple leak
static void TransLeaks() {
  AllocHidden(1 * sizeof(char));
}

// range-based disabling using Disabler
static void ScopedDisabledLeaks() {
  HeapLeakChecker::Disabler disabler;
  AllocHidden(3 * sizeof(int));
  TransLeaks();
  (void)malloc(10);  // Direct leak
}

// have different disabled leaks
static void* RunDisabledLeaks(void* a) {
  ScopedDisabledLeaks();
  return a;
}

// have different disabled leaks inside of a thread
static void ThreadDisabledLeaks() {
  if (FLAGS_no_threads)  return;
  pthread_t tid;
  pthread_attr_t attr;
  CHECK_EQ(pthread_attr_init(&attr), 0);
  CHECK_EQ(pthread_create(&tid, &attr, RunDisabledLeaks, NULL), 0);
  void* res;
  CHECK_EQ(pthread_join(tid, &res), 0);
}

// different disabled leaks (some in threads)
static void TestHeapLeakCheckerDisabling() {
  HeapLeakChecker check("disabling");

  RunDisabledLeaks(NULL);
  RunDisabledLeaks(NULL);
  ThreadDisabledLeaks();
  RunDisabledLeaks(NULL);
  ThreadDisabledLeaks();
  ThreadDisabledLeaks();

  Pause();

  CHECK(check.SameHeap());
}

typedef set<int> IntSet;

static int some_ints[] = { 1, 2, 3, 21, 22, 23, 24, 25 };

static void DoTestSTLAlloc() {
  IntSet* x = new(initialized) IntSet[1];
  *x = IntSet(some_ints, some_ints + 6);
  for (int i = 0; i < 1000; i++) {
    x->insert(i*3);
  }
  delete [] x;
}

// Check that normal STL usage does not result in a leak report.
// (In particular we test that there's no complex STL's own allocator
// running on top of our allocator with hooks to heap profiler
// that can result in false leak report in this case.)
static void TestSTLAlloc() {
  HeapLeakChecker check("stl");
  RunHidden(NewCallback(DoTestSTLAlloc));
  CHECK_EQ(check.BriefSameHeap(), true);
}

static void DoTestSTLAllocInverse(IntSet** setx) {
  IntSet* x = new(initialized) IntSet[1];
  *x = IntSet(some_ints, some_ints + 3);
  for (int i = 0; i < 100; i++) {
    x->insert(i*2);
  }
  Hide(&x);
  *setx = x;
}

static void FreeTestSTLAllocInverse(IntSet** setx) {
  IntSet* x = *setx;
  UnHide(&x);
  delete [] x;
}

// Check that normal leaked STL usage *does* result in a leak report.
// (In particular we test that there's no complex STL's own allocator
// running on top of our allocator with hooks to heap profiler
// that can result in false absence of leak report in this case.)
static void TestSTLAllocInverse() {
  HeapLeakChecker check("death_inverse_stl");
  IntSet* x;
  RunHidden(NewCallback(DoTestSTLAllocInverse, &x));
  LogHidden("Leaking", x);
  if (can_create_leaks_reliably) {
    WipeStack();  // to help with can_create_leaks_reliably
    // these might still fail occasionally, but it should be very rare
    CHECK_EQ(RUN_SILENT(check, BriefNoLeaks), false);
    CHECK_GE(check.BytesLeaked(), 100 * sizeof(int));
    CHECK_GE(check.ObjectsLeaked(), 100);
      // assumes set<>s are represented by some kind of binary tree
      // or something else allocating >=1 heap object per set object
  } else {
    WARN_IF(RUN_SILENT(check, BriefNoLeaks) != false,
            "Expected leaks not found: "
            "Some liveness flood must be too optimistic");
  }
  RunHidden(NewCallback(FreeTestSTLAllocInverse, &x));
}

template<class Alloc>
static void DirectTestSTLAlloc(Alloc allocator, const char* name) {
  HeapLeakChecker check((string("direct_stl-") + name).c_str());
  static const int kSize = 1000;
  typename Alloc::pointer ptrs[kSize];
  for (int i = 0; i < kSize; ++i) {
    typename Alloc::pointer p = allocator.allocate(i*3+1);
    HeapLeakChecker::IgnoreObject(p);
    // This will crash if p is not known to heap profiler:
    // (i.e. STL's "allocator" does not have a direct hook to heap profiler)
    HeapLeakChecker::UnIgnoreObject(p);
    ptrs[i] = p;
  }
  for (int i = 0; i < kSize; ++i) {
    allocator.deallocate(ptrs[i], i*3+1);
    ptrs[i] = NULL;
  }
  CHECK(check.BriefSameHeap());  // just in case
}
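
// DirectTestSTLAlloc is invoked from main() via the DTSL macro, e.g.
//   DTSL(std::allocator<char>());
// expands to DirectTestSTLAlloc(std::allocator<char>(), "std::allocator<char>()").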

static struct group* grp = NULL;
static const int kKeys = 50;
static pthread_key_t key[kKeys];

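// Destructor for the pthread keys created in KeyInit() below: frees the
// per-thread buffers that TestLibCAllocate() attaches to each key.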
static void KeyFree(void* ptr) {
  delete [] reinterpret_cast<char*>(ptr);
}

static bool key_init_has_run = false;

static void KeyInit() {
  for (int i = 0; i < kKeys; ++i) {
    CHECK_EQ(pthread_key_create(&key[i], KeyFree), 0);
    VLOG(2) << "pthread key " << i << " : " << key[i];
  }
  key_init_has_run = true;   // needed for a sanity-check
}

// force various C library static and thread-specific allocations
static void TestLibCAllocate() {
  CHECK(key_init_has_run);
  for (int i = 0; i < kKeys; ++i) {
    void* p = pthread_getspecific(key[i]);
    if (NULL == p) {
      if (i == 0) {
        // Test-logging inside threads which (potentially) creates and uses
        // thread-local data inside standard C++ library:
        VLOG(0) << "Adding pthread-specifics for thread " << pthread_self()
                << " pid " << getpid();
      }
      p = new(initialized) char[77 + i];
      VLOG(2) << "pthread specific " << i << " : " << p;
      pthread_setspecific(key[i], p);
    }
  }

  strerror(errno);
  const time_t now = time(NULL);
  ctime(&now);
#ifdef HAVE_EXECINFO_H
  void *stack[1];
  backtrace(stack, 1);
#endif
#ifdef HAVE_GRP_H
  gid_t gid = getgid();
  getgrgid(gid);
  if (grp == NULL)  grp = getgrent();  // a race condition here is okay
  getgrnam(grp->gr_name);
#endif
#ifdef HAVE_PWD_H
  getpwuid(geteuid());
#endif
}

// Continuous random heap memory activity to try to disrupt heap checking.
static void* HeapBusyThreadBody(void* a) {
  const int thread_num = reinterpret_cast<intptr_t>(a);
  VLOG(0) << "A new HeapBusyThread " << thread_num;
  TestLibCAllocate();

  int user = 0;
  // Try to hide ptr from heap checker in a CPU register:
  // Here we are just making a best effort to put the only pointer
  // to a heap object into a thread register to test
  // the thread-register finding machinery in the heap checker.
#if defined(__i386__) && defined(__GNUC__)
  register int** ptr asm("esi");
#elif defined(__x86_64__) && defined(__GNUC__)
  register int** ptr asm("r15");
#else
  register int** ptr;
#endif
  ptr = NULL;
  typedef set<int> Set;
  Set s1;
  while (1) {
    // TestLibCAllocate() calls libc functions that don't work so well
    // after main() has exited.  So we just don't do the test then.
    if (!g_have_exited_main)
      TestLibCAllocate();

    if (ptr == NULL) {
      ptr = new(initialized) int*[1];
      *ptr = new(initialized) int[1];
    }
    set<int>* s2 = new(initialized) set<int>[1];
    s1.insert(random());
    s2->insert(*s1.begin());
    user += *s2->begin();
    **ptr += user;
    if (random() % 51 == 0) {
      s1.clear();
      if (random() % 2 == 0) {
        s1.~Set();
        new(&s1) Set;
      }
    }
    VLOG(3) << pthread_self() << " (" << getpid() << "): in wait: "
            << ptr << ", " << *ptr << "; " << s1.size();
    VLOG(2) << pthread_self() << " (" << getpid() << "): in wait, ptr = "
            << reinterpret_cast<void*>(
                 reinterpret_cast<uintptr_t>(ptr) ^ kHideMask)
            << "^" << reinterpret_cast<void*>(kHideMask);
    if (FLAGS_test_register_leak  &&  thread_num % 5 == 0) {
      // Hide the register "ptr" value with an xor mask.
      // If one provides --test_register_leak flag, the test should
      // (with very high probability) crash on some leak check
      // with a leak report (of some x * sizeof(int) + y * sizeof(int*) bytes)
      // pointing at the two lines above in this function
      // with "new(initialized) int" in them as the allocators
      // of the leaked objects.
      // CAVEAT: We can't really prevent the compiler from saving some
      // temporary values of "ptr" on the stack, which can let us find
      // the heap objects not via the register.
      // Hence it's normal if for certain compilers or optimization modes
      // --test_register_leak does not cause a leak crash of the above form
      // (this happens e.g. for gcc 4.0.1 in opt mode).
      ptr = reinterpret_cast<int **>(
          reinterpret_cast<uintptr_t>(ptr) ^ kHideMask);
      // busy loop to get the thread interrupted at:
      for (int i = 1; i < 10000000; ++i)  user += (1 + user * user * 5) / i;
      ptr = reinterpret_cast<int **>(
          reinterpret_cast<uintptr_t>(ptr) ^ kHideMask);
    } else {
      poll(NULL, 0, random() % 100);
    }
    VLOG(2) << pthread_self() << ": continuing";
    if (random() % 3 == 0) {
      delete [] *ptr;
      delete [] ptr;
      ptr = NULL;
    }
    delete [] s2;
  }
  return a;
}

static void RunHeapBusyThreads() {
  KeyInit();
  if (!FLAGS_interfering_threads || FLAGS_no_threads)  return;

  const int n = 17;  // make many threads

  pthread_t tid;
  pthread_attr_t attr;
  CHECK_EQ(pthread_attr_init(&attr), 0);
  // make them and let them run
  for (int i = 0; i < n; ++i) {
    VLOG(0) << "Creating extra thread " << i + 1;
    CHECK(pthread_create(&tid, &attr, HeapBusyThreadBody,
                         reinterpret_cast<void*>(i)) == 0);
  }

  Pause();
  Pause();
}

// ========================================================================= //

// This code section tests that objects reachable from global
// variables are not reported as leaks,
// and that (Un)IgnoreObject work fine for such objects.

// Object-making functions:
// each returns a "weird" pointer to a new object for which
// it's worth checking that the object is reachable via that pointer.
typedef void* (*ObjMakerFunc)();
static list<ObjMakerFunc> obj_makers;  // list of registered object makers

// Helper macro to register an object-making function:
// 'name' is an identifier of this object maker,
// 'body' is its function body that must declare
//        pointer 'p' to the next object to return.
// Usage example:
//   REGISTER_OBJ_MAKER(trivial, int* p = new(initialized) int;)
#define REGISTER_OBJ_MAKER(name, body) \
  void* ObjMaker_##name##_() { \
    VLOG(1) << "Obj making " << #name; \
    body; \
    return p; \
  } \
  static ObjMakerRegistrar maker_reg_##name##__(&ObjMaker_##name##_);
// helper class for REGISTER_OBJ_MAKER
struct ObjMakerRegistrar {
  ObjMakerRegistrar(ObjMakerFunc obj_maker) { obj_makers.push_back(obj_maker); }
};

// List of the objects/pointers made with all the obj_makers
// to test reachability via global data pointers during leak checks.
static list<void*>* live_objects = new list<void*>;
  // pointer so that it does not get destructed on exit

// Exerciser for one ObjMakerFunc.
static void TestPointerReach(ObjMakerFunc obj_maker) {
  HeapLeakChecker::IgnoreObject(obj_maker());  // test IgnoreObject

  void* obj = obj_maker();
  HeapLeakChecker::IgnoreObject(obj);
  HeapLeakChecker::UnIgnoreObject(obj);  // test UnIgnoreObject
  HeapLeakChecker::IgnoreObject(obj);  // not to need deletion for obj

  live_objects->push_back(obj_maker());  // test reachability at leak check
}

// Test all ObjMakerFuncs registered via REGISTER_OBJ_MAKER.
static void TestObjMakers() {
  for (list<ObjMakerFunc>::const_iterator i = obj_makers.begin();
       i != obj_makers.end(); ++i) {
    TestPointerReach(*i);
    TestPointerReach(*i);  // a couple more times would not hurt
    TestPointerReach(*i);
  }
}

// A dummy class to mimic the allocation behavior of strings.
template<class T>
struct Array {
  Array() {
    size = 3 + random() % 30;
    ptr = new(initialized) T[size];
  }
  ~Array() { delete [] ptr; }
  Array(const Array& x) {
    size = x.size;
    ptr = new(initialized) T[size];
    for (size_t i = 0; i < size; ++i) {
      ptr[i] = x.ptr[i];
    }
  }
  void operator=(const Array& x) {
    delete [] ptr;
    size = x.size;
    ptr = new(initialized) T[size];
    for (size_t i = 0; i < size; ++i) {
      ptr[i] = x.ptr[i];
    }
  }
  void append(const Array& x) {
    T* p = new(initialized) T[size + x.size];
    for (size_t i = 0; i < size; ++i) {
      p[i] = ptr[i];
    }
    for (size_t i = 0; i < x.size; ++i) {
      p[size+i] = x.ptr[i];
    }
    size += x.size;
    delete [] ptr;
    ptr = p;
  }
 private:
  size_t size;
  T* ptr;
};

// to test pointers to objects, built-in arrays, string, etc:
REGISTER_OBJ_MAKER(plain, int* p = new(initialized) int;)
REGISTER_OBJ_MAKER(int_array_1, int* p = new(initialized) int[1];)
REGISTER_OBJ_MAKER(int_array, int* p = new(initialized) int[10];)
REGISTER_OBJ_MAKER(string, Array<char>* p = new(initialized) Array<char>();)
REGISTER_OBJ_MAKER(string_array,
                   Array<char>* p = new(initialized) Array<char>[5];)
REGISTER_OBJ_MAKER(char_array, char* p = new(initialized) char[5];)
REGISTER_OBJ_MAKER(appended_string,
  Array<char>* p = new Array<char>();
  p->append(Array<char>());
)
REGISTER_OBJ_MAKER(plain_ptr, int** p = new(initialized) int*;)
REGISTER_OBJ_MAKER(linking_ptr,
  int** p = new(initialized) int*;
  *p = new(initialized) int;
)

// small objects:
REGISTER_OBJ_MAKER(0_sized, void* p = malloc(0);)  // 0-sized object (important)
REGISTER_OBJ_MAKER(1_sized, void* p = malloc(1);)
REGISTER_OBJ_MAKER(2_sized, void* p = malloc(2);)
REGISTER_OBJ_MAKER(3_sized, void* p = malloc(3);)
REGISTER_OBJ_MAKER(4_sized, void* p = malloc(4);)

static int set_data[] = { 1, 2, 3, 4, 5, 6, 7, 21, 22, 23, 24, 25, 26, 27 };
static set<int> live_leak_set(set_data, set_data+7);
static const set<int> live_leak_const_set(set_data, set_data+14);

REGISTER_OBJ_MAKER(set,
  set<int>* p = new(initialized) set<int>(set_data, set_data + 13);
)

class ClassA {
 public:
  explicit ClassA(int a) : ptr(NULL) { }
  mutable char* ptr;
};
static const ClassA live_leak_mutable(1);

template<class C>
class TClass {
 public:
  explicit TClass(int a) : ptr(NULL) { }
  mutable C val;
  mutable C* ptr;
};
static const TClass<Array<char> > live_leak_templ_mutable(1);

class ClassB {
 public:
  ClassB() { }
  char b[7];
  virtual void f() { }
  virtual ~ClassB() { }
};

class ClassB2 {
 public:
  ClassB2() { }
  char b2[11];
  virtual void f2() { }
  virtual ~ClassB2() { }
};

class ClassD1 : public ClassB {
  char d1[15];
  virtual void f() { }
};

class ClassD2 : public ClassB2 {
  char d2[19];
  virtual void f2() { }
};

class ClassD : public ClassD1, public ClassD2 {
  char d[3];
  virtual void f() { }
  virtual void f2() { }
};

// to test pointers to objects of base subclasses:

REGISTER_OBJ_MAKER(B,  ClassB*  p = new(initialized) ClassB;)
REGISTER_OBJ_MAKER(D1, ClassD1* p = new(initialized) ClassD1;)
REGISTER_OBJ_MAKER(D2, ClassD2* p = new(initialized) ClassD2;)
REGISTER_OBJ_MAKER(D,  ClassD*  p = new(initialized) ClassD;)

REGISTER_OBJ_MAKER(D1_as_B,  ClassB*  p = new(initialized) ClassD1;)
REGISTER_OBJ_MAKER(D2_as_B2, ClassB2* p = new(initialized) ClassD2;)
REGISTER_OBJ_MAKER(D_as_B,   ClassB*  p = new(initialized)  ClassD;)
REGISTER_OBJ_MAKER(D_as_D1,  ClassD1* p = new(initialized) ClassD;)
// inside-object pointers:
REGISTER_OBJ_MAKER(D_as_B2,  ClassB2* p = new(initialized) ClassD;)
REGISTER_OBJ_MAKER(D_as_D2,  ClassD2* p = new(initialized) ClassD;)

class InterfaceA {
 public:
  virtual void A() = 0;
  virtual ~InterfaceA() { }
 protected:
  InterfaceA() { }
};

class InterfaceB {
 public:
  virtual void B() = 0;
  virtual ~InterfaceB() { }
 protected:
  InterfaceB() { }
};

class InterfaceC : public InterfaceA {
 public:
  virtual void C() = 0;
  virtual ~InterfaceC() { }
 protected:
  InterfaceC() { }
};

class ClassMltD1 : public ClassB, public InterfaceB, public InterfaceC {
 public:
  char d1[11];
  virtual void f() { }
  virtual void A() { }
  virtual void B() { }
  virtual void C() { }
};

class ClassMltD2 : public InterfaceA, public InterfaceB, public ClassB {
 public:
  char d2[15];
  virtual void f() { }
  virtual void A() { }
  virtual void B() { }
};

// to specifically test heap reachability under
// interface-only multiple inheritance (some use inside-object pointers):
REGISTER_OBJ_MAKER(MltD1,       ClassMltD1* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_B,  ClassB*     p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_IA, InterfaceA* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_IB, InterfaceB* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_IC, InterfaceC* p = new(initialized) ClassMltD1;)

REGISTER_OBJ_MAKER(MltD2,       ClassMltD2* p = new(initialized) ClassMltD2;)
REGISTER_OBJ_MAKER(MltD2_as_B,  ClassB*     p = new(initialized) ClassMltD2;)
REGISTER_OBJ_MAKER(MltD2_as_IA, InterfaceA* p = new(initialized) ClassMltD2;)
REGISTER_OBJ_MAKER(MltD2_as_IB, InterfaceB* p = new(initialized) ClassMltD2;)

// to mimic UnicodeString defined in third_party/icu,
// which stores a platform-independent-sized refcount in the first
// few bytes and keeps a pointer pointing behind the refcount.
REGISTER_OBJ_MAKER(unicode_string,
  char* p = new char[sizeof(uint32) * 10];
  p += sizeof(uint32);
)
// similar, but for platform-dependent-sized refcount
REGISTER_OBJ_MAKER(ref_counted,
  char* p = new char[sizeof(int) * 20];
  p += sizeof(int);
)

struct Nesting {
  struct Inner {
    Nesting* parent;
    Inner(Nesting* p) : parent(p) {}
  };
  Inner i0;
  char n1[5];
  Inner i1;
  char n2[11];
  Inner i2;
  char n3[27];
  Inner i3;
  Nesting() : i0(this), i1(this), i2(this), i3(this) {}
};

// to test inside-object pointers pointing at objects nested into heap objects:
REGISTER_OBJ_MAKER(nesting_i0, Nesting::Inner* p = &((new Nesting())->i0);)
REGISTER_OBJ_MAKER(nesting_i1, Nesting::Inner* p = &((new Nesting())->i1);)
REGISTER_OBJ_MAKER(nesting_i2, Nesting::Inner* p = &((new Nesting())->i2);)
REGISTER_OBJ_MAKER(nesting_i3, Nesting::Inner* p = &((new Nesting())->i3);)

// allocate many objects reachable from global data
static void TestHeapLeakCheckerLiveness() {
  live_leak_mutable.ptr = new(initialized) char[77];
  live_leak_templ_mutable.ptr = new(initialized) Array<char>();
  live_leak_templ_mutable.val = Array<char>();

  TestObjMakers();
}

// ========================================================================= //

// Get address (PC value) following the mmap call into addr_after_mmap_call
static void* Mmapper(uintptr_t* addr_after_mmap_call) {
  void* r = mmap(NULL, 100, PROT_READ|PROT_WRITE,
                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  // Get current PC value into addr_after_mmap_call
  void* stack[1];
  CHECK_EQ(GetStackTrace(stack, 1, 0), 1);
  *addr_after_mmap_call = reinterpret_cast<uintptr_t>(stack[0]);
  sleep(0);  // undo -foptimize-sibling-calls
  return r;
}

// calling Mmapper through this pointer prevents the compiler from inlining it
static void* (*mmapper_addr)(uintptr_t* addr) = &Mmapper;

// TODO(maxim): copy/move this to memory_region_map_unittest
// TODO(maxim): expand this test to include mmap64, mremap and sbrk calls.
static void VerifyMemoryRegionMapStackGet() {
  uintptr_t caller_addr_limit;
  void* addr = (*mmapper_addr)(&caller_addr_limit);
  uintptr_t caller = 0;
  { MemoryRegionMap::LockHolder l;
    for (MemoryRegionMap::RegionIterator
           i = MemoryRegionMap::BeginRegionLocked();
           i != MemoryRegionMap::EndRegionLocked(); ++i) {
      if (i->start_addr == reinterpret_cast<uintptr_t>(addr)) {
        CHECK_EQ(caller, 0);
        caller = i->caller();
      }
    }
  }
  // caller must point into Mmapper function:
  if (!(reinterpret_cast<uintptr_t>(mmapper_addr) <= caller  &&
        caller < caller_addr_limit)) {
    LOGF << std::hex << "0x" << caller
         << " does not seem to point into code of function Mmapper at "
         << "0x" << reinterpret_cast<uintptr_t>(mmapper_addr)
         << "! Stack frame collection must be off in MemoryRegionMap!";
    LOG(FATAL, "\n");
  }
  munmap(addr, 100);
}

static void* Mallocer(uintptr_t* addr_after_malloc_call) {
  void* r = malloc(100);
  sleep(0);  // undo -foptimize-sibling-calls
  // Get current PC value into addr_after_malloc_call
  void* stack[1];
  CHECK_EQ(GetStackTrace(stack, 1, 0), 1);
  *addr_after_malloc_call = reinterpret_cast<uintptr_t>(stack[0]);
  return r;
}

// calling Mallocer through this pointer prevents the compiler from inlining it
static void* (*mallocer_addr)(uintptr_t* addr) = &Mallocer;

// non-static for friendship with HeapProfiler
// TODO(maxim): expand this test to include
// realloc, calloc, memalign, valloc, pvalloc, new, and new[].
extern void VerifyHeapProfileTableStackGet() {
  uintptr_t caller_addr_limit;
  void* addr = (*mallocer_addr)(&caller_addr_limit);
  uintptr_t caller =
    reinterpret_cast<uintptr_t>(HeapLeakChecker::GetAllocCaller(addr));
  // caller must point into Mallocer function:
  if (!(reinterpret_cast<uintptr_t>(mallocer_addr) <= caller  &&
        caller < caller_addr_limit)) {
    LOGF << std::hex << "0x" << caller
         << " does not seem to point into code of function Mallocer at "
         << "0x" << reinterpret_cast<uintptr_t>(mallocer_addr)
         << "! Stack frame collection must be off in heap profiler!";
    LOG(FATAL, "\n");
  }
  free(addr);
}

// ========================================================================= //

static void MakeALeak(void** arr) {
  PreventHeapReclaiming(10 * sizeof(int));
  void* a = new(initialized) int[10];
  Hide(&a);
  *arr = a;
}

// Helper to do 'return 0;' inside main(): instead we do 'return Pass();'
static int Pass() {
  fprintf(stdout, "PASS\n");
  g_have_exited_main = true;
  return 0;
}

int main(int argc, char** argv) {
  run_hidden_ptr = DoRunHidden;
  wipe_stack_ptr = DoWipeStack;
  if (!HeapLeakChecker::IsActive()) {
    CHECK_EQ(FLAGS_heap_check, "");
    LOG(WARNING, "HeapLeakChecker got turned off; we won't test much...");
  } else {
    VerifyMemoryRegionMapStackGet();
    VerifyHeapProfileTableStackGet();
  }

  KeyInit();

  // glibc 2.4, on x86_64 at least, has a lock-ordering bug, which
  // means deadlock is possible when one thread calls dl_open at the
  // same time another thread is calling dl_iterate_phdr.  libunwind
  // calls dl_iterate_phdr, and TestLibCAllocate calls dl_open (or the
  // various syscalls in it do), at least the first time it's run.
  // To avoid the deadlock, we run TestLibCAllocate once before getting
  // multi-threaded.
  // TODO(csilvers): once libc is fixed, or libunwind can work around it,
  //                 get rid of this early call.  We *want* our test to
  //                 find potential problems like this one!
  TestLibCAllocate();

  if (FLAGS_interfering_threads) {
    RunHeapBusyThreads();  // add interference early
  }
  TestLibCAllocate();

  LOGF << "In main(): heap_check=" << FLAGS_heap_check << endl;

  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good

  if (FLAGS_test_leak) {
    void* arr;
    RunHidden(NewCallback(MakeALeak, &arr));
    Use(&arr);
    LogHidden("Leaking", arr);
    if (FLAGS_test_cancel_global_check) {
      HeapLeakChecker::CancelGlobalCheck();
    } else {
      // Verify we can call NoGlobalLeaks repeatedly without deadlocking
      HeapLeakChecker::NoGlobalLeaks();
      HeapLeakChecker::NoGlobalLeaks();
    }
    return Pass();
      // whole-program leak-check should (with very high probability)
      // catch the leak of arr (10 * sizeof(int) bytes)
      // (when !FLAGS_test_cancel_global_check)
  }

  if (FLAGS_test_loop_leak) {
    void* arr1;
    void* arr2;
    RunHidden(NewCallback(MakeDeathLoop, &arr1, &arr2));
    Use(&arr1);
    Use(&arr2);
    LogHidden("Loop leaking", arr1);
    LogHidden("Loop leaking", arr2);
    if (FLAGS_test_cancel_global_check) {
      HeapLeakChecker::CancelGlobalCheck();
    } else {
      // Verify we can call NoGlobalLeaks repeatedly without deadlocking
      HeapLeakChecker::NoGlobalLeaks();
      HeapLeakChecker::NoGlobalLeaks();
    }
    return Pass();
      // whole-program leak-check should (with very high probability)
      // catch the leak of arr1 and arr2 (4 * sizeof(void*) bytes)
      // (when !FLAGS_test_cancel_global_check)
  }

  if (FLAGS_test_register_leak) {
    // make us fail only where the .sh test expects:
    Pause();
    for (int i = 0; i < 100; ++i) {  // give it some time to crash
      CHECK(HeapLeakChecker::NoGlobalLeaks());
      Pause();
    }
    return Pass();
  }

  TestHeapLeakCheckerLiveness();

  HeapLeakChecker heap_check("all");

  TestHiddenPointer();

  TestHeapLeakChecker();
  Pause();
  TestLeakButTotalsMatch();
  Pause();

  TestHeapLeakCheckerDeathSimple();
  Pause();
  TestHeapLeakCheckerDeathLoop();
  Pause();
  TestHeapLeakCheckerDeathInverse();
  Pause();
  TestHeapLeakCheckerDeathNoLeaks();
  Pause();
  TestHeapLeakCheckerDeathCountLess();
  Pause();
  TestHeapLeakCheckerDeathCountMore();
  Pause();

  TestHeapLeakCheckerDeathTrick();
  Pause();

  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good

  TestHeapLeakCheckerNoFalsePositives();
  Pause();

  TestHeapLeakCheckerDisabling();
  Pause();

  TestSTLAlloc();
  Pause();
  TestSTLAllocInverse();
  Pause();

  // Test that various STL allocators work.  Some of these are redundant, but
  // we don't know how STL might change in the future.  For example,
  // http://wiki/Main/StringNeStdString.
#define DTSL(a) { DirectTestSTLAlloc(a, #a); \
                  Pause(); }
  DTSL(std::allocator<char>());
  DTSL(std::allocator<int>());
  DTSL(std::string().get_allocator());
  DTSL(string().get_allocator());
  DTSL(vector<int>().get_allocator());
  DTSL(vector<double>().get_allocator());
  DTSL(vector<vector<int> >().get_allocator());
  DTSL(vector<string>().get_allocator());
  DTSL((map<string, string>().get_allocator()));
  DTSL((map<string, int>().get_allocator()));
  DTSL(set<char>().get_allocator());
#undef DTSL

  TestLibCAllocate();
  Pause();

  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good

  Pause();

  if (!FLAGS_maybe_stripped) {
    CHECK(heap_check.SameHeap());
  } else {
    WARN_IF(heap_check.SameHeap() != true,
            "overall leaks are caught; we must be using a stripped binary");
  }

  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good

  return Pass();
}